// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Driver for Broadcom MPI3 Storage Controllers
 *
 * Copyright (C) 2017-2023 Broadcom Inc.
 *  (mailto: mpi3mr-linuxdrv.pdl@broadcom.com)
 *
 */

#include "mpi3mr.h"
#include <linux/idr.h>

/* global driver scope variables */
LIST_HEAD(mrioc_list);
DEFINE_SPINLOCK(mrioc_list_lock);
static DEFINE_IDA(mrioc_ida);
static int warn_non_secure_ctlr;
atomic64_t event_counter;

MODULE_AUTHOR(MPI3MR_DRIVER_AUTHOR);
MODULE_DESCRIPTION(MPI3MR_DRIVER_DESC);
MODULE_LICENSE(MPI3MR_DRIVER_LICENSE);
MODULE_VERSION(MPI3MR_DRIVER_VERSION);

/* Module parameters */
int prot_mask = -1;
module_param(prot_mask, int, 0);
MODULE_PARM_DESC(prot_mask, "Host protection capabilities mask, def=0x07");

static int prot_guard_mask = 3;
module_param(prot_guard_mask, int, 0);
MODULE_PARM_DESC(prot_guard_mask, " Host protection guard mask, def=3");
static int logging_level;
module_param(logging_level, int, 0);
MODULE_PARM_DESC(logging_level,
	" bits for enabling additional logging info (default=0)");
static int max_sgl_entries = MPI3MR_DEFAULT_SGL_ENTRIES;
module_param(max_sgl_entries, int, 0444);
MODULE_PARM_DESC(max_sgl_entries,
	"Preferred max number of SG entries to be used for a single I/O\n"
	"The actual value will be determined by the driver\n"
	"(Minimum=256, Maximum=2048, default=256)");

/* Forward declarations */
static void mpi3mr_send_event_ack(struct mpi3mr_ioc *mrioc, u8 event,
	struct mpi3mr_drv_cmd *cmdparam, u32 event_ctx);

#define MPI3MR_DRIVER_EVENT_TG_QD_REDUCTION	(0xFFFF)

#define MPI3_EVENT_WAIT_FOR_DEVICES_TO_REFRESH	(0xFFFE)

/**
 * mpi3mr_host_tag_for_scmd - Get host tag for a scmd
 * @mrioc: Adapter instance reference
 * @scmd: SCSI command reference
 *
 * Calculate the host tag based on block tag for a given scmd.
 *
 * Return: Valid host tag or MPI3MR_HOSTTAG_INVALID.
 */
static u16 mpi3mr_host_tag_for_scmd(struct mpi3mr_ioc *mrioc,
	struct scsi_cmnd *scmd)
{
	struct scmd_priv *priv = NULL;
	u32 unique_tag;
	u16 host_tag, hw_queue;

	unique_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));

	hw_queue = blk_mq_unique_tag_to_hwq(unique_tag);
	if (hw_queue >= mrioc->num_op_reply_q)
		return MPI3MR_HOSTTAG_INVALID;
	host_tag = blk_mq_unique_tag_to_tag(unique_tag);

	if (WARN_ON(host_tag >= mrioc->max_host_ios))
		return MPI3MR_HOSTTAG_INVALID;

	priv = scsi_cmd_priv(scmd);
	/* host_tag 0 is invalid hence incrementing by 1 */
	priv->host_tag = host_tag + 1;
	priv->scmd = scmd;
	priv->in_lld_scope = 1;
	priv->req_q_idx = hw_queue;
	priv->meta_chain_idx = -1;
	priv->chain_idx = -1;
	priv->meta_sg_valid = 0;
	return priv->host_tag;
}
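/*
 * Note: the block layer unique tag encodes the hardware queue index in
 * the bits above BLK_MQ_UNIQUE_TAG_BITS and the per-queue tag in the
 * bits below it. mpi3mr_host_tag_for_scmd() above decodes it with
 * blk_mq_unique_tag_to_hwq()/blk_mq_unique_tag_to_tag(), and
 * mpi3mr_scmd_from_host_tag() below recomposes it the same way.
 */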
/**
 * mpi3mr_scmd_from_host_tag - Get SCSI command from host tag
 * @mrioc: Adapter instance reference
 * @host_tag: Host tag
 * @qidx: Operational queue index
 *
 * Identify the block tag from the host tag and queue index and
 * retrieve associated scsi command using scsi_host_find_tag().
 *
 * Return: SCSI command reference or NULL.
 */
static struct scsi_cmnd *mpi3mr_scmd_from_host_tag(
	struct mpi3mr_ioc *mrioc, u16 host_tag, u16 qidx)
{
	struct scsi_cmnd *scmd = NULL;
	struct scmd_priv *priv = NULL;
	u32 unique_tag = host_tag - 1;

	if (WARN_ON(host_tag > mrioc->max_host_ios))
		goto out;

	unique_tag |= (qidx << BLK_MQ_UNIQUE_TAG_BITS);

	scmd = scsi_host_find_tag(mrioc->shost, unique_tag);
	if (scmd) {
		priv = scsi_cmd_priv(scmd);
		if (!priv->in_lld_scope)
			scmd = NULL;
	}
out:
	return scmd;
}

/**
 * mpi3mr_clear_scmd_priv - Cleanup SCSI command private data
 * @mrioc: Adapter instance reference
 * @scmd: SCSI command reference
 *
 * Invalidate the SCSI command private data to mark the command
 * is not in LLD scope anymore.
 *
 * Return: Nothing.
 */
static void mpi3mr_clear_scmd_priv(struct mpi3mr_ioc *mrioc,
	struct scsi_cmnd *scmd)
{
	struct scmd_priv *priv = NULL;

	priv = scsi_cmd_priv(scmd);

	if (WARN_ON(priv->in_lld_scope == 0))
		return;
	priv->host_tag = MPI3MR_HOSTTAG_INVALID;
	priv->req_q_idx = 0xFFFF;
	priv->scmd = NULL;
	priv->in_lld_scope = 0;
	priv->meta_sg_valid = 0;
	if (priv->chain_idx >= 0) {
		clear_bit(priv->chain_idx, mrioc->chain_bitmap);
		priv->chain_idx = -1;
	}
	if (priv->meta_chain_idx >= 0) {
		clear_bit(priv->meta_chain_idx, mrioc->chain_bitmap);
		priv->meta_chain_idx = -1;
	}
}

static void mpi3mr_dev_rmhs_send_tm(struct mpi3mr_ioc *mrioc, u16 handle,
	struct mpi3mr_drv_cmd *cmdparam, u8 iou_rc);
static void mpi3mr_fwevt_worker(struct work_struct *work);

/**
 * mpi3mr_fwevt_free - firmware event memory deallocator
 * @r: k reference pointer of the firmware event
 *
 * Free firmware event memory when no reference.
 */
static void mpi3mr_fwevt_free(struct kref *r)
{
	kfree(container_of(r, struct mpi3mr_fwevt, ref_count));
}

/**
 * mpi3mr_fwevt_get - k reference incrementor
 * @fwevt: Firmware event reference
 *
 * Increment firmware event reference count.
 */
static void mpi3mr_fwevt_get(struct mpi3mr_fwevt *fwevt)
{
	kref_get(&fwevt->ref_count);
}

/**
 * mpi3mr_fwevt_put - k reference decrementor
 * @fwevt: Firmware event reference
 *
 * Decrement firmware event reference count.
 */
static void mpi3mr_fwevt_put(struct mpi3mr_fwevt *fwevt)
{
	kref_put(&fwevt->ref_count, mpi3mr_fwevt_free);
}

/**
 * mpi3mr_alloc_fwevt - Allocate firmware event
 * @len: length of firmware event data to allocate
 *
 * Allocate firmware event with required length and initialize
 * the reference counter.
 *
 * Return: firmware event reference.
 */
static struct mpi3mr_fwevt *mpi3mr_alloc_fwevt(int len)
{
	struct mpi3mr_fwevt *fwevt;

	fwevt = kzalloc(sizeof(*fwevt) + len, GFP_ATOMIC);
	if (!fwevt)
		return NULL;

	kref_init(&fwevt->ref_count);
	return fwevt;
}
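/*
 * Firmware event reference counting at a glance: kref_init() in
 * mpi3mr_alloc_fwevt() starts the count at one, an additional reference
 * is taken when the event is added to fwevt_list and another when it is
 * queued to the worker thread; references are dropped when the event is
 * removed from the list and once the worker has processed or cancelled
 * the event.
 */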
/**
 * mpi3mr_fwevt_add_to_list - Add firmware event to the list
 * @mrioc: Adapter instance reference
 * @fwevt: Firmware event reference
 *
 * Add the given firmware event to the firmware event list.
 *
 * Return: Nothing.
 */
static void mpi3mr_fwevt_add_to_list(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_fwevt *fwevt)
{
	unsigned long flags;

	if (!mrioc->fwevt_worker_thread)
		return;

	spin_lock_irqsave(&mrioc->fwevt_lock, flags);
	/* get fwevt reference count while adding it to fwevt_list */
	mpi3mr_fwevt_get(fwevt);
	INIT_LIST_HEAD(&fwevt->list);
	list_add_tail(&fwevt->list, &mrioc->fwevt_list);
	INIT_WORK(&fwevt->work, mpi3mr_fwevt_worker);
	/* get fwevt reference count while enqueueing it to worker queue */
	mpi3mr_fwevt_get(fwevt);
	queue_work(mrioc->fwevt_worker_thread, &fwevt->work);
	spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);
}

/**
 * mpi3mr_fwevt_del_from_list - Delete firmware event from list
 * @mrioc: Adapter instance reference
 * @fwevt: Firmware event reference
 *
 * Delete the given firmware event from the firmware event list.
 *
 * Return: Nothing.
 */
static void mpi3mr_fwevt_del_from_list(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_fwevt *fwevt)
{
	unsigned long flags;

	spin_lock_irqsave(&mrioc->fwevt_lock, flags);
	if (!list_empty(&fwevt->list)) {
		list_del_init(&fwevt->list);
		/*
		 * Put fwevt reference count after
		 * removing it from fwevt_list
		 */
		mpi3mr_fwevt_put(fwevt);
	}
	spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);
}

/**
 * mpi3mr_dequeue_fwevt - Dequeue firmware event from the list
 * @mrioc: Adapter instance reference
 *
 * Dequeue a firmware event from the firmware event list.
 *
 * Return: firmware event.
 */
static struct mpi3mr_fwevt *mpi3mr_dequeue_fwevt(
	struct mpi3mr_ioc *mrioc)
{
	unsigned long flags;
	struct mpi3mr_fwevt *fwevt = NULL;

	spin_lock_irqsave(&mrioc->fwevt_lock, flags);
	if (!list_empty(&mrioc->fwevt_list)) {
		fwevt = list_first_entry(&mrioc->fwevt_list,
		    struct mpi3mr_fwevt, list);
		list_del_init(&fwevt->list);
		/*
		 * Put fwevt reference count after
		 * removing it from fwevt_list
		 */
		mpi3mr_fwevt_put(fwevt);
	}
	spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);

	return fwevt;
}

/**
 * mpi3mr_cancel_work - cancel firmware event
 * @fwevt: fwevt object which needs to be canceled
 *
 * Return: Nothing.
 */
static void mpi3mr_cancel_work(struct mpi3mr_fwevt *fwevt)
{
	/*
	 * Wait on the fwevt to complete. If this returns 1, then
	 * the event was never executed.
	 *
	 * If it did execute, we wait for it to finish, and the put will
	 * happen from mpi3mr_process_fwevt()
	 */
	if (cancel_work_sync(&fwevt->work)) {
		/*
		 * Put fwevt reference count after
		 * dequeuing it from worker queue
		 */
		mpi3mr_fwevt_put(fwevt);
		/*
		 * Put fwevt reference count to neutralize
		 * kref_init increment
		 */
		mpi3mr_fwevt_put(fwevt);
	}
}
/**
 * mpi3mr_cleanup_fwevt_list - Cleanup firmware event list
 * @mrioc: Adapter instance reference
 *
 * Flush all pending firmware events from the firmware event
 * list.
 *
 * Return: Nothing.
 */
void mpi3mr_cleanup_fwevt_list(struct mpi3mr_ioc *mrioc)
{
	struct mpi3mr_fwevt *fwevt = NULL;

	if ((list_empty(&mrioc->fwevt_list) && !mrioc->current_event) ||
	    !mrioc->fwevt_worker_thread)
		return;

	while ((fwevt = mpi3mr_dequeue_fwevt(mrioc)))
		mpi3mr_cancel_work(fwevt);

	if (mrioc->current_event) {
		fwevt = mrioc->current_event;
		/*
		 * Don't call cancel_work_sync() for the fwevt work if
		 * the controller reset got invoked as part of processing
		 * the same fwevt work, or when the worker thread is
		 * waiting for device add/remove APIs to complete.
		 * Otherwise we will see a deadlock.
		 */
		if (current_work() == &fwevt->work || fwevt->pending_at_sml) {
			fwevt->discard = 1;
			return;
		}

		mpi3mr_cancel_work(fwevt);
	}
}

/**
 * mpi3mr_queue_qd_reduction_event - Queue TG QD reduction event
 * @mrioc: Adapter instance reference
 * @tg: Throttle group information pointer
 *
 * Accessor to queue a synthetically generated driver event to
 * the event worker thread; the driver event will be used to
 * reduce the QD of all VDs in the TG from the worker thread.
 *
 * Return: None.
 */
static void mpi3mr_queue_qd_reduction_event(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_throttle_group_info *tg)
{
	struct mpi3mr_fwevt *fwevt;
	u16 sz = sizeof(struct mpi3mr_throttle_group_info *);

	/*
	 * If the QD reduction event is already queued due to throttle and if
	 * the QD is not restored through device info change event
	 * then don't queue further reduction events
	 */
	if (tg->fw_qd != tg->modified_qd)
		return;

	fwevt = mpi3mr_alloc_fwevt(sz);
	if (!fwevt) {
		ioc_warn(mrioc, "failed to queue TG QD reduction event\n");
		return;
	}
	*(struct mpi3mr_throttle_group_info **)fwevt->event_data = tg;
	fwevt->mrioc = mrioc;
	fwevt->event_id = MPI3MR_DRIVER_EVENT_TG_QD_REDUCTION;
	fwevt->send_ack = 0;
	fwevt->process_evt = 1;
	fwevt->evt_ctx = 0;
	fwevt->event_data_size = sz;
	/* reduce the QD by qd_reduction tenths, but never below 8 */
	tg->modified_qd = max_t(u16, (tg->fw_qd * tg->qd_reduction) / 10, 8);

	dprint_event_bh(mrioc, "qd reduction event queued for tg_id(%d)\n",
	    tg->id);
	mpi3mr_fwevt_add_to_list(mrioc, fwevt);
}

/**
 * mpi3mr_invalidate_devhandles - Invalidate device handles
 * @mrioc: Adapter instance reference
 *
 * Invalidate the device handles in the target device structures.
 * Called post reset prior to reinitializing the controller.
 *
 * Return: Nothing.
 */
void mpi3mr_invalidate_devhandles(struct mpi3mr_ioc *mrioc)
{
	struct mpi3mr_tgt_dev *tgtdev;
	struct mpi3mr_stgt_priv_data *tgt_priv;

	list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
		tgtdev->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
		if (tgtdev->starget && tgtdev->starget->hostdata) {
			tgt_priv = tgtdev->starget->hostdata;
			tgt_priv->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
			tgt_priv->io_throttle_enabled = 0;
			tgt_priv->io_divert = 0;
			tgt_priv->throttle_group = NULL;
			tgt_priv->wslen = 0;
			if (tgtdev->host_exposed)
				atomic_set(&tgt_priv->block_io, 1);
		}
	}
}
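/*
 * Note: setting block_io for the exposed devices above is meant to hold
 * off new I/O until the post-reset device refresh either revalidates
 * those devices or removes them from the upper layers.
 */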
/**
 * mpi3mr_print_scmd - print individual SCSI command
 * @rq: Block request
 * @data: Adapter instance reference
 *
 * Print the SCSI command details if it is in LLD scope.
 *
 * Return: true always.
 */
static bool mpi3mr_print_scmd(struct request *rq, void *data)
{
	struct mpi3mr_ioc *mrioc = (struct mpi3mr_ioc *)data;
	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
	struct scmd_priv *priv = NULL;

	if (scmd) {
		priv = scsi_cmd_priv(scmd);
		if (!priv->in_lld_scope)
			goto out;

		ioc_info(mrioc, "%s :Host Tag = %d, qid = %d\n",
		    __func__, priv->host_tag, priv->req_q_idx + 1);
		scsi_print_command(scmd);
	}

out:
	return true;
}

/**
 * mpi3mr_flush_scmd - Flush individual SCSI command
 * @rq: Block request
 * @data: Adapter instance reference
 *
 * Return the SCSI command to the upper layers if it is in LLD
 * scope.
 *
 * Return: true always.
 */
static bool mpi3mr_flush_scmd(struct request *rq, void *data)
{
	struct mpi3mr_ioc *mrioc = (struct mpi3mr_ioc *)data;
	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
	struct scmd_priv *priv = NULL;

	if (scmd) {
		priv = scsi_cmd_priv(scmd);
		if (!priv->in_lld_scope)
			goto out;

		if (priv->meta_sg_valid)
			dma_unmap_sg(&mrioc->pdev->dev, scsi_prot_sglist(scmd),
			    scsi_prot_sg_count(scmd), scmd->sc_data_direction);
		mpi3mr_clear_scmd_priv(mrioc, scmd);
		scsi_dma_unmap(scmd);
		scmd->result = DID_RESET << 16;
		scsi_print_command(scmd);
		scsi_done(scmd);
		mrioc->flush_io_count++;
	}

out:
	return true;
}

/**
 * mpi3mr_count_dev_pending - Count commands pending for a lun
 * @rq: Block request
 * @data: SCSI device reference
 *
 * This is an iterator function called for each SCSI command in
 * a host and if the command is pending in the LLD for the
 * specific device(lun) then device specific pending I/O counter
 * is updated in the device structure.
 *
 * Return: true always.
 */
static bool mpi3mr_count_dev_pending(struct request *rq, void *data)
{
	struct scsi_device *sdev = (struct scsi_device *)data;
	struct mpi3mr_sdev_priv_data *sdev_priv_data = sdev->hostdata;
	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
	struct scmd_priv *priv;

	if (scmd) {
		priv = scsi_cmd_priv(scmd);
		if (!priv->in_lld_scope)
			goto out;
		if (scmd->device == sdev)
			sdev_priv_data->pend_count++;
	}

out:
	return true;
}

/**
 * mpi3mr_count_tgt_pending - Count commands pending for target
 * @rq: Block request
 * @data: SCSI target reference
 *
 * This is an iterator function called for each SCSI command in
 * a host and if the command is pending in the LLD for the
 * specific target then target specific pending I/O counter is
 * updated in the target structure.
 *
 * Return: true always.
 */
static bool mpi3mr_count_tgt_pending(struct request *rq, void *data)
{
	struct scsi_target *starget = (struct scsi_target *)data;
	struct mpi3mr_stgt_priv_data *stgt_priv_data = starget->hostdata;
	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
	struct scmd_priv *priv;

	if (scmd) {
		priv = scsi_cmd_priv(scmd);
		if (!priv->in_lld_scope)
			goto out;
		if (scmd->device && (scsi_target(scmd->device) == starget))
			stgt_priv_data->pend_count++;
	}

out:
	return true;
}
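/*
 * The request iterators above are meant to be driven through
 * blk_mq_tagset_busy_iter() on the host tag set, as the flush helpers
 * below do, so that every outstanding request on the host is visited.
 */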
/**
 * mpi3mr_flush_host_io - Flush host I/Os
 * @mrioc: Adapter instance reference
 *
 * Flush all of the pending I/Os by calling
 * blk_mq_tagset_busy_iter() for each possible tag. This is
 * executed post controller reset.
 *
 * Return: Nothing.
 */
void mpi3mr_flush_host_io(struct mpi3mr_ioc *mrioc)
{
	struct Scsi_Host *shost = mrioc->shost;

	mrioc->flush_io_count = 0;
	ioc_info(mrioc, "%s :Flushing Host I/O cmds post reset\n", __func__);
	blk_mq_tagset_busy_iter(&shost->tag_set,
	    mpi3mr_flush_scmd, (void *)mrioc);
	ioc_info(mrioc, "%s :Flushed %d Host I/O cmds\n", __func__,
	    mrioc->flush_io_count);
}

/**
 * mpi3mr_flush_cmds_for_unrecovered_controller - Flush all pending cmds
 * @mrioc: Adapter instance reference
 *
 * This function waits for currently running IO poll threads to
 * exit and then flushes all host I/Os and any internal pending
 * cmds. This is executed after the controller is marked as
 * unrecoverable.
 *
 * Return: Nothing.
 */
void mpi3mr_flush_cmds_for_unrecovered_controller(struct mpi3mr_ioc *mrioc)
{
	struct Scsi_Host *shost = mrioc->shost;
	int i;

	if (!mrioc->unrecoverable)
		return;

	if (mrioc->op_reply_qinfo) {
		for (i = 0; i < mrioc->num_queues; i++) {
			while (atomic_read(&mrioc->op_reply_qinfo[i].in_use))
				udelay(500);
			atomic_set(&mrioc->op_reply_qinfo[i].pend_ios, 0);
		}
	}
	mrioc->flush_io_count = 0;
	blk_mq_tagset_busy_iter(&shost->tag_set,
	    mpi3mr_flush_scmd, (void *)mrioc);
	mpi3mr_flush_delayed_cmd_lists(mrioc);
	mpi3mr_flush_drv_cmds(mrioc);
}

/**
 * mpi3mr_alloc_tgtdev - target device allocator
 *
 * Allocate target device instance and initialize the reference
 * count.
 *
 * Return: target device instance.
 */
static struct mpi3mr_tgt_dev *mpi3mr_alloc_tgtdev(void)
{
	struct mpi3mr_tgt_dev *tgtdev;

	tgtdev = kzalloc(sizeof(*tgtdev), GFP_ATOMIC);
	if (!tgtdev)
		return NULL;
	kref_init(&tgtdev->ref_count);
	return tgtdev;
}

/**
 * mpi3mr_tgtdev_add_to_list - Add tgtdevice to the list
 * @mrioc: Adapter instance reference
 * @tgtdev: Target device
 *
 * Add the target device to the target device list.
 *
 * Return: Nothing.
 */
static void mpi3mr_tgtdev_add_to_list(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_tgt_dev *tgtdev)
{
	unsigned long flags;

	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
	mpi3mr_tgtdev_get(tgtdev);
	INIT_LIST_HEAD(&tgtdev->list);
	list_add_tail(&tgtdev->list, &mrioc->tgtdev_list);
	tgtdev->state = MPI3MR_DEV_CREATED;
	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
}
/**
 * mpi3mr_tgtdev_del_from_list - Delete tgtdevice from the list
 * @mrioc: Adapter instance reference
 * @tgtdev: Target device
 * @must_delete: Must delete the target device from the list irrespective
 * of the device state.
 *
 * Remove the target device from the target device list.
 *
 * Return: Nothing.
 */
static void mpi3mr_tgtdev_del_from_list(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_tgt_dev *tgtdev, bool must_delete)
{
	unsigned long flags;

	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
	if ((tgtdev->state == MPI3MR_DEV_REMOVE_HS_STARTED) || must_delete) {
		if (!list_empty(&tgtdev->list)) {
			list_del_init(&tgtdev->list);
			tgtdev->state = MPI3MR_DEV_DELETED;
			mpi3mr_tgtdev_put(tgtdev);
		}
	}
	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
}

/**
 * __mpi3mr_get_tgtdev_by_handle - Get tgtdev from device handle
 * @mrioc: Adapter instance reference
 * @handle: Device handle
 *
 * Accessor to retrieve target device from the device handle.
 * Non-lock version.
 *
 * Return: Target device reference.
 */
static struct mpi3mr_tgt_dev *__mpi3mr_get_tgtdev_by_handle(
	struct mpi3mr_ioc *mrioc, u16 handle)
{
	struct mpi3mr_tgt_dev *tgtdev;

	assert_spin_locked(&mrioc->tgtdev_lock);
	list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list)
		if (tgtdev->dev_handle == handle)
			goto found_tgtdev;
	return NULL;

found_tgtdev:
	mpi3mr_tgtdev_get(tgtdev);
	return tgtdev;
}

/**
 * mpi3mr_get_tgtdev_by_handle - Get tgtdev from device handle
 * @mrioc: Adapter instance reference
 * @handle: Device handle
 *
 * Accessor to retrieve target device from the device handle.
 * Lock version.
 *
 * Return: Target device reference.
 */
struct mpi3mr_tgt_dev *mpi3mr_get_tgtdev_by_handle(
	struct mpi3mr_ioc *mrioc, u16 handle)
{
	struct mpi3mr_tgt_dev *tgtdev;
	unsigned long flags;

	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
	tgtdev = __mpi3mr_get_tgtdev_by_handle(mrioc, handle);
	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
	return tgtdev;
}

/**
 * __mpi3mr_get_tgtdev_by_perst_id - Get tgtdev from persist ID
 * @mrioc: Adapter instance reference
 * @persist_id: Persistent ID
 *
 * Accessor to retrieve target device from the Persistent ID.
 * Non-lock version.
 *
 * Return: Target device reference.
 */
static struct mpi3mr_tgt_dev *__mpi3mr_get_tgtdev_by_perst_id(
	struct mpi3mr_ioc *mrioc, u16 persist_id)
{
	struct mpi3mr_tgt_dev *tgtdev;

	assert_spin_locked(&mrioc->tgtdev_lock);
	list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list)
		if (tgtdev->perst_id == persist_id)
			goto found_tgtdev;
	return NULL;

found_tgtdev:
	mpi3mr_tgtdev_get(tgtdev);
	return tgtdev;
}

/**
 * mpi3mr_get_tgtdev_by_perst_id - Get tgtdev from persistent ID
 * @mrioc: Adapter instance reference
 * @persist_id: Persistent ID
 *
 * Accessor to retrieve target device from the Persistent ID.
 * Lock version.
 *
 * Return: Target device reference.
 */
static struct mpi3mr_tgt_dev *mpi3mr_get_tgtdev_by_perst_id(
	struct mpi3mr_ioc *mrioc, u16 persist_id)
{
	struct mpi3mr_tgt_dev *tgtdev;
	unsigned long flags;

	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
	tgtdev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, persist_id);
	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
	return tgtdev;
}
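/*
 * Note: each lookup accessor above returns the target device with its
 * reference count incremented; callers are expected to drop that
 * reference with mpi3mr_tgtdev_put() when they are done with it.
 */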
/**
 * __mpi3mr_get_tgtdev_from_tgtpriv - Get tgtdev from tgt private
 * @mrioc: Adapter instance reference
 * @tgt_priv: Target private data
 *
 * Accessor to return target device from the target private
 * data. Non-lock version.
 *
 * Return: Target device reference.
 */
static struct mpi3mr_tgt_dev *__mpi3mr_get_tgtdev_from_tgtpriv(
	struct mpi3mr_ioc *mrioc, struct mpi3mr_stgt_priv_data *tgt_priv)
{
	struct mpi3mr_tgt_dev *tgtdev;

	assert_spin_locked(&mrioc->tgtdev_lock);
	tgtdev = tgt_priv->tgt_dev;
	if (tgtdev)
		mpi3mr_tgtdev_get(tgtdev);
	return tgtdev;
}

/**
 * mpi3mr_set_io_divert_for_all_vd_in_tg - set divert for TG VDs
 * @mrioc: Adapter instance reference
 * @tg: Throttle group information pointer
 * @divert_value: 1 or 0
 *
 * Accessor to set io_divert flag for each device associated
 * with the given throttle group with the given value.
 *
 * Return: None.
 */
static void mpi3mr_set_io_divert_for_all_vd_in_tg(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_throttle_group_info *tg, u8 divert_value)
{
	unsigned long flags;
	struct mpi3mr_tgt_dev *tgtdev;
	struct mpi3mr_stgt_priv_data *tgt_priv;

	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
	list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
		if (tgtdev->starget && tgtdev->starget->hostdata) {
			tgt_priv = tgtdev->starget->hostdata;
			if (tgt_priv->throttle_group == tg)
				tgt_priv->io_divert = divert_value;
		}
	}
	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
}

/**
 * mpi3mr_print_device_event_notice - print notice related to post processing of
 * device event after controller reset.
 *
 * @mrioc: Adapter instance reference
 * @device_add: true for device add event and false for device removal event
 *
 * Return: None.
 */
void mpi3mr_print_device_event_notice(struct mpi3mr_ioc *mrioc,
	bool device_add)
{
	ioc_notice(mrioc, "Device %s was in progress before the reset and\n",
	    (device_add ? "addition" : "removal"));
	ioc_notice(mrioc, "completed after reset, verify whether the exposed devices\n");
	ioc_notice(mrioc, "are matched with attached devices for correctness\n");
}
/**
 * mpi3mr_remove_tgtdev_from_host - Remove dev from upper layers
 * @mrioc: Adapter instance reference
 * @tgtdev: Target device structure
 *
 * Checks whether the device is exposed to upper layers and if it
 * is then remove the device from upper layers by calling
 * scsi_remove_target().
 *
 * Return: Nothing.
 */
void mpi3mr_remove_tgtdev_from_host(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_tgt_dev *tgtdev)
{
	struct mpi3mr_stgt_priv_data *tgt_priv;

	ioc_info(mrioc, "%s :Removing handle(0x%04x), wwid(0x%016llx)\n",
	    __func__, tgtdev->dev_handle, (unsigned long long)tgtdev->wwid);
	if (tgtdev->starget && tgtdev->starget->hostdata) {
		tgt_priv = tgtdev->starget->hostdata;
		atomic_set(&tgt_priv->block_io, 0);
		tgt_priv->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
	}

	if (!mrioc->sas_transport_enabled || (tgtdev->dev_type !=
	    MPI3_DEVICE_DEVFORM_SAS_SATA) || tgtdev->non_stl) {
		if (tgtdev->starget) {
			if (mrioc->current_event)
				mrioc->current_event->pending_at_sml = 1;
			scsi_remove_target(&tgtdev->starget->dev);
			tgtdev->host_exposed = 0;
			if (mrioc->current_event) {
				mrioc->current_event->pending_at_sml = 0;
				if (mrioc->current_event->discard) {
					mpi3mr_print_device_event_notice(mrioc,
					    false);
					return;
				}
			}
		}
	} else
		mpi3mr_remove_tgtdev_from_sas_transport(mrioc, tgtdev);

	ioc_info(mrioc, "%s :Removed handle(0x%04x), wwid(0x%016llx)\n",
	    __func__, tgtdev->dev_handle, (unsigned long long)tgtdev->wwid);
}

/**
 * mpi3mr_report_tgtdev_to_host - Expose device to upper layers
 * @mrioc: Adapter instance reference
 * @perst_id: Persistent ID of the device
 *
 * Checks whether the device can be exposed to upper layers and
 * if it is not already exposed then expose the device to the
 * upper layers by calling scsi_scan_target().
 *
 * Return: 0 on success, non zero on failure.
 */
static int mpi3mr_report_tgtdev_to_host(struct mpi3mr_ioc *mrioc,
	u16 perst_id)
{
	int retval = 0;
	struct mpi3mr_tgt_dev *tgtdev;

	if (mrioc->reset_in_progress)
		return -1;

	tgtdev = mpi3mr_get_tgtdev_by_perst_id(mrioc, perst_id);
	if (!tgtdev) {
		retval = -1;
		goto out;
	}
	if (tgtdev->is_hidden || tgtdev->host_exposed) {
		retval = -1;
		goto out;
	}
	if (!mrioc->sas_transport_enabled || (tgtdev->dev_type !=
	    MPI3_DEVICE_DEVFORM_SAS_SATA) || tgtdev->non_stl) {
		tgtdev->host_exposed = 1;
		if (mrioc->current_event)
			mrioc->current_event->pending_at_sml = 1;
		scsi_scan_target(&mrioc->shost->shost_gendev,
		    mrioc->scsi_device_channel, tgtdev->perst_id,
		    SCAN_WILD_CARD, SCSI_SCAN_INITIAL);
		if (!tgtdev->starget)
			tgtdev->host_exposed = 0;
		if (mrioc->current_event) {
			mrioc->current_event->pending_at_sml = 0;
			if (mrioc->current_event->discard) {
				mpi3mr_print_device_event_notice(mrioc, true);
				goto out;
			}
		}
	} else
		mpi3mr_report_tgtdev_to_sas_transport(mrioc, tgtdev);
out:
	if (tgtdev)
		mpi3mr_tgtdev_put(tgtdev);

	return retval;
}
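/*
 * Note: the pending_at_sml/discard handshake used in the add and remove
 * paths above lets a controller reset flag an in-flight event as
 * discarded instead of cancelling work that is blocked inside
 * scsi_scan_target()/scsi_remove_target(), avoiding a deadlock with the
 * SCSI midlayer.
 */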
/**
 * mpi3mr_change_queue_depth - Change QD callback handler
 * @sdev: SCSI device reference
 * @q_depth: Queue depth
 *
 * Validate and limit QD and call scsi_change_queue_depth.
 *
 * Return: return value of scsi_change_queue_depth
 */
static int mpi3mr_change_queue_depth(struct scsi_device *sdev,
	int q_depth)
{
	struct scsi_target *starget = scsi_target(sdev);
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	int retval = 0;

	if (!sdev->tagged_supported)
		q_depth = 1;
	if (q_depth > shost->can_queue)
		q_depth = shost->can_queue;
	else if (!q_depth)
		q_depth = MPI3MR_DEFAULT_SDEV_QD;
	retval = scsi_change_queue_depth(sdev, q_depth);
	sdev->max_queue_depth = sdev->queue_depth;

	return retval;
}

/**
 * mpi3mr_update_sdev - Update SCSI device information
 * @sdev: SCSI device reference
 * @data: target device reference
 *
 * This is an iterator function called for each SCSI device in a
 * target to update the target specific information into each
 * SCSI device.
 *
 * Return: Nothing.
 */
static void
mpi3mr_update_sdev(struct scsi_device *sdev, void *data)
{
	struct mpi3mr_tgt_dev *tgtdev;

	tgtdev = (struct mpi3mr_tgt_dev *)data;
	if (!tgtdev)
		return;

	mpi3mr_change_queue_depth(sdev, tgtdev->q_depth);
	switch (tgtdev->dev_type) {
	case MPI3_DEVICE_DEVFORM_PCIE:
		/* The block layer hw sector size = 512, so mdts is
		 * converted from bytes to 512-byte sectors here.
		 */
		if ((tgtdev->dev_spec.pcie_inf.dev_info &
		    MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) ==
		    MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) {
			blk_queue_max_hw_sectors(sdev->request_queue,
			    tgtdev->dev_spec.pcie_inf.mdts / 512);
			if (tgtdev->dev_spec.pcie_inf.pgsz == 0)
				blk_queue_virt_boundary(sdev->request_queue,
				    ((1 << MPI3MR_DEFAULT_PGSZEXP) - 1));
			else
				blk_queue_virt_boundary(sdev->request_queue,
				    ((1 << tgtdev->dev_spec.pcie_inf.pgsz) - 1));
		}
		break;
	default:
		break;
	}
}
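/*
 * The post-reset refresh below works in passes: first mark targets whose
 * handles were not revalidated (or which turned hidden) as removed, then
 * remove or hide them from the upper layers, and finally expose newly
 * found devices or update the SCSI devices of the targets that survived
 * the reset.
 */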
/**
 * mpi3mr_rfresh_tgtdevs - Refresh target device exposure
 * @mrioc: Adapter instance reference
 *
 * This is executed post controller reset to identify any
 * missing devices during reset and remove from the upper layers
 * or expose any newly detected device to the upper layers.
 *
 * Return: Nothing.
 */
void mpi3mr_rfresh_tgtdevs(struct mpi3mr_ioc *mrioc)
{
	struct mpi3mr_tgt_dev *tgtdev, *tgtdev_next;
	struct mpi3mr_stgt_priv_data *tgt_priv;

	dprint_reset(mrioc, "refresh target devices: check for removals\n");
	list_for_each_entry_safe(tgtdev, tgtdev_next, &mrioc->tgtdev_list,
	    list) {
		if (((tgtdev->dev_handle == MPI3MR_INVALID_DEV_HANDLE) ||
		    tgtdev->is_hidden) &&
		    tgtdev->host_exposed && tgtdev->starget &&
		    tgtdev->starget->hostdata) {
			tgt_priv = tgtdev->starget->hostdata;
			tgt_priv->dev_removed = 1;
			atomic_set(&tgt_priv->block_io, 0);
		}
	}

	list_for_each_entry_safe(tgtdev, tgtdev_next, &mrioc->tgtdev_list,
	    list) {
		if (tgtdev->dev_handle == MPI3MR_INVALID_DEV_HANDLE) {
			dprint_reset(mrioc, "removing target device with perst_id(%d)\n",
			    tgtdev->perst_id);
			if (tgtdev->host_exposed)
				mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
			mpi3mr_tgtdev_del_from_list(mrioc, tgtdev, true);
			mpi3mr_tgtdev_put(tgtdev);
		} else if (tgtdev->is_hidden && tgtdev->host_exposed) {
			dprint_reset(mrioc, "hiding target device with perst_id(%d)\n",
			    tgtdev->perst_id);
			mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
		}
	}

	tgtdev = NULL;
	list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
		if ((tgtdev->dev_handle != MPI3MR_INVALID_DEV_HANDLE) &&
		    !tgtdev->is_hidden) {
			if (!tgtdev->host_exposed)
				mpi3mr_report_tgtdev_to_host(mrioc,
				    tgtdev->perst_id);
			else if (tgtdev->starget)
				starget_for_each_device(tgtdev->starget,
				    (void *)tgtdev, mpi3mr_update_sdev);
		}
	}
}
/**
 * mpi3mr_update_tgtdev - Update cached target device information
 * @mrioc: Adapter instance reference
 * @tgtdev: Target device internal structure
 * @dev_pg0: New device page0
 * @is_added: Flag to indicate the device is just added
 *
 * Update the information from the device page0 into the driver
 * cached target device structure.
 *
 * Return: Nothing.
 */
static void mpi3mr_update_tgtdev(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_tgt_dev *tgtdev, struct mpi3_device_page0 *dev_pg0,
	bool is_added)
{
	u16 flags = 0;
	struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL;
	struct mpi3mr_enclosure_node *enclosure_dev = NULL;
	u8 prot_mask = 0;

	tgtdev->perst_id = le16_to_cpu(dev_pg0->persistent_id);
	tgtdev->dev_handle = le16_to_cpu(dev_pg0->dev_handle);
	tgtdev->dev_type = dev_pg0->device_form;
	tgtdev->io_unit_port = dev_pg0->io_unit_port;
	tgtdev->encl_handle = le16_to_cpu(dev_pg0->enclosure_handle);
	tgtdev->parent_handle = le16_to_cpu(dev_pg0->parent_dev_handle);
	tgtdev->slot = le16_to_cpu(dev_pg0->slot);
	tgtdev->q_depth = le16_to_cpu(dev_pg0->queue_depth);
	tgtdev->wwid = le64_to_cpu(dev_pg0->wwid);
	tgtdev->devpg0_flag = le16_to_cpu(dev_pg0->flags);

	if (tgtdev->encl_handle)
		enclosure_dev = mpi3mr_enclosure_find_by_handle(mrioc,
		    tgtdev->encl_handle);
	if (enclosure_dev)
		tgtdev->enclosure_logical_id = le64_to_cpu(
		    enclosure_dev->pg0.enclosure_logical_id);

	flags = tgtdev->devpg0_flag;

	tgtdev->is_hidden = (flags & MPI3_DEVICE0_FLAGS_HIDDEN);

	if (is_added == true)
		tgtdev->io_throttle_enabled =
		    (flags & MPI3_DEVICE0_FLAGS_IO_THROTTLING_REQUIRED) ? 1 : 0;

	switch (flags & MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_MASK) {
	case MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_256_LB:
		tgtdev->wslen = MPI3MR_WRITE_SAME_MAX_LEN_256_BLKS;
		break;
	case MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_2048_LB:
		tgtdev->wslen = MPI3MR_WRITE_SAME_MAX_LEN_2048_BLKS;
		break;
	case MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_NO_LIMIT:
	default:
		tgtdev->wslen = 0;
		break;
	}

	if (tgtdev->starget && tgtdev->starget->hostdata) {
		scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *)
		    tgtdev->starget->hostdata;
		scsi_tgt_priv_data->perst_id = tgtdev->perst_id;
		scsi_tgt_priv_data->dev_handle = tgtdev->dev_handle;
		scsi_tgt_priv_data->dev_type = tgtdev->dev_type;
		scsi_tgt_priv_data->io_throttle_enabled =
		    tgtdev->io_throttle_enabled;
		if (is_added == true)
			atomic_set(&scsi_tgt_priv_data->block_io, 0);
		scsi_tgt_priv_data->wslen = tgtdev->wslen;
	}

	switch (dev_pg0->access_status) {
	case MPI3_DEVICE0_ASTATUS_NO_ERRORS:
	case MPI3_DEVICE0_ASTATUS_PREPARE:
	case MPI3_DEVICE0_ASTATUS_NEEDS_INITIALIZATION:
	case MPI3_DEVICE0_ASTATUS_DEVICE_MISSING_DELAY:
		break;
	default:
		tgtdev->is_hidden = 1;
		break;
	}

	switch (tgtdev->dev_type) {
	case MPI3_DEVICE_DEVFORM_SAS_SATA:
	{
		struct mpi3_device0_sas_sata_format *sasinf =
		    &dev_pg0->device_specific.sas_sata_format;
		u16 dev_info = le16_to_cpu(sasinf->device_info);

		tgtdev->dev_spec.sas_sata_inf.dev_info = dev_info;
		tgtdev->dev_spec.sas_sata_inf.sas_address =
		    le64_to_cpu(sasinf->sas_address);
		tgtdev->dev_spec.sas_sata_inf.phy_id = sasinf->phy_num;
		tgtdev->dev_spec.sas_sata_inf.attached_phy_id =
		    sasinf->attached_phy_identifier;
		if ((dev_info & MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_MASK) !=
		    MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_END_DEVICE)
			tgtdev->is_hidden = 1;
		else if (!(dev_info & (MPI3_SAS_DEVICE_INFO_STP_SATA_TARGET |
		    MPI3_SAS_DEVICE_INFO_SSP_TARGET)))
			tgtdev->is_hidden = 1;

		if (((tgtdev->devpg0_flag &
		    MPI3_DEVICE0_FLAGS_ATT_METHOD_DIR_ATTACHED)
		    && (tgtdev->devpg0_flag &
		    MPI3_DEVICE0_FLAGS_ATT_METHOD_VIRTUAL)) ||
		    (tgtdev->parent_handle == 0xFFFF))
			tgtdev->non_stl = 1;
		if (tgtdev->dev_spec.sas_sata_inf.hba_port)
			tgtdev->dev_spec.sas_sata_inf.hba_port->port_id =
			    dev_pg0->io_unit_port;
		break;
	}
	case MPI3_DEVICE_DEVFORM_PCIE:
	{
		struct mpi3_device0_pcie_format *pcieinf =
		    &dev_pg0->device_specific.pcie_format;
		u16 dev_info = le16_to_cpu(pcieinf->device_info);

		tgtdev->dev_spec.pcie_inf.dev_info = dev_info;
		tgtdev->dev_spec.pcie_inf.capb =
		    le32_to_cpu(pcieinf->capabilities);
		tgtdev->dev_spec.pcie_inf.mdts = MPI3MR_DEFAULT_MDTS;
		/* 2^12 = 4096 */
		tgtdev->dev_spec.pcie_inf.pgsz = 12;
		if (dev_pg0->access_status == MPI3_DEVICE0_ASTATUS_NO_ERRORS) {
			tgtdev->dev_spec.pcie_inf.mdts =
			    le32_to_cpu(pcieinf->maximum_data_transfer_size);
			tgtdev->dev_spec.pcie_inf.pgsz = pcieinf->page_size;
			tgtdev->dev_spec.pcie_inf.reset_to =
			    max_t(u8, pcieinf->controller_reset_to,
			    MPI3MR_INTADMCMD_TIMEOUT);
			tgtdev->dev_spec.pcie_inf.abort_to =
			    max_t(u8, pcieinf->nvme_abort_to,
			    MPI3MR_INTADMCMD_TIMEOUT);
		}
		if (tgtdev->dev_spec.pcie_inf.mdts > (1024 * 1024))
			tgtdev->dev_spec.pcie_inf.mdts = (1024 * 1024);
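		/*
		 * Note: only NVMe and SCSI device types behind the PCIe
		 * device form are exposed to the SCSI midlayer; any other
		 * PCIe device type is kept hidden (see the check below).
		 */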
		if (((dev_info & MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) !=
		    MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) &&
		    ((dev_info & MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) !=
		    MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_SCSI_DEVICE))
			tgtdev->is_hidden = 1;
		tgtdev->non_stl = 1;
		if (!mrioc->shost)
			break;
		prot_mask = scsi_host_get_prot(mrioc->shost);
		if (prot_mask & SHOST_DIX_TYPE0_PROTECTION) {
			scsi_host_set_prot(mrioc->shost, prot_mask & 0x77);
			ioc_info(mrioc,
			    "%s : Disabling DIX0 prot capability\n", __func__);
			ioc_info(mrioc,
			    "because HBA does not support DIX0 operation on NVME drives\n");
		}
		break;
	}
	case MPI3_DEVICE_DEVFORM_VD:
	{
		struct mpi3_device0_vd_format *vdinf =
		    &dev_pg0->device_specific.vd_format;
		struct mpi3mr_throttle_group_info *tg = NULL;
		u16 vdinf_io_throttle_group =
		    le16_to_cpu(vdinf->io_throttle_group);

		tgtdev->dev_spec.vd_inf.state = vdinf->vd_state;
		if (vdinf->vd_state == MPI3_DEVICE0_VD_STATE_OFFLINE)
			tgtdev->is_hidden = 1;
		tgtdev->non_stl = 1;
		tgtdev->dev_spec.vd_inf.tg_id = vdinf_io_throttle_group;
		tgtdev->dev_spec.vd_inf.tg_high =
		    le16_to_cpu(vdinf->io_throttle_group_high) * 2048;
		tgtdev->dev_spec.vd_inf.tg_low =
		    le16_to_cpu(vdinf->io_throttle_group_low) * 2048;
		if (vdinf_io_throttle_group < mrioc->num_io_throttle_group) {
			tg = mrioc->throttle_groups + vdinf_io_throttle_group;
			tg->id = vdinf_io_throttle_group;
			tg->high = tgtdev->dev_spec.vd_inf.tg_high;
			tg->low = tgtdev->dev_spec.vd_inf.tg_low;
			tg->qd_reduction =
			    tgtdev->dev_spec.vd_inf.tg_qd_reduction;
			if (is_added == true)
				tg->fw_qd = tgtdev->q_depth;
			tg->modified_qd = tgtdev->q_depth;
		}
		tgtdev->dev_spec.vd_inf.tg = tg;
		if (scsi_tgt_priv_data)
			scsi_tgt_priv_data->throttle_group = tg;
		break;
	}
	default:
		break;
	}
}
/**
 * mpi3mr_devstatuschg_evt_bh - DevStatusChange evt bottomhalf
 * @mrioc: Adapter instance reference
 * @fwevt: Firmware event information.
 *
 * Process Device status Change event and based on device's new
 * information, either expose the device to the upper layers, or
 * remove the device from upper layers.
 *
 * Return: Nothing.
 */
static void mpi3mr_devstatuschg_evt_bh(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_fwevt *fwevt)
{
	u16 dev_handle = 0;
	u8 uhide = 0, delete = 0, cleanup = 0;
	struct mpi3mr_tgt_dev *tgtdev = NULL;
	struct mpi3_event_data_device_status_change *evtdata =
	    (struct mpi3_event_data_device_status_change *)fwevt->event_data;

	dev_handle = le16_to_cpu(evtdata->dev_handle);
	ioc_info(mrioc,
	    "%s :device status change: handle(0x%04x): reason code(0x%x)\n",
	    __func__, dev_handle, evtdata->reason_code);
	switch (evtdata->reason_code) {
	case MPI3_EVENT_DEV_STAT_RC_HIDDEN:
		delete = 1;
		break;
	case MPI3_EVENT_DEV_STAT_RC_NOT_HIDDEN:
		uhide = 1;
		break;
	case MPI3_EVENT_DEV_STAT_RC_VD_NOT_RESPONDING:
		delete = 1;
		cleanup = 1;
		break;
	default:
		ioc_info(mrioc, "%s :Unhandled reason code(0x%x)\n", __func__,
		    evtdata->reason_code);
		break;
	}

	tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle);
	if (!tgtdev)
		goto out;
	if (uhide) {
		tgtdev->is_hidden = 0;
		if (!tgtdev->host_exposed)
			mpi3mr_report_tgtdev_to_host(mrioc, tgtdev->perst_id);
	}

	if (delete)
		mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);

	if (cleanup) {
		mpi3mr_tgtdev_del_from_list(mrioc, tgtdev, false);
		mpi3mr_tgtdev_put(tgtdev);
	}

out:
	if (tgtdev)
		mpi3mr_tgtdev_put(tgtdev);
}

/**
 * mpi3mr_devinfochg_evt_bh - DeviceInfoChange evt bottomhalf
 * @mrioc: Adapter instance reference
 * @dev_pg0: New device page0
 *
 * Process Device Info Change event and based on device's new
 * information, either expose the device to the upper layers, or
 * remove the device from upper layers or update the details of
 * the device.
 *
 * Return: Nothing.
 */
static void mpi3mr_devinfochg_evt_bh(struct mpi3mr_ioc *mrioc,
	struct mpi3_device_page0 *dev_pg0)
{
	struct mpi3mr_tgt_dev *tgtdev = NULL;
	u16 dev_handle = 0, perst_id = 0;

	perst_id = le16_to_cpu(dev_pg0->persistent_id);
	dev_handle = le16_to_cpu(dev_pg0->dev_handle);
	ioc_info(mrioc,
	    "%s :Device info change: handle(0x%04x): persist_id(0x%x)\n",
	    __func__, dev_handle, perst_id);
	tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle);
	if (!tgtdev)
		goto out;
	mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0, false);
	if (!tgtdev->is_hidden && !tgtdev->host_exposed)
		mpi3mr_report_tgtdev_to_host(mrioc, perst_id);
	if (tgtdev->is_hidden && tgtdev->host_exposed)
		mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
	if (!tgtdev->is_hidden && tgtdev->host_exposed && tgtdev->starget)
		starget_for_each_device(tgtdev->starget, (void *)tgtdev,
		    mpi3mr_update_sdev);
out:
	if (tgtdev)
		mpi3mr_tgtdev_put(tgtdev);
}
/**
 * mpi3mr_free_enclosure_list - release enclosures
 * @mrioc: Adapter instance reference
 *
 * Free memory allocated during enclosure add.
 *
 * Return: Nothing.
 */
void mpi3mr_free_enclosure_list(struct mpi3mr_ioc *mrioc)
{
	struct mpi3mr_enclosure_node *enclosure_dev, *enclosure_dev_next;

	list_for_each_entry_safe(enclosure_dev,
	    enclosure_dev_next, &mrioc->enclosure_list, list) {
		list_del(&enclosure_dev->list);
		kfree(enclosure_dev);
	}
}

/**
 * mpi3mr_enclosure_find_by_handle - enclosure search by handle
 * @mrioc: Adapter instance reference
 * @handle: Firmware device handle of the enclosure
 *
 * This searches for enclosure device based on handle, then returns the
 * enclosure object.
 *
 * Return: Enclosure object reference or NULL
 */
struct mpi3mr_enclosure_node *mpi3mr_enclosure_find_by_handle(
	struct mpi3mr_ioc *mrioc, u16 handle)
{
	struct mpi3mr_enclosure_node *enclosure_dev, *r = NULL;

	list_for_each_entry(enclosure_dev, &mrioc->enclosure_list, list) {
		if (le16_to_cpu(enclosure_dev->pg0.enclosure_handle) != handle)
			continue;
		r = enclosure_dev;
		goto out;
	}
out:
	return r;
}

/**
 * mpi3mr_encldev_add_chg_evt_debug - debug for enclosure event
 * @mrioc: Adapter instance reference
 * @encl_pg0: Enclosure page 0.
 * @is_added: Added event or not
 *
 * Return: Nothing.
 */
static void mpi3mr_encldev_add_chg_evt_debug(struct mpi3mr_ioc *mrioc,
	struct mpi3_enclosure_page0 *encl_pg0, u8 is_added)
{
	char *reason_str = NULL;

	if (!(mrioc->logging_level & MPI3_DEBUG_EVENT_WORK_TASK))
		return;

	if (is_added)
		reason_str = "enclosure added";
	else
		reason_str = "enclosure dev status changed";

	ioc_info(mrioc,
	    "%s: handle(0x%04x), enclosure logical id(0x%016llx)\n",
	    reason_str, le16_to_cpu(encl_pg0->enclosure_handle),
	    (unsigned long long)le64_to_cpu(encl_pg0->enclosure_logical_id));
	ioc_info(mrioc,
	    "number of slots(%d), port(%d), flags(0x%04x), present(%d)\n",
	    le16_to_cpu(encl_pg0->num_slots), encl_pg0->io_unit_port,
	    le16_to_cpu(encl_pg0->flags),
	    ((le16_to_cpu(encl_pg0->flags) &
	    MPI3_ENCLS0_FLAGS_ENCL_DEV_PRESENT_MASK) >> 4));
}
/**
 * mpi3mr_encldev_add_chg_evt_bh - Enclosure evt bottomhalf
 * @mrioc: Adapter instance reference
 * @fwevt: Firmware event reference
 *
 * Prints information about the Enclosure device status or
 * Enclosure add events if logging is enabled and add or remove
 * the enclosure from the controller's internal list of
 * enclosures.
 *
 * Return: Nothing.
 */
static void mpi3mr_encldev_add_chg_evt_bh(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_fwevt *fwevt)
{
	struct mpi3mr_enclosure_node *enclosure_dev = NULL;
	struct mpi3_enclosure_page0 *encl_pg0;
	u16 encl_handle;
	u8 added, present;

	encl_pg0 = (struct mpi3_enclosure_page0 *) fwevt->event_data;
	added = (fwevt->event_id == MPI3_EVENT_ENCL_DEVICE_ADDED) ? 1 : 0;
	mpi3mr_encldev_add_chg_evt_debug(mrioc, encl_pg0, added);

	encl_handle = le16_to_cpu(encl_pg0->enclosure_handle);
	present = ((le16_to_cpu(encl_pg0->flags) &
	    MPI3_ENCLS0_FLAGS_ENCL_DEV_PRESENT_MASK) >> 4);

	if (encl_handle)
		enclosure_dev = mpi3mr_enclosure_find_by_handle(mrioc,
		    encl_handle);
	if (!enclosure_dev && present) {
		enclosure_dev =
		    kzalloc(sizeof(struct mpi3mr_enclosure_node),
		    GFP_KERNEL);
		if (!enclosure_dev)
			return;
		list_add_tail(&enclosure_dev->list,
		    &mrioc->enclosure_list);
	}
	if (enclosure_dev) {
		if (!present) {
			list_del(&enclosure_dev->list);
			kfree(enclosure_dev);
		} else
			memcpy(&enclosure_dev->pg0, encl_pg0,
			    sizeof(enclosure_dev->pg0));
	}
}

/**
 * mpi3mr_sastopochg_evt_debug - SASTopoChange details
 * @mrioc: Adapter instance reference
 * @event_data: SAS topology change list event data
 *
 * Prints information about the SAS topology change event.
 *
 * Return: Nothing.
 */
static void
mpi3mr_sastopochg_evt_debug(struct mpi3mr_ioc *mrioc,
	struct mpi3_event_data_sas_topology_change_list *event_data)
{
	int i;
	u16 handle;
	u8 reason_code, phy_number;
	char *status_str = NULL;
	u8 link_rate, prev_link_rate;

	switch (event_data->exp_status) {
	case MPI3_EVENT_SAS_TOPO_ES_NOT_RESPONDING:
		status_str = "remove";
		break;
	case MPI3_EVENT_SAS_TOPO_ES_RESPONDING:
		status_str = "responding";
		break;
	case MPI3_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING:
		status_str = "remove delay";
		break;
	case MPI3_EVENT_SAS_TOPO_ES_NO_EXPANDER:
		status_str = "direct attached";
		break;
	default:
		status_str = "unknown status";
		break;
	}
	ioc_info(mrioc, "%s :sas topology change: (%s)\n",
	    __func__, status_str);
	ioc_info(mrioc,
	    "%s :\texpander_handle(0x%04x), port(%d), enclosure_handle(0x%04x) start_phy(%02d), num_entries(%d)\n",
	    __func__, le16_to_cpu(event_data->expander_dev_handle),
	    event_data->io_unit_port,
	    le16_to_cpu(event_data->enclosure_handle),
	    event_data->start_phy_num, event_data->num_entries);
	for (i = 0; i < event_data->num_entries; i++) {
		handle = le16_to_cpu(event_data->phy_entry[i].attached_dev_handle);
		if (!handle)
			continue;
		phy_number = event_data->start_phy_num + i;
		reason_code = event_data->phy_entry[i].status &
		    MPI3_EVENT_SAS_TOPO_PHY_RC_MASK;
		switch (reason_code) {
		case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING:
			status_str = "target remove";
			break;
		case MPI3_EVENT_SAS_TOPO_PHY_RC_DELAY_NOT_RESPONDING:
			status_str = "delay target remove";
			break;
		case MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED:
			status_str = "link status change";
			break;
		case MPI3_EVENT_SAS_TOPO_PHY_RC_NO_CHANGE:
			status_str = "link status no change";
			break;
		case MPI3_EVENT_SAS_TOPO_PHY_RC_RESPONDING:
			status_str = "target responding";
			break;
		default:
			status_str = "unknown";
			break;
		}
		link_rate = event_data->phy_entry[i].link_rate >> 4;
		prev_link_rate = event_data->phy_entry[i].link_rate & 0xF;
		ioc_info(mrioc,
		    "%s :\tphy(%02d), attached_handle(0x%04x): %s: link rate: new(0x%02x), old(0x%02x)\n",
		    __func__, phy_number, handle, status_str, link_rate,
		    prev_link_rate);
	}
}
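/*
 * Note: in the SAS topology phy entries, the link_rate field packs the
 * current link rate in the upper nibble and the previous link rate in
 * the lower nibble; both the debug helper above and the bottom half
 * below decode it that way.
 */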
/**
 * mpi3mr_sastopochg_evt_bh - SASTopologyChange evt bottomhalf
 * @mrioc: Adapter instance reference
 * @fwevt: Firmware event reference
 *
 * Prints information about the SAS topology change event and
 * for "not responding" event code, removes the device from the
 * upper layers.
 *
 * Return: Nothing.
 */
static void mpi3mr_sastopochg_evt_bh(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_fwevt *fwevt)
{
	struct mpi3_event_data_sas_topology_change_list *event_data =
	    (struct mpi3_event_data_sas_topology_change_list *)fwevt->event_data;
	int i;
	u16 handle;
	u8 reason_code;
	u64 exp_sas_address = 0, parent_sas_address = 0;
	struct mpi3mr_hba_port *hba_port = NULL;
	struct mpi3mr_tgt_dev *tgtdev = NULL;
	struct mpi3mr_sas_node *sas_expander = NULL;
	unsigned long flags;
	u8 link_rate, prev_link_rate, parent_phy_number;

	mpi3mr_sastopochg_evt_debug(mrioc, event_data);
	if (mrioc->sas_transport_enabled) {
		hba_port = mpi3mr_get_hba_port_by_id(mrioc,
		    event_data->io_unit_port);
		if (le16_to_cpu(event_data->expander_dev_handle)) {
			spin_lock_irqsave(&mrioc->sas_node_lock, flags);
			sas_expander = __mpi3mr_expander_find_by_handle(mrioc,
			    le16_to_cpu(event_data->expander_dev_handle));
			if (sas_expander) {
				exp_sas_address = sas_expander->sas_address;
				hba_port = sas_expander->hba_port;
			}
			spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
			parent_sas_address = exp_sas_address;
		} else
			parent_sas_address = mrioc->sas_hba.sas_address;
	}

	for (i = 0; i < event_data->num_entries; i++) {
		if (fwevt->discard)
			return;
		handle = le16_to_cpu(event_data->phy_entry[i].attached_dev_handle);
		if (!handle)
			continue;
		tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);
		if (!tgtdev)
			continue;

		reason_code = event_data->phy_entry[i].status &
		    MPI3_EVENT_SAS_TOPO_PHY_RC_MASK;

		switch (reason_code) {
		case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING:
			if (tgtdev->host_exposed)
				mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
			mpi3mr_tgtdev_del_from_list(mrioc, tgtdev, false);
			mpi3mr_tgtdev_put(tgtdev);
			break;
		case MPI3_EVENT_SAS_TOPO_PHY_RC_RESPONDING:
		case MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED:
		case MPI3_EVENT_SAS_TOPO_PHY_RC_NO_CHANGE:
		{
			if (!mrioc->sas_transport_enabled || tgtdev->non_stl
			    || tgtdev->is_hidden)
				break;
			link_rate = event_data->phy_entry[i].link_rate >> 4;
			prev_link_rate = event_data->phy_entry[i].link_rate & 0xF;
			if (link_rate == prev_link_rate)
				break;
			if (!parent_sas_address)
				break;
			parent_phy_number = event_data->start_phy_num + i;
			mpi3mr_update_links(mrioc, parent_sas_address, handle,
			    parent_phy_number, link_rate, hba_port);
			break;
		}
		default:
			break;
		}
		if (tgtdev)
			mpi3mr_tgtdev_put(tgtdev);
	}

	if (mrioc->sas_transport_enabled && (event_data->exp_status ==
	    MPI3_EVENT_SAS_TOPO_ES_NOT_RESPONDING)) {
		if (sas_expander)
			mpi3mr_expander_remove(mrioc, exp_sas_address,
			    hba_port);
	}
}
/**
 * mpi3mr_pcietopochg_evt_debug - PCIeTopoChange details
 * @mrioc: Adapter instance reference
 * @event_data: PCIe topology change list event data
 *
 * Prints information about the PCIe topology change event.
 *
 * Return: Nothing.
 */
static void
mpi3mr_pcietopochg_evt_debug(struct mpi3mr_ioc *mrioc,
	struct mpi3_event_data_pcie_topology_change_list *event_data)
{
	int i;
	u16 handle;
	u16 reason_code;
	u8 port_number;
	char *status_str = NULL;
	u8 link_rate, prev_link_rate;

	switch (event_data->switch_status) {
	case MPI3_EVENT_PCIE_TOPO_SS_NOT_RESPONDING:
		status_str = "remove";
		break;
	case MPI3_EVENT_PCIE_TOPO_SS_RESPONDING:
		status_str = "responding";
		break;
	case MPI3_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING:
		status_str = "remove delay";
		break;
	case MPI3_EVENT_PCIE_TOPO_SS_NO_PCIE_SWITCH:
		status_str = "direct attached";
		break;
	default:
		status_str = "unknown status";
		break;
	}
	ioc_info(mrioc, "%s :pcie topology change: (%s)\n",
	    __func__, status_str);
	ioc_info(mrioc,
	    "%s :\tswitch_handle(0x%04x), enclosure_handle(0x%04x) start_port(%02d), num_entries(%d)\n",
	    __func__, le16_to_cpu(event_data->switch_dev_handle),
	    le16_to_cpu(event_data->enclosure_handle),
	    event_data->start_port_num, event_data->num_entries);
	for (i = 0; i < event_data->num_entries; i++) {
		handle =
		    le16_to_cpu(event_data->port_entry[i].attached_dev_handle);
		if (!handle)
			continue;
		port_number = event_data->start_port_num + i;
		reason_code = event_data->port_entry[i].port_status;
		switch (reason_code) {
		case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
			status_str = "target remove";
			break;
		case MPI3_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING:
			status_str = "delay target remove";
			break;
		case MPI3_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
			status_str = "link status change";
			break;
		case MPI3_EVENT_PCIE_TOPO_PS_NO_CHANGE:
			status_str = "link status no change";
			break;
		case MPI3_EVENT_PCIE_TOPO_PS_RESPONDING:
			status_str = "target responding";
			break;
		default:
			status_str = "unknown";
			break;
		}
		link_rate = event_data->port_entry[i].current_port_info &
		    MPI3_EVENT_PCIE_TOPO_PI_RATE_MASK;
		prev_link_rate = event_data->port_entry[i].previous_port_info &
		    MPI3_EVENT_PCIE_TOPO_PI_RATE_MASK;
		ioc_info(mrioc,
		    "%s :\tport(%02d), attached_handle(0x%04x): %s: link rate: new(0x%02x), old(0x%02x)\n",
		    __func__, port_number, handle, status_str, link_rate,
		    prev_link_rate);
	}
}
/**
 * mpi3mr_pcietopochg_evt_bh - PCIeTopologyChange evt bottomhalf
 * @mrioc: Adapter instance reference
 * @fwevt: Firmware event reference
 *
 * Prints information about the PCIe topology change event and
 * for "not responding" event code, removes the device from the
 * upper layers.
 *
 * Return: Nothing.
 */
static void mpi3mr_pcietopochg_evt_bh(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_fwevt *fwevt)
{
	struct mpi3_event_data_pcie_topology_change_list *event_data =
	    (struct mpi3_event_data_pcie_topology_change_list *)fwevt->event_data;
	int i;
	u16 handle;
	u8 reason_code;
	struct mpi3mr_tgt_dev *tgtdev = NULL;

	mpi3mr_pcietopochg_evt_debug(mrioc, event_data);

	for (i = 0; i < event_data->num_entries; i++) {
		if (fwevt->discard)
			return;
		handle =
		    le16_to_cpu(event_data->port_entry[i].attached_dev_handle);
		if (!handle)
			continue;
		tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);
		if (!tgtdev)
			continue;

		reason_code = event_data->port_entry[i].port_status;

		switch (reason_code) {
		case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
			if (tgtdev->host_exposed)
				mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
			mpi3mr_tgtdev_del_from_list(mrioc, tgtdev, false);
			mpi3mr_tgtdev_put(tgtdev);
			break;
		default:
			break;
		}
		if (tgtdev)
			mpi3mr_tgtdev_put(tgtdev);
	}
}

/**
 * mpi3mr_logdata_evt_bh - Log data event bottomhalf
 * @mrioc: Adapter instance reference
 * @fwevt: Firmware event reference
 *
 * Extracts the event data and calls application interfacing
 * function to process the event further.
 *
 * Return: Nothing.
 */
static void mpi3mr_logdata_evt_bh(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_fwevt *fwevt)
{
	mpi3mr_app_save_logdata(mrioc, fwevt->event_data,
	    fwevt->event_data_size);
}

/**
 * mpi3mr_update_sdev_qd - Update SCSI device queue depth
 * @sdev: SCSI device reference
 * @data: Queue depth reference
 *
 * This is an iterator function called for each SCSI device in a
 * target to update the QD of each SCSI device.
 *
 * Return: Nothing.
 */
static void mpi3mr_update_sdev_qd(struct scsi_device *sdev, void *data)
{
	u16 *q_depth = (u16 *)data;

	scsi_change_queue_depth(sdev, (int)*q_depth);
	sdev->max_queue_depth = sdev->queue_depth;
}
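/*
 * The TG QD reduction event queued by mpi3mr_queue_qd_reduction_event()
 * is presumably serviced from the firmware event bottom half, which
 * would call mpi3mr_set_qd_for_all_vd_in_tg() below to apply
 * tg->modified_qd to every VD in the throttle group.
 */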
1871 */
1872 static void mpi3mr_set_qd_for_all_vd_in_tg(struct mpi3mr_ioc *mrioc,
1873 struct mpi3mr_throttle_group_info *tg)
1874 {
1875 unsigned long flags;
1876 struct mpi3mr_tgt_dev *tgtdev;
1877 struct mpi3mr_stgt_priv_data *tgt_priv;
1878 
1879 
1880 spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
1881 list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
1882 if (tgtdev->starget && tgtdev->starget->hostdata) {
1883 tgt_priv = tgtdev->starget->hostdata;
1884 if (tgt_priv->throttle_group == tg) {
1885 dprint_event_bh(mrioc,
1886 "updating qd due to throttling for persist_id(%d) original_qd(%d), reduced_qd (%d)\n",
1887 tgt_priv->perst_id, tgtdev->q_depth,
1888 tg->modified_qd);
1889 starget_for_each_device(tgtdev->starget,
1890 (void *)&tg->modified_qd,
1891 mpi3mr_update_sdev_qd);
1892 }
1893 }
1894 }
1895 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
1896 }
1897 
1898 /**
1899 * mpi3mr_fwevt_bh - Firmware event bottomhalf handler
1900 * @mrioc: Adapter instance reference
1901 * @fwevt: Firmware event reference
1902 *
1903 * Identifies the firmware event and calls the corresponding bottom
1904 * half handler and sends event acknowledgment if required.
1905 *
1906 * Return: Nothing.
1907 */
1908 static void mpi3mr_fwevt_bh(struct mpi3mr_ioc *mrioc,
1909 struct mpi3mr_fwevt *fwevt)
1910 {
1911 struct mpi3_device_page0 *dev_pg0 = NULL;
1912 u16 perst_id, handle, dev_info;
1913 struct mpi3_device0_sas_sata_format *sasinf = NULL;
1914 
1915 mpi3mr_fwevt_del_from_list(mrioc, fwevt);
1916 mrioc->current_event = fwevt;
1917 
1918 if (mrioc->stop_drv_processing)
1919 goto out;
1920 
1921 if (mrioc->unrecoverable) {
1922 dprint_event_bh(mrioc,
1923 "ignoring event(0x%02x) in bottom half handler due to unrecoverable controller\n",
1924 fwevt->event_id);
1925 goto out;
1926 }
1927 
1928 if (!fwevt->process_evt)
1929 goto evt_ack;
1930 
1931 switch (fwevt->event_id) {
1932 case MPI3_EVENT_DEVICE_ADDED:
1933 {
1934 dev_pg0 = (struct mpi3_device_page0 *)fwevt->event_data;
1935 perst_id = le16_to_cpu(dev_pg0->persistent_id);
1936 handle = le16_to_cpu(dev_pg0->dev_handle);
1937 if (perst_id != MPI3_DEVICE0_PERSISTENTID_INVALID)
1938 mpi3mr_report_tgtdev_to_host(mrioc, perst_id);
1939 else if (mrioc->sas_transport_enabled &&
1940 (dev_pg0->device_form == MPI3_DEVICE_DEVFORM_SAS_SATA)) {
1941 sasinf = &dev_pg0->device_specific.sas_sata_format;
1942 dev_info = le16_to_cpu(sasinf->device_info);
1943 if (!mrioc->sas_hba.num_phys)
1944 mpi3mr_sas_host_add(mrioc);
1945 else
1946 mpi3mr_sas_host_refresh(mrioc);
1947 
1948 if (mpi3mr_is_expander_device(dev_info))
1949 mpi3mr_expander_add(mrioc, handle);
1950 }
1951 break;
1952 }
1953 case MPI3_EVENT_DEVICE_INFO_CHANGED:
1954 {
1955 dev_pg0 = (struct mpi3_device_page0 *)fwevt->event_data;
1956 perst_id = le16_to_cpu(dev_pg0->persistent_id);
1957 if (perst_id != MPI3_DEVICE0_PERSISTENTID_INVALID)
1958 mpi3mr_devinfochg_evt_bh(mrioc, dev_pg0);
1959 break;
1960 }
1961 case MPI3_EVENT_DEVICE_STATUS_CHANGE:
1962 {
1963 mpi3mr_devstatuschg_evt_bh(mrioc, fwevt);
1964 break;
1965 }
1966 case MPI3_EVENT_ENCL_DEVICE_ADDED:
1967 case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE:
1968 {
1969 mpi3mr_encldev_add_chg_evt_bh(mrioc, fwevt);
1970 break;
1971 }
1972 
1973 case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
1974 {
1975 mpi3mr_sastopochg_evt_bh(mrioc, fwevt);
1976 break;
1977 }
1978 case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
1979 {
1980 mpi3mr_pcietopochg_evt_bh(mrioc, fwevt);
1981 break;
1982 }
1983 case MPI3_EVENT_LOG_DATA:
1984 {
1985 mpi3mr_logdata_evt_bh(mrioc, fwevt);
1986 break;
1987 } 1988 case MPI3MR_DRIVER_EVENT_TG_QD_REDUCTION: 1989 { 1990 struct mpi3mr_throttle_group_info *tg; 1991 1992 tg = *(struct mpi3mr_throttle_group_info **)fwevt->event_data; 1993 dprint_event_bh(mrioc, 1994 "qd reduction event processed for tg_id(%d) reduction_needed(%d)\n", 1995 tg->id, tg->need_qd_reduction); 1996 if (tg->need_qd_reduction) { 1997 mpi3mr_set_qd_for_all_vd_in_tg(mrioc, tg); 1998 tg->need_qd_reduction = 0; 1999 } 2000 break; 2001 } 2002 case MPI3_EVENT_WAIT_FOR_DEVICES_TO_REFRESH: 2003 { 2004 while (mrioc->device_refresh_on) 2005 msleep(500); 2006 2007 dprint_event_bh(mrioc, 2008 "scan for non responding and newly added devices after soft reset started\n"); 2009 if (mrioc->sas_transport_enabled) { 2010 mpi3mr_refresh_sas_ports(mrioc); 2011 mpi3mr_refresh_expanders(mrioc); 2012 } 2013 mpi3mr_rfresh_tgtdevs(mrioc); 2014 ioc_info(mrioc, 2015 "scan for non responding and newly added devices after soft reset completed\n"); 2016 break; 2017 } 2018 default: 2019 break; 2020 } 2021 2022 evt_ack: 2023 if (fwevt->send_ack) 2024 mpi3mr_process_event_ack(mrioc, fwevt->event_id, 2025 fwevt->evt_ctx); 2026 out: 2027 /* Put fwevt reference count to neutralize kref_init increment */ 2028 mpi3mr_fwevt_put(fwevt); 2029 mrioc->current_event = NULL; 2030 } 2031 2032 /** 2033 * mpi3mr_fwevt_worker - Firmware event worker 2034 * @work: Work struct containing firmware event 2035 * 2036 * Extracts the firmware event and calls mpi3mr_fwevt_bh. 2037 * 2038 * Return: Nothing. 2039 */ 2040 static void mpi3mr_fwevt_worker(struct work_struct *work) 2041 { 2042 struct mpi3mr_fwevt *fwevt = container_of(work, struct mpi3mr_fwevt, 2043 work); 2044 mpi3mr_fwevt_bh(fwevt->mrioc, fwevt); 2045 /* 2046 * Put fwevt reference count after 2047 * dequeuing it from worker queue 2048 */ 2049 mpi3mr_fwevt_put(fwevt); 2050 } 2051 2052 /** 2053 * mpi3mr_create_tgtdev - Create and add a target device 2054 * @mrioc: Adapter instance reference 2055 * @dev_pg0: Device Page 0 data 2056 * 2057 * If the device specified by the device page 0 data is not 2058 * present in the driver's internal list, allocate the memory 2059 * for the device, populate the data and add to the list, else 2060 * update the device data. The key is persistent ID. 2061 * 2062 * Return: 0 on success, -ENOMEM on memory allocation failure 2063 */ 2064 static int mpi3mr_create_tgtdev(struct mpi3mr_ioc *mrioc, 2065 struct mpi3_device_page0 *dev_pg0) 2066 { 2067 int retval = 0; 2068 struct mpi3mr_tgt_dev *tgtdev = NULL; 2069 u16 perst_id = 0; 2070 unsigned long flags; 2071 2072 perst_id = le16_to_cpu(dev_pg0->persistent_id); 2073 if (perst_id == MPI3_DEVICE0_PERSISTENTID_INVALID) 2074 return retval; 2075 2076 spin_lock_irqsave(&mrioc->tgtdev_lock, flags); 2077 tgtdev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, perst_id); 2078 if (tgtdev) 2079 tgtdev->state = MPI3MR_DEV_CREATED; 2080 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 2081 2082 if (tgtdev) { 2083 mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0, true); 2084 mpi3mr_tgtdev_put(tgtdev); 2085 } else { 2086 tgtdev = mpi3mr_alloc_tgtdev(); 2087 if (!tgtdev) 2088 return -ENOMEM; 2089 mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0, true); 2090 mpi3mr_tgtdev_add_to_list(mrioc, tgtdev); 2091 } 2092 2093 return retval; 2094 } 2095 2096 /** 2097 * mpi3mr_flush_delayed_cmd_lists - Flush pending commands 2098 * @mrioc: Adapter instance reference 2099 * 2100 * Flush pending commands in the delayed lists due to a 2101 * controller reset or driver removal as a cleanup. 
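*
* The queued nodes are only freed here, never replayed: the
* postponed removal handshakes and event acknowledgments they
* describe are simply dropped as part of the cleanup.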
2102 *
2103 * Return: Nothing
2104 */
2105 void mpi3mr_flush_delayed_cmd_lists(struct mpi3mr_ioc *mrioc)
2106 {
2107 struct delayed_dev_rmhs_node *_rmhs_node;
2108 struct delayed_evt_ack_node *_evtack_node;
2109 
2110 dprint_reset(mrioc, "flushing delayed dev_remove_hs commands\n");
2111 while (!list_empty(&mrioc->delayed_rmhs_list)) {
2112 _rmhs_node = list_entry(mrioc->delayed_rmhs_list.next,
2113 struct delayed_dev_rmhs_node, list);
2114 list_del(&_rmhs_node->list);
2115 kfree(_rmhs_node);
2116 }
2117 dprint_reset(mrioc, "flushing delayed event ack commands\n");
2118 while (!list_empty(&mrioc->delayed_evtack_cmds_list)) {
2119 _evtack_node = list_entry(mrioc->delayed_evtack_cmds_list.next,
2120 struct delayed_evt_ack_node, list);
2121 list_del(&_evtack_node->list);
2122 kfree(_evtack_node);
2123 }
2124 }
2125 
2126 /**
2127 * mpi3mr_dev_rmhs_complete_iou - Device removal IOUC completion
2128 * @mrioc: Adapter instance reference
2129 * @drv_cmd: Internal command tracker
2130 *
2131 * Issues a target reset TM to the firmware from the device
2132 * removal TM pend list or retries the removal handshake sequence
2133 * based on the IOU control request IOC status.
2134 *
2135 * Return: Nothing
2136 */
2137 static void mpi3mr_dev_rmhs_complete_iou(struct mpi3mr_ioc *mrioc,
2138 struct mpi3mr_drv_cmd *drv_cmd)
2139 {
2140 u16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
2141 struct delayed_dev_rmhs_node *delayed_dev_rmhs = NULL;
2142 
2143 if (drv_cmd->state & MPI3MR_CMD_RESET)
2144 goto clear_drv_cmd;
2145 
2146 ioc_info(mrioc,
2147 "%s :dev_rmhs_iouctrl_complete:handle(0x%04x), ioc_status(0x%04x), loginfo(0x%08x)\n",
2148 __func__, drv_cmd->dev_handle, drv_cmd->ioc_status,
2149 drv_cmd->ioc_loginfo);
2150 if (drv_cmd->ioc_status != MPI3_IOCSTATUS_SUCCESS) {
2151 if (drv_cmd->retry_count < MPI3MR_DEV_RMHS_RETRY_COUNT) {
2152 drv_cmd->retry_count++;
2153 ioc_info(mrioc,
2154 "%s :dev_rmhs_iouctrl_complete: handle(0x%04x)retrying handshake retry=%d\n",
2155 __func__, drv_cmd->dev_handle,
2156 drv_cmd->retry_count);
2157 mpi3mr_dev_rmhs_send_tm(mrioc, drv_cmd->dev_handle,
2158 drv_cmd, drv_cmd->iou_rc);
2159 return;
2160 }
2161 ioc_err(mrioc,
2162 "%s :dev removal handshake failed after all retries: handle(0x%04x)\n",
2163 __func__, drv_cmd->dev_handle);
2164 } else {
2165 ioc_info(mrioc,
2166 "%s :dev removal handshake completed successfully: handle(0x%04x)\n",
2167 __func__, drv_cmd->dev_handle);
2168 clear_bit(drv_cmd->dev_handle, mrioc->removepend_bitmap);
2169 }
2170 
2171 if (!list_empty(&mrioc->delayed_rmhs_list)) {
2172 delayed_dev_rmhs = list_entry(mrioc->delayed_rmhs_list.next,
2173 struct delayed_dev_rmhs_node, list);
2174 drv_cmd->dev_handle = delayed_dev_rmhs->handle;
2175 drv_cmd->retry_count = 0;
2176 drv_cmd->iou_rc = delayed_dev_rmhs->iou_rc;
2177 ioc_info(mrioc,
2178 "%s :dev_rmhs_iouctrl_complete: processing delayed TM: handle(0x%04x)\n",
2179 __func__, drv_cmd->dev_handle);
2180 mpi3mr_dev_rmhs_send_tm(mrioc, drv_cmd->dev_handle, drv_cmd,
2181 drv_cmd->iou_rc);
2182 list_del(&delayed_dev_rmhs->list);
2183 kfree(delayed_dev_rmhs);
2184 return;
2185 }
2186 
2187 clear_drv_cmd:
2188 drv_cmd->state = MPI3MR_CMD_NOTUSED;
2189 drv_cmd->callback = NULL;
2190 drv_cmd->retry_count = 0;
2191 drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
2192 clear_bit(cmd_idx, mrioc->devrem_bitmap);
2193 }
2194 
2195 /**
2196 * mpi3mr_dev_rmhs_complete_tm - Device removal TM completion
2197 * @mrioc: Adapter instance reference
2198 * @drv_cmd: Internal command tracker
2199 *
2200 * Issues a
target reset TM to the firmware from the device
2201 * removal TM pend list or issues an IO unit control request as
2202 * part of device removal or hidden acknowledgment handshake.
2203 *
2204 * Return: Nothing
2205 */
2206 static void mpi3mr_dev_rmhs_complete_tm(struct mpi3mr_ioc *mrioc,
2207 struct mpi3mr_drv_cmd *drv_cmd)
2208 {
2209 struct mpi3_iounit_control_request iou_ctrl;
2210 u16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
2211 struct mpi3_scsi_task_mgmt_reply *tm_reply = NULL;
2212 int retval;
2213 
2214 if (drv_cmd->state & MPI3MR_CMD_RESET)
2215 goto clear_drv_cmd;
2216 
2217 if (drv_cmd->state & MPI3MR_CMD_REPLY_VALID)
2218 tm_reply = (struct mpi3_scsi_task_mgmt_reply *)drv_cmd->reply;
2219 
2220 if (tm_reply)
2221 pr_info(IOCNAME
2222 "dev_rmhs_tr_complete:handle(0x%04x), ioc_status(0x%04x), loginfo(0x%08x), term_count(%d)\n",
2223 mrioc->name, drv_cmd->dev_handle, drv_cmd->ioc_status,
2224 drv_cmd->ioc_loginfo,
2225 le32_to_cpu(tm_reply->termination_count));
2226 
2227 pr_info(IOCNAME "Issuing IOU CTL: handle(0x%04x) dev_rmhs idx(%d)\n",
2228 mrioc->name, drv_cmd->dev_handle, cmd_idx);
2229 
2230 memset(&iou_ctrl, 0, sizeof(iou_ctrl));
2231 
2232 drv_cmd->state = MPI3MR_CMD_PENDING;
2233 drv_cmd->is_waiting = 0;
2234 drv_cmd->callback = mpi3mr_dev_rmhs_complete_iou;
2235 iou_ctrl.operation = drv_cmd->iou_rc;
2236 iou_ctrl.param16[0] = cpu_to_le16(drv_cmd->dev_handle);
2237 iou_ctrl.host_tag = cpu_to_le16(drv_cmd->host_tag);
2238 iou_ctrl.function = MPI3_FUNCTION_IO_UNIT_CONTROL;
2239 
2240 retval = mpi3mr_admin_request_post(mrioc, &iou_ctrl, sizeof(iou_ctrl),
2241 1);
2242 if (retval) {
2243 pr_err(IOCNAME "Issue DevRmHsTMIOUCTL: Admin post failed\n",
2244 mrioc->name);
2245 goto clear_drv_cmd;
2246 }
2247 
2248 return;
2249 clear_drv_cmd:
2250 drv_cmd->state = MPI3MR_CMD_NOTUSED;
2251 drv_cmd->callback = NULL;
2252 drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
2253 drv_cmd->retry_count = 0;
2254 clear_bit(cmd_idx, mrioc->devrem_bitmap);
2255 }
2256 
2257 /**
2258 * mpi3mr_dev_rmhs_send_tm - Issue TM for device removal
2259 * @mrioc: Adapter instance reference
2260 * @handle: Device handle
2261 * @cmdparam: Internal command tracker
2262 * @iou_rc: IO unit reason code
2263 *
2264 * Issues a target reset TM to the firmware or adds it to a pend
2265 * list as part of device removal or hidden acknowledgment
2266 * handshake.
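*
* For example (illustrative flow): if all MPI3MR_NUM_DEVRMCMD
* command slots are busy, the handle and reason code are parked on
* delayed_rmhs_list and replayed from
* mpi3mr_dev_rmhs_complete_iou() once a slot frees up.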
2267 * 2268 * Return: Nothing 2269 */ 2270 static void mpi3mr_dev_rmhs_send_tm(struct mpi3mr_ioc *mrioc, u16 handle, 2271 struct mpi3mr_drv_cmd *cmdparam, u8 iou_rc) 2272 { 2273 struct mpi3_scsi_task_mgmt_request tm_req; 2274 int retval = 0; 2275 u16 cmd_idx = MPI3MR_NUM_DEVRMCMD; 2276 u8 retrycount = 5; 2277 struct mpi3mr_drv_cmd *drv_cmd = cmdparam; 2278 struct delayed_dev_rmhs_node *delayed_dev_rmhs = NULL; 2279 struct mpi3mr_tgt_dev *tgtdev = NULL; 2280 unsigned long flags; 2281 2282 spin_lock_irqsave(&mrioc->tgtdev_lock, flags); 2283 tgtdev = __mpi3mr_get_tgtdev_by_handle(mrioc, handle); 2284 if (tgtdev && (iou_rc == MPI3_CTRL_OP_REMOVE_DEVICE)) 2285 tgtdev->state = MPI3MR_DEV_REMOVE_HS_STARTED; 2286 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 2287 2288 if (drv_cmd) 2289 goto issue_cmd; 2290 do { 2291 cmd_idx = find_first_zero_bit(mrioc->devrem_bitmap, 2292 MPI3MR_NUM_DEVRMCMD); 2293 if (cmd_idx < MPI3MR_NUM_DEVRMCMD) { 2294 if (!test_and_set_bit(cmd_idx, mrioc->devrem_bitmap)) 2295 break; 2296 cmd_idx = MPI3MR_NUM_DEVRMCMD; 2297 } 2298 } while (retrycount--); 2299 2300 if (cmd_idx >= MPI3MR_NUM_DEVRMCMD) { 2301 delayed_dev_rmhs = kzalloc(sizeof(*delayed_dev_rmhs), 2302 GFP_ATOMIC); 2303 if (!delayed_dev_rmhs) 2304 return; 2305 INIT_LIST_HEAD(&delayed_dev_rmhs->list); 2306 delayed_dev_rmhs->handle = handle; 2307 delayed_dev_rmhs->iou_rc = iou_rc; 2308 list_add_tail(&delayed_dev_rmhs->list, 2309 &mrioc->delayed_rmhs_list); 2310 ioc_info(mrioc, "%s :DevRmHs: tr:handle(0x%04x) is postponed\n", 2311 __func__, handle); 2312 return; 2313 } 2314 drv_cmd = &mrioc->dev_rmhs_cmds[cmd_idx]; 2315 2316 issue_cmd: 2317 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN; 2318 ioc_info(mrioc, 2319 "%s :Issuing TR TM: for devhandle 0x%04x with dev_rmhs %d\n", 2320 __func__, handle, cmd_idx); 2321 2322 memset(&tm_req, 0, sizeof(tm_req)); 2323 if (drv_cmd->state & MPI3MR_CMD_PENDING) { 2324 ioc_err(mrioc, "%s :Issue TM: Command is in use\n", __func__); 2325 goto out; 2326 } 2327 drv_cmd->state = MPI3MR_CMD_PENDING; 2328 drv_cmd->is_waiting = 0; 2329 drv_cmd->callback = mpi3mr_dev_rmhs_complete_tm; 2330 drv_cmd->dev_handle = handle; 2331 drv_cmd->iou_rc = iou_rc; 2332 tm_req.dev_handle = cpu_to_le16(handle); 2333 tm_req.task_type = MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET; 2334 tm_req.host_tag = cpu_to_le16(drv_cmd->host_tag); 2335 tm_req.task_host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INVALID); 2336 tm_req.function = MPI3_FUNCTION_SCSI_TASK_MGMT; 2337 2338 set_bit(handle, mrioc->removepend_bitmap); 2339 retval = mpi3mr_admin_request_post(mrioc, &tm_req, sizeof(tm_req), 1); 2340 if (retval) { 2341 ioc_err(mrioc, "%s :Issue DevRmHsTM: Admin Post failed\n", 2342 __func__); 2343 goto out_failed; 2344 } 2345 out: 2346 return; 2347 out_failed: 2348 drv_cmd->state = MPI3MR_CMD_NOTUSED; 2349 drv_cmd->callback = NULL; 2350 drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE; 2351 drv_cmd->retry_count = 0; 2352 clear_bit(cmd_idx, mrioc->devrem_bitmap); 2353 } 2354 2355 /** 2356 * mpi3mr_complete_evt_ack - event ack request completion 2357 * @mrioc: Adapter instance reference 2358 * @drv_cmd: Internal command tracker 2359 * 2360 * This is the completion handler for non blocking event 2361 * acknowledgment sent to the firmware and this will issue any 2362 * pending event acknowledgment request. 
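*
* Note that when a delayed ack is pending, the completing command
* tracker (drv_cmd) is passed straight back to
* mpi3mr_send_event_ack() for the next ack rather than being
* returned to the free pool first.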
2363 *
2364 * Return: Nothing
2365 */
2366 static void mpi3mr_complete_evt_ack(struct mpi3mr_ioc *mrioc,
2367 struct mpi3mr_drv_cmd *drv_cmd)
2368 {
2369 u16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN;
2370 struct delayed_evt_ack_node *delayed_evtack = NULL;
2371 
2372 if (drv_cmd->state & MPI3MR_CMD_RESET)
2373 goto clear_drv_cmd;
2374 
2375 if (drv_cmd->ioc_status != MPI3_IOCSTATUS_SUCCESS) {
2376 dprint_event_th(mrioc,
2377 "immediate event ack failed with ioc_status(0x%04x) log_info(0x%08x)\n",
2378 (drv_cmd->ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
2379 drv_cmd->ioc_loginfo);
2380 }
2381 
2382 if (!list_empty(&mrioc->delayed_evtack_cmds_list)) {
2383 delayed_evtack =
2384 list_entry(mrioc->delayed_evtack_cmds_list.next,
2385 struct delayed_evt_ack_node, list);
2386 mpi3mr_send_event_ack(mrioc, delayed_evtack->event, drv_cmd,
2387 delayed_evtack->event_ctx);
2388 list_del(&delayed_evtack->list);
2389 kfree(delayed_evtack);
2390 return;
2391 }
2392 clear_drv_cmd:
2393 drv_cmd->state = MPI3MR_CMD_NOTUSED;
2394 drv_cmd->callback = NULL;
2395 clear_bit(cmd_idx, mrioc->evtack_cmds_bitmap);
2396 }
2397 
2398 /**
2399 * mpi3mr_send_event_ack - Issue event acknowledgment request
2400 * @mrioc: Adapter instance reference
2401 * @event: MPI3 event id
2402 * @cmdparam: Internal command tracker
2403 * @event_ctx: event context
2404 *
2405 * Issues an event acknowledgment request to the firmware if
2406 * there is a free command to send the event ack, else adds it
2407 * to a pend list so that it will be processed on completion of
2408 * a prior event acknowledgment.
2409 *
2410 * Return: Nothing
2411 */
2412 static void mpi3mr_send_event_ack(struct mpi3mr_ioc *mrioc, u8 event,
2413 struct mpi3mr_drv_cmd *cmdparam, u32 event_ctx)
2414 {
2415 struct mpi3_event_ack_request evtack_req;
2416 int retval = 0;
2417 u8 retrycount = 5;
2418 u16 cmd_idx = MPI3MR_NUM_EVTACKCMD;
2419 struct mpi3mr_drv_cmd *drv_cmd = cmdparam;
2420 struct delayed_evt_ack_node *delayed_evtack = NULL;
2421 
2422 if (drv_cmd) {
2423 dprint_event_th(mrioc,
2424 "sending delayed event ack in the top half for event(0x%02x), event_ctx(0x%08x)\n",
2425 event, event_ctx);
2426 goto issue_cmd;
2427 }
2428 dprint_event_th(mrioc,
2429 "sending event ack in the top half for event(0x%02x), event_ctx(0x%08x)\n",
2430 event, event_ctx);
2431 do {
2432 cmd_idx = find_first_zero_bit(mrioc->evtack_cmds_bitmap,
2433 MPI3MR_NUM_EVTACKCMD);
2434 if (cmd_idx < MPI3MR_NUM_EVTACKCMD) {
2435 if (!test_and_set_bit(cmd_idx,
2436 mrioc->evtack_cmds_bitmap))
2437 break;
2438 cmd_idx = MPI3MR_NUM_EVTACKCMD;
2439 }
2440 } while (retrycount--);
2441 
2442 if (cmd_idx >= MPI3MR_NUM_EVTACKCMD) {
2443 delayed_evtack = kzalloc(sizeof(*delayed_evtack),
2444 GFP_ATOMIC);
2445 if (!delayed_evtack)
2446 return;
2447 INIT_LIST_HEAD(&delayed_evtack->list);
2448 delayed_evtack->event = event;
2449 delayed_evtack->event_ctx = event_ctx;
2450 list_add_tail(&delayed_evtack->list,
2451 &mrioc->delayed_evtack_cmds_list);
2452 dprint_event_th(mrioc,
2453 "event ack in the top half for event(0x%02x), event_ctx(0x%08x) is postponed\n",
2454 event, event_ctx);
2455 return;
2456 }
2457 drv_cmd = &mrioc->evtack_cmds[cmd_idx];
2458 
2459 issue_cmd:
2460 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN;
2461 
2462 memset(&evtack_req, 0, sizeof(evtack_req));
2463 if (drv_cmd->state & MPI3MR_CMD_PENDING) {
2464 dprint_event_th(mrioc,
2465 "sending event ack failed due to command in use\n");
2466 goto out;
2467 }
2468 drv_cmd->state = MPI3MR_CMD_PENDING;
2469
drv_cmd->is_waiting = 0; 2470 drv_cmd->callback = mpi3mr_complete_evt_ack; 2471 evtack_req.host_tag = cpu_to_le16(drv_cmd->host_tag); 2472 evtack_req.function = MPI3_FUNCTION_EVENT_ACK; 2473 evtack_req.event = event; 2474 evtack_req.event_context = cpu_to_le32(event_ctx); 2475 retval = mpi3mr_admin_request_post(mrioc, &evtack_req, 2476 sizeof(evtack_req), 1); 2477 if (retval) { 2478 dprint_event_th(mrioc, 2479 "posting event ack request is failed\n"); 2480 goto out_failed; 2481 } 2482 2483 dprint_event_th(mrioc, 2484 "event ack in the top half for event(0x%02x), event_ctx(0x%08x) is posted\n", 2485 event, event_ctx); 2486 out: 2487 return; 2488 out_failed: 2489 drv_cmd->state = MPI3MR_CMD_NOTUSED; 2490 drv_cmd->callback = NULL; 2491 clear_bit(cmd_idx, mrioc->evtack_cmds_bitmap); 2492 } 2493 2494 /** 2495 * mpi3mr_pcietopochg_evt_th - PCIETopologyChange evt tophalf 2496 * @mrioc: Adapter instance reference 2497 * @event_reply: event data 2498 * 2499 * Checks for the reason code and based on that either block I/O 2500 * to device, or unblock I/O to the device, or start the device 2501 * removal handshake with reason as remove with the firmware for 2502 * PCIe devices. 2503 * 2504 * Return: Nothing 2505 */ 2506 static void mpi3mr_pcietopochg_evt_th(struct mpi3mr_ioc *mrioc, 2507 struct mpi3_event_notification_reply *event_reply) 2508 { 2509 struct mpi3_event_data_pcie_topology_change_list *topo_evt = 2510 (struct mpi3_event_data_pcie_topology_change_list *)event_reply->event_data; 2511 int i; 2512 u16 handle; 2513 u8 reason_code; 2514 struct mpi3mr_tgt_dev *tgtdev = NULL; 2515 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL; 2516 2517 for (i = 0; i < topo_evt->num_entries; i++) { 2518 handle = le16_to_cpu(topo_evt->port_entry[i].attached_dev_handle); 2519 if (!handle) 2520 continue; 2521 reason_code = topo_evt->port_entry[i].port_status; 2522 scsi_tgt_priv_data = NULL; 2523 tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle); 2524 if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata) 2525 scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *) 2526 tgtdev->starget->hostdata; 2527 switch (reason_code) { 2528 case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING: 2529 if (scsi_tgt_priv_data) { 2530 scsi_tgt_priv_data->dev_removed = 1; 2531 scsi_tgt_priv_data->dev_removedelay = 0; 2532 atomic_set(&scsi_tgt_priv_data->block_io, 0); 2533 } 2534 mpi3mr_dev_rmhs_send_tm(mrioc, handle, NULL, 2535 MPI3_CTRL_OP_REMOVE_DEVICE); 2536 break; 2537 case MPI3_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING: 2538 if (scsi_tgt_priv_data) { 2539 scsi_tgt_priv_data->dev_removedelay = 1; 2540 atomic_inc(&scsi_tgt_priv_data->block_io); 2541 } 2542 break; 2543 case MPI3_EVENT_PCIE_TOPO_PS_RESPONDING: 2544 if (scsi_tgt_priv_data && 2545 scsi_tgt_priv_data->dev_removedelay) { 2546 scsi_tgt_priv_data->dev_removedelay = 0; 2547 atomic_dec_if_positive 2548 (&scsi_tgt_priv_data->block_io); 2549 } 2550 break; 2551 case MPI3_EVENT_PCIE_TOPO_PS_PORT_CHANGED: 2552 default: 2553 break; 2554 } 2555 if (tgtdev) 2556 mpi3mr_tgtdev_put(tgtdev); 2557 } 2558 } 2559 2560 /** 2561 * mpi3mr_sastopochg_evt_th - SASTopologyChange evt tophalf 2562 * @mrioc: Adapter instance reference 2563 * @event_reply: event data 2564 * 2565 * Checks for the reason code and based on that either block I/O 2566 * to device, or unblock I/O to the device, or start the device 2567 * removal handshake with reason as remove with the firmware for 2568 * SAS/SATA devices. 
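*
* As a rough sketch of the mapping below: RC_TARG_NOT_RESPONDING
* starts the removal handshake, RC_DELAY_NOT_RESPONDING blocks I/O
* (block_io is incremented), and RC_RESPONDING unblocks I/O that a
* prior delay had blocked.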
2569 * 2570 * Return: Nothing 2571 */ 2572 static void mpi3mr_sastopochg_evt_th(struct mpi3mr_ioc *mrioc, 2573 struct mpi3_event_notification_reply *event_reply) 2574 { 2575 struct mpi3_event_data_sas_topology_change_list *topo_evt = 2576 (struct mpi3_event_data_sas_topology_change_list *)event_reply->event_data; 2577 int i; 2578 u16 handle; 2579 u8 reason_code; 2580 struct mpi3mr_tgt_dev *tgtdev = NULL; 2581 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL; 2582 2583 for (i = 0; i < topo_evt->num_entries; i++) { 2584 handle = le16_to_cpu(topo_evt->phy_entry[i].attached_dev_handle); 2585 if (!handle) 2586 continue; 2587 reason_code = topo_evt->phy_entry[i].status & 2588 MPI3_EVENT_SAS_TOPO_PHY_RC_MASK; 2589 scsi_tgt_priv_data = NULL; 2590 tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle); 2591 if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata) 2592 scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *) 2593 tgtdev->starget->hostdata; 2594 switch (reason_code) { 2595 case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING: 2596 if (scsi_tgt_priv_data) { 2597 scsi_tgt_priv_data->dev_removed = 1; 2598 scsi_tgt_priv_data->dev_removedelay = 0; 2599 atomic_set(&scsi_tgt_priv_data->block_io, 0); 2600 } 2601 mpi3mr_dev_rmhs_send_tm(mrioc, handle, NULL, 2602 MPI3_CTRL_OP_REMOVE_DEVICE); 2603 break; 2604 case MPI3_EVENT_SAS_TOPO_PHY_RC_DELAY_NOT_RESPONDING: 2605 if (scsi_tgt_priv_data) { 2606 scsi_tgt_priv_data->dev_removedelay = 1; 2607 atomic_inc(&scsi_tgt_priv_data->block_io); 2608 } 2609 break; 2610 case MPI3_EVENT_SAS_TOPO_PHY_RC_RESPONDING: 2611 if (scsi_tgt_priv_data && 2612 scsi_tgt_priv_data->dev_removedelay) { 2613 scsi_tgt_priv_data->dev_removedelay = 0; 2614 atomic_dec_if_positive 2615 (&scsi_tgt_priv_data->block_io); 2616 } 2617 break; 2618 case MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED: 2619 default: 2620 break; 2621 } 2622 if (tgtdev) 2623 mpi3mr_tgtdev_put(tgtdev); 2624 } 2625 } 2626 2627 /** 2628 * mpi3mr_devstatuschg_evt_th - DeviceStatusChange evt tophalf 2629 * @mrioc: Adapter instance reference 2630 * @event_reply: event data 2631 * 2632 * Checks for the reason code and based on that either block I/O 2633 * to device, or unblock I/O to the device, or start the device 2634 * removal handshake with reason as remove/hide acknowledgment 2635 * with the firmware. 
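*
* For example, MPI3_EVENT_DEV_STAT_RC_HIDDEN marks the device as
* deleted and hidden and starts the hidden-acknowledgment
* handshake, while MPI3_EVENT_DEV_STAT_RC_VD_NOT_RESPONDING marks
* it as deleted and starts the remove handshake.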
2636 *
2637 * Return: Nothing
2638 */
2639 static void mpi3mr_devstatuschg_evt_th(struct mpi3mr_ioc *mrioc,
2640 struct mpi3_event_notification_reply *event_reply)
2641 {
2642 u16 dev_handle = 0;
2643 u8 ublock = 0, block = 0, hide = 0, delete = 0, remove = 0;
2644 struct mpi3mr_tgt_dev *tgtdev = NULL;
2645 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL;
2646 struct mpi3_event_data_device_status_change *evtdata =
2647 (struct mpi3_event_data_device_status_change *)event_reply->event_data;
2648 
2649 if (mrioc->stop_drv_processing)
2650 goto out;
2651 
2652 dev_handle = le16_to_cpu(evtdata->dev_handle);
2653 
2654 switch (evtdata->reason_code) {
2655 case MPI3_EVENT_DEV_STAT_RC_INT_DEVICE_RESET_STRT:
2656 case MPI3_EVENT_DEV_STAT_RC_INT_IT_NEXUS_RESET_STRT:
2657 block = 1;
2658 break;
2659 case MPI3_EVENT_DEV_STAT_RC_HIDDEN:
2660 delete = 1;
2661 hide = 1;
2662 break;
2663 case MPI3_EVENT_DEV_STAT_RC_VD_NOT_RESPONDING:
2664 delete = 1;
2665 remove = 1;
2666 break;
2667 case MPI3_EVENT_DEV_STAT_RC_INT_DEVICE_RESET_CMP:
2668 case MPI3_EVENT_DEV_STAT_RC_INT_IT_NEXUS_RESET_CMP:
2669 ublock = 1;
2670 break;
2671 default:
2672 break;
2673 }
2674 
2675 tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle);
2676 if (!tgtdev)
2677 goto out;
2678 if (hide)
2679 tgtdev->is_hidden = hide;
2680 if (tgtdev->starget && tgtdev->starget->hostdata) {
2681 scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *)
2682 tgtdev->starget->hostdata;
2683 if (block)
2684 atomic_inc(&scsi_tgt_priv_data->block_io);
2685 if (delete)
2686 scsi_tgt_priv_data->dev_removed = 1;
2687 if (ublock)
2688 atomic_dec_if_positive(&scsi_tgt_priv_data->block_io);
2689 }
2690 if (remove)
2691 mpi3mr_dev_rmhs_send_tm(mrioc, dev_handle, NULL,
2692 MPI3_CTRL_OP_REMOVE_DEVICE);
2693 if (hide)
2694 mpi3mr_dev_rmhs_send_tm(mrioc, dev_handle, NULL,
2695 MPI3_CTRL_OP_HIDDEN_ACK);
2696 
2697 out:
2698 if (tgtdev)
2699 mpi3mr_tgtdev_put(tgtdev);
2700 }
2701 
2702 /**
2703 * mpi3mr_preparereset_evt_th - Prepare for reset event tophalf
2704 * @mrioc: Adapter instance reference
2705 * @event_reply: event data
2706 *
2707 * Blocks and unblocks host level I/O based on the reason code.
2708 *
2709 * Return: Nothing
2710 */
2711 static void mpi3mr_preparereset_evt_th(struct mpi3mr_ioc *mrioc,
2712 struct mpi3_event_notification_reply *event_reply)
2713 {
2714 struct mpi3_event_data_prepare_for_reset *evtdata =
2715 (struct mpi3_event_data_prepare_for_reset *)event_reply->event_data;
2716 
2717 if (evtdata->reason_code == MPI3_EVENT_PREPARE_RESET_RC_START) {
2718 dprint_event_th(mrioc,
2719 "prepare for reset event top half with rc=start\n");
2720 if (mrioc->prepare_for_reset)
2721 return;
2722 mrioc->prepare_for_reset = 1;
2723 mrioc->prepare_for_reset_timeout_counter = 0;
2724 } else if (evtdata->reason_code == MPI3_EVENT_PREPARE_RESET_RC_ABORT) {
2725 dprint_event_th(mrioc,
2726 "prepare for reset top half with rc=abort\n");
2727 mrioc->prepare_for_reset = 0;
2728 mrioc->prepare_for_reset_timeout_counter = 0;
2729 }
2730 if ((event_reply->msg_flags & MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_MASK)
2731 == MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_REQUIRED)
2732 mpi3mr_send_event_ack(mrioc, event_reply->event, NULL,
2733 le32_to_cpu(event_reply->event_context));
2734 }
2735 
2736 /**
2737 * mpi3mr_energypackchg_evt_th - Energy pack change evt tophalf
2738 * @mrioc: Adapter instance reference
2739 * @event_reply: event data
2740 *
2741 * Identifies the new shutdown timeout value and updates it.
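*
* A reported timeout of zero is treated as invalid and ignored;
* otherwise the cached controller facts are overwritten in place,
* e.g. a (purely illustrative) new value of 120 would simply
* replace facts.shutdown_timeout.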
2742 *
2743 * Return: Nothing
2744 */
2745 static void mpi3mr_energypackchg_evt_th(struct mpi3mr_ioc *mrioc,
2746 struct mpi3_event_notification_reply *event_reply)
2747 {
2748 struct mpi3_event_data_energy_pack_change *evtdata =
2749 (struct mpi3_event_data_energy_pack_change *)event_reply->event_data;
2750 u16 shutdown_timeout = le16_to_cpu(evtdata->shutdown_timeout);
2751 
2752 if (shutdown_timeout <= 0) {
2753 ioc_warn(mrioc,
2754 "%s :Invalid Shutdown Timeout received = %d\n",
2755 __func__, shutdown_timeout);
2756 return;
2757 }
2758 
2759 ioc_info(mrioc,
2760 "%s :Previous Shutdown Timeout Value = %d New Shutdown Timeout Value = %d\n",
2761 __func__, mrioc->facts.shutdown_timeout, shutdown_timeout);
2762 mrioc->facts.shutdown_timeout = shutdown_timeout;
2763 }
2764 
2765 /**
2766 * mpi3mr_cablemgmt_evt_th - Cable management event tophalf
2767 * @mrioc: Adapter instance reference
2768 * @event_reply: event data
2769 *
2770 * Displays cable management event details.
2771 *
2772 * Return: Nothing
2773 */
2774 static void mpi3mr_cablemgmt_evt_th(struct mpi3mr_ioc *mrioc,
2775 struct mpi3_event_notification_reply *event_reply)
2776 {
2777 struct mpi3_event_data_cable_management *evtdata =
2778 (struct mpi3_event_data_cable_management *)event_reply->event_data;
2779 
2780 switch (evtdata->status) {
2781 case MPI3_EVENT_CABLE_MGMT_STATUS_INSUFFICIENT_POWER:
2782 {
2783 ioc_info(mrioc, "An active cable with receptacle_id %d cannot be powered.\n"
2784 "Devices connected to this cable are not detected.\n"
2785 "This cable requires %d mW of power.\n",
2786 evtdata->receptacle_id,
2787 le32_to_cpu(evtdata->active_cable_power_requirement));
2788 break;
2789 }
2790 case MPI3_EVENT_CABLE_MGMT_STATUS_DEGRADED:
2791 {
2792 ioc_info(mrioc, "A cable with receptacle_id %d is not running at optimal speed\n",
2793 evtdata->receptacle_id);
2794 break;
2795 }
2796 default:
2797 break;
2798 }
2799 }
2800 
2801 /**
2802 * mpi3mr_add_event_wait_for_device_refresh - Add Wait for Device Refresh Event
2803 * @mrioc: Adapter instance reference
2804 *
2805 * Add driver specific event to make sure that the driver won't process the
2806 * events until all the devices are refreshed during soft reset.
2807 *
2808 * Return: Nothing
2809 */
2810 void mpi3mr_add_event_wait_for_device_refresh(struct mpi3mr_ioc *mrioc)
2811 {
2812 struct mpi3mr_fwevt *fwevt = NULL;
2813 
2814 fwevt = mpi3mr_alloc_fwevt(0);
2815 if (!fwevt) {
2816 dprint_event_th(mrioc,
2817 "failed to schedule bottom half handler for event(0x%02x)\n",
2818 MPI3_EVENT_WAIT_FOR_DEVICES_TO_REFRESH);
2819 return;
2820 }
2821 fwevt->mrioc = mrioc;
2822 fwevt->event_id = MPI3_EVENT_WAIT_FOR_DEVICES_TO_REFRESH;
2823 fwevt->send_ack = 0;
2824 fwevt->process_evt = 1;
2825 fwevt->evt_ctx = 0;
2826 fwevt->event_data_size = 0;
2827 mpi3mr_fwevt_add_to_list(mrioc, fwevt);
2828 }
2829 
2830 /**
2831 * mpi3mr_os_handle_events - Firmware event handler
2832 * @mrioc: Adapter instance reference
2833 * @event_reply: event data
2834 *
2835 * Identifies whether the event has to be handled and acknowledged,
2836 * and either processes the event in the tophalf and/or schedules a
2837 * bottom half through mpi3mr_fwevt_worker.
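*
* Events that require only an acknowledgment (ack_req set, no
* bottom half processing) still get a firmware event allocated
* with process_evt = 0, so the ack itself is sent from the bottom
* half context.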
2838 * 2839 * Return: Nothing 2840 */ 2841 void mpi3mr_os_handle_events(struct mpi3mr_ioc *mrioc, 2842 struct mpi3_event_notification_reply *event_reply) 2843 { 2844 u16 evt_type, sz; 2845 struct mpi3mr_fwevt *fwevt = NULL; 2846 bool ack_req = 0, process_evt_bh = 0; 2847 2848 if (mrioc->stop_drv_processing) 2849 return; 2850 2851 if ((event_reply->msg_flags & MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_MASK) 2852 == MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_REQUIRED) 2853 ack_req = 1; 2854 2855 evt_type = event_reply->event; 2856 2857 switch (evt_type) { 2858 case MPI3_EVENT_DEVICE_ADDED: 2859 { 2860 struct mpi3_device_page0 *dev_pg0 = 2861 (struct mpi3_device_page0 *)event_reply->event_data; 2862 if (mpi3mr_create_tgtdev(mrioc, dev_pg0)) 2863 ioc_err(mrioc, 2864 "%s :Failed to add device in the device add event\n", 2865 __func__); 2866 else 2867 process_evt_bh = 1; 2868 break; 2869 } 2870 case MPI3_EVENT_DEVICE_STATUS_CHANGE: 2871 { 2872 process_evt_bh = 1; 2873 mpi3mr_devstatuschg_evt_th(mrioc, event_reply); 2874 break; 2875 } 2876 case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST: 2877 { 2878 process_evt_bh = 1; 2879 mpi3mr_sastopochg_evt_th(mrioc, event_reply); 2880 break; 2881 } 2882 case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST: 2883 { 2884 process_evt_bh = 1; 2885 mpi3mr_pcietopochg_evt_th(mrioc, event_reply); 2886 break; 2887 } 2888 case MPI3_EVENT_PREPARE_FOR_RESET: 2889 { 2890 mpi3mr_preparereset_evt_th(mrioc, event_reply); 2891 ack_req = 0; 2892 break; 2893 } 2894 case MPI3_EVENT_DEVICE_INFO_CHANGED: 2895 case MPI3_EVENT_LOG_DATA: 2896 case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE: 2897 case MPI3_EVENT_ENCL_DEVICE_ADDED: 2898 { 2899 process_evt_bh = 1; 2900 break; 2901 } 2902 case MPI3_EVENT_ENERGY_PACK_CHANGE: 2903 { 2904 mpi3mr_energypackchg_evt_th(mrioc, event_reply); 2905 break; 2906 } 2907 case MPI3_EVENT_CABLE_MGMT: 2908 { 2909 mpi3mr_cablemgmt_evt_th(mrioc, event_reply); 2910 break; 2911 } 2912 case MPI3_EVENT_SAS_DISCOVERY: 2913 case MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR: 2914 case MPI3_EVENT_SAS_BROADCAST_PRIMITIVE: 2915 case MPI3_EVENT_PCIE_ENUMERATION: 2916 break; 2917 default: 2918 ioc_info(mrioc, "%s :event 0x%02x is not handled\n", 2919 __func__, evt_type); 2920 break; 2921 } 2922 if (process_evt_bh || ack_req) { 2923 sz = event_reply->event_data_length * 4; 2924 fwevt = mpi3mr_alloc_fwevt(sz); 2925 if (!fwevt) { 2926 ioc_info(mrioc, "%s :failure at %s:%d/%s()!\n", 2927 __func__, __FILE__, __LINE__, __func__); 2928 return; 2929 } 2930 2931 memcpy(fwevt->event_data, event_reply->event_data, sz); 2932 fwevt->mrioc = mrioc; 2933 fwevt->event_id = evt_type; 2934 fwevt->send_ack = ack_req; 2935 fwevt->process_evt = process_evt_bh; 2936 fwevt->evt_ctx = le32_to_cpu(event_reply->event_context); 2937 mpi3mr_fwevt_add_to_list(mrioc, fwevt); 2938 } 2939 } 2940 2941 /** 2942 * mpi3mr_setup_eedp - Setup EEDP information in MPI3 SCSI IO 2943 * @mrioc: Adapter instance reference 2944 * @scmd: SCSI command reference 2945 * @scsiio_req: MPI3 SCSI IO request 2946 * 2947 * Identifies the protection information flags from the SCSI 2948 * command and set appropriate flags in the MPI3 SCSI IO 2949 * request. 
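*
* Illustrative example: for SCSI_PROT_WRITE_PASS with an IP
* checksum guard, eedp_flags becomes EEDP_OP_CHECK_REGEN plus
* HOST_GUARD_IP_CHKSUM (and the CHK_* bits requested in
* prot_flags), and user_data_size is chosen from
* scsi_prot_interval(), e.g. MPI3_EEDP_UDS_512 for 512-byte
* sectors.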
2950 * 2951 * Return: Nothing 2952 */ 2953 static void mpi3mr_setup_eedp(struct mpi3mr_ioc *mrioc, 2954 struct scsi_cmnd *scmd, struct mpi3_scsi_io_request *scsiio_req) 2955 { 2956 u16 eedp_flags = 0; 2957 unsigned char prot_op = scsi_get_prot_op(scmd); 2958 2959 switch (prot_op) { 2960 case SCSI_PROT_NORMAL: 2961 return; 2962 case SCSI_PROT_READ_STRIP: 2963 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK_REMOVE; 2964 break; 2965 case SCSI_PROT_WRITE_INSERT: 2966 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_INSERT; 2967 break; 2968 case SCSI_PROT_READ_INSERT: 2969 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_INSERT; 2970 scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID; 2971 break; 2972 case SCSI_PROT_WRITE_STRIP: 2973 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK_REMOVE; 2974 scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID; 2975 break; 2976 case SCSI_PROT_READ_PASS: 2977 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK; 2978 scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID; 2979 break; 2980 case SCSI_PROT_WRITE_PASS: 2981 if (scmd->prot_flags & SCSI_PROT_IP_CHECKSUM) { 2982 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK_REGEN; 2983 scsiio_req->sgl[0].eedp.application_tag_translation_mask = 2984 0xffff; 2985 } else 2986 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK; 2987 2988 scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID; 2989 break; 2990 default: 2991 return; 2992 } 2993 2994 if (scmd->prot_flags & SCSI_PROT_GUARD_CHECK) 2995 eedp_flags |= MPI3_EEDPFLAGS_CHK_GUARD; 2996 2997 if (scmd->prot_flags & SCSI_PROT_IP_CHECKSUM) 2998 eedp_flags |= MPI3_EEDPFLAGS_HOST_GUARD_IP_CHKSUM; 2999 3000 if (scmd->prot_flags & SCSI_PROT_REF_CHECK) { 3001 eedp_flags |= MPI3_EEDPFLAGS_CHK_REF_TAG | 3002 MPI3_EEDPFLAGS_INCR_PRI_REF_TAG; 3003 scsiio_req->cdb.eedp32.primary_reference_tag = 3004 cpu_to_be32(scsi_prot_ref_tag(scmd)); 3005 } 3006 3007 if (scmd->prot_flags & SCSI_PROT_REF_INCREMENT) 3008 eedp_flags |= MPI3_EEDPFLAGS_INCR_PRI_REF_TAG; 3009 3010 eedp_flags |= MPI3_EEDPFLAGS_ESC_MODE_APPTAG_DISABLE; 3011 3012 switch (scsi_prot_interval(scmd)) { 3013 case 512: 3014 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_512; 3015 break; 3016 case 520: 3017 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_520; 3018 break; 3019 case 4080: 3020 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4080; 3021 break; 3022 case 4088: 3023 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4088; 3024 break; 3025 case 4096: 3026 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4096; 3027 break; 3028 case 4104: 3029 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4104; 3030 break; 3031 case 4160: 3032 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4160; 3033 break; 3034 default: 3035 break; 3036 } 3037 3038 scsiio_req->sgl[0].eedp.eedp_flags = cpu_to_le16(eedp_flags); 3039 scsiio_req->sgl[0].eedp.flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_EXTENDED; 3040 } 3041 3042 /** 3043 * mpi3mr_build_sense_buffer - Map sense information 3044 * @desc: Sense type 3045 * @buf: Sense buffer to populate 3046 * @key: Sense key 3047 * @asc: Additional sense code 3048 * @ascq: Additional sense code qualifier 3049 * 3050 * Maps the given sense information into either descriptor or 3051 * fixed format sense data. 
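*
* For instance, the fixed format (desc == 0) places response code
* 0x70 in buf[0], the sense key in buf[2], additional length 0xa
* in buf[7], and the asc/ascq pair in buf[12]/buf[13].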
3052 *
3053 * Return: Nothing
3054 */
3055 static inline void mpi3mr_build_sense_buffer(int desc, u8 *buf, u8 key,
3056 u8 asc, u8 ascq)
3057 {
3058 if (desc) {
3059 buf[0] = 0x72; /* descriptor, current */
3060 buf[1] = key;
3061 buf[2] = asc;
3062 buf[3] = ascq;
3063 buf[7] = 0;
3064 } else {
3065 buf[0] = 0x70; /* fixed, current */
3066 buf[2] = key;
3067 buf[7] = 0xa;
3068 buf[12] = asc;
3069 buf[13] = ascq;
3070 }
3071 }
3072 
3073 /**
3074 * mpi3mr_map_eedp_error - Map EEDP errors from IOC status
3075 * @scmd: SCSI command reference
3076 * @ioc_status: status of MPI3 request
3077 *
3078 * Maps the EEDP error status of the SCSI IO request to sense
3079 * data.
3080 *
3081 * Return: Nothing
3082 */
3083 static void mpi3mr_map_eedp_error(struct scsi_cmnd *scmd,
3084 u16 ioc_status)
3085 {
3086 u8 ascq = 0;
3087 
3088 switch (ioc_status) {
3089 case MPI3_IOCSTATUS_EEDP_GUARD_ERROR:
3090 ascq = 0x01;
3091 break;
3092 case MPI3_IOCSTATUS_EEDP_APP_TAG_ERROR:
3093 ascq = 0x02;
3094 break;
3095 case MPI3_IOCSTATUS_EEDP_REF_TAG_ERROR:
3096 ascq = 0x03;
3097 break;
3098 default:
3099 ascq = 0x00;
3100 break;
3101 }
3102 
3103 mpi3mr_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
3104 0x10, ascq);
3105 scmd->result = (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
3106 }
3107 
3108 /**
3109 * mpi3mr_process_op_reply_desc - reply descriptor handler
3110 * @mrioc: Adapter instance reference
3111 * @reply_desc: Operational reply descriptor
3112 * @reply_dma: placeholder for reply DMA address
3113 * @qidx: Operational queue index
3114 *
3115 * Processes the operational reply descriptor and identifies the
3116 * descriptor type. Based on the descriptor, maps the MPI3 request
3117 * status to a SCSI command status and calls the scsi_done
3118 * callback.
3119 *
3120 * Return: Nothing
3121 */
3122 void mpi3mr_process_op_reply_desc(struct mpi3mr_ioc *mrioc,
3123 struct mpi3_default_reply_descriptor *reply_desc, u64 *reply_dma, u16 qidx)
3124 {
3125 u16 reply_desc_type, host_tag = 0;
3126 u16 ioc_status = MPI3_IOCSTATUS_SUCCESS;
3127 u32 ioc_loginfo = 0;
3128 struct mpi3_status_reply_descriptor *status_desc = NULL;
3129 struct mpi3_address_reply_descriptor *addr_desc = NULL;
3130 struct mpi3_success_reply_descriptor *success_desc = NULL;
3131 struct mpi3_scsi_io_reply *scsi_reply = NULL;
3132 struct scsi_cmnd *scmd = NULL;
3133 struct scmd_priv *priv = NULL;
3134 u8 *sense_buf = NULL;
3135 u8 scsi_state = 0, scsi_status = 0, sense_state = 0;
3136 u32 xfer_count = 0, sense_count = 0, resp_data = 0;
3137 u16 dev_handle = 0xFFFF;
3138 struct scsi_sense_hdr sshdr;
3139 struct mpi3mr_stgt_priv_data *stgt_priv_data = NULL;
3140 struct mpi3mr_sdev_priv_data *sdev_priv_data = NULL;
3141 u32 ioc_pend_data_len = 0, tg_pend_data_len = 0, data_len_blks = 0;
3142 struct mpi3mr_throttle_group_info *tg = NULL;
3143 u8 throttle_enabled_dev = 0;
3144 
3145 *reply_dma = 0;
3146 reply_desc_type = le16_to_cpu(reply_desc->reply_flags) &
3147 MPI3_REPLY_DESCRIPT_FLAGS_TYPE_MASK;
3148 switch (reply_desc_type) {
3149 case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_STATUS:
3150 status_desc = (struct mpi3_status_reply_descriptor *)reply_desc;
3151 host_tag = le16_to_cpu(status_desc->host_tag);
3152 ioc_status = le16_to_cpu(status_desc->ioc_status);
3153 if (ioc_status &
3154 MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
3155 ioc_loginfo = le32_to_cpu(status_desc->ioc_log_info);
3156 ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
3157 break;
3158 case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_ADDRESS_REPLY:
3159 addr_desc = (struct
mpi3_address_reply_descriptor *)reply_desc; 3160 *reply_dma = le64_to_cpu(addr_desc->reply_frame_address); 3161 scsi_reply = mpi3mr_get_reply_virt_addr(mrioc, 3162 *reply_dma); 3163 if (!scsi_reply) { 3164 panic("%s: scsi_reply is NULL, this shouldn't happen\n", 3165 mrioc->name); 3166 goto out; 3167 } 3168 host_tag = le16_to_cpu(scsi_reply->host_tag); 3169 ioc_status = le16_to_cpu(scsi_reply->ioc_status); 3170 scsi_status = scsi_reply->scsi_status; 3171 scsi_state = scsi_reply->scsi_state; 3172 dev_handle = le16_to_cpu(scsi_reply->dev_handle); 3173 sense_state = (scsi_state & MPI3_SCSI_STATE_SENSE_MASK); 3174 xfer_count = le32_to_cpu(scsi_reply->transfer_count); 3175 sense_count = le32_to_cpu(scsi_reply->sense_count); 3176 resp_data = le32_to_cpu(scsi_reply->response_data); 3177 sense_buf = mpi3mr_get_sensebuf_virt_addr(mrioc, 3178 le64_to_cpu(scsi_reply->sense_data_buffer_address)); 3179 if (ioc_status & 3180 MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL) 3181 ioc_loginfo = le32_to_cpu(scsi_reply->ioc_log_info); 3182 ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK; 3183 if (sense_state == MPI3_SCSI_STATE_SENSE_BUFF_Q_EMPTY) 3184 panic("%s: Ran out of sense buffers\n", mrioc->name); 3185 break; 3186 case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SUCCESS: 3187 success_desc = (struct mpi3_success_reply_descriptor *)reply_desc; 3188 host_tag = le16_to_cpu(success_desc->host_tag); 3189 break; 3190 default: 3191 break; 3192 } 3193 scmd = mpi3mr_scmd_from_host_tag(mrioc, host_tag, qidx); 3194 if (!scmd) { 3195 panic("%s: Cannot Identify scmd for host_tag 0x%x\n", 3196 mrioc->name, host_tag); 3197 goto out; 3198 } 3199 priv = scsi_cmd_priv(scmd); 3200 3201 data_len_blks = scsi_bufflen(scmd) >> 9; 3202 sdev_priv_data = scmd->device->hostdata; 3203 if (sdev_priv_data) { 3204 stgt_priv_data = sdev_priv_data->tgt_priv_data; 3205 if (stgt_priv_data) { 3206 tg = stgt_priv_data->throttle_group; 3207 throttle_enabled_dev = 3208 stgt_priv_data->io_throttle_enabled; 3209 dev_handle = stgt_priv_data->dev_handle; 3210 } 3211 } 3212 if (unlikely((data_len_blks >= mrioc->io_throttle_data_length) && 3213 throttle_enabled_dev)) { 3214 ioc_pend_data_len = atomic_sub_return(data_len_blks, 3215 &mrioc->pend_large_data_sz); 3216 if (tg) { 3217 tg_pend_data_len = atomic_sub_return(data_len_blks, 3218 &tg->pend_large_data_sz); 3219 if (tg->io_divert && ((ioc_pend_data_len <= 3220 mrioc->io_throttle_low) && 3221 (tg_pend_data_len <= tg->low))) { 3222 tg->io_divert = 0; 3223 mpi3mr_set_io_divert_for_all_vd_in_tg( 3224 mrioc, tg, 0); 3225 } 3226 } else { 3227 if (ioc_pend_data_len <= mrioc->io_throttle_low) 3228 stgt_priv_data->io_divert = 0; 3229 } 3230 } else if (unlikely((stgt_priv_data && stgt_priv_data->io_divert))) { 3231 ioc_pend_data_len = atomic_read(&mrioc->pend_large_data_sz); 3232 if (!tg) { 3233 if (ioc_pend_data_len <= mrioc->io_throttle_low) 3234 stgt_priv_data->io_divert = 0; 3235 3236 } else if (ioc_pend_data_len <= mrioc->io_throttle_low) { 3237 tg_pend_data_len = atomic_read(&tg->pend_large_data_sz); 3238 if (tg->io_divert && (tg_pend_data_len <= tg->low)) { 3239 tg->io_divert = 0; 3240 mpi3mr_set_io_divert_for_all_vd_in_tg( 3241 mrioc, tg, 0); 3242 } 3243 } 3244 } 3245 3246 if (success_desc) { 3247 scmd->result = DID_OK << 16; 3248 goto out_success; 3249 } 3250 3251 scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_count); 3252 if (ioc_status == MPI3_IOCSTATUS_SCSI_DATA_UNDERRUN && 3253 xfer_count == 0 && (scsi_status == MPI3_SCSI_STATUS_BUSY || 3254 scsi_status == 
MPI3_SCSI_STATUS_RESERVATION_CONFLICT || 3255 scsi_status == MPI3_SCSI_STATUS_TASK_SET_FULL)) 3256 ioc_status = MPI3_IOCSTATUS_SUCCESS; 3257 3258 if ((sense_state == MPI3_SCSI_STATE_SENSE_VALID) && sense_count && 3259 sense_buf) { 3260 u32 sz = min_t(u32, SCSI_SENSE_BUFFERSIZE, sense_count); 3261 3262 memcpy(scmd->sense_buffer, sense_buf, sz); 3263 } 3264 3265 switch (ioc_status) { 3266 case MPI3_IOCSTATUS_BUSY: 3267 case MPI3_IOCSTATUS_INSUFFICIENT_RESOURCES: 3268 scmd->result = SAM_STAT_BUSY; 3269 break; 3270 case MPI3_IOCSTATUS_SCSI_DEVICE_NOT_THERE: 3271 scmd->result = DID_NO_CONNECT << 16; 3272 break; 3273 case MPI3_IOCSTATUS_SCSI_IOC_TERMINATED: 3274 scmd->result = DID_SOFT_ERROR << 16; 3275 break; 3276 case MPI3_IOCSTATUS_SCSI_TASK_TERMINATED: 3277 case MPI3_IOCSTATUS_SCSI_EXT_TERMINATED: 3278 scmd->result = DID_RESET << 16; 3279 break; 3280 case MPI3_IOCSTATUS_SCSI_RESIDUAL_MISMATCH: 3281 if ((xfer_count == 0) || (scmd->underflow > xfer_count)) 3282 scmd->result = DID_SOFT_ERROR << 16; 3283 else 3284 scmd->result = (DID_OK << 16) | scsi_status; 3285 break; 3286 case MPI3_IOCSTATUS_SCSI_DATA_UNDERRUN: 3287 scmd->result = (DID_OK << 16) | scsi_status; 3288 if (sense_state == MPI3_SCSI_STATE_SENSE_VALID) 3289 break; 3290 if (xfer_count < scmd->underflow) { 3291 if (scsi_status == SAM_STAT_BUSY) 3292 scmd->result = SAM_STAT_BUSY; 3293 else 3294 scmd->result = DID_SOFT_ERROR << 16; 3295 } else if ((scsi_state & (MPI3_SCSI_STATE_NO_SCSI_STATUS)) || 3296 (sense_state != MPI3_SCSI_STATE_SENSE_NOT_AVAILABLE)) 3297 scmd->result = DID_SOFT_ERROR << 16; 3298 else if (scsi_state & MPI3_SCSI_STATE_TERMINATED) 3299 scmd->result = DID_RESET << 16; 3300 break; 3301 case MPI3_IOCSTATUS_SCSI_DATA_OVERRUN: 3302 scsi_set_resid(scmd, 0); 3303 fallthrough; 3304 case MPI3_IOCSTATUS_SCSI_RECOVERED_ERROR: 3305 case MPI3_IOCSTATUS_SUCCESS: 3306 scmd->result = (DID_OK << 16) | scsi_status; 3307 if ((scsi_state & (MPI3_SCSI_STATE_NO_SCSI_STATUS)) || 3308 (sense_state == MPI3_SCSI_STATE_SENSE_FAILED) || 3309 (sense_state == MPI3_SCSI_STATE_SENSE_BUFF_Q_EMPTY)) 3310 scmd->result = DID_SOFT_ERROR << 16; 3311 else if (scsi_state & MPI3_SCSI_STATE_TERMINATED) 3312 scmd->result = DID_RESET << 16; 3313 break; 3314 case MPI3_IOCSTATUS_EEDP_GUARD_ERROR: 3315 case MPI3_IOCSTATUS_EEDP_REF_TAG_ERROR: 3316 case MPI3_IOCSTATUS_EEDP_APP_TAG_ERROR: 3317 mpi3mr_map_eedp_error(scmd, ioc_status); 3318 break; 3319 case MPI3_IOCSTATUS_SCSI_PROTOCOL_ERROR: 3320 case MPI3_IOCSTATUS_INVALID_FUNCTION: 3321 case MPI3_IOCSTATUS_INVALID_SGL: 3322 case MPI3_IOCSTATUS_INTERNAL_ERROR: 3323 case MPI3_IOCSTATUS_INVALID_FIELD: 3324 case MPI3_IOCSTATUS_INVALID_STATE: 3325 case MPI3_IOCSTATUS_SCSI_IO_DATA_ERROR: 3326 case MPI3_IOCSTATUS_SCSI_TASK_MGMT_FAILED: 3327 case MPI3_IOCSTATUS_INSUFFICIENT_POWER: 3328 default: 3329 scmd->result = DID_SOFT_ERROR << 16; 3330 break; 3331 } 3332 3333 if (scmd->result != (DID_OK << 16) && (scmd->cmnd[0] != ATA_12) && 3334 (scmd->cmnd[0] != ATA_16) && 3335 mrioc->logging_level & MPI3_DEBUG_SCSI_ERROR) { 3336 ioc_info(mrioc, "%s :scmd->result 0x%x\n", __func__, 3337 scmd->result); 3338 scsi_print_command(scmd); 3339 ioc_info(mrioc, 3340 "%s :Command issued to handle 0x%02x returned with error 0x%04x loginfo 0x%08x, qid %d\n", 3341 __func__, dev_handle, ioc_status, ioc_loginfo, 3342 priv->req_q_idx + 1); 3343 ioc_info(mrioc, 3344 " host_tag %d scsi_state 0x%02x scsi_status 0x%02x, xfer_cnt %d resp_data 0x%x\n", 3345 host_tag, scsi_state, scsi_status, xfer_count, resp_data); 3346 if (sense_buf) { 3347 
scsi_normalize_sense(sense_buf, sense_count, &sshdr);
3348 ioc_info(mrioc,
3349 "%s :sense_count 0x%x, sense_key 0x%x ASC 0x%x, ASCQ 0x%x\n",
3350 __func__, sense_count, sshdr.sense_key,
3351 sshdr.asc, sshdr.ascq);
3352 }
3353 }
3354 out_success:
3355 if (priv->meta_sg_valid) {
3356 dma_unmap_sg(&mrioc->pdev->dev, scsi_prot_sglist(scmd),
3357 scsi_prot_sg_count(scmd), scmd->sc_data_direction);
3358 }
3359 mpi3mr_clear_scmd_priv(mrioc, scmd);
3360 scsi_dma_unmap(scmd);
3361 scsi_done(scmd);
3362 out:
3363 if (sense_buf)
3364 mpi3mr_repost_sense_buf(mrioc,
3365 le64_to_cpu(scsi_reply->sense_data_buffer_address));
3366 }
3367 
3368 /**
3369 * mpi3mr_get_chain_idx - get free chain buffer index
3370 * @mrioc: Adapter instance reference
3371 *
3372 * Try to get a free chain buffer index from the free pool.
3373 *
3374 * Return: -1 on failure or the free chain buffer index
3375 */
3376 static int mpi3mr_get_chain_idx(struct mpi3mr_ioc *mrioc)
3377 {
3378 u8 retry_count = 5;
3379 int cmd_idx = -1;
3380 unsigned long flags;
3381 
3382 spin_lock_irqsave(&mrioc->chain_buf_lock, flags);
3383 do {
3384 cmd_idx = find_first_zero_bit(mrioc->chain_bitmap,
3385 mrioc->chain_buf_count);
3386 if (cmd_idx < mrioc->chain_buf_count) {
3387 set_bit(cmd_idx, mrioc->chain_bitmap);
3388 break;
3389 }
3390 cmd_idx = -1;
3391 } while (retry_count--);
3392 spin_unlock_irqrestore(&mrioc->chain_buf_lock, flags);
3393 return cmd_idx;
3394 }
3395 
3396 /**
3397 * mpi3mr_prepare_sg_scmd - build scatter gather list
3398 * @mrioc: Adapter instance reference
3399 * @scmd: SCSI command reference
3400 * @scsiio_req: MPI3 SCSI IO request
3401 *
3402 * This function maps the SCSI command's data and protection
3403 * SGEs to MPI request SGEs. If required, an additional 4K chain
3404 * buffer is used to send the SGEs.
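*
* Rough sketch (illustrative numbers): with room for eight SGEs in
* the request frame and a 20-entry scatterlist, the first seven
* entries are emitted as simple SGEs, followed by one LAST_CHAIN
* SGE pointing at a chain buffer that carries the remaining 13.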
3405 * 3406 * Return: 0 on success, -ENOMEM on dma_map_sg failure 3407 */ 3408 static int mpi3mr_prepare_sg_scmd(struct mpi3mr_ioc *mrioc, 3409 struct scsi_cmnd *scmd, struct mpi3_scsi_io_request *scsiio_req) 3410 { 3411 dma_addr_t chain_dma; 3412 struct scatterlist *sg_scmd; 3413 void *sg_local, *chain; 3414 u32 chain_length; 3415 int sges_left, chain_idx; 3416 u32 sges_in_segment; 3417 u8 simple_sgl_flags; 3418 u8 simple_sgl_flags_last; 3419 u8 last_chain_sgl_flags; 3420 struct chain_element *chain_req; 3421 struct scmd_priv *priv = NULL; 3422 u32 meta_sg = le32_to_cpu(scsiio_req->flags) & 3423 MPI3_SCSIIO_FLAGS_DMAOPERATION_HOST_PI; 3424 3425 priv = scsi_cmd_priv(scmd); 3426 3427 simple_sgl_flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_SIMPLE | 3428 MPI3_SGE_FLAGS_DLAS_SYSTEM; 3429 simple_sgl_flags_last = simple_sgl_flags | 3430 MPI3_SGE_FLAGS_END_OF_LIST; 3431 last_chain_sgl_flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_LAST_CHAIN | 3432 MPI3_SGE_FLAGS_DLAS_SYSTEM; 3433 3434 if (meta_sg) 3435 sg_local = &scsiio_req->sgl[MPI3_SCSIIO_METASGL_INDEX]; 3436 else 3437 sg_local = &scsiio_req->sgl; 3438 3439 if (!scsiio_req->data_length && !meta_sg) { 3440 mpi3mr_build_zero_len_sge(sg_local); 3441 return 0; 3442 } 3443 3444 if (meta_sg) { 3445 sg_scmd = scsi_prot_sglist(scmd); 3446 sges_left = dma_map_sg(&mrioc->pdev->dev, 3447 scsi_prot_sglist(scmd), 3448 scsi_prot_sg_count(scmd), 3449 scmd->sc_data_direction); 3450 priv->meta_sg_valid = 1; /* To unmap meta sg DMA */ 3451 } else { 3452 sg_scmd = scsi_sglist(scmd); 3453 sges_left = scsi_dma_map(scmd); 3454 } 3455 3456 if (sges_left < 0) { 3457 sdev_printk(KERN_ERR, scmd->device, 3458 "scsi_dma_map failed: request for %d bytes!\n", 3459 scsi_bufflen(scmd)); 3460 return -ENOMEM; 3461 } 3462 if (sges_left > mrioc->max_sgl_entries) { 3463 sdev_printk(KERN_ERR, scmd->device, 3464 "scsi_dma_map returned unsupported sge count %d!\n", 3465 sges_left); 3466 return -ENOMEM; 3467 } 3468 3469 sges_in_segment = (mrioc->facts.op_req_sz - 3470 offsetof(struct mpi3_scsi_io_request, sgl)) / sizeof(struct mpi3_sge_common); 3471 3472 if (scsiio_req->sgl[0].eedp.flags == 3473 MPI3_SGE_FLAGS_ELEMENT_TYPE_EXTENDED && !meta_sg) { 3474 sg_local += sizeof(struct mpi3_sge_common); 3475 sges_in_segment--; 3476 /* Reserve 1st segment (scsiio_req->sgl[0]) for eedp */ 3477 } 3478 3479 if (scsiio_req->msg_flags == 3480 MPI3_SCSIIO_MSGFLAGS_METASGL_VALID && !meta_sg) { 3481 sges_in_segment--; 3482 /* Reserve last segment (scsiio_req->sgl[3]) for meta sg */ 3483 } 3484 3485 if (meta_sg) 3486 sges_in_segment = 1; 3487 3488 if (sges_left <= sges_in_segment) 3489 goto fill_in_last_segment; 3490 3491 /* fill in main message segment when there is a chain following */ 3492 while (sges_in_segment > 1) { 3493 mpi3mr_add_sg_single(sg_local, simple_sgl_flags, 3494 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd)); 3495 sg_scmd = sg_next(sg_scmd); 3496 sg_local += sizeof(struct mpi3_sge_common); 3497 sges_left--; 3498 sges_in_segment--; 3499 } 3500 3501 chain_idx = mpi3mr_get_chain_idx(mrioc); 3502 if (chain_idx < 0) 3503 return -1; 3504 chain_req = &mrioc->chain_sgl_list[chain_idx]; 3505 if (meta_sg) 3506 priv->meta_chain_idx = chain_idx; 3507 else 3508 priv->chain_idx = chain_idx; 3509 3510 chain = chain_req->addr; 3511 chain_dma = chain_req->dma_addr; 3512 sges_in_segment = sges_left; 3513 chain_length = sges_in_segment * sizeof(struct mpi3_sge_common); 3514 3515 mpi3mr_add_sg_single(sg_local, last_chain_sgl_flags, 3516 chain_length, chain_dma); 3517 3518 sg_local = chain; 3519 3520 fill_in_last_segment: 
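/*
 * Remaining SGEs land here: either the whole list when no chain
 * was needed, or the chained tail. The final entry is marked
 * END_OF_LIST via simple_sgl_flags_last.
 */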
3521 while (sges_left > 0) { 3522 if (sges_left == 1) 3523 mpi3mr_add_sg_single(sg_local, 3524 simple_sgl_flags_last, sg_dma_len(sg_scmd), 3525 sg_dma_address(sg_scmd)); 3526 else 3527 mpi3mr_add_sg_single(sg_local, simple_sgl_flags, 3528 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd)); 3529 sg_scmd = sg_next(sg_scmd); 3530 sg_local += sizeof(struct mpi3_sge_common); 3531 sges_left--; 3532 } 3533 3534 return 0; 3535 } 3536 3537 /** 3538 * mpi3mr_build_sg_scmd - build scatter gather list for SCSI IO 3539 * @mrioc: Adapter instance reference 3540 * @scmd: SCSI command reference 3541 * @scsiio_req: MPI3 SCSI IO request 3542 * 3543 * This function calls mpi3mr_prepare_sg_scmd for constructing 3544 * both data SGEs and protection information SGEs in the MPI 3545 * format from the SCSI Command as appropriate . 3546 * 3547 * Return: return value of mpi3mr_prepare_sg_scmd. 3548 */ 3549 static int mpi3mr_build_sg_scmd(struct mpi3mr_ioc *mrioc, 3550 struct scsi_cmnd *scmd, struct mpi3_scsi_io_request *scsiio_req) 3551 { 3552 int ret; 3553 3554 ret = mpi3mr_prepare_sg_scmd(mrioc, scmd, scsiio_req); 3555 if (ret) 3556 return ret; 3557 3558 if (scsiio_req->msg_flags == MPI3_SCSIIO_MSGFLAGS_METASGL_VALID) { 3559 /* There is a valid meta sg */ 3560 scsiio_req->flags |= 3561 cpu_to_le32(MPI3_SCSIIO_FLAGS_DMAOPERATION_HOST_PI); 3562 ret = mpi3mr_prepare_sg_scmd(mrioc, scmd, scsiio_req); 3563 } 3564 3565 return ret; 3566 } 3567 3568 /** 3569 * mpi3mr_tm_response_name - get TM response as a string 3570 * @resp_code: TM response code 3571 * 3572 * Convert known task management response code as a readable 3573 * string. 3574 * 3575 * Return: response code string. 3576 */ 3577 static const char *mpi3mr_tm_response_name(u8 resp_code) 3578 { 3579 char *desc; 3580 3581 switch (resp_code) { 3582 case MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE: 3583 desc = "task management request completed"; 3584 break; 3585 case MPI3_SCSITASKMGMT_RSPCODE_INVALID_FRAME: 3586 desc = "invalid frame"; 3587 break; 3588 case MPI3_SCSITASKMGMT_RSPCODE_TM_FUNCTION_NOT_SUPPORTED: 3589 desc = "task management request not supported"; 3590 break; 3591 case MPI3_SCSITASKMGMT_RSPCODE_TM_FAILED: 3592 desc = "task management request failed"; 3593 break; 3594 case MPI3_SCSITASKMGMT_RSPCODE_TM_SUCCEEDED: 3595 desc = "task management request succeeded"; 3596 break; 3597 case MPI3_SCSITASKMGMT_RSPCODE_TM_INVALID_LUN: 3598 desc = "invalid LUN"; 3599 break; 3600 case MPI3_SCSITASKMGMT_RSPCODE_TM_OVERLAPPED_TAG: 3601 desc = "overlapped tag attempted"; 3602 break; 3603 case MPI3_SCSITASKMGMT_RSPCODE_IO_QUEUED_ON_IOC: 3604 desc = "task queued, however not sent to target"; 3605 break; 3606 case MPI3_SCSITASKMGMT_RSPCODE_TM_NVME_DENIED: 3607 desc = "task management request denied by NVMe device"; 3608 break; 3609 default: 3610 desc = "unknown"; 3611 break; 3612 } 3613 3614 return desc; 3615 } 3616 3617 inline void mpi3mr_poll_pend_io_completions(struct mpi3mr_ioc *mrioc) 3618 { 3619 int i; 3620 int num_of_reply_queues = 3621 mrioc->num_op_reply_q + mrioc->op_reply_q_offset; 3622 3623 for (i = mrioc->op_reply_q_offset; i < num_of_reply_queues; i++) 3624 mpi3mr_process_op_reply_q(mrioc, 3625 mrioc->intr_info[i].op_reply_q); 3626 } 3627 3628 /** 3629 * mpi3mr_issue_tm - Issue Task Management request 3630 * @mrioc: Adapter instance reference 3631 * @tm_type: Task Management type 3632 * @handle: Device handle 3633 * @lun: lun ID 3634 * @htag: Host tag of the TM request 3635 * @timeout: TM timeout value 3636 * @drv_cmd: Internal command tracker 3637 * @resp_code: 
Response code placeholder
3638 * @scmd: SCSI command
3639 *
3640 * Issues a Task Management Request to the controller for the
3641 * specified target, lun and command, waits for its completion,
3642 * and checks the TM response. Recovers the TM by issuing a
3643 * controller reset if it times out.
3644 *
3645 * Return: 0 on success, non-zero on errors
3646 */
3647 int mpi3mr_issue_tm(struct mpi3mr_ioc *mrioc, u8 tm_type,
3648 u16 handle, uint lun, u16 htag, ulong timeout,
3649 struct mpi3mr_drv_cmd *drv_cmd,
3650 u8 *resp_code, struct scsi_cmnd *scmd)
3651 {
3652 struct mpi3_scsi_task_mgmt_request tm_req;
3653 struct mpi3_scsi_task_mgmt_reply *tm_reply = NULL;
3654 int retval = 0;
3655 struct mpi3mr_tgt_dev *tgtdev = NULL;
3656 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL;
3657 struct scmd_priv *cmd_priv = NULL;
3658 struct scsi_device *sdev = NULL;
3659 struct mpi3mr_sdev_priv_data *sdev_priv_data = NULL;
3660
3661 ioc_info(mrioc, "%s :Issue TM: TM type (0x%x) for devhandle 0x%04x\n",
3662 __func__, tm_type, handle);
3663 if (mrioc->unrecoverable) {
3664 retval = -1;
3665 ioc_err(mrioc, "%s :Issue TM: Unrecoverable controller\n",
3666 __func__);
3667 goto out;
3668 }
3669
3670 memset(&tm_req, 0, sizeof(tm_req));
3671 mutex_lock(&drv_cmd->mutex);
3672 if (drv_cmd->state & MPI3MR_CMD_PENDING) {
3673 retval = -1;
3674 ioc_err(mrioc, "%s :Issue TM: Command is in use\n", __func__);
3675 mutex_unlock(&drv_cmd->mutex);
3676 goto out;
3677 }
3678 if (mrioc->reset_in_progress) {
3679 retval = -1;
3680 ioc_err(mrioc, "%s :Issue TM: Reset in progress\n", __func__);
3681 mutex_unlock(&drv_cmd->mutex);
3682 goto out;
3683 }
3684
3685 drv_cmd->state = MPI3MR_CMD_PENDING;
3686 drv_cmd->is_waiting = 1;
3687 drv_cmd->callback = NULL;
3688 tm_req.dev_handle = cpu_to_le16(handle);
3689 tm_req.task_type = tm_type;
3690 tm_req.host_tag = cpu_to_le16(htag);
3691
3692 int_to_scsilun(lun, (struct scsi_lun *)tm_req.lun);
3693 tm_req.function = MPI3_FUNCTION_SCSI_TASK_MGMT;
3694
3695 tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);
3696
3697 if (scmd) {
3698 sdev = scmd->device;
3699 sdev_priv_data = sdev->hostdata;
3700 scsi_tgt_priv_data = ((sdev_priv_data) ?
3701 sdev_priv_data->tgt_priv_data : NULL); 3702 } else { 3703 if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata) 3704 scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *) 3705 tgtdev->starget->hostdata; 3706 } 3707 3708 if (scsi_tgt_priv_data) 3709 atomic_inc(&scsi_tgt_priv_data->block_io); 3710 3711 if (tgtdev && (tgtdev->dev_type == MPI3_DEVICE_DEVFORM_PCIE)) { 3712 if (cmd_priv && tgtdev->dev_spec.pcie_inf.abort_to) 3713 timeout = tgtdev->dev_spec.pcie_inf.abort_to; 3714 else if (!cmd_priv && tgtdev->dev_spec.pcie_inf.reset_to) 3715 timeout = tgtdev->dev_spec.pcie_inf.reset_to; 3716 } 3717 3718 init_completion(&drv_cmd->done); 3719 retval = mpi3mr_admin_request_post(mrioc, &tm_req, sizeof(tm_req), 1); 3720 if (retval) { 3721 ioc_err(mrioc, "%s :Issue TM: Admin Post failed\n", __func__); 3722 goto out_unlock; 3723 } 3724 wait_for_completion_timeout(&drv_cmd->done, (timeout * HZ)); 3725 3726 if (!(drv_cmd->state & MPI3MR_CMD_COMPLETE)) { 3727 drv_cmd->is_waiting = 0; 3728 retval = -1; 3729 if (!(drv_cmd->state & MPI3MR_CMD_RESET)) { 3730 dprint_tm(mrioc, 3731 "task management request timed out after %ld seconds\n", 3732 timeout); 3733 if (mrioc->logging_level & MPI3_DEBUG_TM) 3734 dprint_dump_req(&tm_req, sizeof(tm_req)/4); 3735 mpi3mr_soft_reset_handler(mrioc, 3736 MPI3MR_RESET_FROM_TM_TIMEOUT, 1); 3737 } 3738 goto out_unlock; 3739 } 3740 3741 if (!(drv_cmd->state & MPI3MR_CMD_REPLY_VALID)) { 3742 dprint_tm(mrioc, "invalid task management reply message\n"); 3743 retval = -1; 3744 goto out_unlock; 3745 } 3746 3747 tm_reply = (struct mpi3_scsi_task_mgmt_reply *)drv_cmd->reply; 3748 3749 switch (drv_cmd->ioc_status) { 3750 case MPI3_IOCSTATUS_SUCCESS: 3751 *resp_code = le32_to_cpu(tm_reply->response_data) & 3752 MPI3MR_RI_MASK_RESPCODE; 3753 break; 3754 case MPI3_IOCSTATUS_SCSI_IOC_TERMINATED: 3755 *resp_code = MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE; 3756 break; 3757 default: 3758 dprint_tm(mrioc, 3759 "task management request to handle(0x%04x) is failed with ioc_status(0x%04x) log_info(0x%08x)\n", 3760 handle, drv_cmd->ioc_status, drv_cmd->ioc_loginfo); 3761 retval = -1; 3762 goto out_unlock; 3763 } 3764 3765 switch (*resp_code) { 3766 case MPI3_SCSITASKMGMT_RSPCODE_TM_SUCCEEDED: 3767 case MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE: 3768 break; 3769 case MPI3_SCSITASKMGMT_RSPCODE_IO_QUEUED_ON_IOC: 3770 if (tm_type != MPI3_SCSITASKMGMT_TASKTYPE_QUERY_TASK) 3771 retval = -1; 3772 break; 3773 default: 3774 retval = -1; 3775 break; 3776 } 3777 3778 dprint_tm(mrioc, 3779 "task management request type(%d) completed for handle(0x%04x) with ioc_status(0x%04x), log_info(0x%08x), termination_count(%d), response:%s(0x%x)\n", 3780 tm_type, handle, drv_cmd->ioc_status, drv_cmd->ioc_loginfo, 3781 le32_to_cpu(tm_reply->termination_count), 3782 mpi3mr_tm_response_name(*resp_code), *resp_code); 3783 3784 if (!retval) { 3785 mpi3mr_ioc_disable_intr(mrioc); 3786 mpi3mr_poll_pend_io_completions(mrioc); 3787 mpi3mr_ioc_enable_intr(mrioc); 3788 mpi3mr_poll_pend_io_completions(mrioc); 3789 mpi3mr_process_admin_reply_q(mrioc); 3790 } 3791 switch (tm_type) { 3792 case MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET: 3793 if (!scsi_tgt_priv_data) 3794 break; 3795 scsi_tgt_priv_data->pend_count = 0; 3796 blk_mq_tagset_busy_iter(&mrioc->shost->tag_set, 3797 mpi3mr_count_tgt_pending, 3798 (void *)scsi_tgt_priv_data->starget); 3799 break; 3800 case MPI3_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET: 3801 if (!sdev_priv_data) 3802 break; 3803 sdev_priv_data->pend_count = 0; 3804 blk_mq_tagset_busy_iter(&mrioc->shost->tag_set, 
3805 mpi3mr_count_dev_pending, (void *)sdev);
3806 break;
3807 default:
3808 break;
3809 }
3810
3811 out_unlock:
3812 drv_cmd->state = MPI3MR_CMD_NOTUSED;
3813 mutex_unlock(&drv_cmd->mutex);
3814 if (scsi_tgt_priv_data)
3815 atomic_dec_if_positive(&scsi_tgt_priv_data->block_io);
3816 if (tgtdev)
3817 mpi3mr_tgtdev_put(tgtdev);
3818 out:
3819 return retval;
3820 }
3821
3822 /**
3823 * mpi3mr_bios_param - BIOS param callback
3824 * @sdev: SCSI device reference
3825 * @bdev: Block device reference
3826 * @capacity: Capacity in logical sectors
3827 * @params: Parameter array
3828 *
3829 * Sets the BIOS parameters with heads/sectors/cylinders.
3830 *
3831 * Return: 0 always
3832 */
3833 static int mpi3mr_bios_param(struct scsi_device *sdev,
3834 struct block_device *bdev, sector_t capacity, int params[])
3835 {
3836 int heads;
3837 int sectors;
3838 sector_t cylinders;
3839 ulong dummy;
3840
3841 heads = 64;
3842 sectors = 32;
3843
3844 dummy = heads * sectors;
3845 cylinders = capacity;
3846 sector_div(cylinders, dummy);
3847
3848 if ((ulong)capacity >= 0x200000) {
3849 heads = 255;
3850 sectors = 63;
3851 dummy = heads * sectors;
3852 cylinders = capacity;
3853 sector_div(cylinders, dummy);
3854 }
3855
3856 params[0] = heads;
3857 params[1] = sectors;
3858 params[2] = cylinders;
3859 return 0;
3860 }
3861
3862 /**
3863 * mpi3mr_map_queues - Map queues callback handler
3864 * @shost: SCSI host reference
3865 *
3866 * Maps default and poll queues.
3867 *
3868 * Return: Nothing.
3869 */
3870 static void mpi3mr_map_queues(struct Scsi_Host *shost)
3871 {
3872 struct mpi3mr_ioc *mrioc = shost_priv(shost);
3873 int i, qoff, offset;
3874 struct blk_mq_queue_map *map = NULL;
3875
3876 offset = mrioc->op_reply_q_offset;
3877
3878 for (i = 0, qoff = 0; i < HCTX_MAX_TYPES; i++) {
3879 map = &shost->tag_set.map[i];
3880
3881 map->nr_queues = 0;
3882
3883 if (i == HCTX_TYPE_DEFAULT)
3884 map->nr_queues = mrioc->default_qcount;
3885 else if (i == HCTX_TYPE_POLL)
3886 map->nr_queues = mrioc->active_poll_qcount;
3887
3888 if (!map->nr_queues) {
3889 BUG_ON(i == HCTX_TYPE_DEFAULT);
3890 continue;
3891 }
3892
3893 /*
3894 * The poll queue(s) don't have an IRQ (and hence IRQ
3895 * affinity), so use the regular blk-mq cpu mapping
3896 */
3897 map->queue_offset = qoff;
3898 if (i != HCTX_TYPE_POLL)
3899 blk_mq_pci_map_queues(map, mrioc->pdev, offset);
3900 else
3901 blk_mq_map_queues(map);
3902
3903 qoff += map->nr_queues;
3904 offset += map->nr_queues;
3905 }
3906 }
3907
3908 /**
3909 * mpi3mr_get_fw_pending_ios - Calculate pending I/O count
3910 * @mrioc: Adapter instance reference
3911 *
3912 * Calculate the pending I/Os for the controller and return.
3913 *
3914 * Return: Number of pending I/Os
3915 */
3916 static inline int mpi3mr_get_fw_pending_ios(struct mpi3mr_ioc *mrioc)
3917 {
3918 u16 i;
3919 uint pend_ios = 0;
3920
3921 for (i = 0; i < mrioc->num_op_reply_q; i++)
3922 pend_ios += atomic_read(&mrioc->op_reply_qinfo[i].pend_ios);
3923 return pend_ios;
3924 }
3925
3926 /**
3927 * mpi3mr_print_pending_host_io - print pending I/Os
3928 * @mrioc: Adapter instance reference
3929 *
3930 * Print number of pending I/Os and each I/O details prior to
3931 * reset for debug purposes.
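 *
 * Each in-flight command is dumped by walking the host tag set with
 * blk_mq_tagset_busy_iter() using mpi3mr_print_scmd() as the callback.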
3932 * 3933 * Return: Nothing 3934 */ 3935 static void mpi3mr_print_pending_host_io(struct mpi3mr_ioc *mrioc) 3936 { 3937 struct Scsi_Host *shost = mrioc->shost; 3938 3939 ioc_info(mrioc, "%s :Pending commands prior to reset: %d\n", 3940 __func__, mpi3mr_get_fw_pending_ios(mrioc)); 3941 blk_mq_tagset_busy_iter(&shost->tag_set, 3942 mpi3mr_print_scmd, (void *)mrioc); 3943 } 3944 3945 /** 3946 * mpi3mr_wait_for_host_io - block for I/Os to complete 3947 * @mrioc: Adapter instance reference 3948 * @timeout: time out in seconds 3949 * Waits for pending I/Os for the given adapter to complete or 3950 * to hit the timeout. 3951 * 3952 * Return: Nothing 3953 */ 3954 void mpi3mr_wait_for_host_io(struct mpi3mr_ioc *mrioc, u32 timeout) 3955 { 3956 enum mpi3mr_iocstate iocstate; 3957 int i = 0; 3958 3959 iocstate = mpi3mr_get_iocstate(mrioc); 3960 if (iocstate != MRIOC_STATE_READY) 3961 return; 3962 3963 if (!mpi3mr_get_fw_pending_ios(mrioc)) 3964 return; 3965 ioc_info(mrioc, 3966 "%s :Waiting for %d seconds prior to reset for %d I/O\n", 3967 __func__, timeout, mpi3mr_get_fw_pending_ios(mrioc)); 3968 3969 for (i = 0; i < timeout; i++) { 3970 if (!mpi3mr_get_fw_pending_ios(mrioc)) 3971 break; 3972 iocstate = mpi3mr_get_iocstate(mrioc); 3973 if (iocstate != MRIOC_STATE_READY) 3974 break; 3975 msleep(1000); 3976 } 3977 3978 ioc_info(mrioc, "%s :Pending I/Os after wait is: %d\n", __func__, 3979 mpi3mr_get_fw_pending_ios(mrioc)); 3980 } 3981 3982 /** 3983 * mpi3mr_setup_divert_ws - Setup Divert IO flag for write same 3984 * @mrioc: Adapter instance reference 3985 * @scmd: SCSI command reference 3986 * @scsiio_req: MPI3 SCSI IO request 3987 * @scsiio_flags: Pointer to MPI3 SCSI IO Flags 3988 * @wslen: write same max length 3989 * 3990 * Gets values of unmap, ndob and number of blocks from write 3991 * same scsi io and based on these values it sets divert IO flag 3992 * and reason for diverting IO to firmware. 
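 *
 * For example, a WRITE SAME(16) CDB carries the UNMAP bit (0x08) and
 * the NDOB bit (0x01) in byte 1 and the number of blocks in bytes
 * 10-13, while WRITE SAME(32) carries the same bits in byte 10 and the
 * block count in bytes 28-31; only the unmap+ndob combination with a
 * block count larger than @wslen is diverted.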
3993 *
3994 * Return: Nothing
3995 */
3996 static inline void mpi3mr_setup_divert_ws(struct mpi3mr_ioc *mrioc,
3997 struct scsi_cmnd *scmd, struct mpi3_scsi_io_request *scsiio_req,
3998 u32 *scsiio_flags, u16 wslen)
3999 {
4000 u8 unmap = 0, ndob = 0;
4001 u8 opcode = scmd->cmnd[0];
4002 u32 num_blocks = 0;
4003 u16 sa = (scmd->cmnd[8] << 8) | (scmd->cmnd[9]);
4004
4005 if (opcode == WRITE_SAME_16) {
4006 unmap = scmd->cmnd[1] & 0x08;
4007 ndob = scmd->cmnd[1] & 0x01;
4008 num_blocks = get_unaligned_be32(scmd->cmnd + 10);
4009 } else if ((opcode == VARIABLE_LENGTH_CMD) && (sa == WRITE_SAME_32)) {
4010 unmap = scmd->cmnd[10] & 0x08;
4011 ndob = scmd->cmnd[10] & 0x01;
4012 num_blocks = get_unaligned_be32(scmd->cmnd + 28);
4013 } else
4014 return;
4015
4016 if ((unmap) && (ndob) && (num_blocks > wslen)) {
4017 scsiio_req->msg_flags |=
4018 MPI3_SCSIIO_MSGFLAGS_DIVERT_TO_FIRMWARE;
4019 *scsiio_flags |=
4020 MPI3_SCSIIO_FLAGS_DIVERT_REASON_WRITE_SAME_TOO_LARGE;
4021 }
4022 }
4023
4024 /**
4025 * mpi3mr_eh_host_reset - Host reset error handling callback
4026 * @scmd: SCSI command reference
4027 *
4028 * Issues a controller reset.
4029 *
4030 * Return: SUCCESS on successful reset, else FAILED
4031 */
4032 static int mpi3mr_eh_host_reset(struct scsi_cmnd *scmd)
4033 {
4034 struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host);
4035 int retval = FAILED, ret;
4036
4037 ret = mpi3mr_soft_reset_handler(mrioc,
4038 MPI3MR_RESET_FROM_EH_HOS, 1);
4039 if (ret)
4040 goto out;
4041
4042 retval = SUCCESS;
4043 out:
4044 sdev_printk(KERN_INFO, scmd->device,
4045 "Host reset is %s for scmd(%p)\n",
4046 ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
4047
4048 return retval;
4049 }
4050
4051 /**
4052 * mpi3mr_eh_bus_reset - Bus reset error handling callback
4053 * @scmd: SCSI command reference
4054 *
4055 * Checks whether pending I/Os are present for the RAID volume;
4056 * if not there's no need to reset the adapter.
4057 *
4058 * Return: SUCCESS on successful reset, else FAILED
4059 */
4060 static int mpi3mr_eh_bus_reset(struct scsi_cmnd *scmd)
4061 {
4062 struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host);
4063 struct mpi3mr_stgt_priv_data *stgt_priv_data;
4064 struct mpi3mr_sdev_priv_data *sdev_priv_data;
4065 u8 dev_type = MPI3_DEVICE_DEVFORM_VD;
4066 int retval = FAILED;
4067
4068 sdev_priv_data = scmd->device->hostdata;
4069 if (sdev_priv_data && sdev_priv_data->tgt_priv_data) {
4070 stgt_priv_data = sdev_priv_data->tgt_priv_data;
4071 dev_type = stgt_priv_data->dev_type;
4072 }
4073
4074 if (dev_type == MPI3_DEVICE_DEVFORM_VD) {
4075 mpi3mr_wait_for_host_io(mrioc,
4076 MPI3MR_RAID_ERRREC_RESET_TIMEOUT);
4077 if (!mpi3mr_get_fw_pending_ios(mrioc))
4078 retval = SUCCESS;
4079 }
4080 if (retval == FAILED)
4081 mpi3mr_print_pending_host_io(mrioc);
4082
4083 sdev_printk(KERN_INFO, scmd->device,
4084 "Bus reset is %s for scmd(%p)\n",
4085 ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
4086 return retval;
4087 }
4088
4089 /**
4090 * mpi3mr_eh_target_reset - Target reset error handling callback
4091 * @scmd: SCSI command reference
4092 *
4093 * Issue Target reset Task Management, verify the scmd is
4094 * terminated successfully, and return the status accordingly.
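 *
 * The termination check relies on mpi3mr_issue_tm() refreshing
 * stgt_priv_data->pend_count through blk_mq_tagset_busy_iter() and
 * mpi3mr_count_tgt_pending() after the TM completes.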
4095 *
4096 * Return: SUCCESS on successful termination of the scmd, else
4097 * FAILED
4098 */
4099 static int mpi3mr_eh_target_reset(struct scsi_cmnd *scmd)
4100 {
4101 struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host);
4102 struct mpi3mr_stgt_priv_data *stgt_priv_data;
4103 struct mpi3mr_sdev_priv_data *sdev_priv_data;
4104 u16 dev_handle;
4105 u8 resp_code = 0;
4106 int retval = FAILED, ret = 0;
4107
4108 sdev_printk(KERN_INFO, scmd->device,
4109 "Attempting Target Reset! scmd(%p)\n", scmd);
4110 scsi_print_command(scmd);
4111
4112 sdev_priv_data = scmd->device->hostdata;
4113 if (!sdev_priv_data || !sdev_priv_data->tgt_priv_data) {
4114 sdev_printk(KERN_INFO, scmd->device,
4115 "SCSI device is not available\n");
4116 retval = SUCCESS;
4117 goto out;
4118 }
4119
4120 stgt_priv_data = sdev_priv_data->tgt_priv_data;
4121 dev_handle = stgt_priv_data->dev_handle;
4122 if (stgt_priv_data->dev_removed) {
4123 struct scmd_priv *cmd_priv = scsi_cmd_priv(scmd);
4124 sdev_printk(KERN_INFO, scmd->device,
4125 "%s:target(handle = 0x%04x) is removed, target reset is not issued\n",
4126 mrioc->name, dev_handle);
4127 if (!cmd_priv->in_lld_scope || cmd_priv->host_tag == MPI3MR_HOSTTAG_INVALID)
4128 retval = SUCCESS;
4129 else
4130 retval = FAILED;
4131 goto out;
4132 }
4133 sdev_printk(KERN_INFO, scmd->device,
4134 "Target Reset is issued to handle(0x%04x)\n",
4135 dev_handle);
4136
4137 ret = mpi3mr_issue_tm(mrioc,
4138 MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET, dev_handle,
4139 sdev_priv_data->lun_id, MPI3MR_HOSTTAG_BLK_TMS,
4140 MPI3MR_RESETTM_TIMEOUT, &mrioc->host_tm_cmds, &resp_code, scmd);
4141
4142 if (ret)
4143 goto out;
4144
4145 if (stgt_priv_data->pend_count) {
4146 sdev_printk(KERN_INFO, scmd->device,
4147 "%s: target has %d pending commands, target reset is failed\n",
4148 mrioc->name, stgt_priv_data->pend_count);
4149 goto out;
4150 }
4151
4152 retval = SUCCESS;
4153 out:
4154 sdev_printk(KERN_INFO, scmd->device,
4155 "%s: target reset is %s for scmd(%p)\n", mrioc->name,
4156 ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
4157
4158 return retval;
4159 }
4160
4161 /**
4162 * mpi3mr_eh_dev_reset - Device reset error handling callback
4163 * @scmd: SCSI command reference
4164 *
4165 * Issue lun reset Task Management, verify the scmd is
4166 * terminated successfully, and return the status accordingly.
4167 *
4168 * Return: SUCCESS on successful termination of the scmd, else
4169 * FAILED
4170 */
4171 static int mpi3mr_eh_dev_reset(struct scsi_cmnd *scmd)
4172 {
4173 struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host);
4174 struct mpi3mr_stgt_priv_data *stgt_priv_data;
4175 struct mpi3mr_sdev_priv_data *sdev_priv_data;
4176 u16 dev_handle;
4177 u8 resp_code = 0;
4178 int retval = FAILED, ret = 0;
4179
4180 sdev_printk(KERN_INFO, scmd->device,
4181 "Attempting Device(lun) Reset! scmd(%p)\n", scmd);
4182 scsi_print_command(scmd);
4183
4184 sdev_priv_data = scmd->device->hostdata;
4185 if (!sdev_priv_data || !sdev_priv_data->tgt_priv_data) {
4186 sdev_printk(KERN_INFO, scmd->device,
4187 "SCSI device is not available\n");
4188 retval = SUCCESS;
4189 goto out;
4190 }
4191
4192 stgt_priv_data = sdev_priv_data->tgt_priv_data;
4193 dev_handle = stgt_priv_data->dev_handle;
4194 if (stgt_priv_data->dev_removed) {
4195 struct scmd_priv *cmd_priv = scsi_cmd_priv(scmd);
4196 sdev_printk(KERN_INFO, scmd->device,
4197 "%s: device(handle = 0x%04x) is removed, device(LUN) reset is not issued\n",
4198 mrioc->name, dev_handle);
4199 if (!cmd_priv->in_lld_scope || cmd_priv->host_tag == MPI3MR_HOSTTAG_INVALID)
4200 retval = SUCCESS;
4201 else
4202 retval = FAILED;
4203 goto out;
4204 }
4205 sdev_printk(KERN_INFO, scmd->device,
4206 "Device(lun) Reset is issued to handle(0x%04x)\n", dev_handle);
4207
4208 ret = mpi3mr_issue_tm(mrioc,
4209 MPI3_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, dev_handle,
4210 sdev_priv_data->lun_id, MPI3MR_HOSTTAG_BLK_TMS,
4211 MPI3MR_RESETTM_TIMEOUT, &mrioc->host_tm_cmds, &resp_code, scmd);
4212
4213 if (ret)
4214 goto out;
4215
4216 if (sdev_priv_data->pend_count) {
4217 sdev_printk(KERN_INFO, scmd->device,
4218 "%s: device has %d pending commands, device(LUN) reset is failed\n",
4219 mrioc->name, sdev_priv_data->pend_count);
4220 goto out;
4221 }
4222 retval = SUCCESS;
4223 out:
4224 sdev_printk(KERN_INFO, scmd->device,
4225 "%s: device(LUN) reset is %s for scmd(%p)\n", mrioc->name,
4226 ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
4227
4228 return retval;
4229 }
4230
4231 /**
4232 * mpi3mr_scan_start - Scan start callback handler
4233 * @shost: SCSI host reference
4234 *
4235 * Issue port enable request asynchronously.
4236 *
4237 * Return: Nothing
4238 */
4239 static void mpi3mr_scan_start(struct Scsi_Host *shost)
4240 {
4241 struct mpi3mr_ioc *mrioc = shost_priv(shost);
4242
4243 mrioc->scan_started = 1;
4244 ioc_info(mrioc, "%s :Issuing Port Enable\n", __func__);
4245 if (mpi3mr_issue_port_enable(mrioc, 1)) {
4246 ioc_err(mrioc, "%s :Issuing port enable failed\n", __func__);
4247 mrioc->scan_started = 0;
4248 mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR;
4249 }
4250 }
4251
4252 /**
4253 * mpi3mr_scan_finished - Scan finished callback handler
4254 * @shost: SCSI host reference
4255 * @time: Jiffies from the scan start
4256 *
4257 * Checks whether the port enable is completed, timed out or
4258 * failed, and sets the scan status accordingly after taking any
4259 * recovery action if required.
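 *
 * The SCSI midlayer polls this handler during the asynchronous host
 * scan; returning 0 keeps the scan alive while port enable is still
 * outstanding.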
4260 * 4261 * Return: 1 on scan finished or timed out, 0 for in progress 4262 */ 4263 static int mpi3mr_scan_finished(struct Scsi_Host *shost, 4264 unsigned long time) 4265 { 4266 struct mpi3mr_ioc *mrioc = shost_priv(shost); 4267 u32 pe_timeout = MPI3MR_PORTENABLE_TIMEOUT; 4268 u32 ioc_status = readl(&mrioc->sysif_regs->ioc_status); 4269 4270 if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) || 4271 (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) { 4272 ioc_err(mrioc, "port enable failed due to fault or reset\n"); 4273 mpi3mr_print_fault_info(mrioc); 4274 mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR; 4275 mrioc->scan_started = 0; 4276 mrioc->init_cmds.is_waiting = 0; 4277 mrioc->init_cmds.callback = NULL; 4278 mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; 4279 } 4280 4281 if (time >= (pe_timeout * HZ)) { 4282 ioc_err(mrioc, "port enable failed due to time out\n"); 4283 mpi3mr_check_rh_fault_ioc(mrioc, 4284 MPI3MR_RESET_FROM_PE_TIMEOUT); 4285 mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR; 4286 mrioc->scan_started = 0; 4287 mrioc->init_cmds.is_waiting = 0; 4288 mrioc->init_cmds.callback = NULL; 4289 mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; 4290 } 4291 4292 if (mrioc->scan_started) 4293 return 0; 4294 4295 if (mrioc->scan_failed) { 4296 ioc_err(mrioc, 4297 "port enable failed with status=0x%04x\n", 4298 mrioc->scan_failed); 4299 } else 4300 ioc_info(mrioc, "port enable is successfully completed\n"); 4301 4302 mpi3mr_start_watchdog(mrioc); 4303 mrioc->is_driver_loading = 0; 4304 mrioc->stop_bsgs = 0; 4305 return 1; 4306 } 4307 4308 /** 4309 * mpi3mr_slave_destroy - Slave destroy callback handler 4310 * @sdev: SCSI device reference 4311 * 4312 * Cleanup and free per device(lun) private data. 4313 * 4314 * Return: Nothing. 4315 */ 4316 static void mpi3mr_slave_destroy(struct scsi_device *sdev) 4317 { 4318 struct Scsi_Host *shost; 4319 struct mpi3mr_ioc *mrioc; 4320 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data; 4321 struct mpi3mr_tgt_dev *tgt_dev = NULL; 4322 unsigned long flags; 4323 struct scsi_target *starget; 4324 struct sas_rphy *rphy = NULL; 4325 4326 if (!sdev->hostdata) 4327 return; 4328 4329 starget = scsi_target(sdev); 4330 shost = dev_to_shost(&starget->dev); 4331 mrioc = shost_priv(shost); 4332 scsi_tgt_priv_data = starget->hostdata; 4333 4334 scsi_tgt_priv_data->num_luns--; 4335 4336 spin_lock_irqsave(&mrioc->tgtdev_lock, flags); 4337 if (starget->channel == mrioc->scsi_device_channel) 4338 tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id); 4339 else if (mrioc->sas_transport_enabled && !starget->channel) { 4340 rphy = dev_to_rphy(starget->dev.parent); 4341 tgt_dev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc, 4342 rphy->identify.sas_address, rphy); 4343 } 4344 4345 if (tgt_dev && (!scsi_tgt_priv_data->num_luns)) 4346 tgt_dev->starget = NULL; 4347 if (tgt_dev) 4348 mpi3mr_tgtdev_put(tgt_dev); 4349 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 4350 4351 kfree(sdev->hostdata); 4352 sdev->hostdata = NULL; 4353 } 4354 4355 /** 4356 * mpi3mr_target_destroy - Target destroy callback handler 4357 * @starget: SCSI target reference 4358 * 4359 * Cleanup and free per target private data. 4360 * 4361 * Return: Nothing. 
4362 */ 4363 static void mpi3mr_target_destroy(struct scsi_target *starget) 4364 { 4365 struct Scsi_Host *shost; 4366 struct mpi3mr_ioc *mrioc; 4367 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data; 4368 struct mpi3mr_tgt_dev *tgt_dev; 4369 unsigned long flags; 4370 4371 if (!starget->hostdata) 4372 return; 4373 4374 shost = dev_to_shost(&starget->dev); 4375 mrioc = shost_priv(shost); 4376 scsi_tgt_priv_data = starget->hostdata; 4377 4378 spin_lock_irqsave(&mrioc->tgtdev_lock, flags); 4379 tgt_dev = __mpi3mr_get_tgtdev_from_tgtpriv(mrioc, scsi_tgt_priv_data); 4380 if (tgt_dev && (tgt_dev->starget == starget) && 4381 (tgt_dev->perst_id == starget->id)) 4382 tgt_dev->starget = NULL; 4383 if (tgt_dev) { 4384 scsi_tgt_priv_data->tgt_dev = NULL; 4385 scsi_tgt_priv_data->perst_id = 0; 4386 mpi3mr_tgtdev_put(tgt_dev); 4387 mpi3mr_tgtdev_put(tgt_dev); 4388 } 4389 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 4390 4391 kfree(starget->hostdata); 4392 starget->hostdata = NULL; 4393 } 4394 4395 /** 4396 * mpi3mr_slave_configure - Slave configure callback handler 4397 * @sdev: SCSI device reference 4398 * 4399 * Configure queue depth, max hardware sectors and virt boundary 4400 * as required 4401 * 4402 * Return: 0 always. 4403 */ 4404 static int mpi3mr_slave_configure(struct scsi_device *sdev) 4405 { 4406 struct scsi_target *starget; 4407 struct Scsi_Host *shost; 4408 struct mpi3mr_ioc *mrioc; 4409 struct mpi3mr_tgt_dev *tgt_dev = NULL; 4410 unsigned long flags; 4411 int retval = 0; 4412 struct sas_rphy *rphy = NULL; 4413 4414 starget = scsi_target(sdev); 4415 shost = dev_to_shost(&starget->dev); 4416 mrioc = shost_priv(shost); 4417 4418 spin_lock_irqsave(&mrioc->tgtdev_lock, flags); 4419 if (starget->channel == mrioc->scsi_device_channel) 4420 tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id); 4421 else if (mrioc->sas_transport_enabled && !starget->channel) { 4422 rphy = dev_to_rphy(starget->dev.parent); 4423 tgt_dev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc, 4424 rphy->identify.sas_address, rphy); 4425 } 4426 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 4427 if (!tgt_dev) 4428 return -ENXIO; 4429 4430 mpi3mr_change_queue_depth(sdev, tgt_dev->q_depth); 4431 4432 sdev->eh_timeout = MPI3MR_EH_SCMD_TIMEOUT; 4433 blk_queue_rq_timeout(sdev->request_queue, MPI3MR_SCMD_TIMEOUT); 4434 4435 switch (tgt_dev->dev_type) { 4436 case MPI3_DEVICE_DEVFORM_PCIE: 4437 /*The block layer hw sector size = 512*/ 4438 if ((tgt_dev->dev_spec.pcie_inf.dev_info & 4439 MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) == 4440 MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) { 4441 blk_queue_max_hw_sectors(sdev->request_queue, 4442 tgt_dev->dev_spec.pcie_inf.mdts / 512); 4443 if (tgt_dev->dev_spec.pcie_inf.pgsz == 0) 4444 blk_queue_virt_boundary(sdev->request_queue, 4445 ((1 << MPI3MR_DEFAULT_PGSZEXP) - 1)); 4446 else 4447 blk_queue_virt_boundary(sdev->request_queue, 4448 ((1 << tgt_dev->dev_spec.pcie_inf.pgsz) - 1)); 4449 } 4450 break; 4451 default: 4452 break; 4453 } 4454 4455 mpi3mr_tgtdev_put(tgt_dev); 4456 4457 return retval; 4458 } 4459 4460 /** 4461 * mpi3mr_slave_alloc -Slave alloc callback handler 4462 * @sdev: SCSI device reference 4463 * 4464 * Allocate per device(lun) private data and initialize it. 4465 * 4466 * Return: 0 on success -ENOMEM on memory allocation failure. 
4467 */ 4468 static int mpi3mr_slave_alloc(struct scsi_device *sdev) 4469 { 4470 struct Scsi_Host *shost; 4471 struct mpi3mr_ioc *mrioc; 4472 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data; 4473 struct mpi3mr_tgt_dev *tgt_dev = NULL; 4474 struct mpi3mr_sdev_priv_data *scsi_dev_priv_data; 4475 unsigned long flags; 4476 struct scsi_target *starget; 4477 int retval = 0; 4478 struct sas_rphy *rphy = NULL; 4479 4480 starget = scsi_target(sdev); 4481 shost = dev_to_shost(&starget->dev); 4482 mrioc = shost_priv(shost); 4483 scsi_tgt_priv_data = starget->hostdata; 4484 4485 spin_lock_irqsave(&mrioc->tgtdev_lock, flags); 4486 4487 if (starget->channel == mrioc->scsi_device_channel) 4488 tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id); 4489 else if (mrioc->sas_transport_enabled && !starget->channel) { 4490 rphy = dev_to_rphy(starget->dev.parent); 4491 tgt_dev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc, 4492 rphy->identify.sas_address, rphy); 4493 } 4494 4495 if (tgt_dev) { 4496 if (tgt_dev->starget == NULL) 4497 tgt_dev->starget = starget; 4498 mpi3mr_tgtdev_put(tgt_dev); 4499 retval = 0; 4500 } else { 4501 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 4502 return -ENXIO; 4503 } 4504 4505 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 4506 4507 scsi_dev_priv_data = kzalloc(sizeof(*scsi_dev_priv_data), GFP_KERNEL); 4508 if (!scsi_dev_priv_data) 4509 return -ENOMEM; 4510 4511 scsi_dev_priv_data->lun_id = sdev->lun; 4512 scsi_dev_priv_data->tgt_priv_data = scsi_tgt_priv_data; 4513 sdev->hostdata = scsi_dev_priv_data; 4514 4515 scsi_tgt_priv_data->num_luns++; 4516 4517 return retval; 4518 } 4519 4520 /** 4521 * mpi3mr_target_alloc - Target alloc callback handler 4522 * @starget: SCSI target reference 4523 * 4524 * Allocate per target private data and initialize it. 4525 * 4526 * Return: 0 on success -ENOMEM on memory allocation failure. 
4527 */ 4528 static int mpi3mr_target_alloc(struct scsi_target *starget) 4529 { 4530 struct Scsi_Host *shost = dev_to_shost(&starget->dev); 4531 struct mpi3mr_ioc *mrioc = shost_priv(shost); 4532 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data; 4533 struct mpi3mr_tgt_dev *tgt_dev; 4534 unsigned long flags; 4535 int retval = 0; 4536 struct sas_rphy *rphy = NULL; 4537 4538 scsi_tgt_priv_data = kzalloc(sizeof(*scsi_tgt_priv_data), GFP_KERNEL); 4539 if (!scsi_tgt_priv_data) 4540 return -ENOMEM; 4541 4542 starget->hostdata = scsi_tgt_priv_data; 4543 4544 spin_lock_irqsave(&mrioc->tgtdev_lock, flags); 4545 if (starget->channel == mrioc->scsi_device_channel) { 4546 tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id); 4547 if (tgt_dev && !tgt_dev->is_hidden) { 4548 scsi_tgt_priv_data->starget = starget; 4549 scsi_tgt_priv_data->dev_handle = tgt_dev->dev_handle; 4550 scsi_tgt_priv_data->perst_id = tgt_dev->perst_id; 4551 scsi_tgt_priv_data->dev_type = tgt_dev->dev_type; 4552 scsi_tgt_priv_data->tgt_dev = tgt_dev; 4553 tgt_dev->starget = starget; 4554 atomic_set(&scsi_tgt_priv_data->block_io, 0); 4555 retval = 0; 4556 if ((tgt_dev->dev_type == MPI3_DEVICE_DEVFORM_PCIE) && 4557 ((tgt_dev->dev_spec.pcie_inf.dev_info & 4558 MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) == 4559 MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) && 4560 ((tgt_dev->dev_spec.pcie_inf.dev_info & 4561 MPI3_DEVICE0_PCIE_DEVICE_INFO_PITYPE_MASK) != 4562 MPI3_DEVICE0_PCIE_DEVICE_INFO_PITYPE_0)) 4563 scsi_tgt_priv_data->dev_nvme_dif = 1; 4564 scsi_tgt_priv_data->io_throttle_enabled = tgt_dev->io_throttle_enabled; 4565 scsi_tgt_priv_data->wslen = tgt_dev->wslen; 4566 if (tgt_dev->dev_type == MPI3_DEVICE_DEVFORM_VD) 4567 scsi_tgt_priv_data->throttle_group = tgt_dev->dev_spec.vd_inf.tg; 4568 } else 4569 retval = -ENXIO; 4570 } else if (mrioc->sas_transport_enabled && !starget->channel) { 4571 rphy = dev_to_rphy(starget->dev.parent); 4572 tgt_dev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc, 4573 rphy->identify.sas_address, rphy); 4574 if (tgt_dev && !tgt_dev->is_hidden && !tgt_dev->non_stl && 4575 (tgt_dev->dev_type == MPI3_DEVICE_DEVFORM_SAS_SATA)) { 4576 scsi_tgt_priv_data->starget = starget; 4577 scsi_tgt_priv_data->dev_handle = tgt_dev->dev_handle; 4578 scsi_tgt_priv_data->perst_id = tgt_dev->perst_id; 4579 scsi_tgt_priv_data->dev_type = tgt_dev->dev_type; 4580 scsi_tgt_priv_data->tgt_dev = tgt_dev; 4581 scsi_tgt_priv_data->io_throttle_enabled = tgt_dev->io_throttle_enabled; 4582 scsi_tgt_priv_data->wslen = tgt_dev->wslen; 4583 tgt_dev->starget = starget; 4584 atomic_set(&scsi_tgt_priv_data->block_io, 0); 4585 retval = 0; 4586 } else 4587 retval = -ENXIO; 4588 } 4589 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 4590 4591 return retval; 4592 } 4593 4594 /** 4595 * mpi3mr_check_return_unmap - Whether an unmap is allowed 4596 * @mrioc: Adapter instance reference 4597 * @scmd: SCSI Command reference 4598 * 4599 * The controller hardware cannot handle certain unmap commands 4600 * for NVMe drives, this routine checks those and return true 4601 * and completes the SCSI command with proper status and sense 4602 * data. 4603 * 4604 * Return: TRUE for not allowed unmap, FALSE otherwise. 
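 *
 * For example, the parameter list length validated below is read from
 * bytes 7-8 of the UNMAP CDB, i.e. get_unaligned_be16(scmd->cmnd + 7),
 * and is checked against both the command's buffer length and the
 * descriptor length reported in the parameter data.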
4605 */ 4606 static bool mpi3mr_check_return_unmap(struct mpi3mr_ioc *mrioc, 4607 struct scsi_cmnd *scmd) 4608 { 4609 unsigned char *buf; 4610 u16 param_len, desc_len, trunc_param_len; 4611 4612 trunc_param_len = param_len = get_unaligned_be16(scmd->cmnd + 7); 4613 4614 if (mrioc->pdev->revision) { 4615 if ((param_len > 24) && ((param_len - 8) & 0xF)) { 4616 trunc_param_len -= (param_len - 8) & 0xF; 4617 dprint_scsi_command(mrioc, scmd, MPI3_DEBUG_SCSI_ERROR); 4618 dprint_scsi_err(mrioc, 4619 "truncating param_len from (%d) to (%d)\n", 4620 param_len, trunc_param_len); 4621 put_unaligned_be16(trunc_param_len, scmd->cmnd + 7); 4622 dprint_scsi_command(mrioc, scmd, MPI3_DEBUG_SCSI_ERROR); 4623 } 4624 return false; 4625 } 4626 4627 if (!param_len) { 4628 ioc_warn(mrioc, 4629 "%s: cdb received with zero parameter length\n", 4630 __func__); 4631 scsi_print_command(scmd); 4632 scmd->result = DID_OK << 16; 4633 scsi_done(scmd); 4634 return true; 4635 } 4636 4637 if (param_len < 24) { 4638 ioc_warn(mrioc, 4639 "%s: cdb received with invalid param_len: %d\n", 4640 __func__, param_len); 4641 scsi_print_command(scmd); 4642 scmd->result = SAM_STAT_CHECK_CONDITION; 4643 scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 4644 0x1A, 0); 4645 scsi_done(scmd); 4646 return true; 4647 } 4648 if (param_len != scsi_bufflen(scmd)) { 4649 ioc_warn(mrioc, 4650 "%s: cdb received with param_len: %d bufflen: %d\n", 4651 __func__, param_len, scsi_bufflen(scmd)); 4652 scsi_print_command(scmd); 4653 scmd->result = SAM_STAT_CHECK_CONDITION; 4654 scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 4655 0x1A, 0); 4656 scsi_done(scmd); 4657 return true; 4658 } 4659 buf = kzalloc(scsi_bufflen(scmd), GFP_ATOMIC); 4660 if (!buf) { 4661 scsi_print_command(scmd); 4662 scmd->result = SAM_STAT_CHECK_CONDITION; 4663 scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 4664 0x55, 0x03); 4665 scsi_done(scmd); 4666 return true; 4667 } 4668 scsi_sg_copy_to_buffer(scmd, buf, scsi_bufflen(scmd)); 4669 desc_len = get_unaligned_be16(&buf[2]); 4670 4671 if (desc_len < 16) { 4672 ioc_warn(mrioc, 4673 "%s: Invalid descriptor length in param list: %d\n", 4674 __func__, desc_len); 4675 scsi_print_command(scmd); 4676 scmd->result = SAM_STAT_CHECK_CONDITION; 4677 scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 4678 0x26, 0); 4679 scsi_done(scmd); 4680 kfree(buf); 4681 return true; 4682 } 4683 4684 if (param_len > (desc_len + 8)) { 4685 trunc_param_len = desc_len + 8; 4686 scsi_print_command(scmd); 4687 dprint_scsi_err(mrioc, 4688 "truncating param_len(%d) to desc_len+8(%d)\n", 4689 param_len, trunc_param_len); 4690 put_unaligned_be16(trunc_param_len, scmd->cmnd + 7); 4691 scsi_print_command(scmd); 4692 } 4693 4694 kfree(buf); 4695 return false; 4696 } 4697 4698 /** 4699 * mpi3mr_allow_scmd_to_fw - Command is allowed during shutdown 4700 * @scmd: SCSI Command reference 4701 * 4702 * Checks whether a cdb is allowed during shutdown or not. 4703 * 4704 * Return: TRUE for allowed commands, FALSE otherwise. 4705 */ 4706 4707 inline bool mpi3mr_allow_scmd_to_fw(struct scsi_cmnd *scmd) 4708 { 4709 switch (scmd->cmnd[0]) { 4710 case SYNCHRONIZE_CACHE: 4711 case START_STOP: 4712 return true; 4713 default: 4714 return false; 4715 } 4716 } 4717 4718 /** 4719 * mpi3mr_qcmd - I/O request despatcher 4720 * @shost: SCSI Host reference 4721 * @scmd: SCSI Command reference 4722 * 4723 * Issues the SCSI Command as an MPI3 request. 
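 *
 * The CDB, LUN, flags and data length are packaged into a struct
 * mpi3_scsi_io_request, SGEs are attached via mpi3mr_build_sg_scmd(),
 * and the frame is posted to the operational request queue derived
 * from the command's block layer hardware queue index.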
4724 * 4725 * Return: 0 on successful queueing of the request or if the 4726 * request is completed with failure. 4727 * SCSI_MLQUEUE_DEVICE_BUSY when the device is busy. 4728 * SCSI_MLQUEUE_HOST_BUSY when the host queue is full. 4729 */ 4730 static int mpi3mr_qcmd(struct Scsi_Host *shost, 4731 struct scsi_cmnd *scmd) 4732 { 4733 struct mpi3mr_ioc *mrioc = shost_priv(shost); 4734 struct mpi3mr_stgt_priv_data *stgt_priv_data; 4735 struct mpi3mr_sdev_priv_data *sdev_priv_data; 4736 struct scmd_priv *scmd_priv_data = NULL; 4737 struct mpi3_scsi_io_request *scsiio_req = NULL; 4738 struct op_req_qinfo *op_req_q = NULL; 4739 int retval = 0; 4740 u16 dev_handle; 4741 u16 host_tag; 4742 u32 scsiio_flags = 0, data_len_blks = 0; 4743 struct request *rq = scsi_cmd_to_rq(scmd); 4744 int iprio_class; 4745 u8 is_pcie_dev = 0; 4746 u32 tracked_io_sz = 0; 4747 u32 ioc_pend_data_len = 0, tg_pend_data_len = 0; 4748 struct mpi3mr_throttle_group_info *tg = NULL; 4749 4750 if (mrioc->unrecoverable) { 4751 scmd->result = DID_ERROR << 16; 4752 scsi_done(scmd); 4753 goto out; 4754 } 4755 4756 sdev_priv_data = scmd->device->hostdata; 4757 if (!sdev_priv_data || !sdev_priv_data->tgt_priv_data) { 4758 scmd->result = DID_NO_CONNECT << 16; 4759 scsi_done(scmd); 4760 goto out; 4761 } 4762 4763 if (mrioc->stop_drv_processing && 4764 !(mpi3mr_allow_scmd_to_fw(scmd))) { 4765 scmd->result = DID_NO_CONNECT << 16; 4766 scsi_done(scmd); 4767 goto out; 4768 } 4769 4770 stgt_priv_data = sdev_priv_data->tgt_priv_data; 4771 dev_handle = stgt_priv_data->dev_handle; 4772 4773 /* Avoid error handling escalation when device is removed or blocked */ 4774 4775 if (scmd->device->host->shost_state == SHOST_RECOVERY && 4776 scmd->cmnd[0] == TEST_UNIT_READY && 4777 (stgt_priv_data->dev_removed || (dev_handle == MPI3MR_INVALID_DEV_HANDLE))) { 4778 scsi_build_sense(scmd, 0, UNIT_ATTENTION, 0x29, 0x07); 4779 scsi_done(scmd); 4780 goto out; 4781 } 4782 4783 if (mrioc->reset_in_progress) { 4784 retval = SCSI_MLQUEUE_HOST_BUSY; 4785 goto out; 4786 } 4787 4788 if (atomic_read(&stgt_priv_data->block_io)) { 4789 if (mrioc->stop_drv_processing) { 4790 scmd->result = DID_NO_CONNECT << 16; 4791 scsi_done(scmd); 4792 goto out; 4793 } 4794 retval = SCSI_MLQUEUE_DEVICE_BUSY; 4795 goto out; 4796 } 4797 4798 if (dev_handle == MPI3MR_INVALID_DEV_HANDLE) { 4799 scmd->result = DID_NO_CONNECT << 16; 4800 scsi_done(scmd); 4801 goto out; 4802 } 4803 if (stgt_priv_data->dev_removed) { 4804 scmd->result = DID_NO_CONNECT << 16; 4805 scsi_done(scmd); 4806 goto out; 4807 } 4808 4809 if (stgt_priv_data->dev_type == MPI3_DEVICE_DEVFORM_PCIE) 4810 is_pcie_dev = 1; 4811 if ((scmd->cmnd[0] == UNMAP) && is_pcie_dev && 4812 (mrioc->pdev->device == MPI3_MFGPAGE_DEVID_SAS4116) && 4813 mpi3mr_check_return_unmap(mrioc, scmd)) 4814 goto out; 4815 4816 host_tag = mpi3mr_host_tag_for_scmd(mrioc, scmd); 4817 if (host_tag == MPI3MR_HOSTTAG_INVALID) { 4818 scmd->result = DID_ERROR << 16; 4819 scsi_done(scmd); 4820 goto out; 4821 } 4822 4823 if (scmd->sc_data_direction == DMA_FROM_DEVICE) 4824 scsiio_flags = MPI3_SCSIIO_FLAGS_DATADIRECTION_READ; 4825 else if (scmd->sc_data_direction == DMA_TO_DEVICE) 4826 scsiio_flags = MPI3_SCSIIO_FLAGS_DATADIRECTION_WRITE; 4827 else 4828 scsiio_flags = MPI3_SCSIIO_FLAGS_DATADIRECTION_NO_DATA_TRANSFER; 4829 4830 scsiio_flags |= MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_SIMPLEQ; 4831 4832 if (sdev_priv_data->ncq_prio_enable) { 4833 iprio_class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq)); 4834 if (iprio_class == IOPRIO_CLASS_RT) 4835 scsiio_flags |= 1 << 
MPI3_SCSIIO_FLAGS_CMDPRI_SHIFT; 4836 } 4837 4838 if (scmd->cmd_len > 16) 4839 scsiio_flags |= MPI3_SCSIIO_FLAGS_CDB_GREATER_THAN_16; 4840 4841 scmd_priv_data = scsi_cmd_priv(scmd); 4842 memset(scmd_priv_data->mpi3mr_scsiio_req, 0, MPI3MR_ADMIN_REQ_FRAME_SZ); 4843 scsiio_req = (struct mpi3_scsi_io_request *)scmd_priv_data->mpi3mr_scsiio_req; 4844 scsiio_req->function = MPI3_FUNCTION_SCSI_IO; 4845 scsiio_req->host_tag = cpu_to_le16(host_tag); 4846 4847 mpi3mr_setup_eedp(mrioc, scmd, scsiio_req); 4848 4849 if (stgt_priv_data->wslen) 4850 mpi3mr_setup_divert_ws(mrioc, scmd, scsiio_req, &scsiio_flags, 4851 stgt_priv_data->wslen); 4852 4853 memcpy(scsiio_req->cdb.cdb32, scmd->cmnd, scmd->cmd_len); 4854 scsiio_req->data_length = cpu_to_le32(scsi_bufflen(scmd)); 4855 scsiio_req->dev_handle = cpu_to_le16(dev_handle); 4856 scsiio_req->flags = cpu_to_le32(scsiio_flags); 4857 int_to_scsilun(sdev_priv_data->lun_id, 4858 (struct scsi_lun *)scsiio_req->lun); 4859 4860 if (mpi3mr_build_sg_scmd(mrioc, scmd, scsiio_req)) { 4861 mpi3mr_clear_scmd_priv(mrioc, scmd); 4862 retval = SCSI_MLQUEUE_HOST_BUSY; 4863 goto out; 4864 } 4865 op_req_q = &mrioc->req_qinfo[scmd_priv_data->req_q_idx]; 4866 data_len_blks = scsi_bufflen(scmd) >> 9; 4867 if ((data_len_blks >= mrioc->io_throttle_data_length) && 4868 stgt_priv_data->io_throttle_enabled) { 4869 tracked_io_sz = data_len_blks; 4870 tg = stgt_priv_data->throttle_group; 4871 if (tg) { 4872 ioc_pend_data_len = atomic_add_return(data_len_blks, 4873 &mrioc->pend_large_data_sz); 4874 tg_pend_data_len = atomic_add_return(data_len_blks, 4875 &tg->pend_large_data_sz); 4876 if (!tg->io_divert && ((ioc_pend_data_len >= 4877 mrioc->io_throttle_high) || 4878 (tg_pend_data_len >= tg->high))) { 4879 tg->io_divert = 1; 4880 tg->need_qd_reduction = 1; 4881 mpi3mr_set_io_divert_for_all_vd_in_tg(mrioc, 4882 tg, 1); 4883 mpi3mr_queue_qd_reduction_event(mrioc, tg); 4884 } 4885 } else { 4886 ioc_pend_data_len = atomic_add_return(data_len_blks, 4887 &mrioc->pend_large_data_sz); 4888 if (ioc_pend_data_len >= mrioc->io_throttle_high) 4889 stgt_priv_data->io_divert = 1; 4890 } 4891 } 4892 4893 if (stgt_priv_data->io_divert) { 4894 scsiio_req->msg_flags |= 4895 MPI3_SCSIIO_MSGFLAGS_DIVERT_TO_FIRMWARE; 4896 scsiio_flags |= MPI3_SCSIIO_FLAGS_DIVERT_REASON_IO_THROTTLING; 4897 } 4898 scsiio_req->flags = cpu_to_le32(scsiio_flags); 4899 4900 if (mpi3mr_op_request_post(mrioc, op_req_q, 4901 scmd_priv_data->mpi3mr_scsiio_req)) { 4902 mpi3mr_clear_scmd_priv(mrioc, scmd); 4903 retval = SCSI_MLQUEUE_HOST_BUSY; 4904 if (tracked_io_sz) { 4905 atomic_sub(tracked_io_sz, &mrioc->pend_large_data_sz); 4906 if (tg) 4907 atomic_sub(tracked_io_sz, 4908 &tg->pend_large_data_sz); 4909 } 4910 goto out; 4911 } 4912 4913 out: 4914 return retval; 4915 } 4916 4917 static const struct scsi_host_template mpi3mr_driver_template = { 4918 .module = THIS_MODULE, 4919 .name = "MPI3 Storage Controller", 4920 .proc_name = MPI3MR_DRIVER_NAME, 4921 .queuecommand = mpi3mr_qcmd, 4922 .target_alloc = mpi3mr_target_alloc, 4923 .slave_alloc = mpi3mr_slave_alloc, 4924 .slave_configure = mpi3mr_slave_configure, 4925 .target_destroy = mpi3mr_target_destroy, 4926 .slave_destroy = mpi3mr_slave_destroy, 4927 .scan_finished = mpi3mr_scan_finished, 4928 .scan_start = mpi3mr_scan_start, 4929 .change_queue_depth = mpi3mr_change_queue_depth, 4930 .eh_device_reset_handler = mpi3mr_eh_dev_reset, 4931 .eh_target_reset_handler = mpi3mr_eh_target_reset, 4932 .eh_bus_reset_handler = mpi3mr_eh_bus_reset, 4933 .eh_host_reset_handler = 
mpi3mr_eh_host_reset, 4934 .bios_param = mpi3mr_bios_param, 4935 .map_queues = mpi3mr_map_queues, 4936 .mq_poll = mpi3mr_blk_mq_poll, 4937 .no_write_same = 1, 4938 .can_queue = 1, 4939 .this_id = -1, 4940 .sg_tablesize = MPI3MR_DEFAULT_SGL_ENTRIES, 4941 /* max xfer supported is 1M (2K in 512 byte sized sectors) 4942 */ 4943 .max_sectors = (MPI3MR_DEFAULT_MAX_IO_SIZE / 512), 4944 .cmd_per_lun = MPI3MR_MAX_CMDS_LUN, 4945 .max_segment_size = 0xffffffff, 4946 .track_queue_depth = 1, 4947 .cmd_size = sizeof(struct scmd_priv), 4948 .shost_groups = mpi3mr_host_groups, 4949 .sdev_groups = mpi3mr_dev_groups, 4950 }; 4951 4952 /** 4953 * mpi3mr_init_drv_cmd - Initialize internal command tracker 4954 * @cmdptr: Internal command tracker 4955 * @host_tag: Host tag used for the specific command 4956 * 4957 * Initialize the internal command tracker structure with 4958 * specified host tag. 4959 * 4960 * Return: Nothing. 4961 */ 4962 static inline void mpi3mr_init_drv_cmd(struct mpi3mr_drv_cmd *cmdptr, 4963 u16 host_tag) 4964 { 4965 mutex_init(&cmdptr->mutex); 4966 cmdptr->reply = NULL; 4967 cmdptr->state = MPI3MR_CMD_NOTUSED; 4968 cmdptr->dev_handle = MPI3MR_INVALID_DEV_HANDLE; 4969 cmdptr->host_tag = host_tag; 4970 } 4971 4972 /** 4973 * osintfc_mrioc_security_status -Check controller secure status 4974 * @pdev: PCI device instance 4975 * 4976 * Read the Device Serial Number capability from PCI config 4977 * space and decide whether the controller is secure or not. 4978 * 4979 * Return: 0 on success, non-zero on failure. 4980 */ 4981 static int 4982 osintfc_mrioc_security_status(struct pci_dev *pdev) 4983 { 4984 u32 cap_data; 4985 int base; 4986 u32 ctlr_status; 4987 u32 debug_status; 4988 int retval = 0; 4989 4990 base = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN); 4991 if (!base) { 4992 dev_err(&pdev->dev, 4993 "%s: PCI_EXT_CAP_ID_DSN is not supported\n", __func__); 4994 return -1; 4995 } 4996 4997 pci_read_config_dword(pdev, base + 4, &cap_data); 4998 4999 debug_status = cap_data & MPI3MR_CTLR_SECURE_DBG_STATUS_MASK; 5000 ctlr_status = cap_data & MPI3MR_CTLR_SECURITY_STATUS_MASK; 5001 5002 switch (ctlr_status) { 5003 case MPI3MR_INVALID_DEVICE: 5004 dev_err(&pdev->dev, 5005 "%s: Non secure ctlr (Invalid) is detected: DID: 0x%x: SVID: 0x%x: SDID: 0x%x\n", 5006 __func__, pdev->device, pdev->subsystem_vendor, 5007 pdev->subsystem_device); 5008 retval = -1; 5009 break; 5010 case MPI3MR_CONFIG_SECURE_DEVICE: 5011 if (!debug_status) 5012 dev_info(&pdev->dev, 5013 "%s: Config secure ctlr is detected\n", 5014 __func__); 5015 break; 5016 case MPI3MR_HARD_SECURE_DEVICE: 5017 break; 5018 case MPI3MR_TAMPERED_DEVICE: 5019 dev_err(&pdev->dev, 5020 "%s: Non secure ctlr (Tampered) is detected: DID: 0x%x: SVID: 0x%x: SDID: 0x%x\n", 5021 __func__, pdev->device, pdev->subsystem_vendor, 5022 pdev->subsystem_device); 5023 retval = -1; 5024 break; 5025 default: 5026 retval = -1; 5027 break; 5028 } 5029 5030 if (!retval && debug_status) { 5031 dev_err(&pdev->dev, 5032 "%s: Non secure ctlr (Secure Dbg) is detected: DID: 0x%x: SVID: 0x%x: SDID: 0x%x\n", 5033 __func__, pdev->device, pdev->subsystem_vendor, 5034 pdev->subsystem_device); 5035 retval = -1; 5036 } 5037 5038 return retval; 5039 } 5040 5041 /** 5042 * mpi3mr_probe - PCI probe callback 5043 * @pdev: PCI device instance 5044 * @id: PCI device ID details 5045 * 5046 * controller initialization routine. Checks the security status 5047 * of the controller and if it is invalid or tampered return the 5048 * probe without initializing the controller. 
Otherwise,
5049 * allocate per adapter instance through shost_priv and
5050 * initialize controller specific data structures, initialize
5051 * the controller hardware, add shost to the SCSI subsystem.
5052 *
5053 * Return: 0 on success, non-zero on failure.
5054 */
5055
5056 static int
5057 mpi3mr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
5058 {
5059 struct mpi3mr_ioc *mrioc = NULL;
5060 struct Scsi_Host *shost = NULL;
5061 int retval = 0, i;
5062
5063 if (osintfc_mrioc_security_status(pdev)) {
5064 warn_non_secure_ctlr = 1;
5065 return 1; /* For Invalid and Tampered device */
5066 }
5067
5068 shost = scsi_host_alloc(&mpi3mr_driver_template,
5069 sizeof(struct mpi3mr_ioc));
5070 if (!shost) {
5071 retval = -ENODEV;
5072 goto shost_failed;
5073 }
5074
5075 mrioc = shost_priv(shost);
5076 retval = ida_alloc_range(&mrioc_ida, 1, U8_MAX, GFP_KERNEL);
5077 if (retval < 0)
5078 goto id_alloc_failed;
5079 mrioc->id = (u8)retval;
5080 sprintf(mrioc->driver_name, "%s", MPI3MR_DRIVER_NAME);
5081 sprintf(mrioc->name, "%s%d", mrioc->driver_name, mrioc->id);
5082 INIT_LIST_HEAD(&mrioc->list);
5083 spin_lock(&mrioc_list_lock);
5084 list_add_tail(&mrioc->list, &mrioc_list);
5085 spin_unlock(&mrioc_list_lock);
5086
5087 spin_lock_init(&mrioc->admin_req_lock);
5088 spin_lock_init(&mrioc->reply_free_queue_lock);
5089 spin_lock_init(&mrioc->sbq_lock);
5090 spin_lock_init(&mrioc->fwevt_lock);
5091 spin_lock_init(&mrioc->tgtdev_lock);
5092 spin_lock_init(&mrioc->watchdog_lock);
5093 spin_lock_init(&mrioc->chain_buf_lock);
5094 spin_lock_init(&mrioc->sas_node_lock);
5095
5096 INIT_LIST_HEAD(&mrioc->fwevt_list);
5097 INIT_LIST_HEAD(&mrioc->tgtdev_list);
5098 INIT_LIST_HEAD(&mrioc->delayed_rmhs_list);
5099 INIT_LIST_HEAD(&mrioc->delayed_evtack_cmds_list);
5100 INIT_LIST_HEAD(&mrioc->sas_expander_list);
5101 INIT_LIST_HEAD(&mrioc->hba_port_table_list);
5102 INIT_LIST_HEAD(&mrioc->enclosure_list);
5103
5104 mutex_init(&mrioc->reset_mutex);
5105 mpi3mr_init_drv_cmd(&mrioc->init_cmds, MPI3MR_HOSTTAG_INITCMDS);
5106 mpi3mr_init_drv_cmd(&mrioc->host_tm_cmds, MPI3MR_HOSTTAG_BLK_TMS);
5107 mpi3mr_init_drv_cmd(&mrioc->bsg_cmds, MPI3MR_HOSTTAG_BSG_CMDS);
5108 mpi3mr_init_drv_cmd(&mrioc->cfg_cmds, MPI3MR_HOSTTAG_CFG_CMDS);
5109 mpi3mr_init_drv_cmd(&mrioc->transport_cmds,
5110 MPI3MR_HOSTTAG_TRANSPORT_CMDS);
5111
5112 for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++)
5113 mpi3mr_init_drv_cmd(&mrioc->dev_rmhs_cmds[i],
5114 MPI3MR_HOSTTAG_DEVRMCMD_MIN + i);
5115
5116 for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++)
5117 mpi3mr_init_drv_cmd(&mrioc->evtack_cmds[i],
5118 MPI3MR_HOSTTAG_EVTACKCMD_MIN + i);
5119
5120 if ((pdev->device == MPI3_MFGPAGE_DEVID_SAS4116) &&
5121 !pdev->revision)
5122 mrioc->enable_segqueue = false;
5123 else
5124 mrioc->enable_segqueue = true;
5125
5126 init_waitqueue_head(&mrioc->reset_waitq);
5127 mrioc->logging_level = logging_level;
5128 mrioc->shost = shost;
5129 mrioc->pdev = pdev;
5130 mrioc->stop_bsgs = 1;
5131
5132 mrioc->max_sgl_entries = max_sgl_entries;
5133 if (max_sgl_entries > MPI3MR_MAX_SGL_ENTRIES)
5134 mrioc->max_sgl_entries = MPI3MR_MAX_SGL_ENTRIES;
5135 else if (max_sgl_entries < MPI3MR_DEFAULT_SGL_ENTRIES)
5136 mrioc->max_sgl_entries = MPI3MR_DEFAULT_SGL_ENTRIES;
5137 else {
5138 mrioc->max_sgl_entries /= MPI3MR_DEFAULT_SGL_ENTRIES;
5139 mrioc->max_sgl_entries *= MPI3MR_DEFAULT_SGL_ENTRIES;
5140 }
5141
5142 /* init shost parameters */
5143 shost->max_cmd_len = MPI3MR_MAX_CDB_LENGTH;
5144 shost->max_lun = -1;
5145 shost->unique_id = mrioc->id;
5146
5147 shost->max_channel = 0;
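	/*
	 * Start with the widest possible target ID space; max_id is
	 * narrowed to facts.max_perids + 1 once the controller facts are
	 * known after mpi3mr_init_ioc() below.
	 */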
5148 shost->max_id = 0xFFFFFFFF;
5149
5150 shost->host_tagset = 1;
5151
5152 if (prot_mask >= 0)
5153 scsi_host_set_prot(shost, prot_mask);
5154 else {
5155 prot_mask = SHOST_DIF_TYPE1_PROTECTION
5156 | SHOST_DIF_TYPE2_PROTECTION
5157 | SHOST_DIF_TYPE3_PROTECTION;
5158 scsi_host_set_prot(shost, prot_mask);
5159 }
5160
5161 ioc_info(mrioc,
5162 "%s :host protection capabilities enabled %s%s%s%s%s%s%s\n",
5163 __func__,
5164 (prot_mask & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
5165 (prot_mask & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
5166 (prot_mask & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
5167 (prot_mask & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
5168 (prot_mask & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
5169 (prot_mask & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
5170 (prot_mask & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
5171
5172 if (prot_guard_mask)
5173 scsi_host_set_guard(shost, (prot_guard_mask & 3));
5174 else
5175 scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
5176
5177 snprintf(mrioc->fwevt_worker_name, sizeof(mrioc->fwevt_worker_name),
5178 "%s%d_fwevt_wrkr", mrioc->driver_name, mrioc->id);
5179 mrioc->fwevt_worker_thread = alloc_ordered_workqueue(
5180 mrioc->fwevt_worker_name, 0);
5181 if (!mrioc->fwevt_worker_thread) {
5182 ioc_err(mrioc, "failure at %s:%d/%s()!\n",
5183 __FILE__, __LINE__, __func__);
5184 retval = -ENODEV;
5185 goto fwevtthread_failed;
5186 }
5187
5188 mrioc->is_driver_loading = 1;
5189 mrioc->cpu_count = num_online_cpus();
5190 if (mpi3mr_setup_resources(mrioc)) {
5191 ioc_err(mrioc, "setup resources failed\n");
5192 retval = -ENODEV;
5193 goto resource_alloc_failed;
5194 }
5195 if (mpi3mr_init_ioc(mrioc)) {
5196 ioc_err(mrioc, "initializing IOC failed\n");
5197 retval = -ENODEV;
5198 goto init_ioc_failed;
5199 }
5200
5201 shost->nr_hw_queues = mrioc->num_op_reply_q;
5202 if (mrioc->active_poll_qcount)
5203 shost->nr_maps = 3;
5204
5205 shost->can_queue = mrioc->max_host_ios;
5206 shost->sg_tablesize = mrioc->max_sgl_entries;
5207 shost->max_id = mrioc->facts.max_perids + 1;
5208
5209 retval = scsi_add_host(shost, &pdev->dev);
5210 if (retval) {
5211 ioc_err(mrioc, "failure at %s:%d/%s()!\n",
5212 __FILE__, __LINE__, __func__);
5213 goto addhost_failed;
5214 }
5215
5216 scsi_scan_host(shost);
5217 mpi3mr_bsg_init(mrioc);
5218 return retval;
5219
5220 addhost_failed:
5221 mpi3mr_stop_watchdog(mrioc);
5222 mpi3mr_cleanup_ioc(mrioc);
5223 init_ioc_failed:
5224 mpi3mr_free_mem(mrioc);
5225 mpi3mr_cleanup_resources(mrioc);
5226 resource_alloc_failed:
5227 destroy_workqueue(mrioc->fwevt_worker_thread);
5228 fwevtthread_failed:
5229 ida_free(&mrioc_ida, mrioc->id);
5230 spin_lock(&mrioc_list_lock);
5231 list_del(&mrioc->list);
5232 spin_unlock(&mrioc_list_lock);
5233 id_alloc_failed:
5234 scsi_host_put(shost);
5235 shost_failed:
5236 return retval;
5237 }
5238
5239 /**
5240 * mpi3mr_remove - PCI remove callback
5241 * @pdev: PCI device instance
5242 *
5243 * Cleanup the IOC by issuing MUR and shutdown notification.
5244 * Free up all memory and resources associated with the
5245 * controller and target devices, unregister the shost.
5246 *
5247 * Return: Nothing.
5248 */ 5249 static void mpi3mr_remove(struct pci_dev *pdev) 5250 { 5251 struct Scsi_Host *shost = pci_get_drvdata(pdev); 5252 struct mpi3mr_ioc *mrioc; 5253 struct workqueue_struct *wq; 5254 unsigned long flags; 5255 struct mpi3mr_tgt_dev *tgtdev, *tgtdev_next; 5256 struct mpi3mr_hba_port *port, *hba_port_next; 5257 struct mpi3mr_sas_node *sas_expander, *sas_expander_next; 5258 5259 if (!shost) 5260 return; 5261 5262 mrioc = shost_priv(shost); 5263 while (mrioc->reset_in_progress || mrioc->is_driver_loading) 5264 ssleep(1); 5265 5266 if (!pci_device_is_present(mrioc->pdev)) { 5267 mrioc->unrecoverable = 1; 5268 mpi3mr_flush_cmds_for_unrecovered_controller(mrioc); 5269 } 5270 5271 mpi3mr_bsg_exit(mrioc); 5272 mrioc->stop_drv_processing = 1; 5273 mpi3mr_cleanup_fwevt_list(mrioc); 5274 spin_lock_irqsave(&mrioc->fwevt_lock, flags); 5275 wq = mrioc->fwevt_worker_thread; 5276 mrioc->fwevt_worker_thread = NULL; 5277 spin_unlock_irqrestore(&mrioc->fwevt_lock, flags); 5278 if (wq) 5279 destroy_workqueue(wq); 5280 5281 if (mrioc->sas_transport_enabled) 5282 sas_remove_host(shost); 5283 else 5284 scsi_remove_host(shost); 5285 5286 list_for_each_entry_safe(tgtdev, tgtdev_next, &mrioc->tgtdev_list, 5287 list) { 5288 mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev); 5289 mpi3mr_tgtdev_del_from_list(mrioc, tgtdev, true); 5290 mpi3mr_tgtdev_put(tgtdev); 5291 } 5292 mpi3mr_stop_watchdog(mrioc); 5293 mpi3mr_cleanup_ioc(mrioc); 5294 mpi3mr_free_mem(mrioc); 5295 mpi3mr_cleanup_resources(mrioc); 5296 5297 spin_lock_irqsave(&mrioc->sas_node_lock, flags); 5298 list_for_each_entry_safe_reverse(sas_expander, sas_expander_next, 5299 &mrioc->sas_expander_list, list) { 5300 spin_unlock_irqrestore(&mrioc->sas_node_lock, flags); 5301 mpi3mr_expander_node_remove(mrioc, sas_expander); 5302 spin_lock_irqsave(&mrioc->sas_node_lock, flags); 5303 } 5304 list_for_each_entry_safe(port, hba_port_next, &mrioc->hba_port_table_list, list) { 5305 ioc_info(mrioc, 5306 "removing hba_port entry: %p port: %d from hba_port list\n", 5307 port, port->port_id); 5308 list_del(&port->list); 5309 kfree(port); 5310 } 5311 spin_unlock_irqrestore(&mrioc->sas_node_lock, flags); 5312 5313 if (mrioc->sas_hba.num_phys) { 5314 kfree(mrioc->sas_hba.phy); 5315 mrioc->sas_hba.phy = NULL; 5316 mrioc->sas_hba.num_phys = 0; 5317 } 5318 5319 ida_free(&mrioc_ida, mrioc->id); 5320 spin_lock(&mrioc_list_lock); 5321 list_del(&mrioc->list); 5322 spin_unlock(&mrioc_list_lock); 5323 5324 scsi_host_put(shost); 5325 } 5326 5327 /** 5328 * mpi3mr_shutdown - PCI shutdown callback 5329 * @pdev: PCI device instance 5330 * 5331 * Free up all memory and resources associated with the 5332 * controller 5333 * 5334 * Return: Nothing. 
5335 */ 5336 static void mpi3mr_shutdown(struct pci_dev *pdev) 5337 { 5338 struct Scsi_Host *shost = pci_get_drvdata(pdev); 5339 struct mpi3mr_ioc *mrioc; 5340 struct workqueue_struct *wq; 5341 unsigned long flags; 5342 5343 if (!shost) 5344 return; 5345 5346 mrioc = shost_priv(shost); 5347 while (mrioc->reset_in_progress || mrioc->is_driver_loading) 5348 ssleep(1); 5349 5350 mrioc->stop_drv_processing = 1; 5351 mpi3mr_cleanup_fwevt_list(mrioc); 5352 spin_lock_irqsave(&mrioc->fwevt_lock, flags); 5353 wq = mrioc->fwevt_worker_thread; 5354 mrioc->fwevt_worker_thread = NULL; 5355 spin_unlock_irqrestore(&mrioc->fwevt_lock, flags); 5356 if (wq) 5357 destroy_workqueue(wq); 5358 5359 mpi3mr_stop_watchdog(mrioc); 5360 mpi3mr_cleanup_ioc(mrioc); 5361 mpi3mr_cleanup_resources(mrioc); 5362 } 5363 5364 /** 5365 * mpi3mr_suspend - PCI power management suspend callback 5366 * @dev: Device struct 5367 * 5368 * Change the power state to the given value and cleanup the IOC 5369 * by issuing MUR and shutdown notification 5370 * 5371 * Return: 0 always. 5372 */ 5373 static int __maybe_unused 5374 mpi3mr_suspend(struct device *dev) 5375 { 5376 struct pci_dev *pdev = to_pci_dev(dev); 5377 struct Scsi_Host *shost = pci_get_drvdata(pdev); 5378 struct mpi3mr_ioc *mrioc; 5379 5380 if (!shost) 5381 return 0; 5382 5383 mrioc = shost_priv(shost); 5384 while (mrioc->reset_in_progress || mrioc->is_driver_loading) 5385 ssleep(1); 5386 mrioc->stop_drv_processing = 1; 5387 mpi3mr_cleanup_fwevt_list(mrioc); 5388 scsi_block_requests(shost); 5389 mpi3mr_stop_watchdog(mrioc); 5390 mpi3mr_cleanup_ioc(mrioc); 5391 5392 ioc_info(mrioc, "pdev=0x%p, slot=%s, entering operating state\n", 5393 pdev, pci_name(pdev)); 5394 mpi3mr_cleanup_resources(mrioc); 5395 5396 return 0; 5397 } 5398 5399 /** 5400 * mpi3mr_resume - PCI power management resume callback 5401 * @dev: Device struct 5402 * 5403 * Restore the power state to D0 and reinitialize the controller 5404 * and resume I/O operations to the target devices 5405 * 5406 * Return: 0 on success, non-zero on failure 5407 */ 5408 static int __maybe_unused 5409 mpi3mr_resume(struct device *dev) 5410 { 5411 struct pci_dev *pdev = to_pci_dev(dev); 5412 struct Scsi_Host *shost = pci_get_drvdata(pdev); 5413 struct mpi3mr_ioc *mrioc; 5414 pci_power_t device_state = pdev->current_state; 5415 int r; 5416 5417 if (!shost) 5418 return 0; 5419 5420 mrioc = shost_priv(shost); 5421 5422 ioc_info(mrioc, "pdev=0x%p, slot=%s, previous operating state [D%d]\n", 5423 pdev, pci_name(pdev), device_state); 5424 mrioc->pdev = pdev; 5425 mrioc->cpu_count = num_online_cpus(); 5426 r = mpi3mr_setup_resources(mrioc); 5427 if (r) { 5428 ioc_info(mrioc, "%s: Setup resources failed[%d]\n", 5429 __func__, r); 5430 return r; 5431 } 5432 5433 mrioc->stop_drv_processing = 0; 5434 mpi3mr_invalidate_devhandles(mrioc); 5435 mpi3mr_free_enclosure_list(mrioc); 5436 mpi3mr_memset_buffers(mrioc); 5437 r = mpi3mr_reinit_ioc(mrioc, 1); 5438 if (r) { 5439 ioc_err(mrioc, "resuming controller failed[%d]\n", r); 5440 return r; 5441 } 5442 ssleep(MPI3MR_RESET_TOPOLOGY_SETTLE_TIME); 5443 scsi_unblock_requests(shost); 5444 mrioc->device_refresh_on = 0; 5445 mpi3mr_start_watchdog(mrioc); 5446 5447 return 0; 5448 } 5449 5450 static const struct pci_device_id mpi3mr_pci_id_table[] = { 5451 { 5452 PCI_DEVICE_SUB(MPI3_MFGPAGE_VENDORID_BROADCOM, 5453 MPI3_MFGPAGE_DEVID_SAS4116, PCI_ANY_ID, PCI_ANY_ID) 5454 }, 5455 { 5456 PCI_DEVICE_SUB(MPI3_MFGPAGE_VENDORID_BROADCOM, 5457 MPI3_MFGPAGE_DEVID_SAS5116_MPI, PCI_ANY_ID, PCI_ANY_ID) 5458 }, 5459 { 
5460 PCI_DEVICE_SUB(MPI3_MFGPAGE_VENDORID_BROADCOM, 5461 MPI3_MFGPAGE_DEVID_SAS5116_MPI_MGMT, PCI_ANY_ID, PCI_ANY_ID) 5462 }, 5463 { 0 } 5464 }; 5465 MODULE_DEVICE_TABLE(pci, mpi3mr_pci_id_table); 5466 5467 static SIMPLE_DEV_PM_OPS(mpi3mr_pm_ops, mpi3mr_suspend, mpi3mr_resume); 5468 5469 static struct pci_driver mpi3mr_pci_driver = { 5470 .name = MPI3MR_DRIVER_NAME, 5471 .id_table = mpi3mr_pci_id_table, 5472 .probe = mpi3mr_probe, 5473 .remove = mpi3mr_remove, 5474 .shutdown = mpi3mr_shutdown, 5475 .driver.pm = &mpi3mr_pm_ops, 5476 }; 5477 5478 static ssize_t event_counter_show(struct device_driver *dd, char *buf) 5479 { 5480 return sprintf(buf, "%llu\n", atomic64_read(&event_counter)); 5481 } 5482 static DRIVER_ATTR_RO(event_counter); 5483 5484 static int __init mpi3mr_init(void) 5485 { 5486 int ret_val; 5487 5488 pr_info("Loading %s version %s\n", MPI3MR_DRIVER_NAME, 5489 MPI3MR_DRIVER_VERSION); 5490 5491 mpi3mr_transport_template = 5492 sas_attach_transport(&mpi3mr_transport_functions); 5493 if (!mpi3mr_transport_template) { 5494 pr_err("%s failed to load due to sas transport attach failure\n", 5495 MPI3MR_DRIVER_NAME); 5496 return -ENODEV; 5497 } 5498 5499 ret_val = pci_register_driver(&mpi3mr_pci_driver); 5500 if (ret_val) { 5501 pr_err("%s failed to load due to pci register driver failure\n", 5502 MPI3MR_DRIVER_NAME); 5503 goto err_pci_reg_fail; 5504 } 5505 5506 ret_val = driver_create_file(&mpi3mr_pci_driver.driver, 5507 &driver_attr_event_counter); 5508 if (ret_val) 5509 goto err_event_counter; 5510 5511 return ret_val; 5512 5513 err_event_counter: 5514 pci_unregister_driver(&mpi3mr_pci_driver); 5515 5516 err_pci_reg_fail: 5517 sas_release_transport(mpi3mr_transport_template); 5518 return ret_val; 5519 } 5520 5521 static void __exit mpi3mr_exit(void) 5522 { 5523 if (warn_non_secure_ctlr) 5524 pr_warn( 5525 "Unloading %s version %s while managing a non secure controller\n", 5526 MPI3MR_DRIVER_NAME, MPI3MR_DRIVER_VERSION); 5527 else 5528 pr_info("Unloading %s version %s\n", MPI3MR_DRIVER_NAME, 5529 MPI3MR_DRIVER_VERSION); 5530 5531 driver_remove_file(&mpi3mr_pci_driver.driver, 5532 &driver_attr_event_counter); 5533 pci_unregister_driver(&mpi3mr_pci_driver); 5534 sas_release_transport(mpi3mr_transport_template); 5535 ida_destroy(&mrioc_ida); 5536 } 5537 5538 module_init(mpi3mr_init); 5539 module_exit(mpi3mr_exit); 5540
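
/*
 * Usage sketch (illustrative only; the parameter values below are
 * hypothetical examples, not recommendations):
 *
 *   modprobe mpi3mr logging_level=0x1 max_sgl_entries=1024
 *
 * max_sgl_entries is clamped by mpi3mr_probe() to the range described
 * in the module parameter text and rounded down to a multiple of the
 * default SGL entry count before use.
 */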