// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Driver for Broadcom MPI3 Storage Controllers
 *
 * Copyright (C) 2017-2023 Broadcom Inc.
 *  (mailto: mpi3mr-linuxdrv.pdl@broadcom.com)
 *
 */

#include "mpi3mr.h"

/* global driver scope variables */
LIST_HEAD(mrioc_list);
DEFINE_SPINLOCK(mrioc_list_lock);
static int mrioc_ids;
static int warn_non_secure_ctlr;
atomic64_t event_counter;

MODULE_AUTHOR(MPI3MR_DRIVER_AUTHOR);
MODULE_DESCRIPTION(MPI3MR_DRIVER_DESC);
MODULE_LICENSE(MPI3MR_DRIVER_LICENSE);
MODULE_VERSION(MPI3MR_DRIVER_VERSION);

/* Module parameters */
int prot_mask = -1;
module_param(prot_mask, int, 0);
MODULE_PARM_DESC(prot_mask, "Host protection capabilities mask, def=0x07");

static int prot_guard_mask = 3;
module_param(prot_guard_mask, int, 0);
MODULE_PARM_DESC(prot_guard_mask, "Host protection guard mask, def=3");
static int logging_level;
module_param(logging_level, int, 0);
MODULE_PARM_DESC(logging_level,
	"bits for enabling additional logging info (default=0)");
static int max_sgl_entries = MPI3MR_DEFAULT_SGL_ENTRIES;
module_param(max_sgl_entries, int, 0444);
MODULE_PARM_DESC(max_sgl_entries,
	"Preferred max number of SG entries to be used for a single I/O\n"
	"The actual value will be determined by the driver\n"
	"(Minimum=256, Maximum=2048, default=256)");

/* Forward declarations */
static void mpi3mr_send_event_ack(struct mpi3mr_ioc *mrioc, u8 event,
	struct mpi3mr_drv_cmd *cmdparam, u32 event_ctx);

#define MPI3MR_DRIVER_EVENT_TG_QD_REDUCTION	(0xFFFF)

#define MPI3_EVENT_WAIT_FOR_DEVICES_TO_REFRESH	(0xFFFE)

/**
 * mpi3mr_host_tag_for_scmd - Get host tag for a scmd
 * @mrioc: Adapter instance reference
 * @scmd: SCSI command reference
 *
 * Calculate the host tag based on block tag for a given scmd.
 *
 * Return: Valid host tag or MPI3MR_HOSTTAG_INVALID.
 */
static u16 mpi3mr_host_tag_for_scmd(struct mpi3mr_ioc *mrioc,
	struct scsi_cmnd *scmd)
{
	struct scmd_priv *priv = NULL;
	u32 unique_tag;
	u16 host_tag, hw_queue;

	unique_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));

	hw_queue = blk_mq_unique_tag_to_hwq(unique_tag);
	if (hw_queue >= mrioc->num_op_reply_q)
		return MPI3MR_HOSTTAG_INVALID;
	host_tag = blk_mq_unique_tag_to_tag(unique_tag);

	if (WARN_ON(host_tag >= mrioc->max_host_ios))
		return MPI3MR_HOSTTAG_INVALID;

	priv = scsi_cmd_priv(scmd);
	/* host_tag 0 is invalid hence incrementing by 1 */
	priv->host_tag = host_tag + 1;
	priv->scmd = scmd;
	priv->in_lld_scope = 1;
	priv->req_q_idx = hw_queue;
	priv->meta_chain_idx = -1;
	priv->chain_idx = -1;
	priv->meta_sg_valid = 0;
	return priv->host_tag;
}
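/*
 * Editor's note on the tag round trip implemented by the two helpers
 * adjacent to this comment: the block layer's unique tag packs the
 * hardware queue index in the upper bits and the per-queue tag in the
 * lower BLK_MQ_UNIQUE_TAG_BITS, and the driver stores tag + 1 as the
 * host tag so that 0 stays reserved as invalid. For example, with
 * hw_queue = 2 and tag = 5:
 *
 *	unique_tag = (2 << BLK_MQ_UNIQUE_TAG_BITS) | 5;
 *	host_tag   = 5 + 1;
 *
 * mpi3mr_scmd_from_host_tag() below reverses this by computing
 * (host_tag - 1) | (qidx << BLK_MQ_UNIQUE_TAG_BITS) before calling
 * scsi_host_find_tag().
 */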
/**
 * mpi3mr_scmd_from_host_tag - Get SCSI command from host tag
 * @mrioc: Adapter instance reference
 * @host_tag: Host tag
 * @qidx: Operational queue index
 *
 * Identify the block tag from the host tag and queue index and
 * retrieve the associated scsi command using scsi_host_find_tag().
 *
 * Return: SCSI command reference or NULL.
 */
static struct scsi_cmnd *mpi3mr_scmd_from_host_tag(
	struct mpi3mr_ioc *mrioc, u16 host_tag, u16 qidx)
{
	struct scsi_cmnd *scmd = NULL;
	struct scmd_priv *priv = NULL;
	u32 unique_tag = host_tag - 1;

	if (WARN_ON(host_tag > mrioc->max_host_ios))
		goto out;

	unique_tag |= (qidx << BLK_MQ_UNIQUE_TAG_BITS);

	scmd = scsi_host_find_tag(mrioc->shost, unique_tag);
	if (scmd) {
		priv = scsi_cmd_priv(scmd);
		if (!priv->in_lld_scope)
			scmd = NULL;
	}
out:
	return scmd;
}

/**
 * mpi3mr_clear_scmd_priv - Cleanup SCSI command private data
 * @mrioc: Adapter instance reference
 * @scmd: SCSI command reference
 *
 * Invalidate the SCSI command private data to mark the command
 * as no longer being in LLD scope.
 *
 * Return: Nothing.
 */
static void mpi3mr_clear_scmd_priv(struct mpi3mr_ioc *mrioc,
	struct scsi_cmnd *scmd)
{
	struct scmd_priv *priv = NULL;

	priv = scsi_cmd_priv(scmd);

	if (WARN_ON(priv->in_lld_scope == 0))
		return;
	priv->host_tag = MPI3MR_HOSTTAG_INVALID;
	priv->req_q_idx = 0xFFFF;
	priv->scmd = NULL;
	priv->in_lld_scope = 0;
	priv->meta_sg_valid = 0;
	if (priv->chain_idx >= 0) {
		clear_bit(priv->chain_idx, mrioc->chain_bitmap);
		priv->chain_idx = -1;
	}
	if (priv->meta_chain_idx >= 0) {
		clear_bit(priv->meta_chain_idx, mrioc->chain_bitmap);
		priv->meta_chain_idx = -1;
	}
}

static void mpi3mr_dev_rmhs_send_tm(struct mpi3mr_ioc *mrioc, u16 handle,
	struct mpi3mr_drv_cmd *cmdparam, u8 iou_rc);
static void mpi3mr_fwevt_worker(struct work_struct *work);

/**
 * mpi3mr_fwevt_free - firmware event memory deallocator
 * @r: kref pointer of the firmware event
 *
 * Free firmware event memory when there is no reference.
 */
static void mpi3mr_fwevt_free(struct kref *r)
{
	kfree(container_of(r, struct mpi3mr_fwevt, ref_count));
}

/**
 * mpi3mr_fwevt_get - kref incrementer
 * @fwevt: Firmware event reference
 *
 * Increment firmware event reference count.
 */
static void mpi3mr_fwevt_get(struct mpi3mr_fwevt *fwevt)
{
	kref_get(&fwevt->ref_count);
}

/**
 * mpi3mr_fwevt_put - kref decrementer
 * @fwevt: Firmware event reference
 *
 * Decrement firmware event reference count.
 */
static void mpi3mr_fwevt_put(struct mpi3mr_fwevt *fwevt)
{
	kref_put(&fwevt->ref_count, mpi3mr_fwevt_free);
}

/**
 * mpi3mr_alloc_fwevt - Allocate firmware event
 * @len: length of firmware event data to allocate
 *
 * Allocate firmware event with the required length and initialize
 * the reference counter.
 *
 * Return: firmware event reference.
 */
static struct mpi3mr_fwevt *mpi3mr_alloc_fwevt(int len)
{
	struct mpi3mr_fwevt *fwevt;

	fwevt = kzalloc(sizeof(*fwevt) + len, GFP_ATOMIC);
	if (!fwevt)
		return NULL;

	kref_init(&fwevt->ref_count);
	return fwevt;
}
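/*
 * Editor's summary of the fwevt reference counting visible in the helpers
 * around this comment: kref_init() in mpi3mr_alloc_fwevt() sets the count
 * to 1 (the allocator's reference). mpi3mr_fwevt_add_to_list() below takes
 * one extra reference on behalf of fwevt_list and one on behalf of the
 * queued work item, so a queued event normally holds three references.
 * Each list removal (mpi3mr_fwevt_del_from_list()/mpi3mr_dequeue_fwevt()),
 * work-queue cancellation and completion path drops one reference, and the
 * memory is freed by mpi3mr_fwevt_free() once the count reaches zero.
 */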
/**
 * mpi3mr_fwevt_add_to_list - Add firmware event to the list
 * @mrioc: Adapter instance reference
 * @fwevt: Firmware event reference
 *
 * Add the given firmware event to the firmware event list.
 *
 * Return: Nothing.
 */
static void mpi3mr_fwevt_add_to_list(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_fwevt *fwevt)
{
	unsigned long flags;

	if (!mrioc->fwevt_worker_thread)
		return;

	spin_lock_irqsave(&mrioc->fwevt_lock, flags);
	/* get fwevt reference count while adding it to fwevt_list */
	mpi3mr_fwevt_get(fwevt);
	INIT_LIST_HEAD(&fwevt->list);
	list_add_tail(&fwevt->list, &mrioc->fwevt_list);
	INIT_WORK(&fwevt->work, mpi3mr_fwevt_worker);
	/* get fwevt reference count while enqueueing it to worker queue */
	mpi3mr_fwevt_get(fwevt);
	queue_work(mrioc->fwevt_worker_thread, &fwevt->work);
	spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);
}

/**
 * mpi3mr_fwevt_del_from_list - Delete firmware event from list
 * @mrioc: Adapter instance reference
 * @fwevt: Firmware event reference
 *
 * Delete the given firmware event from the firmware event list.
 *
 * Return: Nothing.
 */
static void mpi3mr_fwevt_del_from_list(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_fwevt *fwevt)
{
	unsigned long flags;

	spin_lock_irqsave(&mrioc->fwevt_lock, flags);
	if (!list_empty(&fwevt->list)) {
		list_del_init(&fwevt->list);
		/*
		 * Put fwevt reference count after
		 * removing it from fwevt_list
		 */
		mpi3mr_fwevt_put(fwevt);
	}
	spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);
}

/**
 * mpi3mr_dequeue_fwevt - Dequeue firmware event from the list
 * @mrioc: Adapter instance reference
 *
 * Dequeue a firmware event from the firmware event list.
 *
 * Return: firmware event reference or NULL.
 */
static struct mpi3mr_fwevt *mpi3mr_dequeue_fwevt(
	struct mpi3mr_ioc *mrioc)
{
	unsigned long flags;
	struct mpi3mr_fwevt *fwevt = NULL;

	spin_lock_irqsave(&mrioc->fwevt_lock, flags);
	if (!list_empty(&mrioc->fwevt_list)) {
		fwevt = list_first_entry(&mrioc->fwevt_list,
		    struct mpi3mr_fwevt, list);
		list_del_init(&fwevt->list);
		/*
		 * Put fwevt reference count after
		 * removing it from fwevt_list
		 */
		mpi3mr_fwevt_put(fwevt);
	}
	spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);

	return fwevt;
}

/**
 * mpi3mr_cancel_work - cancel firmware event
 * @fwevt: fwevt object which needs to be canceled
 *
 * Return: Nothing.
 */
static void mpi3mr_cancel_work(struct mpi3mr_fwevt *fwevt)
{
	/*
	 * Wait on the fwevt to complete. If this returns 1, then
	 * the event was never executed.
	 *
	 * If it did execute, we wait for it to finish, and the put will
	 * happen from mpi3mr_process_fwevt()
	 */
	if (cancel_work_sync(&fwevt->work)) {
		/*
		 * Put fwevt reference count after
		 * dequeuing it from worker queue
		 */
		mpi3mr_fwevt_put(fwevt);
		/*
		 * Put fwevt reference count to neutralize
		 * kref_init increment
		 */
		mpi3mr_fwevt_put(fwevt);
	}
}
/**
 * mpi3mr_cleanup_fwevt_list - Cleanup firmware event list
 * @mrioc: Adapter instance reference
 *
 * Flush all pending firmware events from the firmware event
 * list.
 *
 * Return: Nothing.
 */
void mpi3mr_cleanup_fwevt_list(struct mpi3mr_ioc *mrioc)
{
	struct mpi3mr_fwevt *fwevt = NULL;

	if ((list_empty(&mrioc->fwevt_list) && !mrioc->current_event) ||
	    !mrioc->fwevt_worker_thread)
		return;

	while ((fwevt = mpi3mr_dequeue_fwevt(mrioc)))
		mpi3mr_cancel_work(fwevt);

	if (mrioc->current_event) {
		fwevt = mrioc->current_event;
		/*
		 * Don't call cancel_work_sync() for the fwevt work if
		 * the controller reset gets invoked while processing
		 * that same fwevt work, or when the worker thread is
		 * waiting for device add/remove APIs to complete;
		 * otherwise it leads to a deadlock.
		 */
		if (current_work() == &fwevt->work || fwevt->pending_at_sml) {
			fwevt->discard = 1;
			return;
		}

		mpi3mr_cancel_work(fwevt);
	}
}

/**
 * mpi3mr_queue_qd_reduction_event - Queue TG QD reduction event
 * @mrioc: Adapter instance reference
 * @tg: Throttle group information pointer
 *
 * Queues a synthetically generated driver event to the event
 * worker thread; the driver event will be used to reduce the QD
 * of all VDs in the TG from the worker thread.
 *
 * Return: None.
 */
static void mpi3mr_queue_qd_reduction_event(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_throttle_group_info *tg)
{
	struct mpi3mr_fwevt *fwevt;
	u16 sz = sizeof(struct mpi3mr_throttle_group_info *);

	/*
	 * If the QD reduction event is already queued due to throttle and if
	 * the QD is not restored through device info change event
	 * then don't queue further reduction events
	 */
	if (tg->fw_qd != tg->modified_qd)
		return;

	fwevt = mpi3mr_alloc_fwevt(sz);
	if (!fwevt) {
		ioc_warn(mrioc, "failed to queue TG QD reduction event\n");
		return;
	}
	*(struct mpi3mr_throttle_group_info **)fwevt->event_data = tg;
	fwevt->mrioc = mrioc;
	fwevt->event_id = MPI3MR_DRIVER_EVENT_TG_QD_REDUCTION;
	fwevt->send_ack = 0;
	fwevt->process_evt = 1;
	fwevt->evt_ctx = 0;
	fwevt->event_data_size = sz;
	tg->modified_qd = max_t(u16, (tg->fw_qd * tg->qd_reduction) / 10, 8);

	dprint_event_bh(mrioc, "qd reduction event queued for tg_id(%d)\n",
	    tg->id);
	mpi3mr_fwevt_add_to_list(mrioc, fwevt);
}
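/*
 * Worked example for the reduction formula above (editor's illustration,
 * not driver output): with a firmware queue depth of tg->fw_qd = 128 and
 * tg->qd_reduction = 2, the new depth is
 *
 *	modified_qd = max(128 * 2 / 10, 8) = max(25, 8) = 25
 *
 * i.e. the QD is scaled to (qd_reduction * 10) percent of the firmware QD
 * with integer truncation, and never drops below 8. The guard on entry
 * (fw_qd != modified_qd) keeps repeat throttle notifications from stacking
 * further reductions until a device info change event restores the QD.
 */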
/**
 * mpi3mr_invalidate_devhandles - Invalidate device handles
 * @mrioc: Adapter instance reference
 *
 * Invalidate the device handles in the target device structures.
 * Called post reset, prior to reinitializing the controller.
 *
 * Return: Nothing.
 */
void mpi3mr_invalidate_devhandles(struct mpi3mr_ioc *mrioc)
{
	struct mpi3mr_tgt_dev *tgtdev;
	struct mpi3mr_stgt_priv_data *tgt_priv;

	list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
		tgtdev->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
		if (tgtdev->starget && tgtdev->starget->hostdata) {
			tgt_priv = tgtdev->starget->hostdata;
			tgt_priv->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
			tgt_priv->io_throttle_enabled = 0;
			tgt_priv->io_divert = 0;
			tgt_priv->throttle_group = NULL;
			tgt_priv->wslen = 0;
			if (tgtdev->host_exposed)
				atomic_set(&tgt_priv->block_io, 1);
		}
	}
}

/**
 * mpi3mr_print_scmd - print individual SCSI command
 * @rq: Block request
 * @data: Adapter instance reference
 *
 * Print the SCSI command details if it is in LLD scope.
 *
 * Return: true always.
 */
static bool mpi3mr_print_scmd(struct request *rq, void *data)
{
	struct mpi3mr_ioc *mrioc = (struct mpi3mr_ioc *)data;
	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
	struct scmd_priv *priv = NULL;

	if (scmd) {
		priv = scsi_cmd_priv(scmd);
		if (!priv->in_lld_scope)
			goto out;

		ioc_info(mrioc, "%s :Host Tag = %d, qid = %d\n",
		    __func__, priv->host_tag, priv->req_q_idx + 1);
		scsi_print_command(scmd);
	}

out:
	return true;
}

/**
 * mpi3mr_flush_scmd - Flush individual SCSI command
 * @rq: Block request
 * @data: Adapter instance reference
 *
 * Return the SCSI command to the upper layers if it is in LLD
 * scope.
 *
 * Return: true always.
 */
static bool mpi3mr_flush_scmd(struct request *rq, void *data)
{
	struct mpi3mr_ioc *mrioc = (struct mpi3mr_ioc *)data;
	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
	struct scmd_priv *priv = NULL;

	if (scmd) {
		priv = scsi_cmd_priv(scmd);
		if (!priv->in_lld_scope)
			goto out;

		if (priv->meta_sg_valid)
			dma_unmap_sg(&mrioc->pdev->dev, scsi_prot_sglist(scmd),
			    scsi_prot_sg_count(scmd), scmd->sc_data_direction);
		mpi3mr_clear_scmd_priv(mrioc, scmd);
		scsi_dma_unmap(scmd);
		scmd->result = DID_RESET << 16;
		scsi_print_command(scmd);
		scsi_done(scmd);
		mrioc->flush_io_count++;
	}

out:
	return true;
}

/**
 * mpi3mr_count_dev_pending - Count commands pending for a lun
 * @rq: Block request
 * @data: SCSI device reference
 *
 * This is an iterator function called for each SCSI command in
 * a host; if the command is pending in the LLD for the specific
 * device (LUN), the device-specific pending I/O counter in the
 * device structure is incremented.
 *
 * Return: true always.
 */
static bool mpi3mr_count_dev_pending(struct request *rq, void *data)
{
	struct scsi_device *sdev = (struct scsi_device *)data;
	struct mpi3mr_sdev_priv_data *sdev_priv_data = sdev->hostdata;
	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
	struct scmd_priv *priv;

	if (scmd) {
		priv = scsi_cmd_priv(scmd);
		if (!priv->in_lld_scope)
			goto out;
		if (scmd->device == sdev)
			sdev_priv_data->pend_count++;
	}

out:
	return true;
}

/**
 * mpi3mr_count_tgt_pending - Count commands pending for target
 * @rq: Block request
 * @data: SCSI target reference
 *
 * This is an iterator function called for each SCSI command in
 * a host; if the command is pending in the LLD for the specific
 * target, the target-specific pending I/O counter in the target
 * structure is incremented.
 *
 * Return: true always.
 */
static bool mpi3mr_count_tgt_pending(struct request *rq, void *data)
{
	struct scsi_target *starget = (struct scsi_target *)data;
	struct mpi3mr_stgt_priv_data *stgt_priv_data = starget->hostdata;
	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
	struct scmd_priv *priv;

	if (scmd) {
		priv = scsi_cmd_priv(scmd);
		if (!priv->in_lld_scope)
			goto out;
		if (scmd->device && (scsi_target(scmd->device) == starget))
			stgt_priv_data->pend_count++;
	}

out:
	return true;
}
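/*
 * Usage note (editor's, based on the iterator contracts above): both
 * counters only ever increment pend_count, so a caller is expected to zero
 * sdev_priv_data->pend_count or stgt_priv_data->pend_count before walking
 * the tag set, along the lines of:
 *
 *	sdev_priv_data->pend_count = 0;
 *	blk_mq_tagset_busy_iter(&shost->tag_set,
 *	    mpi3mr_count_dev_pending, (void *)sdev);
 *
 * The callback signature, bool (*)(struct request *, void *), matches what
 * blk_mq_tagset_busy_iter() expects, as the flush path below also shows.
 */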
/**
 * mpi3mr_flush_host_io - Flush host I/Os
 * @mrioc: Adapter instance reference
 *
 * Flush all of the pending I/Os by calling
 * blk_mq_tagset_busy_iter() for each possible tag. This is
 * executed post controller reset.
 *
 * Return: Nothing.
 */
void mpi3mr_flush_host_io(struct mpi3mr_ioc *mrioc)
{
	struct Scsi_Host *shost = mrioc->shost;

	mrioc->flush_io_count = 0;
	ioc_info(mrioc, "%s :Flushing Host I/O cmds post reset\n", __func__);
	blk_mq_tagset_busy_iter(&shost->tag_set,
	    mpi3mr_flush_scmd, (void *)mrioc);
	ioc_info(mrioc, "%s :Flushed %d Host I/O cmds\n", __func__,
	    mrioc->flush_io_count);
}

/**
 * mpi3mr_flush_cmds_for_unrecovered_controller - Flush all pending cmds
 * @mrioc: Adapter instance reference
 *
 * This function waits for currently running IO poll threads to
 * exit and then flushes all host I/Os and any internal pending
 * cmds. This is executed after the controller is marked as
 * unrecoverable.
 *
 * Return: Nothing.
 */
void mpi3mr_flush_cmds_for_unrecovered_controller(struct mpi3mr_ioc *mrioc)
{
	struct Scsi_Host *shost = mrioc->shost;
	int i;

	if (!mrioc->unrecoverable)
		return;

	if (mrioc->op_reply_qinfo) {
		for (i = 0; i < mrioc->num_queues; i++) {
			while (atomic_read(&mrioc->op_reply_qinfo[i].in_use))
				udelay(500);
			atomic_set(&mrioc->op_reply_qinfo[i].pend_ios, 0);
		}
	}
	mrioc->flush_io_count = 0;
	blk_mq_tagset_busy_iter(&shost->tag_set,
	    mpi3mr_flush_scmd, (void *)mrioc);
	mpi3mr_flush_delayed_cmd_lists(mrioc);
	mpi3mr_flush_drv_cmds(mrioc);
}

/**
 * mpi3mr_alloc_tgtdev - target device allocator
 *
 * Allocate a target device instance and initialize the reference
 * count.
 *
 * Return: target device instance.
 */
static struct mpi3mr_tgt_dev *mpi3mr_alloc_tgtdev(void)
{
	struct mpi3mr_tgt_dev *tgtdev;

	tgtdev = kzalloc(sizeof(*tgtdev), GFP_ATOMIC);
	if (!tgtdev)
		return NULL;
	kref_init(&tgtdev->ref_count);
	return tgtdev;
}

/**
 * mpi3mr_tgtdev_add_to_list - Add tgtdevice to the list
 * @mrioc: Adapter instance reference
 * @tgtdev: Target device
 *
 * Add the target device to the target device list.
 *
 * Return: Nothing.
 */
static void mpi3mr_tgtdev_add_to_list(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_tgt_dev *tgtdev)
{
	unsigned long flags;

	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
	mpi3mr_tgtdev_get(tgtdev);
	INIT_LIST_HEAD(&tgtdev->list);
	list_add_tail(&tgtdev->list, &mrioc->tgtdev_list);
	tgtdev->state = MPI3MR_DEV_CREATED;
	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
}
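/*
 * Editor's note on tgtdev lifetime as implemented by the two list helpers
 * surrounding this comment: the allocator's kref_init() reference is
 * complemented by one reference taken here on behalf of tgtdev_list, and
 * the state is expected to move MPI3MR_DEV_CREATED ->
 * MPI3MR_DEV_REMOVE_HS_STARTED -> MPI3MR_DEV_DELETED (the middle
 * transition is driven by the removal handshake paths elsewhere in the
 * driver). mpi3mr_tgtdev_del_from_list() below only unlinks a device (and
 * drops the list reference) once the removal handshake has started, unless
 * the caller forces it with must_delete.
 */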
/**
 * mpi3mr_tgtdev_del_from_list - Delete tgtdevice from the list
 * @mrioc: Adapter instance reference
 * @tgtdev: Target device
 * @must_delete: Must delete the target device from the list irrespective
 * of the device state.
 *
 * Remove the target device from the target device list.
 *
 * Return: Nothing.
 */
static void mpi3mr_tgtdev_del_from_list(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_tgt_dev *tgtdev, bool must_delete)
{
	unsigned long flags;

	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
	if ((tgtdev->state == MPI3MR_DEV_REMOVE_HS_STARTED) || (must_delete == true)) {
		if (!list_empty(&tgtdev->list)) {
			list_del_init(&tgtdev->list);
			tgtdev->state = MPI3MR_DEV_DELETED;
			mpi3mr_tgtdev_put(tgtdev);
		}
	}
	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
}

/**
 * __mpi3mr_get_tgtdev_by_handle - Get tgtdev from device handle
 * @mrioc: Adapter instance reference
 * @handle: Device handle
 *
 * Accessor to retrieve target device from the device handle.
 * Non-lock version.
 *
 * Return: Target device reference.
 */
static struct mpi3mr_tgt_dev *__mpi3mr_get_tgtdev_by_handle(
	struct mpi3mr_ioc *mrioc, u16 handle)
{
	struct mpi3mr_tgt_dev *tgtdev;

	assert_spin_locked(&mrioc->tgtdev_lock);
	list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list)
		if (tgtdev->dev_handle == handle)
			goto found_tgtdev;
	return NULL;

found_tgtdev:
	mpi3mr_tgtdev_get(tgtdev);
	return tgtdev;
}

/**
 * mpi3mr_get_tgtdev_by_handle - Get tgtdev from device handle
 * @mrioc: Adapter instance reference
 * @handle: Device handle
 *
 * Accessor to retrieve target device from the device handle.
 * Lock version.
 *
 * Return: Target device reference.
 */
struct mpi3mr_tgt_dev *mpi3mr_get_tgtdev_by_handle(
	struct mpi3mr_ioc *mrioc, u16 handle)
{
	struct mpi3mr_tgt_dev *tgtdev;
	unsigned long flags;

	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
	tgtdev = __mpi3mr_get_tgtdev_by_handle(mrioc, handle);
	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
	return tgtdev;
}

/**
 * __mpi3mr_get_tgtdev_by_perst_id - Get tgtdev from persist ID
 * @mrioc: Adapter instance reference
 * @persist_id: Persistent ID
 *
 * Accessor to retrieve target device from the Persistent ID.
 * Non-lock version.
 *
 * Return: Target device reference.
 */
static struct mpi3mr_tgt_dev *__mpi3mr_get_tgtdev_by_perst_id(
	struct mpi3mr_ioc *mrioc, u16 persist_id)
{
	struct mpi3mr_tgt_dev *tgtdev;

	assert_spin_locked(&mrioc->tgtdev_lock);
	list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list)
		if (tgtdev->perst_id == persist_id)
			goto found_tgtdev;
	return NULL;

found_tgtdev:
	mpi3mr_tgtdev_get(tgtdev);
	return tgtdev;
}

/**
 * mpi3mr_get_tgtdev_by_perst_id - Get tgtdev from persistent ID
 * @mrioc: Adapter instance reference
 * @persist_id: Persistent ID
 *
 * Accessor to retrieve target device from the Persistent ID.
 * Lock version.
 *
 * Return: Target device reference.
 */
static struct mpi3mr_tgt_dev *mpi3mr_get_tgtdev_by_perst_id(
	struct mpi3mr_ioc *mrioc, u16 persist_id)
{
	struct mpi3mr_tgt_dev *tgtdev;
	unsigned long flags;

	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
	tgtdev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, persist_id);
	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
	return tgtdev;
}
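/*
 * Editor's note on the accessor pattern above: the double-underscore
 * variants require mrioc->tgtdev_lock to be held (checked via
 * assert_spin_locked()) and are meant for callers already inside the
 * lock; the plain variants wrap them with the irqsave lock. Every
 * successful lookup returns with an extra reference taken by
 * mpi3mr_tgtdev_get(), so each caller must balance it, e.g.:
 *
 *	tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);
 *	if (tgtdev) {
 *		...
 *		mpi3mr_tgtdev_put(tgtdev);
 *	}
 */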
/**
 * __mpi3mr_get_tgtdev_from_tgtpriv - Get tgtdev from tgt private
 * @mrioc: Adapter instance reference
 * @tgt_priv: Target private data
 *
 * Accessor to return target device from the target private
 * data. Non-lock version.
 *
 * Return: Target device reference.
 */
static struct mpi3mr_tgt_dev *__mpi3mr_get_tgtdev_from_tgtpriv(
	struct mpi3mr_ioc *mrioc, struct mpi3mr_stgt_priv_data *tgt_priv)
{
	struct mpi3mr_tgt_dev *tgtdev;

	assert_spin_locked(&mrioc->tgtdev_lock);
	tgtdev = tgt_priv->tgt_dev;
	if (tgtdev)
		mpi3mr_tgtdev_get(tgtdev);
	return tgtdev;
}

/**
 * mpi3mr_set_io_divert_for_all_vd_in_tg - set divert for TG VDs
 * @mrioc: Adapter instance reference
 * @tg: Throttle group information pointer
 * @divert_value: 1 or 0
 *
 * Accessor to set the io_divert flag to the given value for each
 * device associated with the given throttle group.
 *
 * Return: None.
 */
static void mpi3mr_set_io_divert_for_all_vd_in_tg(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_throttle_group_info *tg, u8 divert_value)
{
	unsigned long flags;
	struct mpi3mr_tgt_dev *tgtdev;
	struct mpi3mr_stgt_priv_data *tgt_priv;

	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
	list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
		if (tgtdev->starget && tgtdev->starget->hostdata) {
			tgt_priv = tgtdev->starget->hostdata;
			if (tgt_priv->throttle_group == tg)
				tgt_priv->io_divert = divert_value;
		}
	}
	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
}

/**
 * mpi3mr_print_device_event_notice - print notice related to post processing of
 * device event after controller reset.
 *
 * @mrioc: Adapter instance reference
 * @device_add: true for device add event and false for device removal event
 *
 * Return: None.
 */
void mpi3mr_print_device_event_notice(struct mpi3mr_ioc *mrioc,
	bool device_add)
{
	ioc_notice(mrioc, "Device %s was in progress before the reset and\n",
	    (device_add ? "addition" : "removal"));
	ioc_notice(mrioc, "completed after reset, verify whether the exposed devices\n");
	ioc_notice(mrioc, "are matched with attached devices for correctness\n");
}
/**
 * mpi3mr_remove_tgtdev_from_host - Remove dev from upper layers
 * @mrioc: Adapter instance reference
 * @tgtdev: Target device structure
 *
 * Checks whether the device is exposed to upper layers and if it
 * is, removes the device from the upper layers by calling
 * scsi_remove_target().
 *
 * Return: Nothing.
 */
void mpi3mr_remove_tgtdev_from_host(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_tgt_dev *tgtdev)
{
	struct mpi3mr_stgt_priv_data *tgt_priv;

	ioc_info(mrioc, "%s :Removing handle(0x%04x), wwid(0x%016llx)\n",
	    __func__, tgtdev->dev_handle, (unsigned long long)tgtdev->wwid);
	if (tgtdev->starget && tgtdev->starget->hostdata) {
		tgt_priv = tgtdev->starget->hostdata;
		atomic_set(&tgt_priv->block_io, 0);
		tgt_priv->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
	}

	if (!mrioc->sas_transport_enabled || (tgtdev->dev_type !=
	    MPI3_DEVICE_DEVFORM_SAS_SATA) || tgtdev->non_stl) {
		if (tgtdev->starget) {
			if (mrioc->current_event)
				mrioc->current_event->pending_at_sml = 1;
			scsi_remove_target(&tgtdev->starget->dev);
			tgtdev->host_exposed = 0;
			if (mrioc->current_event) {
				mrioc->current_event->pending_at_sml = 0;
				if (mrioc->current_event->discard) {
					mpi3mr_print_device_event_notice(mrioc,
					    false);
					return;
				}
			}
		}
	} else
		mpi3mr_remove_tgtdev_from_sas_transport(mrioc, tgtdev);

	ioc_info(mrioc, "%s :Removed handle(0x%04x), wwid(0x%016llx)\n",
	    __func__, tgtdev->dev_handle, (unsigned long long)tgtdev->wwid);
}

/**
 * mpi3mr_report_tgtdev_to_host - Expose device to upper layers
 * @mrioc: Adapter instance reference
 * @perst_id: Persistent ID of the device
 *
 * Checks whether the device can be exposed to upper layers and
 * if it is not already exposed, exposes the device to the upper
 * layers by calling scsi_scan_target().
 *
 * Return: 0 on success, non zero on failure.
 */
static int mpi3mr_report_tgtdev_to_host(struct mpi3mr_ioc *mrioc,
	u16 perst_id)
{
	int retval = 0;
	struct mpi3mr_tgt_dev *tgtdev;

	if (mrioc->reset_in_progress)
		return -1;

	tgtdev = mpi3mr_get_tgtdev_by_perst_id(mrioc, perst_id);
	if (!tgtdev) {
		retval = -1;
		goto out;
	}
	if (tgtdev->is_hidden || tgtdev->host_exposed) {
		retval = -1;
		goto out;
	}
	if (!mrioc->sas_transport_enabled || (tgtdev->dev_type !=
	    MPI3_DEVICE_DEVFORM_SAS_SATA) || tgtdev->non_stl) {
		tgtdev->host_exposed = 1;
		if (mrioc->current_event)
			mrioc->current_event->pending_at_sml = 1;
		scsi_scan_target(&mrioc->shost->shost_gendev,
		    mrioc->scsi_device_channel, tgtdev->perst_id,
		    SCAN_WILD_CARD, SCSI_SCAN_INITIAL);
		if (!tgtdev->starget)
			tgtdev->host_exposed = 0;
		if (mrioc->current_event) {
			mrioc->current_event->pending_at_sml = 0;
			if (mrioc->current_event->discard) {
				mpi3mr_print_device_event_notice(mrioc, true);
				goto out;
			}
		}
	} else
		mpi3mr_report_tgtdev_to_sas_transport(mrioc, tgtdev);
out:
	if (tgtdev)
		mpi3mr_tgtdev_put(tgtdev);

	return retval;
}
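/*
 * Editor's note on the pending_at_sml/discard handshake used in the two
 * functions above: pending_at_sml is raised around the blocking
 * scsi_remove_target()/scsi_scan_target() calls so that
 * mpi3mr_cleanup_fwevt_list() knows it must not cancel_work_sync() the
 * in-flight event (it sets fwevt->discard instead). On return, both
 * functions check current_event->discard and, if a reset intervened, only
 * print the verification notice via mpi3mr_print_device_event_notice()
 * rather than continuing the event handling.
 */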
/**
 * mpi3mr_change_queue_depth - Change QD callback handler
 * @sdev: SCSI device reference
 * @q_depth: Queue depth
 *
 * Validate and limit QD and call scsi_change_queue_depth.
 *
 * Return: return value of scsi_change_queue_depth
 */
static int mpi3mr_change_queue_depth(struct scsi_device *sdev,
	int q_depth)
{
	struct scsi_target *starget = scsi_target(sdev);
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	int retval = 0;

	if (!sdev->tagged_supported)
		q_depth = 1;
	if (q_depth > shost->can_queue)
		q_depth = shost->can_queue;
	else if (!q_depth)
		q_depth = MPI3MR_DEFAULT_SDEV_QD;
	retval = scsi_change_queue_depth(sdev, q_depth);
	sdev->max_queue_depth = sdev->queue_depth;

	return retval;
}

/**
 * mpi3mr_update_sdev - Update SCSI device information
 * @sdev: SCSI device reference
 * @data: target device reference
 *
 * This is an iterator function called for each SCSI device in a
 * target to update the target specific information into each
 * SCSI device.
 *
 * Return: Nothing.
 */
static void
mpi3mr_update_sdev(struct scsi_device *sdev, void *data)
{
	struct mpi3mr_tgt_dev *tgtdev;

	tgtdev = (struct mpi3mr_tgt_dev *)data;
	if (!tgtdev)
		return;

	mpi3mr_change_queue_depth(sdev, tgtdev->q_depth);
	switch (tgtdev->dev_type) {
	case MPI3_DEVICE_DEVFORM_PCIE:
		/* The block layer hw sector size = 512 */
		if ((tgtdev->dev_spec.pcie_inf.dev_info &
		    MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) ==
		    MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) {
			blk_queue_max_hw_sectors(sdev->request_queue,
			    tgtdev->dev_spec.pcie_inf.mdts / 512);
			if (tgtdev->dev_spec.pcie_inf.pgsz == 0)
				blk_queue_virt_boundary(sdev->request_queue,
				    ((1 << MPI3MR_DEFAULT_PGSZEXP) - 1));
			else
				blk_queue_virt_boundary(sdev->request_queue,
				    ((1 << tgtdev->dev_spec.pcie_inf.pgsz) - 1));
		}
		break;
	default:
		break;
	}
}
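/*
 * Worked example for the NVMe limits set above (editor's illustration):
 * mdts is kept in bytes while the block layer counts 512-byte sectors, so
 * an mdts of 1 MiB yields blk_queue_max_hw_sectors(q, 1048576 / 512), i.e.
 * 2048 sectors. pgsz is a power-of-two exponent; pgsz = 12 gives a virt
 * boundary mask of (1 << 12) - 1 = 4095, meaning SG elements must not
 * cross a 4 KiB NVMe page boundary.
 */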
/**
 * mpi3mr_rfresh_tgtdevs - Refresh target device exposure
 * @mrioc: Adapter instance reference
 *
 * This is executed post controller reset to identify any devices
 * that went missing during the reset and remove them from the
 * upper layers, or to expose any newly detected device to the
 * upper layers.
 *
 * Return: Nothing.
 */
void mpi3mr_rfresh_tgtdevs(struct mpi3mr_ioc *mrioc)
{
	struct mpi3mr_tgt_dev *tgtdev, *tgtdev_next;
	struct mpi3mr_stgt_priv_data *tgt_priv;

	dprint_reset(mrioc, "refresh target devices: check for removals\n");
	list_for_each_entry_safe(tgtdev, tgtdev_next, &mrioc->tgtdev_list,
	    list) {
		if ((tgtdev->dev_handle == MPI3MR_INVALID_DEV_HANDLE) &&
		    tgtdev->is_hidden &&
		    tgtdev->host_exposed && tgtdev->starget &&
		    tgtdev->starget->hostdata) {
			tgt_priv = tgtdev->starget->hostdata;
			tgt_priv->dev_removed = 1;
			atomic_set(&tgt_priv->block_io, 0);
		}
	}

	list_for_each_entry_safe(tgtdev, tgtdev_next, &mrioc->tgtdev_list,
	    list) {
		if (tgtdev->dev_handle == MPI3MR_INVALID_DEV_HANDLE) {
			dprint_reset(mrioc, "removing target device with perst_id(%d)\n",
			    tgtdev->perst_id);
			if (tgtdev->host_exposed)
				mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
			mpi3mr_tgtdev_del_from_list(mrioc, tgtdev, true);
			mpi3mr_tgtdev_put(tgtdev);
		} else if (tgtdev->is_hidden && tgtdev->host_exposed) {
			dprint_reset(mrioc, "hiding target device with perst_id(%d)\n",
			    tgtdev->perst_id);
			mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
		}
	}

	tgtdev = NULL;
	list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
		if ((tgtdev->dev_handle != MPI3MR_INVALID_DEV_HANDLE) &&
		    !tgtdev->is_hidden) {
			if (!tgtdev->host_exposed)
				mpi3mr_report_tgtdev_to_host(mrioc,
				    tgtdev->perst_id);
			else if (tgtdev->starget)
				starget_for_each_device(tgtdev->starget,
				    (void *)tgtdev, mpi3mr_update_sdev);
		}
	}
}

/**
 * mpi3mr_update_tgtdev - Update cached target device information
 * @mrioc: Adapter instance reference
 * @tgtdev: Target device internal structure
 * @dev_pg0: New device page0
 * @is_added: Flag to indicate the device is just added
 *
 * Update the information from the device page0 into the driver
 * cached target device structure.
 *
 * Return: Nothing.
 */
static void mpi3mr_update_tgtdev(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_tgt_dev *tgtdev, struct mpi3_device_page0 *dev_pg0,
	bool is_added)
{
	u16 flags = 0;
	struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL;
	struct mpi3mr_enclosure_node *enclosure_dev = NULL;
	u8 prot_mask = 0;

	tgtdev->perst_id = le16_to_cpu(dev_pg0->persistent_id);
	tgtdev->dev_handle = le16_to_cpu(dev_pg0->dev_handle);
	tgtdev->dev_type = dev_pg0->device_form;
	tgtdev->io_unit_port = dev_pg0->io_unit_port;
	tgtdev->encl_handle = le16_to_cpu(dev_pg0->enclosure_handle);
	tgtdev->parent_handle = le16_to_cpu(dev_pg0->parent_dev_handle);
	tgtdev->slot = le16_to_cpu(dev_pg0->slot);
	tgtdev->q_depth = le16_to_cpu(dev_pg0->queue_depth);
	tgtdev->wwid = le64_to_cpu(dev_pg0->wwid);
	tgtdev->devpg0_flag = le16_to_cpu(dev_pg0->flags);

	if (tgtdev->encl_handle)
		enclosure_dev = mpi3mr_enclosure_find_by_handle(mrioc,
		    tgtdev->encl_handle);
	if (enclosure_dev)
		tgtdev->enclosure_logical_id = le64_to_cpu(
		    enclosure_dev->pg0.enclosure_logical_id);

	flags = tgtdev->devpg0_flag;

	tgtdev->is_hidden = (flags & MPI3_DEVICE0_FLAGS_HIDDEN);

	if (is_added == true)
		tgtdev->io_throttle_enabled =
		    (flags & MPI3_DEVICE0_FLAGS_IO_THROTTLING_REQUIRED) ? 1 : 0;
	switch (flags & MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_MASK) {
	case MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_256_LB:
		tgtdev->wslen = MPI3MR_WRITE_SAME_MAX_LEN_256_BLKS;
		break;
	case MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_2048_LB:
		tgtdev->wslen = MPI3MR_WRITE_SAME_MAX_LEN_2048_BLKS;
		break;
	case MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_NO_LIMIT:
	default:
		tgtdev->wslen = 0;
		break;
	}

	if (tgtdev->starget && tgtdev->starget->hostdata) {
		scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *)
		    tgtdev->starget->hostdata;
		scsi_tgt_priv_data->perst_id = tgtdev->perst_id;
		scsi_tgt_priv_data->dev_handle = tgtdev->dev_handle;
		scsi_tgt_priv_data->dev_type = tgtdev->dev_type;
		scsi_tgt_priv_data->io_throttle_enabled =
		    tgtdev->io_throttle_enabled;
		if (is_added == true)
			atomic_set(&scsi_tgt_priv_data->block_io, 0);
		scsi_tgt_priv_data->wslen = tgtdev->wslen;
	}

	switch (dev_pg0->access_status) {
	case MPI3_DEVICE0_ASTATUS_NO_ERRORS:
	case MPI3_DEVICE0_ASTATUS_PREPARE:
	case MPI3_DEVICE0_ASTATUS_NEEDS_INITIALIZATION:
	case MPI3_DEVICE0_ASTATUS_DEVICE_MISSING_DELAY:
		break;
	default:
		tgtdev->is_hidden = 1;
		break;
	}

	switch (tgtdev->dev_type) {
	case MPI3_DEVICE_DEVFORM_SAS_SATA:
	{
		struct mpi3_device0_sas_sata_format *sasinf =
		    &dev_pg0->device_specific.sas_sata_format;
		u16 dev_info = le16_to_cpu(sasinf->device_info);

		tgtdev->dev_spec.sas_sata_inf.dev_info = dev_info;
		tgtdev->dev_spec.sas_sata_inf.sas_address =
		    le64_to_cpu(sasinf->sas_address);
		tgtdev->dev_spec.sas_sata_inf.phy_id = sasinf->phy_num;
		tgtdev->dev_spec.sas_sata_inf.attached_phy_id =
		    sasinf->attached_phy_identifier;
		if ((dev_info & MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_MASK) !=
		    MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_END_DEVICE)
			tgtdev->is_hidden = 1;
		else if (!(dev_info & (MPI3_SAS_DEVICE_INFO_STP_SATA_TARGET |
		    MPI3_SAS_DEVICE_INFO_SSP_TARGET)))
			tgtdev->is_hidden = 1;

		if (((tgtdev->devpg0_flag &
		    MPI3_DEVICE0_FLAGS_ATT_METHOD_DIR_ATTACHED)
		    && (tgtdev->devpg0_flag &
		    MPI3_DEVICE0_FLAGS_ATT_METHOD_VIRTUAL)) ||
		    (tgtdev->parent_handle == 0xFFFF))
			tgtdev->non_stl = 1;
		if (tgtdev->dev_spec.sas_sata_inf.hba_port)
			tgtdev->dev_spec.sas_sata_inf.hba_port->port_id =
			    dev_pg0->io_unit_port;
		break;
	}
	case MPI3_DEVICE_DEVFORM_PCIE:
	{
		struct mpi3_device0_pcie_format *pcieinf =
		    &dev_pg0->device_specific.pcie_format;
		u16 dev_info = le16_to_cpu(pcieinf->device_info);

		tgtdev->dev_spec.pcie_inf.dev_info = dev_info;
		tgtdev->dev_spec.pcie_inf.capb =
		    le32_to_cpu(pcieinf->capabilities);
		tgtdev->dev_spec.pcie_inf.mdts = MPI3MR_DEFAULT_MDTS;
		/* 2^12 = 4096 */
		tgtdev->dev_spec.pcie_inf.pgsz = 12;
		if (dev_pg0->access_status == MPI3_DEVICE0_ASTATUS_NO_ERRORS) {
			tgtdev->dev_spec.pcie_inf.mdts =
			    le32_to_cpu(pcieinf->maximum_data_transfer_size);
			tgtdev->dev_spec.pcie_inf.pgsz = pcieinf->page_size;
			tgtdev->dev_spec.pcie_inf.reset_to =
			    max_t(u8, pcieinf->controller_reset_to,
			    MPI3MR_INTADMCMD_TIMEOUT);
			tgtdev->dev_spec.pcie_inf.abort_to =
			    max_t(u8, pcieinf->nvme_abort_to,
			    MPI3MR_INTADMCMD_TIMEOUT);
		}
		if (tgtdev->dev_spec.pcie_inf.mdts > (1024 * 1024))
			tgtdev->dev_spec.pcie_inf.mdts = (1024 * 1024);
		if (((dev_info & MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) !=
		    MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) &&
		    ((dev_info & MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) !=
		    MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_SCSI_DEVICE))
			tgtdev->is_hidden = 1;
		tgtdev->non_stl = 1;
		if (!mrioc->shost)
			break;
		prot_mask = scsi_host_get_prot(mrioc->shost);
		if (prot_mask & SHOST_DIX_TYPE0_PROTECTION) {
			scsi_host_set_prot(mrioc->shost, prot_mask & 0x77);
			ioc_info(mrioc,
			    "%s : Disabling DIX0 prot capability\n", __func__);
			ioc_info(mrioc,
			    "because HBA does not support DIX0 operation on NVME drives\n");
		}
		break;
	}
	case MPI3_DEVICE_DEVFORM_VD:
	{
		struct mpi3_device0_vd_format *vdinf =
		    &dev_pg0->device_specific.vd_format;
		struct mpi3mr_throttle_group_info *tg = NULL;
		u16 vdinf_io_throttle_group =
		    le16_to_cpu(vdinf->io_throttle_group);

		tgtdev->dev_spec.vd_inf.state = vdinf->vd_state;
		if (vdinf->vd_state == MPI3_DEVICE0_VD_STATE_OFFLINE)
			tgtdev->is_hidden = 1;
		tgtdev->non_stl = 1;
		tgtdev->dev_spec.vd_inf.tg_id = vdinf_io_throttle_group;
		tgtdev->dev_spec.vd_inf.tg_high =
		    le16_to_cpu(vdinf->io_throttle_group_high) * 2048;
		tgtdev->dev_spec.vd_inf.tg_low =
		    le16_to_cpu(vdinf->io_throttle_group_low) * 2048;
		if (vdinf_io_throttle_group < mrioc->num_io_throttle_group) {
			tg = mrioc->throttle_groups + vdinf_io_throttle_group;
			tg->id = vdinf_io_throttle_group;
			tg->high = tgtdev->dev_spec.vd_inf.tg_high;
			tg->low = tgtdev->dev_spec.vd_inf.tg_low;
			tg->qd_reduction =
			    tgtdev->dev_spec.vd_inf.tg_qd_reduction;
			if (is_added == true)
				tg->fw_qd = tgtdev->q_depth;
			tg->modified_qd = tgtdev->q_depth;
		}
		tgtdev->dev_spec.vd_inf.tg = tg;
		if (scsi_tgt_priv_data)
			scsi_tgt_priv_data->throttle_group = tg;
		break;
	}
	default:
		break;
	}
}
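/*
 * Editor's note on the VD throttle thresholds above: the * 2048 scaling
 * appears to convert the firmware's MiB-granular io_throttle_group_high/low
 * values into the 512-byte block units the driver uses for I/O size
 * accounting (1 MiB == 2048 * 512 bytes), so tg->high/tg->low can be
 * compared directly against outstanding-data counters kept in 512-byte
 * blocks. This unit interpretation is inferred, not stated in this file.
 */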
/**
 * mpi3mr_devstatuschg_evt_bh - DevStatusChange evt bottomhalf
 * @mrioc: Adapter instance reference
 * @fwevt: Firmware event information.
 *
 * Process Device Status Change event and based on device's new
 * information, either expose the device to the upper layers, or
 * remove the device from upper layers.
 *
 * Return: Nothing.
 */
static void mpi3mr_devstatuschg_evt_bh(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_fwevt *fwevt)
{
	u16 dev_handle = 0;
	u8 uhide = 0, delete = 0, cleanup = 0;
	struct mpi3mr_tgt_dev *tgtdev = NULL;
	struct mpi3_event_data_device_status_change *evtdata =
	    (struct mpi3_event_data_device_status_change *)fwevt->event_data;

	dev_handle = le16_to_cpu(evtdata->dev_handle);
	ioc_info(mrioc,
	    "%s :device status change: handle(0x%04x): reason code(0x%x)\n",
	    __func__, dev_handle, evtdata->reason_code);
	switch (evtdata->reason_code) {
	case MPI3_EVENT_DEV_STAT_RC_HIDDEN:
		delete = 1;
		break;
	case MPI3_EVENT_DEV_STAT_RC_NOT_HIDDEN:
		uhide = 1;
		break;
	case MPI3_EVENT_DEV_STAT_RC_VD_NOT_RESPONDING:
		delete = 1;
		cleanup = 1;
		break;
	default:
		ioc_info(mrioc, "%s :Unhandled reason code(0x%x)\n", __func__,
		    evtdata->reason_code);
		break;
	}

	tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle);
	if (!tgtdev)
		goto out;
	if (uhide) {
		tgtdev->is_hidden = 0;
		if (!tgtdev->host_exposed)
			mpi3mr_report_tgtdev_to_host(mrioc, tgtdev->perst_id);
	}

	if (delete)
		mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);

	if (cleanup) {
		mpi3mr_tgtdev_del_from_list(mrioc, tgtdev, false);
		mpi3mr_tgtdev_put(tgtdev);
	}

out:
	if (tgtdev)
		mpi3mr_tgtdev_put(tgtdev);
}

/**
 * mpi3mr_devinfochg_evt_bh - DeviceInfoChange evt bottomhalf
 * @mrioc: Adapter instance reference
 * @dev_pg0: New device page0
 *
 * Process Device Info Change event and based on device's new
 * information, either expose the device to the upper layers, or
 * remove the device from upper layers or update the details of
 * the device.
 *
 * Return: Nothing.
 */
static void mpi3mr_devinfochg_evt_bh(struct mpi3mr_ioc *mrioc,
	struct mpi3_device_page0 *dev_pg0)
{
	struct mpi3mr_tgt_dev *tgtdev = NULL;
	u16 dev_handle = 0, perst_id = 0;

	perst_id = le16_to_cpu(dev_pg0->persistent_id);
	dev_handle = le16_to_cpu(dev_pg0->dev_handle);
	ioc_info(mrioc,
	    "%s :Device info change: handle(0x%04x): persist_id(0x%x)\n",
	    __func__, dev_handle, perst_id);
	tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle);
	if (!tgtdev)
		goto out;
	mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0, false);
	if (!tgtdev->is_hidden && !tgtdev->host_exposed)
		mpi3mr_report_tgtdev_to_host(mrioc, perst_id);
	if (tgtdev->is_hidden && tgtdev->host_exposed)
		mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
	if (!tgtdev->is_hidden && tgtdev->host_exposed && tgtdev->starget)
		starget_for_each_device(tgtdev->starget, (void *)tgtdev,
		    mpi3mr_update_sdev);
out:
	if (tgtdev)
		mpi3mr_tgtdev_put(tgtdev);
}
/**
 * mpi3mr_free_enclosure_list - release enclosures
 * @mrioc: Adapter instance reference
 *
 * Free memory allocated during enclosure add.
 *
 * Return: Nothing.
 */
void mpi3mr_free_enclosure_list(struct mpi3mr_ioc *mrioc)
{
	struct mpi3mr_enclosure_node *enclosure_dev, *enclosure_dev_next;

	list_for_each_entry_safe(enclosure_dev,
	    enclosure_dev_next, &mrioc->enclosure_list, list) {
		list_del(&enclosure_dev->list);
		kfree(enclosure_dev);
	}
}

/**
 * mpi3mr_enclosure_find_by_handle - enclosure search by handle
 * @mrioc: Adapter instance reference
 * @handle: Firmware device handle of the enclosure
 *
 * This searches for enclosure device based on handle, then returns the
 * enclosure object.
 *
 * Return: Enclosure object reference or NULL
 */
struct mpi3mr_enclosure_node *mpi3mr_enclosure_find_by_handle(
	struct mpi3mr_ioc *mrioc, u16 handle)
{
	struct mpi3mr_enclosure_node *enclosure_dev, *r = NULL;

	list_for_each_entry(enclosure_dev, &mrioc->enclosure_list, list) {
		if (le16_to_cpu(enclosure_dev->pg0.enclosure_handle) != handle)
			continue;
		r = enclosure_dev;
		goto out;
	}
out:
	return r;
}

/**
 * mpi3mr_encldev_add_chg_evt_debug - debug for enclosure event
 * @mrioc: Adapter instance reference
 * @encl_pg0: Enclosure page 0.
 * @is_added: Added event or not
 *
 * Return: Nothing.
 */
static void mpi3mr_encldev_add_chg_evt_debug(struct mpi3mr_ioc *mrioc,
	struct mpi3_enclosure_page0 *encl_pg0, u8 is_added)
{
	char *reason_str = NULL;

	if (!(mrioc->logging_level & MPI3_DEBUG_EVENT_WORK_TASK))
		return;

	if (is_added)
		reason_str = "enclosure added";
	else
		reason_str = "enclosure dev status changed";

	ioc_info(mrioc,
	    "%s: handle(0x%04x), enclosure logical id(0x%016llx)\n",
	    reason_str, le16_to_cpu(encl_pg0->enclosure_handle),
	    (unsigned long long)le64_to_cpu(encl_pg0->enclosure_logical_id));
	ioc_info(mrioc,
	    "number of slots(%d), port(%d), flags(0x%04x), present(%d)\n",
	    le16_to_cpu(encl_pg0->num_slots), encl_pg0->io_unit_port,
	    le16_to_cpu(encl_pg0->flags),
	    ((le16_to_cpu(encl_pg0->flags) &
	    MPI3_ENCLS0_FLAGS_ENCL_DEV_PRESENT_MASK) >> 4));
}

/**
 * mpi3mr_encldev_add_chg_evt_bh - Enclosure evt bottomhalf
 * @mrioc: Adapter instance reference
 * @fwevt: Firmware event reference
 *
 * Prints information about the Enclosure device status or
 * Enclosure add events if logging is enabled and adds or removes
 * the enclosure from the controller's internal list of
 * enclosures.
 *
 * Return: Nothing.
 */
static void mpi3mr_encldev_add_chg_evt_bh(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_fwevt *fwevt)
{
	struct mpi3mr_enclosure_node *enclosure_dev = NULL;
	struct mpi3_enclosure_page0 *encl_pg0;
	u16 encl_handle;
	u8 added, present;

	encl_pg0 = (struct mpi3_enclosure_page0 *) fwevt->event_data;
	added = (fwevt->event_id == MPI3_EVENT_ENCL_DEVICE_ADDED) ? 1 : 0;
	mpi3mr_encldev_add_chg_evt_debug(mrioc, encl_pg0, added);

	encl_handle = le16_to_cpu(encl_pg0->enclosure_handle);
	present = ((le16_to_cpu(encl_pg0->flags) &
	    MPI3_ENCLS0_FLAGS_ENCL_DEV_PRESENT_MASK) >> 4);

	if (encl_handle)
		enclosure_dev = mpi3mr_enclosure_find_by_handle(mrioc,
		    encl_handle);
	if (!enclosure_dev && present) {
		enclosure_dev =
		    kzalloc(sizeof(struct mpi3mr_enclosure_node),
		    GFP_KERNEL);
		if (!enclosure_dev)
			return;
		list_add_tail(&enclosure_dev->list,
		    &mrioc->enclosure_list);
	}
	if (enclosure_dev) {
		if (!present) {
			list_del(&enclosure_dev->list);
			kfree(enclosure_dev);
		} else
			memcpy(&enclosure_dev->pg0, encl_pg0,
			    sizeof(enclosure_dev->pg0));
	}
}

/**
 * mpi3mr_sastopochg_evt_debug - SASTopoChange details
 * @mrioc: Adapter instance reference
 * @event_data: SAS topology change list event data
 *
 * Prints information about the SAS topology change event.
 *
 * Return: Nothing.
 */
static void
mpi3mr_sastopochg_evt_debug(struct mpi3mr_ioc *mrioc,
	struct mpi3_event_data_sas_topology_change_list *event_data)
{
	int i;
	u16 handle;
	u8 reason_code, phy_number;
	char *status_str = NULL;
	u8 link_rate, prev_link_rate;

	switch (event_data->exp_status) {
	case MPI3_EVENT_SAS_TOPO_ES_NOT_RESPONDING:
		status_str = "remove";
		break;
	case MPI3_EVENT_SAS_TOPO_ES_RESPONDING:
		status_str = "responding";
		break;
	case MPI3_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING:
		status_str = "remove delay";
		break;
	case MPI3_EVENT_SAS_TOPO_ES_NO_EXPANDER:
		status_str = "direct attached";
		break;
	default:
		status_str = "unknown status";
		break;
	}
	ioc_info(mrioc, "%s :sas topology change: (%s)\n",
	    __func__, status_str);
	ioc_info(mrioc,
	    "%s :\texpander_handle(0x%04x), port(%d), enclosure_handle(0x%04x) start_phy(%02d), num_entries(%d)\n",
	    __func__, le16_to_cpu(event_data->expander_dev_handle),
	    event_data->io_unit_port,
	    le16_to_cpu(event_data->enclosure_handle),
	    event_data->start_phy_num, event_data->num_entries);
	for (i = 0; i < event_data->num_entries; i++) {
		handle = le16_to_cpu(event_data->phy_entry[i].attached_dev_handle);
		if (!handle)
			continue;
		phy_number = event_data->start_phy_num + i;
		reason_code = event_data->phy_entry[i].status &
		    MPI3_EVENT_SAS_TOPO_PHY_RC_MASK;
		switch (reason_code) {
		case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING:
			status_str = "target remove";
			break;
		case MPI3_EVENT_SAS_TOPO_PHY_RC_DELAY_NOT_RESPONDING:
			status_str = "delay target remove";
			break;
		case MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED:
			status_str = "link status change";
			break;
		case MPI3_EVENT_SAS_TOPO_PHY_RC_NO_CHANGE:
			status_str = "link status no change";
			break;
		case MPI3_EVENT_SAS_TOPO_PHY_RC_RESPONDING:
			status_str = "target responding";
			break;
		default:
			status_str = "unknown";
			break;
		}
		link_rate = event_data->phy_entry[i].link_rate >> 4;
		prev_link_rate = event_data->phy_entry[i].link_rate & 0xF;
		ioc_info(mrioc,
		    "%s :\tphy(%02d), attached_handle(0x%04x): %s: link rate: new(0x%02x), old(0x%02x)\n",
		    __func__, phy_number, handle, status_str, link_rate,
		    prev_link_rate);
	}
}
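/*
 * Editor's note on the link_rate decoding above and in the bottom half
 * below: phy_entry[i].link_rate packs two fields into one byte, the
 * current negotiated rate in bits 7:4 and the previous rate in bits 3:0.
 * For example, a raw value of 0xBA decodes as new rate 0xB and old rate
 * 0xA; the bottom half only updates links when the two nibbles differ.
 */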
/**
 * mpi3mr_sastopochg_evt_bh - SASTopologyChange evt bottomhalf
 * @mrioc: Adapter instance reference
 * @fwevt: Firmware event reference
 *
 * Prints information about the SAS topology change event and
 * for "not responding" event code, removes the device from the
 * upper layers.
 *
 * Return: Nothing.
 */
static void mpi3mr_sastopochg_evt_bh(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_fwevt *fwevt)
{
	struct mpi3_event_data_sas_topology_change_list *event_data =
	    (struct mpi3_event_data_sas_topology_change_list *)fwevt->event_data;
	int i;
	u16 handle;
	u8 reason_code;
	u64 exp_sas_address = 0, parent_sas_address = 0;
	struct mpi3mr_hba_port *hba_port = NULL;
	struct mpi3mr_tgt_dev *tgtdev = NULL;
	struct mpi3mr_sas_node *sas_expander = NULL;
	unsigned long flags;
	u8 link_rate, prev_link_rate, parent_phy_number;

	mpi3mr_sastopochg_evt_debug(mrioc, event_data);
	if (mrioc->sas_transport_enabled) {
		hba_port = mpi3mr_get_hba_port_by_id(mrioc,
		    event_data->io_unit_port);
		if (le16_to_cpu(event_data->expander_dev_handle)) {
			spin_lock_irqsave(&mrioc->sas_node_lock, flags);
			sas_expander = __mpi3mr_expander_find_by_handle(mrioc,
			    le16_to_cpu(event_data->expander_dev_handle));
			if (sas_expander) {
				exp_sas_address = sas_expander->sas_address;
				hba_port = sas_expander->hba_port;
			}
			spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
			parent_sas_address = exp_sas_address;
		} else
			parent_sas_address = mrioc->sas_hba.sas_address;
	}

	for (i = 0; i < event_data->num_entries; i++) {
		if (fwevt->discard)
			return;
		handle = le16_to_cpu(event_data->phy_entry[i].attached_dev_handle);
		if (!handle)
			continue;
		tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);
		if (!tgtdev)
			continue;

		reason_code = event_data->phy_entry[i].status &
		    MPI3_EVENT_SAS_TOPO_PHY_RC_MASK;

		switch (reason_code) {
		case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING:
			if (tgtdev->host_exposed)
				mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
			mpi3mr_tgtdev_del_from_list(mrioc, tgtdev, false);
			mpi3mr_tgtdev_put(tgtdev);
			break;
		case MPI3_EVENT_SAS_TOPO_PHY_RC_RESPONDING:
		case MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED:
		case MPI3_EVENT_SAS_TOPO_PHY_RC_NO_CHANGE:
		{
			if (!mrioc->sas_transport_enabled || tgtdev->non_stl
			    || tgtdev->is_hidden)
				break;
			link_rate = event_data->phy_entry[i].link_rate >> 4;
			prev_link_rate = event_data->phy_entry[i].link_rate & 0xF;
			if (link_rate == prev_link_rate)
				break;
			if (!parent_sas_address)
				break;
			parent_phy_number = event_data->start_phy_num + i;
			mpi3mr_update_links(mrioc, parent_sas_address, handle,
			    parent_phy_number, link_rate, hba_port);
			break;
		}
		default:
			break;
		}
		if (tgtdev)
			mpi3mr_tgtdev_put(tgtdev);
	}

	if (mrioc->sas_transport_enabled && (event_data->exp_status ==
	    MPI3_EVENT_SAS_TOPO_ES_NOT_RESPONDING)) {
		if (sas_expander)
			mpi3mr_expander_remove(mrioc, exp_sas_address,
			    hba_port);
	}
}
/**
 * mpi3mr_pcietopochg_evt_debug - PCIeTopoChange details
 * @mrioc: Adapter instance reference
 * @event_data: PCIe topology change list event data
 *
 * Prints information about the PCIe topology change event.
 *
 * Return: Nothing.
 */
static void
mpi3mr_pcietopochg_evt_debug(struct mpi3mr_ioc *mrioc,
	struct mpi3_event_data_pcie_topology_change_list *event_data)
{
	int i;
	u16 handle;
	u16 reason_code;
	u8 port_number;
	char *status_str = NULL;
	u8 link_rate, prev_link_rate;

	switch (event_data->switch_status) {
	case MPI3_EVENT_PCIE_TOPO_SS_NOT_RESPONDING:
		status_str = "remove";
		break;
	case MPI3_EVENT_PCIE_TOPO_SS_RESPONDING:
		status_str = "responding";
		break;
	case MPI3_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING:
		status_str = "remove delay";
		break;
	case MPI3_EVENT_PCIE_TOPO_SS_NO_PCIE_SWITCH:
		status_str = "direct attached";
		break;
	default:
		status_str = "unknown status";
		break;
	}
	ioc_info(mrioc, "%s :pcie topology change: (%s)\n",
	    __func__, status_str);
	ioc_info(mrioc,
	    "%s :\tswitch_handle(0x%04x), enclosure_handle(0x%04x) start_port(%02d), num_entries(%d)\n",
	    __func__, le16_to_cpu(event_data->switch_dev_handle),
	    le16_to_cpu(event_data->enclosure_handle),
	    event_data->start_port_num, event_data->num_entries);
	for (i = 0; i < event_data->num_entries; i++) {
		handle =
		    le16_to_cpu(event_data->port_entry[i].attached_dev_handle);
		if (!handle)
			continue;
		port_number = event_data->start_port_num + i;
		reason_code = event_data->port_entry[i].port_status;
		switch (reason_code) {
		case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
			status_str = "target remove";
			break;
		case MPI3_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING:
			status_str = "delay target remove";
			break;
		case MPI3_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
			status_str = "link status change";
			break;
		case MPI3_EVENT_PCIE_TOPO_PS_NO_CHANGE:
			status_str = "link status no change";
			break;
		case MPI3_EVENT_PCIE_TOPO_PS_RESPONDING:
			status_str = "target responding";
			break;
		default:
			status_str = "unknown";
			break;
		}
		link_rate = event_data->port_entry[i].current_port_info &
		    MPI3_EVENT_PCIE_TOPO_PI_RATE_MASK;
		prev_link_rate = event_data->port_entry[i].previous_port_info &
		    MPI3_EVENT_PCIE_TOPO_PI_RATE_MASK;
		ioc_info(mrioc,
		    "%s :\tport(%02d), attached_handle(0x%04x): %s: link rate: new(0x%02x), old(0x%02x)\n",
		    __func__, port_number, handle, status_str, link_rate,
		    prev_link_rate);
	}
}
/**
 * mpi3mr_pcietopochg_evt_bh - PCIeTopologyChange evt bottomhalf
 * @mrioc: Adapter instance reference
 * @fwevt: Firmware event reference
 *
 * Prints information about the PCIe topology change event and
 * for "not responding" event code, removes the device from the
 * upper layers.
 *
 * Return: Nothing.
 */
static void mpi3mr_pcietopochg_evt_bh(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_fwevt *fwevt)
{
	struct mpi3_event_data_pcie_topology_change_list *event_data =
	    (struct mpi3_event_data_pcie_topology_change_list *)fwevt->event_data;
	int i;
	u16 handle;
	u8 reason_code;
	struct mpi3mr_tgt_dev *tgtdev = NULL;

	mpi3mr_pcietopochg_evt_debug(mrioc, event_data);

	for (i = 0; i < event_data->num_entries; i++) {
		if (fwevt->discard)
			return;
		handle =
		    le16_to_cpu(event_data->port_entry[i].attached_dev_handle);
		if (!handle)
			continue;
		tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);
		if (!tgtdev)
			continue;

		reason_code = event_data->port_entry[i].port_status;

		switch (reason_code) {
		case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
			if (tgtdev->host_exposed)
				mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
			mpi3mr_tgtdev_del_from_list(mrioc, tgtdev, false);
			mpi3mr_tgtdev_put(tgtdev);
			break;
		default:
			break;
		}
		if (tgtdev)
			mpi3mr_tgtdev_put(tgtdev);
	}
}

/**
 * mpi3mr_logdata_evt_bh - Log data event bottomhalf
 * @mrioc: Adapter instance reference
 * @fwevt: Firmware event reference
 *
 * Extracts the event data and calls the application interfacing
 * function to process the event further.
 *
 * Return: Nothing.
 */
static void mpi3mr_logdata_evt_bh(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_fwevt *fwevt)
{
	mpi3mr_app_save_logdata(mrioc, fwevt->event_data,
	    fwevt->event_data_size);
}

/**
 * mpi3mr_update_sdev_qd - Update SCSI device queue depth
 * @sdev: SCSI device reference
 * @data: Queue depth reference
 *
 * This is an iterator function called for each SCSI device in a
 * target to update the QD of each SCSI device.
 *
 * Return: Nothing.
 */
static void mpi3mr_update_sdev_qd(struct scsi_device *sdev, void *data)
{
	u16 *q_depth = (u16 *)data;

	scsi_change_queue_depth(sdev, (int)*q_depth);
	sdev->max_queue_depth = sdev->queue_depth;
}
1870 */
1871 static void mpi3mr_set_qd_for_all_vd_in_tg(struct mpi3mr_ioc *mrioc,
1872 struct mpi3mr_throttle_group_info *tg)
1873 {
1874 unsigned long flags;
1875 struct mpi3mr_tgt_dev *tgtdev;
1876 struct mpi3mr_stgt_priv_data *tgt_priv;
1877
1878
1879 spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
1880 list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
1881 if (tgtdev->starget && tgtdev->starget->hostdata) {
1882 tgt_priv = tgtdev->starget->hostdata;
1883 if (tgt_priv->throttle_group == tg) {
1884 dprint_event_bh(mrioc,
1885 "updating qd due to throttling for persist_id(%d) original_qd(%d), reduced_qd(%d)\n",
1886 tgt_priv->perst_id, tgtdev->q_depth,
1887 tg->modified_qd);
1888 starget_for_each_device(tgtdev->starget,
1889 (void *)&tg->modified_qd,
1890 mpi3mr_update_sdev_qd);
1891 }
1892 }
1893 }
1894 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
1895 }
1896
1897 /**
1898 * mpi3mr_fwevt_bh - Firmware event bottomhalf handler
1899 * @mrioc: Adapter instance reference
1900 * @fwevt: Firmware event reference
1901 *
1902 * Identifies the firmware event and calls the corresponding bottom
1903 * half handler and sends event acknowledgment if required.
1904 *
1905 * Return: Nothing.
1906 */
1907 static void mpi3mr_fwevt_bh(struct mpi3mr_ioc *mrioc,
1908 struct mpi3mr_fwevt *fwevt)
1909 {
1910 struct mpi3_device_page0 *dev_pg0 = NULL;
1911 u16 perst_id, handle, dev_info;
1912 struct mpi3_device0_sas_sata_format *sasinf = NULL;
1913
1914 mpi3mr_fwevt_del_from_list(mrioc, fwevt);
1915 mrioc->current_event = fwevt;
1916
1917 if (mrioc->stop_drv_processing)
1918 goto out;
1919
1920 if (mrioc->unrecoverable) {
1921 dprint_event_bh(mrioc,
1922 "ignoring event(0x%02x) in bottom half handler due to unrecoverable controller\n",
1923 fwevt->event_id);
1924 goto out;
1925 }
1926
1927 if (!fwevt->process_evt)
1928 goto evt_ack;
1929
1930 switch (fwevt->event_id) {
1931 case MPI3_EVENT_DEVICE_ADDED:
1932 {
1933 dev_pg0 = (struct mpi3_device_page0 *)fwevt->event_data;
1934 perst_id = le16_to_cpu(dev_pg0->persistent_id);
1935 handle = le16_to_cpu(dev_pg0->dev_handle);
1936 if (perst_id != MPI3_DEVICE0_PERSISTENTID_INVALID)
1937 mpi3mr_report_tgtdev_to_host(mrioc, perst_id);
1938 else if (mrioc->sas_transport_enabled &&
1939 (dev_pg0->device_form == MPI3_DEVICE_DEVFORM_SAS_SATA)) {
1940 sasinf = &dev_pg0->device_specific.sas_sata_format;
1941 dev_info = le16_to_cpu(sasinf->device_info);
1942 if (!mrioc->sas_hba.num_phys)
1943 mpi3mr_sas_host_add(mrioc);
1944 else
1945 mpi3mr_sas_host_refresh(mrioc);
1946
1947 if (mpi3mr_is_expander_device(dev_info))
1948 mpi3mr_expander_add(mrioc, handle);
1949 }
1950 break;
1951 }
1952 case MPI3_EVENT_DEVICE_INFO_CHANGED:
1953 {
1954 dev_pg0 = (struct mpi3_device_page0 *)fwevt->event_data;
1955 perst_id = le16_to_cpu(dev_pg0->persistent_id);
1956 if (perst_id != MPI3_DEVICE0_PERSISTENTID_INVALID)
1957 mpi3mr_devinfochg_evt_bh(mrioc, dev_pg0);
1958 break;
1959 }
1960 case MPI3_EVENT_DEVICE_STATUS_CHANGE:
1961 {
1962 mpi3mr_devstatuschg_evt_bh(mrioc, fwevt);
1963 break;
1964 }
1965 case MPI3_EVENT_ENCL_DEVICE_ADDED:
1966 case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE:
1967 {
1968 mpi3mr_encldev_add_chg_evt_bh(mrioc, fwevt);
1969 break;
1970 }
1971
1972 case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
1973 {
1974 mpi3mr_sastopochg_evt_bh(mrioc, fwevt);
1975 break;
1976 }
1977 case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
1978 {
1979 mpi3mr_pcietopochg_evt_bh(mrioc, fwevt);
1980 break;
1981 }
1982 case MPI3_EVENT_LOG_DATA:
1983 {
1984 mpi3mr_logdata_evt_bh(mrioc, fwevt);
1985 break;
1986 }
1987 case MPI3MR_DRIVER_EVENT_TG_QD_REDUCTION:
1988 {
1989 struct mpi3mr_throttle_group_info *tg;
1990
1991 tg = *(struct mpi3mr_throttle_group_info **)fwevt->event_data;
1992 dprint_event_bh(mrioc,
1993 "qd reduction event processed for tg_id(%d) reduction_needed(%d)\n",
1994 tg->id, tg->need_qd_reduction);
1995 if (tg->need_qd_reduction) {
1996 mpi3mr_set_qd_for_all_vd_in_tg(mrioc, tg);
1997 tg->need_qd_reduction = 0;
1998 }
1999 break;
2000 }
2001 case MPI3_EVENT_WAIT_FOR_DEVICES_TO_REFRESH:
2002 {
2003 while (mrioc->device_refresh_on)
2004 msleep(500);
2005
2006 dprint_event_bh(mrioc,
2007 "scan for non responding and newly added devices after soft reset started\n");
2008 if (mrioc->sas_transport_enabled) {
2009 mpi3mr_refresh_sas_ports(mrioc);
2010 mpi3mr_refresh_expanders(mrioc);
2011 }
2012 mpi3mr_rfresh_tgtdevs(mrioc);
2013 ioc_info(mrioc,
2014 "scan for non responding and newly added devices after soft reset completed\n");
2015 break;
2016 }
2017 default:
2018 break;
2019 }
2020
2021 evt_ack:
2022 if (fwevt->send_ack)
2023 mpi3mr_process_event_ack(mrioc, fwevt->event_id,
2024 fwevt->evt_ctx);
2025 out:
2026 /* Put fwevt reference count to neutralize kref_init increment */
2027 mrioc->current_event = NULL;
2028 mpi3mr_fwevt_put(fwevt);
2029 }
2030
2031 /**
2032 * mpi3mr_fwevt_worker - Firmware event worker
2033 * @work: Work struct containing firmware event
2034 *
2035 * Extracts the firmware event and calls mpi3mr_fwevt_bh.
2036 *
2037 * Return: Nothing.
2038 */
2039 static void mpi3mr_fwevt_worker(struct work_struct *work)
2040 {
2041 struct mpi3mr_fwevt *fwevt = container_of(work, struct mpi3mr_fwevt,
2042 work);
2043 mpi3mr_fwevt_bh(fwevt->mrioc, fwevt);
2044 /*
2045 * Put fwevt reference count after
2046 * dequeuing it from worker queue
2047 */
2048 mpi3mr_fwevt_put(fwevt);
2049 }
2050
2051 /**
2052 * mpi3mr_create_tgtdev - Create and add a target device
2053 * @mrioc: Adapter instance reference
2054 * @dev_pg0: Device Page 0 data
2055 *
2056 * If the device specified by the device page 0 data is not
2057 * present in the driver's internal list, allocate the memory
2058 * for the device, populate the data and add to the list, else
2059 * update the device data. The key is persistent ID.
2060 *
2061 * Return: 0 on success, -ENOMEM on memory allocation failure
2062 */
2063 static int mpi3mr_create_tgtdev(struct mpi3mr_ioc *mrioc,
2064 struct mpi3_device_page0 *dev_pg0)
2065 {
2066 int retval = 0;
2067 struct mpi3mr_tgt_dev *tgtdev = NULL;
2068 u16 perst_id = 0;
2069 unsigned long flags;
2070
2071 perst_id = le16_to_cpu(dev_pg0->persistent_id);
2072 if (perst_id == MPI3_DEVICE0_PERSISTENTID_INVALID)
2073 return retval;
2074
2075 spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
2076 tgtdev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, perst_id);
2077 if (tgtdev)
2078 tgtdev->state = MPI3MR_DEV_CREATED;
2079 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
2080
2081 if (tgtdev) {
2082 mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0, true);
2083 mpi3mr_tgtdev_put(tgtdev);
2084 } else {
2085 tgtdev = mpi3mr_alloc_tgtdev();
2086 if (!tgtdev)
2087 return -ENOMEM;
2088 mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0, true);
2089 mpi3mr_tgtdev_add_to_list(mrioc, tgtdev);
2090 }
2091
2092 return retval;
2093 }
2094
2095 /**
2096 * mpi3mr_flush_delayed_cmd_lists - Flush pending commands
2097 * @mrioc: Adapter instance reference
2098 *
2099 * Flush pending commands in the delayed lists due to a
2100 * controller reset or driver removal as a cleanup.
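*
* Both lists are drained with the usual pop-head-until-empty idiom,
* sketched here for the device removal list (illustrative only):
*
*	while (!list_empty(&mrioc->delayed_rmhs_list)) {
*		node = list_entry(mrioc->delayed_rmhs_list.next,
*		    struct delayed_dev_rmhs_node, list);
*		list_del(&node->list);
*		kfree(node);
*	}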
2101 *
2102 * Return: Nothing
2103 */
2104 void mpi3mr_flush_delayed_cmd_lists(struct mpi3mr_ioc *mrioc)
2105 {
2106 struct delayed_dev_rmhs_node *_rmhs_node;
2107 struct delayed_evt_ack_node *_evtack_node;
2108
2109 dprint_reset(mrioc, "flushing delayed dev_remove_hs commands\n");
2110 while (!list_empty(&mrioc->delayed_rmhs_list)) {
2111 _rmhs_node = list_entry(mrioc->delayed_rmhs_list.next,
2112 struct delayed_dev_rmhs_node, list);
2113 list_del(&_rmhs_node->list);
2114 kfree(_rmhs_node);
2115 }
2116 dprint_reset(mrioc, "flushing delayed event ack commands\n");
2117 while (!list_empty(&mrioc->delayed_evtack_cmds_list)) {
2118 _evtack_node = list_entry(mrioc->delayed_evtack_cmds_list.next,
2119 struct delayed_evt_ack_node, list);
2120 list_del(&_evtack_node->list);
2121 kfree(_evtack_node);
2122 }
2123 }
2124
2125 /**
2126 * mpi3mr_dev_rmhs_complete_iou - Device removal IOUC completion
2127 * @mrioc: Adapter instance reference
2128 * @drv_cmd: Internal command tracker
2129 *
2130 * Issues a target reset TM to the firmware from the device
2131 * removal TM pend list or retries the removal handshake sequence
2132 * based on the IOU control request IOC status.
2133 *
2134 * Return: Nothing
2135 */
2136 static void mpi3mr_dev_rmhs_complete_iou(struct mpi3mr_ioc *mrioc,
2137 struct mpi3mr_drv_cmd *drv_cmd)
2138 {
2139 u16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
2140 struct delayed_dev_rmhs_node *delayed_dev_rmhs = NULL;
2141
2142 if (drv_cmd->state & MPI3MR_CMD_RESET)
2143 goto clear_drv_cmd;
2144
2145 ioc_info(mrioc,
2146 "%s :dev_rmhs_iouctrl_complete:handle(0x%04x), ioc_status(0x%04x), loginfo(0x%08x)\n",
2147 __func__, drv_cmd->dev_handle, drv_cmd->ioc_status,
2148 drv_cmd->ioc_loginfo);
2149 if (drv_cmd->ioc_status != MPI3_IOCSTATUS_SUCCESS) {
2150 if (drv_cmd->retry_count < MPI3MR_DEV_RMHS_RETRY_COUNT) {
2151 drv_cmd->retry_count++;
2152 ioc_info(mrioc,
2153 "%s :dev_rmhs_iouctrl_complete: handle(0x%04x) retrying handshake retry=%d\n",
2154 __func__, drv_cmd->dev_handle,
2155 drv_cmd->retry_count);
2156 mpi3mr_dev_rmhs_send_tm(mrioc, drv_cmd->dev_handle,
2157 drv_cmd, drv_cmd->iou_rc);
2158 return;
2159 }
2160 ioc_err(mrioc,
2161 "%s :dev removal handshake failed after all retries: handle(0x%04x)\n",
2162 __func__, drv_cmd->dev_handle);
2163 } else {
2164 ioc_info(mrioc,
2165 "%s :dev removal handshake completed successfully: handle(0x%04x)\n",
2166 __func__, drv_cmd->dev_handle);
2167 clear_bit(drv_cmd->dev_handle, mrioc->removepend_bitmap);
2168 }
2169
2170 if (!list_empty(&mrioc->delayed_rmhs_list)) {
2171 delayed_dev_rmhs = list_entry(mrioc->delayed_rmhs_list.next,
2172 struct delayed_dev_rmhs_node, list);
2173 drv_cmd->dev_handle = delayed_dev_rmhs->handle;
2174 drv_cmd->retry_count = 0;
2175 drv_cmd->iou_rc = delayed_dev_rmhs->iou_rc;
2176 ioc_info(mrioc,
2177 "%s :dev_rmhs_iouctrl_complete: processing delayed TM: handle(0x%04x)\n",
2178 __func__, drv_cmd->dev_handle);
2179 mpi3mr_dev_rmhs_send_tm(mrioc, drv_cmd->dev_handle, drv_cmd,
2180 drv_cmd->iou_rc);
2181 list_del(&delayed_dev_rmhs->list);
2182 kfree(delayed_dev_rmhs);
2183 return;
2184 }
2185
2186 clear_drv_cmd:
2187 drv_cmd->state = MPI3MR_CMD_NOTUSED;
2188 drv_cmd->callback = NULL;
2189 drv_cmd->retry_count = 0;
2190 drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
2191 clear_bit(cmd_idx, mrioc->devrem_bitmap);
2192 }
2193
2194 /**
2195 * mpi3mr_dev_rmhs_complete_tm - Device removal TM completion
2196 * @mrioc: Adapter instance reference
2197 * @drv_cmd: Internal command tracker
2198 *
2199 * Issues a
target reset TM to the firmware from the device
2200 * removal TM pend list or issues an IO unit control request as
2201 * part of device removal or hidden acknowledgment handshake.
2202 *
2203 * Return: Nothing
2204 */
2205 static void mpi3mr_dev_rmhs_complete_tm(struct mpi3mr_ioc *mrioc,
2206 struct mpi3mr_drv_cmd *drv_cmd)
2207 {
2208 struct mpi3_iounit_control_request iou_ctrl;
2209 u16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
2210 struct mpi3_scsi_task_mgmt_reply *tm_reply = NULL;
2211 int retval;
2212
2213 if (drv_cmd->state & MPI3MR_CMD_RESET)
2214 goto clear_drv_cmd;
2215
2216 if (drv_cmd->state & MPI3MR_CMD_REPLY_VALID)
2217 tm_reply = (struct mpi3_scsi_task_mgmt_reply *)drv_cmd->reply;
2218
2219 if (tm_reply)
2220 pr_info(IOCNAME
2221 "dev_rmhs_tr_complete:handle(0x%04x), ioc_status(0x%04x), loginfo(0x%08x), term_count(%d)\n",
2222 mrioc->name, drv_cmd->dev_handle, drv_cmd->ioc_status,
2223 drv_cmd->ioc_loginfo,
2224 le32_to_cpu(tm_reply->termination_count));
2225
2226 pr_info(IOCNAME "Issuing IOU CTL: handle(0x%04x) dev_rmhs idx(%d)\n",
2227 mrioc->name, drv_cmd->dev_handle, cmd_idx);
2228
2229 memset(&iou_ctrl, 0, sizeof(iou_ctrl));
2230
2231 drv_cmd->state = MPI3MR_CMD_PENDING;
2232 drv_cmd->is_waiting = 0;
2233 drv_cmd->callback = mpi3mr_dev_rmhs_complete_iou;
2234 iou_ctrl.operation = drv_cmd->iou_rc;
2235 iou_ctrl.param16[0] = cpu_to_le16(drv_cmd->dev_handle);
2236 iou_ctrl.host_tag = cpu_to_le16(drv_cmd->host_tag);
2237 iou_ctrl.function = MPI3_FUNCTION_IO_UNIT_CONTROL;
2238
2239 retval = mpi3mr_admin_request_post(mrioc, &iou_ctrl, sizeof(iou_ctrl),
2240 1);
2241 if (retval) {
2242 pr_err(IOCNAME "Issue DevRmHsTMIOUCTL: Admin post failed\n",
2243 mrioc->name);
2244 goto clear_drv_cmd;
2245 }
2246
2247 return;
2248 clear_drv_cmd:
2249 drv_cmd->state = MPI3MR_CMD_NOTUSED;
2250 drv_cmd->callback = NULL;
2251 drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
2252 drv_cmd->retry_count = 0;
2253 clear_bit(cmd_idx, mrioc->devrem_bitmap);
2254 }
2255
2256 /**
2257 * mpi3mr_dev_rmhs_send_tm - Issue TM for device removal
2258 * @mrioc: Adapter instance reference
2259 * @handle: Device handle
2260 * @cmdparam: Internal command tracker
2261 * @iou_rc: IO unit reason code
2262 *
2263 * Issues a target reset TM to the firmware or adds it to a pend
2264 * list as part of device removal or hidden acknowledgment
2265 * handshake.
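*
* Typical usage, as in the topology and device status change top halves
* later in this file; a NULL @cmdparam makes the function pick a free
* dev_rmhs command slot itself, or queue the request if none is free:
*
*	mpi3mr_dev_rmhs_send_tm(mrioc, handle, NULL,
*	    MPI3_CTRL_OP_REMOVE_DEVICE);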
2266 * 2267 * Return: Nothing 2268 */ 2269 static void mpi3mr_dev_rmhs_send_tm(struct mpi3mr_ioc *mrioc, u16 handle, 2270 struct mpi3mr_drv_cmd *cmdparam, u8 iou_rc) 2271 { 2272 struct mpi3_scsi_task_mgmt_request tm_req; 2273 int retval = 0; 2274 u16 cmd_idx = MPI3MR_NUM_DEVRMCMD; 2275 u8 retrycount = 5; 2276 struct mpi3mr_drv_cmd *drv_cmd = cmdparam; 2277 struct delayed_dev_rmhs_node *delayed_dev_rmhs = NULL; 2278 struct mpi3mr_tgt_dev *tgtdev = NULL; 2279 unsigned long flags; 2280 2281 spin_lock_irqsave(&mrioc->tgtdev_lock, flags); 2282 tgtdev = __mpi3mr_get_tgtdev_by_handle(mrioc, handle); 2283 if (tgtdev && (iou_rc == MPI3_CTRL_OP_REMOVE_DEVICE)) 2284 tgtdev->state = MPI3MR_DEV_REMOVE_HS_STARTED; 2285 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 2286 2287 if (drv_cmd) 2288 goto issue_cmd; 2289 do { 2290 cmd_idx = find_first_zero_bit(mrioc->devrem_bitmap, 2291 MPI3MR_NUM_DEVRMCMD); 2292 if (cmd_idx < MPI3MR_NUM_DEVRMCMD) { 2293 if (!test_and_set_bit(cmd_idx, mrioc->devrem_bitmap)) 2294 break; 2295 cmd_idx = MPI3MR_NUM_DEVRMCMD; 2296 } 2297 } while (retrycount--); 2298 2299 if (cmd_idx >= MPI3MR_NUM_DEVRMCMD) { 2300 delayed_dev_rmhs = kzalloc(sizeof(*delayed_dev_rmhs), 2301 GFP_ATOMIC); 2302 if (!delayed_dev_rmhs) 2303 return; 2304 INIT_LIST_HEAD(&delayed_dev_rmhs->list); 2305 delayed_dev_rmhs->handle = handle; 2306 delayed_dev_rmhs->iou_rc = iou_rc; 2307 list_add_tail(&delayed_dev_rmhs->list, 2308 &mrioc->delayed_rmhs_list); 2309 ioc_info(mrioc, "%s :DevRmHs: tr:handle(0x%04x) is postponed\n", 2310 __func__, handle); 2311 return; 2312 } 2313 drv_cmd = &mrioc->dev_rmhs_cmds[cmd_idx]; 2314 2315 issue_cmd: 2316 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN; 2317 ioc_info(mrioc, 2318 "%s :Issuing TR TM: for devhandle 0x%04x with dev_rmhs %d\n", 2319 __func__, handle, cmd_idx); 2320 2321 memset(&tm_req, 0, sizeof(tm_req)); 2322 if (drv_cmd->state & MPI3MR_CMD_PENDING) { 2323 ioc_err(mrioc, "%s :Issue TM: Command is in use\n", __func__); 2324 goto out; 2325 } 2326 drv_cmd->state = MPI3MR_CMD_PENDING; 2327 drv_cmd->is_waiting = 0; 2328 drv_cmd->callback = mpi3mr_dev_rmhs_complete_tm; 2329 drv_cmd->dev_handle = handle; 2330 drv_cmd->iou_rc = iou_rc; 2331 tm_req.dev_handle = cpu_to_le16(handle); 2332 tm_req.task_type = MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET; 2333 tm_req.host_tag = cpu_to_le16(drv_cmd->host_tag); 2334 tm_req.task_host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INVALID); 2335 tm_req.function = MPI3_FUNCTION_SCSI_TASK_MGMT; 2336 2337 set_bit(handle, mrioc->removepend_bitmap); 2338 retval = mpi3mr_admin_request_post(mrioc, &tm_req, sizeof(tm_req), 1); 2339 if (retval) { 2340 ioc_err(mrioc, "%s :Issue DevRmHsTM: Admin Post failed\n", 2341 __func__); 2342 goto out_failed; 2343 } 2344 out: 2345 return; 2346 out_failed: 2347 drv_cmd->state = MPI3MR_CMD_NOTUSED; 2348 drv_cmd->callback = NULL; 2349 drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE; 2350 drv_cmd->retry_count = 0; 2351 clear_bit(cmd_idx, mrioc->devrem_bitmap); 2352 } 2353 2354 /** 2355 * mpi3mr_complete_evt_ack - event ack request completion 2356 * @mrioc: Adapter instance reference 2357 * @drv_cmd: Internal command tracker 2358 * 2359 * This is the completion handler for non blocking event 2360 * acknowledgment sent to the firmware and this will issue any 2361 * pending event acknowledgment request. 
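*
* The command slot is recovered from the host tag the same way it was
* assigned; a worked example of the index math used below:
*
*	cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN;
*	clear_bit(cmd_idx, mrioc->evtack_cmds_bitmap);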
2362 *
2363 * Return: Nothing
2364 */
2365 static void mpi3mr_complete_evt_ack(struct mpi3mr_ioc *mrioc,
2366 struct mpi3mr_drv_cmd *drv_cmd)
2367 {
2368 u16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN;
2369 struct delayed_evt_ack_node *delayed_evtack = NULL;
2370
2371 if (drv_cmd->state & MPI3MR_CMD_RESET)
2372 goto clear_drv_cmd;
2373
2374 if (drv_cmd->ioc_status != MPI3_IOCSTATUS_SUCCESS) {
2375 dprint_event_th(mrioc,
2376 "immediate event ack failed with ioc_status(0x%04x) log_info(0x%08x)\n",
2377 (drv_cmd->ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
2378 drv_cmd->ioc_loginfo);
2379 }
2380
2381 if (!list_empty(&mrioc->delayed_evtack_cmds_list)) {
2382 delayed_evtack =
2383 list_entry(mrioc->delayed_evtack_cmds_list.next,
2384 struct delayed_evt_ack_node, list);
2385 mpi3mr_send_event_ack(mrioc, delayed_evtack->event, drv_cmd,
2386 delayed_evtack->event_ctx);
2387 list_del(&delayed_evtack->list);
2388 kfree(delayed_evtack);
2389 return;
2390 }
2391 clear_drv_cmd:
2392 drv_cmd->state = MPI3MR_CMD_NOTUSED;
2393 drv_cmd->callback = NULL;
2394 clear_bit(cmd_idx, mrioc->evtack_cmds_bitmap);
2395 }
2396
2397 /**
2398 * mpi3mr_send_event_ack - Issue event acknowledgment request
2399 * @mrioc: Adapter instance reference
2400 * @event: MPI3 event id
2401 * @cmdparam: Internal command tracker
2402 * @event_ctx: event context
2403 *
2404 * Issues an event acknowledgment request to the firmware if there
2405 * is a free command to send the event ack, else adds it to a pend
2406 * list so that it will be processed on completion of a prior
2407 * event acknowledgment.
2408 *
2409 * Return: Nothing
2410 */
2411 static void mpi3mr_send_event_ack(struct mpi3mr_ioc *mrioc, u8 event,
2412 struct mpi3mr_drv_cmd *cmdparam, u32 event_ctx)
2413 {
2414 struct mpi3_event_ack_request evtack_req;
2415 int retval = 0;
2416 u8 retrycount = 5;
2417 u16 cmd_idx = MPI3MR_NUM_EVTACKCMD;
2418 struct mpi3mr_drv_cmd *drv_cmd = cmdparam;
2419 struct delayed_evt_ack_node *delayed_evtack = NULL;
2420
2421 if (drv_cmd) {
2422 dprint_event_th(mrioc,
2423 "sending delayed event ack in the top half for event(0x%02x), event_ctx(0x%08x)\n",
2424 event, event_ctx);
2425 goto issue_cmd;
2426 }
2427 dprint_event_th(mrioc,
2428 "sending event ack in the top half for event(0x%02x), event_ctx(0x%08x)\n",
2429 event, event_ctx);
2430 do {
2431 cmd_idx = find_first_zero_bit(mrioc->evtack_cmds_bitmap,
2432 MPI3MR_NUM_EVTACKCMD);
2433 if (cmd_idx < MPI3MR_NUM_EVTACKCMD) {
2434 if (!test_and_set_bit(cmd_idx,
2435 mrioc->evtack_cmds_bitmap))
2436 break;
2437 cmd_idx = MPI3MR_NUM_EVTACKCMD;
2438 }
2439 } while (retrycount--);
2440
2441 if (cmd_idx >= MPI3MR_NUM_EVTACKCMD) {
2442 delayed_evtack = kzalloc(sizeof(*delayed_evtack),
2443 GFP_ATOMIC);
2444 if (!delayed_evtack)
2445 return;
2446 INIT_LIST_HEAD(&delayed_evtack->list);
2447 delayed_evtack->event = event;
2448 delayed_evtack->event_ctx = event_ctx;
2449 list_add_tail(&delayed_evtack->list,
2450 &mrioc->delayed_evtack_cmds_list);
2451 dprint_event_th(mrioc,
2452 "event ack in the top half for event(0x%02x), event_ctx(0x%08x) is postponed\n",
2453 event, event_ctx);
2454 return;
2455 }
2456 drv_cmd = &mrioc->evtack_cmds[cmd_idx];
2457
2458 issue_cmd:
2459 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN;
2460
2461 memset(&evtack_req, 0, sizeof(evtack_req));
2462 if (drv_cmd->state & MPI3MR_CMD_PENDING) {
2463 dprint_event_th(mrioc,
2464 "sending event ack failed due to command in use\n");
2465 goto out;
2466 }
2467 drv_cmd->state = MPI3MR_CMD_PENDING;
2468
drv_cmd->is_waiting = 0; 2469 drv_cmd->callback = mpi3mr_complete_evt_ack; 2470 evtack_req.host_tag = cpu_to_le16(drv_cmd->host_tag); 2471 evtack_req.function = MPI3_FUNCTION_EVENT_ACK; 2472 evtack_req.event = event; 2473 evtack_req.event_context = cpu_to_le32(event_ctx); 2474 retval = mpi3mr_admin_request_post(mrioc, &evtack_req, 2475 sizeof(evtack_req), 1); 2476 if (retval) { 2477 dprint_event_th(mrioc, 2478 "posting event ack request is failed\n"); 2479 goto out_failed; 2480 } 2481 2482 dprint_event_th(mrioc, 2483 "event ack in the top half for event(0x%02x), event_ctx(0x%08x) is posted\n", 2484 event, event_ctx); 2485 out: 2486 return; 2487 out_failed: 2488 drv_cmd->state = MPI3MR_CMD_NOTUSED; 2489 drv_cmd->callback = NULL; 2490 clear_bit(cmd_idx, mrioc->evtack_cmds_bitmap); 2491 } 2492 2493 /** 2494 * mpi3mr_pcietopochg_evt_th - PCIETopologyChange evt tophalf 2495 * @mrioc: Adapter instance reference 2496 * @event_reply: event data 2497 * 2498 * Checks for the reason code and based on that either block I/O 2499 * to device, or unblock I/O to the device, or start the device 2500 * removal handshake with reason as remove with the firmware for 2501 * PCIe devices. 2502 * 2503 * Return: Nothing 2504 */ 2505 static void mpi3mr_pcietopochg_evt_th(struct mpi3mr_ioc *mrioc, 2506 struct mpi3_event_notification_reply *event_reply) 2507 { 2508 struct mpi3_event_data_pcie_topology_change_list *topo_evt = 2509 (struct mpi3_event_data_pcie_topology_change_list *)event_reply->event_data; 2510 int i; 2511 u16 handle; 2512 u8 reason_code; 2513 struct mpi3mr_tgt_dev *tgtdev = NULL; 2514 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL; 2515 2516 for (i = 0; i < topo_evt->num_entries; i++) { 2517 handle = le16_to_cpu(topo_evt->port_entry[i].attached_dev_handle); 2518 if (!handle) 2519 continue; 2520 reason_code = topo_evt->port_entry[i].port_status; 2521 scsi_tgt_priv_data = NULL; 2522 tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle); 2523 if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata) 2524 scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *) 2525 tgtdev->starget->hostdata; 2526 switch (reason_code) { 2527 case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING: 2528 if (scsi_tgt_priv_data) { 2529 scsi_tgt_priv_data->dev_removed = 1; 2530 scsi_tgt_priv_data->dev_removedelay = 0; 2531 atomic_set(&scsi_tgt_priv_data->block_io, 0); 2532 } 2533 mpi3mr_dev_rmhs_send_tm(mrioc, handle, NULL, 2534 MPI3_CTRL_OP_REMOVE_DEVICE); 2535 break; 2536 case MPI3_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING: 2537 if (scsi_tgt_priv_data) { 2538 scsi_tgt_priv_data->dev_removedelay = 1; 2539 atomic_inc(&scsi_tgt_priv_data->block_io); 2540 } 2541 break; 2542 case MPI3_EVENT_PCIE_TOPO_PS_RESPONDING: 2543 if (scsi_tgt_priv_data && 2544 scsi_tgt_priv_data->dev_removedelay) { 2545 scsi_tgt_priv_data->dev_removedelay = 0; 2546 atomic_dec_if_positive 2547 (&scsi_tgt_priv_data->block_io); 2548 } 2549 break; 2550 case MPI3_EVENT_PCIE_TOPO_PS_PORT_CHANGED: 2551 default: 2552 break; 2553 } 2554 if (tgtdev) 2555 mpi3mr_tgtdev_put(tgtdev); 2556 } 2557 } 2558 2559 /** 2560 * mpi3mr_sastopochg_evt_th - SASTopologyChange evt tophalf 2561 * @mrioc: Adapter instance reference 2562 * @event_reply: event data 2563 * 2564 * Checks for the reason code and based on that either block I/O 2565 * to device, or unblock I/O to the device, or start the device 2566 * removal handshake with reason as remove with the firmware for 2567 * SAS/SATA devices. 
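*
* Summary of the reason code handling below:
*
*	TARG_NOT_RESPONDING  - mark removed, clear I/O block, start
*	                       REMOVE_DEVICE handshake
*	DELAY_NOT_RESPONDING - set removal delay, block I/O
*	RESPONDING           - clear removal delay, unblock I/O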
2568 * 2569 * Return: Nothing 2570 */ 2571 static void mpi3mr_sastopochg_evt_th(struct mpi3mr_ioc *mrioc, 2572 struct mpi3_event_notification_reply *event_reply) 2573 { 2574 struct mpi3_event_data_sas_topology_change_list *topo_evt = 2575 (struct mpi3_event_data_sas_topology_change_list *)event_reply->event_data; 2576 int i; 2577 u16 handle; 2578 u8 reason_code; 2579 struct mpi3mr_tgt_dev *tgtdev = NULL; 2580 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL; 2581 2582 for (i = 0; i < topo_evt->num_entries; i++) { 2583 handle = le16_to_cpu(topo_evt->phy_entry[i].attached_dev_handle); 2584 if (!handle) 2585 continue; 2586 reason_code = topo_evt->phy_entry[i].status & 2587 MPI3_EVENT_SAS_TOPO_PHY_RC_MASK; 2588 scsi_tgt_priv_data = NULL; 2589 tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle); 2590 if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata) 2591 scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *) 2592 tgtdev->starget->hostdata; 2593 switch (reason_code) { 2594 case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING: 2595 if (scsi_tgt_priv_data) { 2596 scsi_tgt_priv_data->dev_removed = 1; 2597 scsi_tgt_priv_data->dev_removedelay = 0; 2598 atomic_set(&scsi_tgt_priv_data->block_io, 0); 2599 } 2600 mpi3mr_dev_rmhs_send_tm(mrioc, handle, NULL, 2601 MPI3_CTRL_OP_REMOVE_DEVICE); 2602 break; 2603 case MPI3_EVENT_SAS_TOPO_PHY_RC_DELAY_NOT_RESPONDING: 2604 if (scsi_tgt_priv_data) { 2605 scsi_tgt_priv_data->dev_removedelay = 1; 2606 atomic_inc(&scsi_tgt_priv_data->block_io); 2607 } 2608 break; 2609 case MPI3_EVENT_SAS_TOPO_PHY_RC_RESPONDING: 2610 if (scsi_tgt_priv_data && 2611 scsi_tgt_priv_data->dev_removedelay) { 2612 scsi_tgt_priv_data->dev_removedelay = 0; 2613 atomic_dec_if_positive 2614 (&scsi_tgt_priv_data->block_io); 2615 } 2616 break; 2617 case MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED: 2618 default: 2619 break; 2620 } 2621 if (tgtdev) 2622 mpi3mr_tgtdev_put(tgtdev); 2623 } 2624 } 2625 2626 /** 2627 * mpi3mr_devstatuschg_evt_th - DeviceStatusChange evt tophalf 2628 * @mrioc: Adapter instance reference 2629 * @event_reply: event data 2630 * 2631 * Checks for the reason code and based on that either block I/O 2632 * to device, or unblock I/O to the device, or start the device 2633 * removal handshake with reason as remove/hide acknowledgment 2634 * with the firmware. 
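*
* Summary of the reason code to action mapping below:
*
*	..._RESET_STRT       - block I/O
*	..._RESET_CMP        - unblock I/O
*	RC_HIDDEN            - mark removed, HIDDEN_ACK handshake
*	RC_VD_NOT_RESPONDING - mark removed, REMOVE_DEVICE handshake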
2635 *
2636 * Return: Nothing
2637 */
2638 static void mpi3mr_devstatuschg_evt_th(struct mpi3mr_ioc *mrioc,
2639 struct mpi3_event_notification_reply *event_reply)
2640 {
2641 u16 dev_handle = 0;
2642 u8 ublock = 0, block = 0, hide = 0, delete = 0, remove = 0;
2643 struct mpi3mr_tgt_dev *tgtdev = NULL;
2644 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL;
2645 struct mpi3_event_data_device_status_change *evtdata =
2646 (struct mpi3_event_data_device_status_change *)event_reply->event_data;
2647
2648 if (mrioc->stop_drv_processing)
2649 goto out;
2650
2651 dev_handle = le16_to_cpu(evtdata->dev_handle);
2652
2653 switch (evtdata->reason_code) {
2654 case MPI3_EVENT_DEV_STAT_RC_INT_DEVICE_RESET_STRT:
2655 case MPI3_EVENT_DEV_STAT_RC_INT_IT_NEXUS_RESET_STRT:
2656 block = 1;
2657 break;
2658 case MPI3_EVENT_DEV_STAT_RC_HIDDEN:
2659 delete = 1;
2660 hide = 1;
2661 break;
2662 case MPI3_EVENT_DEV_STAT_RC_VD_NOT_RESPONDING:
2663 delete = 1;
2664 remove = 1;
2665 break;
2666 case MPI3_EVENT_DEV_STAT_RC_INT_DEVICE_RESET_CMP:
2667 case MPI3_EVENT_DEV_STAT_RC_INT_IT_NEXUS_RESET_CMP:
2668 ublock = 1;
2669 break;
2670 default:
2671 break;
2672 }
2673
2674 tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle);
2675 if (!tgtdev)
2676 goto out;
2677 if (hide)
2678 tgtdev->is_hidden = hide;
2679 if (tgtdev->starget && tgtdev->starget->hostdata) {
2680 scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *)
2681 tgtdev->starget->hostdata;
2682 if (block)
2683 atomic_inc(&scsi_tgt_priv_data->block_io);
2684 if (delete)
2685 scsi_tgt_priv_data->dev_removed = 1;
2686 if (ublock)
2687 atomic_dec_if_positive(&scsi_tgt_priv_data->block_io);
2688 }
2689 if (remove)
2690 mpi3mr_dev_rmhs_send_tm(mrioc, dev_handle, NULL,
2691 MPI3_CTRL_OP_REMOVE_DEVICE);
2692 if (hide)
2693 mpi3mr_dev_rmhs_send_tm(mrioc, dev_handle, NULL,
2694 MPI3_CTRL_OP_HIDDEN_ACK);
2695
2696 out:
2697 if (tgtdev)
2698 mpi3mr_tgtdev_put(tgtdev);
2699 }
2700
2701 /**
2702 * mpi3mr_preparereset_evt_th - Prepare for reset event tophalf
2703 * @mrioc: Adapter instance reference
2704 * @event_reply: event data
2705 *
2706 * Blocks and unblocks host level I/O based on the reason code.
2707 *
2708 * Return: Nothing
2709 */
2710 static void mpi3mr_preparereset_evt_th(struct mpi3mr_ioc *mrioc,
2711 struct mpi3_event_notification_reply *event_reply)
2712 {
2713 struct mpi3_event_data_prepare_for_reset *evtdata =
2714 (struct mpi3_event_data_prepare_for_reset *)event_reply->event_data;
2715
2716 if (evtdata->reason_code == MPI3_EVENT_PREPARE_RESET_RC_START) {
2717 dprint_event_th(mrioc,
2718 "prepare for reset event top half with rc=start\n");
2719 if (mrioc->prepare_for_reset)
2720 return;
2721 mrioc->prepare_for_reset = 1;
2722 mrioc->prepare_for_reset_timeout_counter = 0;
2723 } else if (evtdata->reason_code == MPI3_EVENT_PREPARE_RESET_RC_ABORT) {
2724 dprint_event_th(mrioc,
2725 "prepare for reset top half with rc=abort\n");
2726 mrioc->prepare_for_reset = 0;
2727 mrioc->prepare_for_reset_timeout_counter = 0;
2728 }
2729 if ((event_reply->msg_flags & MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_MASK)
2730 == MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_REQUIRED)
2731 mpi3mr_send_event_ack(mrioc, event_reply->event, NULL,
2732 le32_to_cpu(event_reply->event_context));
2733 }
2734
2735 /**
2736 * mpi3mr_energypackchg_evt_th - Energy pack change evt tophalf
2737 * @mrioc: Adapter instance reference
2738 * @event_reply: event data
2739 *
2740 * Identifies the new shutdown timeout value and updates it.
2741 *
2742 * Return: Nothing
2743 */
2744 static void mpi3mr_energypackchg_evt_th(struct mpi3mr_ioc *mrioc,
2745 struct mpi3_event_notification_reply *event_reply)
2746 {
2747 struct mpi3_event_data_energy_pack_change *evtdata =
2748 (struct mpi3_event_data_energy_pack_change *)event_reply->event_data;
2749 u16 shutdown_timeout = le16_to_cpu(evtdata->shutdown_timeout);
2750
2751 if (shutdown_timeout <= 0) {
2752 ioc_warn(mrioc,
2753 "%s :Invalid Shutdown Timeout received = %d\n",
2754 __func__, shutdown_timeout);
2755 return;
2756 }
2757
2758 ioc_info(mrioc,
2759 "%s :Previous Shutdown Timeout Value = %d New Shutdown Timeout Value = %d\n",
2760 __func__, mrioc->facts.shutdown_timeout, shutdown_timeout);
2761 mrioc->facts.shutdown_timeout = shutdown_timeout;
2762 }
2763
2764 /**
2765 * mpi3mr_cablemgmt_evt_th - Cable management event tophalf
2766 * @mrioc: Adapter instance reference
2767 * @event_reply: event data
2768 *
2769 * Displays cable management event details.
2770 *
2771 * Return: Nothing
2772 */
2773 static void mpi3mr_cablemgmt_evt_th(struct mpi3mr_ioc *mrioc,
2774 struct mpi3_event_notification_reply *event_reply)
2775 {
2776 struct mpi3_event_data_cable_management *evtdata =
2777 (struct mpi3_event_data_cable_management *)event_reply->event_data;
2778
2779 switch (evtdata->status) {
2780 case MPI3_EVENT_CABLE_MGMT_STATUS_INSUFFICIENT_POWER:
2781 {
2782 ioc_info(mrioc, "An active cable with receptacle_id %d cannot be powered.\n"
2783 "Devices connected to this cable are not detected.\n"
2784 "This cable requires %d mW of power.\n",
2785 evtdata->receptacle_id,
2786 le32_to_cpu(evtdata->active_cable_power_requirement));
2787 break;
2788 }
2789 case MPI3_EVENT_CABLE_MGMT_STATUS_DEGRADED:
2790 {
2791 ioc_info(mrioc, "A cable with receptacle_id %d is not running at optimal speed\n",
2792 evtdata->receptacle_id);
2793 break;
2794 }
2795 default:
2796 break;
2797 }
2798 }
2799
2800 /**
2801 * mpi3mr_add_event_wait_for_device_refresh - Add Wait for Device Refresh Event
2802 * @mrioc: Adapter instance reference
2803 *
2804 * Add driver specific event to make sure that the driver won't process the
2805 * events until all the devices are refreshed during soft reset.
2806 *
2807 * Return: Nothing
2808 */
2809 void mpi3mr_add_event_wait_for_device_refresh(struct mpi3mr_ioc *mrioc)
2810 {
2811 struct mpi3mr_fwevt *fwevt = NULL;
2812
2813 fwevt = mpi3mr_alloc_fwevt(0);
2814 if (!fwevt) {
2815 dprint_event_th(mrioc,
2816 "failed to schedule bottom half handler for event(0x%02x)\n",
2817 MPI3_EVENT_WAIT_FOR_DEVICES_TO_REFRESH);
2818 return;
2819 }
2820 fwevt->mrioc = mrioc;
2821 fwevt->event_id = MPI3_EVENT_WAIT_FOR_DEVICES_TO_REFRESH;
2822 fwevt->send_ack = 0;
2823 fwevt->process_evt = 1;
2824 fwevt->evt_ctx = 0;
2825 fwevt->event_data_size = 0;
2826 mpi3mr_fwevt_add_to_list(mrioc, fwevt);
2827 }
2828
2829 /**
2830 * mpi3mr_os_handle_events - Firmware event handler
2831 * @mrioc: Adapter instance reference
2832 * @event_reply: event data
2833 *
2834 * Identifies whether the event has to be handled and acknowledged,
2835 * and either processes the event in the tophalf and/or schedules a
2836 * bottom half through mpi3mr_fwevt_worker.
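*
* For an event that needs bottom half processing and/or an ack, the top
* half captures the payload into a driver firmware event node, roughly
* (sketch of the tail of this function; the event data length is
* reported in dwords, hence the multiply by 4):
*
*	sz = event_reply->event_data_length * 4;
*	fwevt = mpi3mr_alloc_fwevt(sz);
*	memcpy(fwevt->event_data, event_reply->event_data, sz);
*	fwevt->send_ack = ack_req;
*	fwevt->process_evt = process_evt_bh;
*	mpi3mr_fwevt_add_to_list(mrioc, fwevt);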
2837 * 2838 * Return: Nothing 2839 */ 2840 void mpi3mr_os_handle_events(struct mpi3mr_ioc *mrioc, 2841 struct mpi3_event_notification_reply *event_reply) 2842 { 2843 u16 evt_type, sz; 2844 struct mpi3mr_fwevt *fwevt = NULL; 2845 bool ack_req = 0, process_evt_bh = 0; 2846 2847 if (mrioc->stop_drv_processing) 2848 return; 2849 2850 if ((event_reply->msg_flags & MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_MASK) 2851 == MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_REQUIRED) 2852 ack_req = 1; 2853 2854 evt_type = event_reply->event; 2855 2856 switch (evt_type) { 2857 case MPI3_EVENT_DEVICE_ADDED: 2858 { 2859 struct mpi3_device_page0 *dev_pg0 = 2860 (struct mpi3_device_page0 *)event_reply->event_data; 2861 if (mpi3mr_create_tgtdev(mrioc, dev_pg0)) 2862 ioc_err(mrioc, 2863 "%s :Failed to add device in the device add event\n", 2864 __func__); 2865 else 2866 process_evt_bh = 1; 2867 break; 2868 } 2869 case MPI3_EVENT_DEVICE_STATUS_CHANGE: 2870 { 2871 process_evt_bh = 1; 2872 mpi3mr_devstatuschg_evt_th(mrioc, event_reply); 2873 break; 2874 } 2875 case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST: 2876 { 2877 process_evt_bh = 1; 2878 mpi3mr_sastopochg_evt_th(mrioc, event_reply); 2879 break; 2880 } 2881 case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST: 2882 { 2883 process_evt_bh = 1; 2884 mpi3mr_pcietopochg_evt_th(mrioc, event_reply); 2885 break; 2886 } 2887 case MPI3_EVENT_PREPARE_FOR_RESET: 2888 { 2889 mpi3mr_preparereset_evt_th(mrioc, event_reply); 2890 ack_req = 0; 2891 break; 2892 } 2893 case MPI3_EVENT_DEVICE_INFO_CHANGED: 2894 case MPI3_EVENT_LOG_DATA: 2895 case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE: 2896 case MPI3_EVENT_ENCL_DEVICE_ADDED: 2897 { 2898 process_evt_bh = 1; 2899 break; 2900 } 2901 case MPI3_EVENT_ENERGY_PACK_CHANGE: 2902 { 2903 mpi3mr_energypackchg_evt_th(mrioc, event_reply); 2904 break; 2905 } 2906 case MPI3_EVENT_CABLE_MGMT: 2907 { 2908 mpi3mr_cablemgmt_evt_th(mrioc, event_reply); 2909 break; 2910 } 2911 case MPI3_EVENT_SAS_DISCOVERY: 2912 case MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR: 2913 case MPI3_EVENT_SAS_BROADCAST_PRIMITIVE: 2914 case MPI3_EVENT_PCIE_ENUMERATION: 2915 break; 2916 default: 2917 ioc_info(mrioc, "%s :event 0x%02x is not handled\n", 2918 __func__, evt_type); 2919 break; 2920 } 2921 if (process_evt_bh || ack_req) { 2922 sz = event_reply->event_data_length * 4; 2923 fwevt = mpi3mr_alloc_fwevt(sz); 2924 if (!fwevt) { 2925 ioc_info(mrioc, "%s :failure at %s:%d/%s()!\n", 2926 __func__, __FILE__, __LINE__, __func__); 2927 return; 2928 } 2929 2930 memcpy(fwevt->event_data, event_reply->event_data, sz); 2931 fwevt->mrioc = mrioc; 2932 fwevt->event_id = evt_type; 2933 fwevt->send_ack = ack_req; 2934 fwevt->process_evt = process_evt_bh; 2935 fwevt->evt_ctx = le32_to_cpu(event_reply->event_context); 2936 mpi3mr_fwevt_add_to_list(mrioc, fwevt); 2937 } 2938 } 2939 2940 /** 2941 * mpi3mr_setup_eedp - Setup EEDP information in MPI3 SCSI IO 2942 * @mrioc: Adapter instance reference 2943 * @scmd: SCSI command reference 2944 * @scsiio_req: MPI3 SCSI IO request 2945 * 2946 * Identifies the protection information flags from the SCSI 2947 * command and set appropriate flags in the MPI3 SCSI IO 2948 * request. 
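*
* Summary of the protection operation to EEDP operation mapping applied
* below:
*
*	READ_STRIP / WRITE_STRIP   - CHECK_REMOVE
*	READ_INSERT / WRITE_INSERT - INSERT
*	READ_PASS / WRITE_PASS     - CHECK (CHECK_REGEN for WRITE_PASS
*	                             with IP checksum)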
2949 * 2950 * Return: Nothing 2951 */ 2952 static void mpi3mr_setup_eedp(struct mpi3mr_ioc *mrioc, 2953 struct scsi_cmnd *scmd, struct mpi3_scsi_io_request *scsiio_req) 2954 { 2955 u16 eedp_flags = 0; 2956 unsigned char prot_op = scsi_get_prot_op(scmd); 2957 2958 switch (prot_op) { 2959 case SCSI_PROT_NORMAL: 2960 return; 2961 case SCSI_PROT_READ_STRIP: 2962 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK_REMOVE; 2963 break; 2964 case SCSI_PROT_WRITE_INSERT: 2965 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_INSERT; 2966 break; 2967 case SCSI_PROT_READ_INSERT: 2968 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_INSERT; 2969 scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID; 2970 break; 2971 case SCSI_PROT_WRITE_STRIP: 2972 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK_REMOVE; 2973 scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID; 2974 break; 2975 case SCSI_PROT_READ_PASS: 2976 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK; 2977 scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID; 2978 break; 2979 case SCSI_PROT_WRITE_PASS: 2980 if (scmd->prot_flags & SCSI_PROT_IP_CHECKSUM) { 2981 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK_REGEN; 2982 scsiio_req->sgl[0].eedp.application_tag_translation_mask = 2983 0xffff; 2984 } else 2985 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK; 2986 2987 scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID; 2988 break; 2989 default: 2990 return; 2991 } 2992 2993 if (scmd->prot_flags & SCSI_PROT_GUARD_CHECK) 2994 eedp_flags |= MPI3_EEDPFLAGS_CHK_GUARD; 2995 2996 if (scmd->prot_flags & SCSI_PROT_IP_CHECKSUM) 2997 eedp_flags |= MPI3_EEDPFLAGS_HOST_GUARD_IP_CHKSUM; 2998 2999 if (scmd->prot_flags & SCSI_PROT_REF_CHECK) { 3000 eedp_flags |= MPI3_EEDPFLAGS_CHK_REF_TAG | 3001 MPI3_EEDPFLAGS_INCR_PRI_REF_TAG; 3002 scsiio_req->cdb.eedp32.primary_reference_tag = 3003 cpu_to_be32(scsi_prot_ref_tag(scmd)); 3004 } 3005 3006 if (scmd->prot_flags & SCSI_PROT_REF_INCREMENT) 3007 eedp_flags |= MPI3_EEDPFLAGS_INCR_PRI_REF_TAG; 3008 3009 eedp_flags |= MPI3_EEDPFLAGS_ESC_MODE_APPTAG_DISABLE; 3010 3011 switch (scsi_prot_interval(scmd)) { 3012 case 512: 3013 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_512; 3014 break; 3015 case 520: 3016 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_520; 3017 break; 3018 case 4080: 3019 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4080; 3020 break; 3021 case 4088: 3022 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4088; 3023 break; 3024 case 4096: 3025 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4096; 3026 break; 3027 case 4104: 3028 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4104; 3029 break; 3030 case 4160: 3031 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4160; 3032 break; 3033 default: 3034 break; 3035 } 3036 3037 scsiio_req->sgl[0].eedp.eedp_flags = cpu_to_le16(eedp_flags); 3038 scsiio_req->sgl[0].eedp.flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_EXTENDED; 3039 } 3040 3041 /** 3042 * mpi3mr_build_sense_buffer - Map sense information 3043 * @desc: Sense type 3044 * @buf: Sense buffer to populate 3045 * @key: Sense key 3046 * @asc: Additional sense code 3047 * @ascq: Additional sense code qualifier 3048 * 3049 * Maps the given sense information into either descriptor or 3050 * fixed format sense data. 
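*
* Byte layout produced below, for reference:
*
*	descriptor (0x72): [1] key, [2] ASC, [3] ASCQ
*	fixed      (0x70): [2] key, [7] 0x0a, [12] ASC, [13] ASCQ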
3051 * 3052 * Return: Nothing 3053 */ 3054 static inline void mpi3mr_build_sense_buffer(int desc, u8 *buf, u8 key, 3055 u8 asc, u8 ascq) 3056 { 3057 if (desc) { 3058 buf[0] = 0x72; /* descriptor, current */ 3059 buf[1] = key; 3060 buf[2] = asc; 3061 buf[3] = ascq; 3062 buf[7] = 0; 3063 } else { 3064 buf[0] = 0x70; /* fixed, current */ 3065 buf[2] = key; 3066 buf[7] = 0xa; 3067 buf[12] = asc; 3068 buf[13] = ascq; 3069 } 3070 } 3071 3072 /** 3073 * mpi3mr_map_eedp_error - Map EEDP errors from IOC status 3074 * @scmd: SCSI command reference 3075 * @ioc_status: status of MPI3 request 3076 * 3077 * Maps the EEDP error status of the SCSI IO request to sense 3078 * data. 3079 * 3080 * Return: Nothing 3081 */ 3082 static void mpi3mr_map_eedp_error(struct scsi_cmnd *scmd, 3083 u16 ioc_status) 3084 { 3085 u8 ascq = 0; 3086 3087 switch (ioc_status) { 3088 case MPI3_IOCSTATUS_EEDP_GUARD_ERROR: 3089 ascq = 0x01; 3090 break; 3091 case MPI3_IOCSTATUS_EEDP_APP_TAG_ERROR: 3092 ascq = 0x02; 3093 break; 3094 case MPI3_IOCSTATUS_EEDP_REF_TAG_ERROR: 3095 ascq = 0x03; 3096 break; 3097 default: 3098 ascq = 0x00; 3099 break; 3100 } 3101 3102 mpi3mr_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 3103 0x10, ascq); 3104 scmd->result = (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION; 3105 } 3106 3107 /** 3108 * mpi3mr_process_op_reply_desc - reply descriptor handler 3109 * @mrioc: Adapter instance reference 3110 * @reply_desc: Operational reply descriptor 3111 * @reply_dma: place holder for reply DMA address 3112 * @qidx: Operational queue index 3113 * 3114 * Process the operational reply descriptor and identifies the 3115 * descriptor type. Based on the descriptor map the MPI3 request 3116 * status to a SCSI command status and calls scsi_done call 3117 * back. 3118 * 3119 * Return: Nothing 3120 */ 3121 void mpi3mr_process_op_reply_desc(struct mpi3mr_ioc *mrioc, 3122 struct mpi3_default_reply_descriptor *reply_desc, u64 *reply_dma, u16 qidx) 3123 { 3124 u16 reply_desc_type, host_tag = 0; 3125 u16 ioc_status = MPI3_IOCSTATUS_SUCCESS; 3126 u32 ioc_loginfo = 0; 3127 struct mpi3_status_reply_descriptor *status_desc = NULL; 3128 struct mpi3_address_reply_descriptor *addr_desc = NULL; 3129 struct mpi3_success_reply_descriptor *success_desc = NULL; 3130 struct mpi3_scsi_io_reply *scsi_reply = NULL; 3131 struct scsi_cmnd *scmd = NULL; 3132 struct scmd_priv *priv = NULL; 3133 u8 *sense_buf = NULL; 3134 u8 scsi_state = 0, scsi_status = 0, sense_state = 0; 3135 u32 xfer_count = 0, sense_count = 0, resp_data = 0; 3136 u16 dev_handle = 0xFFFF; 3137 struct scsi_sense_hdr sshdr; 3138 struct mpi3mr_stgt_priv_data *stgt_priv_data = NULL; 3139 struct mpi3mr_sdev_priv_data *sdev_priv_data = NULL; 3140 u32 ioc_pend_data_len = 0, tg_pend_data_len = 0, data_len_blks = 0; 3141 struct mpi3mr_throttle_group_info *tg = NULL; 3142 u8 throttle_enabled_dev = 0; 3143 3144 *reply_dma = 0; 3145 reply_desc_type = le16_to_cpu(reply_desc->reply_flags) & 3146 MPI3_REPLY_DESCRIPT_FLAGS_TYPE_MASK; 3147 switch (reply_desc_type) { 3148 case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_STATUS: 3149 status_desc = (struct mpi3_status_reply_descriptor *)reply_desc; 3150 host_tag = le16_to_cpu(status_desc->host_tag); 3151 ioc_status = le16_to_cpu(status_desc->ioc_status); 3152 if (ioc_status & 3153 MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL) 3154 ioc_loginfo = le32_to_cpu(status_desc->ioc_log_info); 3155 ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK; 3156 break; 3157 case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_ADDRESS_REPLY: 3158 addr_desc = (struct 
mpi3_address_reply_descriptor *)reply_desc; 3159 *reply_dma = le64_to_cpu(addr_desc->reply_frame_address); 3160 scsi_reply = mpi3mr_get_reply_virt_addr(mrioc, 3161 *reply_dma); 3162 if (!scsi_reply) { 3163 panic("%s: scsi_reply is NULL, this shouldn't happen\n", 3164 mrioc->name); 3165 goto out; 3166 } 3167 host_tag = le16_to_cpu(scsi_reply->host_tag); 3168 ioc_status = le16_to_cpu(scsi_reply->ioc_status); 3169 scsi_status = scsi_reply->scsi_status; 3170 scsi_state = scsi_reply->scsi_state; 3171 dev_handle = le16_to_cpu(scsi_reply->dev_handle); 3172 sense_state = (scsi_state & MPI3_SCSI_STATE_SENSE_MASK); 3173 xfer_count = le32_to_cpu(scsi_reply->transfer_count); 3174 sense_count = le32_to_cpu(scsi_reply->sense_count); 3175 resp_data = le32_to_cpu(scsi_reply->response_data); 3176 sense_buf = mpi3mr_get_sensebuf_virt_addr(mrioc, 3177 le64_to_cpu(scsi_reply->sense_data_buffer_address)); 3178 if (ioc_status & 3179 MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL) 3180 ioc_loginfo = le32_to_cpu(scsi_reply->ioc_log_info); 3181 ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK; 3182 if (sense_state == MPI3_SCSI_STATE_SENSE_BUFF_Q_EMPTY) 3183 panic("%s: Ran out of sense buffers\n", mrioc->name); 3184 break; 3185 case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SUCCESS: 3186 success_desc = (struct mpi3_success_reply_descriptor *)reply_desc; 3187 host_tag = le16_to_cpu(success_desc->host_tag); 3188 break; 3189 default: 3190 break; 3191 } 3192 scmd = mpi3mr_scmd_from_host_tag(mrioc, host_tag, qidx); 3193 if (!scmd) { 3194 panic("%s: Cannot Identify scmd for host_tag 0x%x\n", 3195 mrioc->name, host_tag); 3196 goto out; 3197 } 3198 priv = scsi_cmd_priv(scmd); 3199 3200 data_len_blks = scsi_bufflen(scmd) >> 9; 3201 sdev_priv_data = scmd->device->hostdata; 3202 if (sdev_priv_data) { 3203 stgt_priv_data = sdev_priv_data->tgt_priv_data; 3204 if (stgt_priv_data) { 3205 tg = stgt_priv_data->throttle_group; 3206 throttle_enabled_dev = 3207 stgt_priv_data->io_throttle_enabled; 3208 dev_handle = stgt_priv_data->dev_handle; 3209 } 3210 } 3211 if (unlikely((data_len_blks >= mrioc->io_throttle_data_length) && 3212 throttle_enabled_dev)) { 3213 ioc_pend_data_len = atomic_sub_return(data_len_blks, 3214 &mrioc->pend_large_data_sz); 3215 if (tg) { 3216 tg_pend_data_len = atomic_sub_return(data_len_blks, 3217 &tg->pend_large_data_sz); 3218 if (tg->io_divert && ((ioc_pend_data_len <= 3219 mrioc->io_throttle_low) && 3220 (tg_pend_data_len <= tg->low))) { 3221 tg->io_divert = 0; 3222 mpi3mr_set_io_divert_for_all_vd_in_tg( 3223 mrioc, tg, 0); 3224 } 3225 } else { 3226 if (ioc_pend_data_len <= mrioc->io_throttle_low) 3227 stgt_priv_data->io_divert = 0; 3228 } 3229 } else if (unlikely((stgt_priv_data && stgt_priv_data->io_divert))) { 3230 ioc_pend_data_len = atomic_read(&mrioc->pend_large_data_sz); 3231 if (!tg) { 3232 if (ioc_pend_data_len <= mrioc->io_throttle_low) 3233 stgt_priv_data->io_divert = 0; 3234 3235 } else if (ioc_pend_data_len <= mrioc->io_throttle_low) { 3236 tg_pend_data_len = atomic_read(&tg->pend_large_data_sz); 3237 if (tg->io_divert && (tg_pend_data_len <= tg->low)) { 3238 tg->io_divert = 0; 3239 mpi3mr_set_io_divert_for_all_vd_in_tg( 3240 mrioc, tg, 0); 3241 } 3242 } 3243 } 3244 3245 if (success_desc) { 3246 scmd->result = DID_OK << 16; 3247 goto out_success; 3248 } 3249 3250 scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_count); 3251 if (ioc_status == MPI3_IOCSTATUS_SCSI_DATA_UNDERRUN && 3252 xfer_count == 0 && (scsi_status == MPI3_SCSI_STATUS_BUSY || 3253 scsi_status == 
MPI3_SCSI_STATUS_RESERVATION_CONFLICT || 3254 scsi_status == MPI3_SCSI_STATUS_TASK_SET_FULL)) 3255 ioc_status = MPI3_IOCSTATUS_SUCCESS; 3256 3257 if ((sense_state == MPI3_SCSI_STATE_SENSE_VALID) && sense_count && 3258 sense_buf) { 3259 u32 sz = min_t(u32, SCSI_SENSE_BUFFERSIZE, sense_count); 3260 3261 memcpy(scmd->sense_buffer, sense_buf, sz); 3262 } 3263 3264 switch (ioc_status) { 3265 case MPI3_IOCSTATUS_BUSY: 3266 case MPI3_IOCSTATUS_INSUFFICIENT_RESOURCES: 3267 scmd->result = SAM_STAT_BUSY; 3268 break; 3269 case MPI3_IOCSTATUS_SCSI_DEVICE_NOT_THERE: 3270 scmd->result = DID_NO_CONNECT << 16; 3271 break; 3272 case MPI3_IOCSTATUS_SCSI_IOC_TERMINATED: 3273 scmd->result = DID_SOFT_ERROR << 16; 3274 break; 3275 case MPI3_IOCSTATUS_SCSI_TASK_TERMINATED: 3276 case MPI3_IOCSTATUS_SCSI_EXT_TERMINATED: 3277 scmd->result = DID_RESET << 16; 3278 break; 3279 case MPI3_IOCSTATUS_SCSI_RESIDUAL_MISMATCH: 3280 if ((xfer_count == 0) || (scmd->underflow > xfer_count)) 3281 scmd->result = DID_SOFT_ERROR << 16; 3282 else 3283 scmd->result = (DID_OK << 16) | scsi_status; 3284 break; 3285 case MPI3_IOCSTATUS_SCSI_DATA_UNDERRUN: 3286 scmd->result = (DID_OK << 16) | scsi_status; 3287 if (sense_state == MPI3_SCSI_STATE_SENSE_VALID) 3288 break; 3289 if (xfer_count < scmd->underflow) { 3290 if (scsi_status == SAM_STAT_BUSY) 3291 scmd->result = SAM_STAT_BUSY; 3292 else 3293 scmd->result = DID_SOFT_ERROR << 16; 3294 } else if ((scsi_state & (MPI3_SCSI_STATE_NO_SCSI_STATUS)) || 3295 (sense_state != MPI3_SCSI_STATE_SENSE_NOT_AVAILABLE)) 3296 scmd->result = DID_SOFT_ERROR << 16; 3297 else if (scsi_state & MPI3_SCSI_STATE_TERMINATED) 3298 scmd->result = DID_RESET << 16; 3299 break; 3300 case MPI3_IOCSTATUS_SCSI_DATA_OVERRUN: 3301 scsi_set_resid(scmd, 0); 3302 fallthrough; 3303 case MPI3_IOCSTATUS_SCSI_RECOVERED_ERROR: 3304 case MPI3_IOCSTATUS_SUCCESS: 3305 scmd->result = (DID_OK << 16) | scsi_status; 3306 if ((scsi_state & (MPI3_SCSI_STATE_NO_SCSI_STATUS)) || 3307 (sense_state == MPI3_SCSI_STATE_SENSE_FAILED) || 3308 (sense_state == MPI3_SCSI_STATE_SENSE_BUFF_Q_EMPTY)) 3309 scmd->result = DID_SOFT_ERROR << 16; 3310 else if (scsi_state & MPI3_SCSI_STATE_TERMINATED) 3311 scmd->result = DID_RESET << 16; 3312 break; 3313 case MPI3_IOCSTATUS_EEDP_GUARD_ERROR: 3314 case MPI3_IOCSTATUS_EEDP_REF_TAG_ERROR: 3315 case MPI3_IOCSTATUS_EEDP_APP_TAG_ERROR: 3316 mpi3mr_map_eedp_error(scmd, ioc_status); 3317 break; 3318 case MPI3_IOCSTATUS_SCSI_PROTOCOL_ERROR: 3319 case MPI3_IOCSTATUS_INVALID_FUNCTION: 3320 case MPI3_IOCSTATUS_INVALID_SGL: 3321 case MPI3_IOCSTATUS_INTERNAL_ERROR: 3322 case MPI3_IOCSTATUS_INVALID_FIELD: 3323 case MPI3_IOCSTATUS_INVALID_STATE: 3324 case MPI3_IOCSTATUS_SCSI_IO_DATA_ERROR: 3325 case MPI3_IOCSTATUS_SCSI_TASK_MGMT_FAILED: 3326 case MPI3_IOCSTATUS_INSUFFICIENT_POWER: 3327 default: 3328 scmd->result = DID_SOFT_ERROR << 16; 3329 break; 3330 } 3331 3332 if (scmd->result != (DID_OK << 16) && (scmd->cmnd[0] != ATA_12) && 3333 (scmd->cmnd[0] != ATA_16) && 3334 mrioc->logging_level & MPI3_DEBUG_SCSI_ERROR) { 3335 ioc_info(mrioc, "%s :scmd->result 0x%x\n", __func__, 3336 scmd->result); 3337 scsi_print_command(scmd); 3338 ioc_info(mrioc, 3339 "%s :Command issued to handle 0x%02x returned with error 0x%04x loginfo 0x%08x, qid %d\n", 3340 __func__, dev_handle, ioc_status, ioc_loginfo, 3341 priv->req_q_idx + 1); 3342 ioc_info(mrioc, 3343 " host_tag %d scsi_state 0x%02x scsi_status 0x%02x, xfer_cnt %d resp_data 0x%x\n", 3344 host_tag, scsi_state, scsi_status, xfer_count, resp_data); 3345 if (sense_buf) { 3346 
scsi_normalize_sense(sense_buf, sense_count, &sshdr);
3347 ioc_info(mrioc,
3348 "%s :sense_count 0x%x, sense_key 0x%x ASC 0x%x, ASCQ 0x%x\n",
3349 __func__, sense_count, sshdr.sense_key,
3350 sshdr.asc, sshdr.ascq);
3351 }
3352 }
3353 out_success:
3354 if (priv->meta_sg_valid) {
3355 dma_unmap_sg(&mrioc->pdev->dev, scsi_prot_sglist(scmd),
3356 scsi_prot_sg_count(scmd), scmd->sc_data_direction);
3357 }
3358 mpi3mr_clear_scmd_priv(mrioc, scmd);
3359 scsi_dma_unmap(scmd);
3360 scsi_done(scmd);
3361 out:
3362 if (sense_buf)
3363 mpi3mr_repost_sense_buf(mrioc,
3364 le64_to_cpu(scsi_reply->sense_data_buffer_address));
3365 }
3366
3367 /**
3368 * mpi3mr_get_chain_idx - get free chain buffer index
3369 * @mrioc: Adapter instance reference
3370 *
3371 * Try to get a free chain buffer index from the free pool.
3372 *
3373 * Return: -1 on failure or the free chain buffer index
3374 */
3375 static int mpi3mr_get_chain_idx(struct mpi3mr_ioc *mrioc)
3376 {
3377 u8 retry_count = 5;
3378 int cmd_idx = -1;
3379 unsigned long flags;
3380
3381 spin_lock_irqsave(&mrioc->chain_buf_lock, flags);
3382 do {
3383 cmd_idx = find_first_zero_bit(mrioc->chain_bitmap,
3384 mrioc->chain_buf_count);
3385 if (cmd_idx < mrioc->chain_buf_count) {
3386 set_bit(cmd_idx, mrioc->chain_bitmap);
3387 break;
3388 }
3389 cmd_idx = -1;
3390 } while (retry_count--);
3391 spin_unlock_irqrestore(&mrioc->chain_buf_lock, flags);
3392 return cmd_idx;
3393 }
3394
3395 /**
3396 * mpi3mr_prepare_sg_scmd - build scatter gather list
3397 * @mrioc: Adapter instance reference
3398 * @scmd: SCSI command reference
3399 * @scsiio_req: MPI3 SCSI IO request
3400 *
3401 * This function maps SCSI command's data and protection SGEs to
3402 * MPI request SGEs. If required, an additional 4K chain buffer is
3403 * used to send the SGEs.
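*
* When the SGEs do not all fit in the request frame, the main segment is
* terminated with a LAST_CHAIN element pointing at a 4K chain buffer,
* roughly:
*
*	request frame: [simple SGE] ... [simple SGE] [LAST_CHAIN SGE]
*	chain buffer:  [simple SGE] ... [simple SGE + END_OF_LIST]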
3404 * 3405 * Return: 0 on success, -ENOMEM on dma_map_sg failure 3406 */ 3407 static int mpi3mr_prepare_sg_scmd(struct mpi3mr_ioc *mrioc, 3408 struct scsi_cmnd *scmd, struct mpi3_scsi_io_request *scsiio_req) 3409 { 3410 dma_addr_t chain_dma; 3411 struct scatterlist *sg_scmd; 3412 void *sg_local, *chain; 3413 u32 chain_length; 3414 int sges_left, chain_idx; 3415 u32 sges_in_segment; 3416 u8 simple_sgl_flags; 3417 u8 simple_sgl_flags_last; 3418 u8 last_chain_sgl_flags; 3419 struct chain_element *chain_req; 3420 struct scmd_priv *priv = NULL; 3421 u32 meta_sg = le32_to_cpu(scsiio_req->flags) & 3422 MPI3_SCSIIO_FLAGS_DMAOPERATION_HOST_PI; 3423 3424 priv = scsi_cmd_priv(scmd); 3425 3426 simple_sgl_flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_SIMPLE | 3427 MPI3_SGE_FLAGS_DLAS_SYSTEM; 3428 simple_sgl_flags_last = simple_sgl_flags | 3429 MPI3_SGE_FLAGS_END_OF_LIST; 3430 last_chain_sgl_flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_LAST_CHAIN | 3431 MPI3_SGE_FLAGS_DLAS_SYSTEM; 3432 3433 if (meta_sg) 3434 sg_local = &scsiio_req->sgl[MPI3_SCSIIO_METASGL_INDEX]; 3435 else 3436 sg_local = &scsiio_req->sgl; 3437 3438 if (!scsiio_req->data_length && !meta_sg) { 3439 mpi3mr_build_zero_len_sge(sg_local); 3440 return 0; 3441 } 3442 3443 if (meta_sg) { 3444 sg_scmd = scsi_prot_sglist(scmd); 3445 sges_left = dma_map_sg(&mrioc->pdev->dev, 3446 scsi_prot_sglist(scmd), 3447 scsi_prot_sg_count(scmd), 3448 scmd->sc_data_direction); 3449 priv->meta_sg_valid = 1; /* To unmap meta sg DMA */ 3450 } else { 3451 sg_scmd = scsi_sglist(scmd); 3452 sges_left = scsi_dma_map(scmd); 3453 } 3454 3455 if (sges_left < 0) { 3456 sdev_printk(KERN_ERR, scmd->device, 3457 "scsi_dma_map failed: request for %d bytes!\n", 3458 scsi_bufflen(scmd)); 3459 return -ENOMEM; 3460 } 3461 if (sges_left > mrioc->max_sgl_entries) { 3462 sdev_printk(KERN_ERR, scmd->device, 3463 "scsi_dma_map returned unsupported sge count %d!\n", 3464 sges_left); 3465 return -ENOMEM; 3466 } 3467 3468 sges_in_segment = (mrioc->facts.op_req_sz - 3469 offsetof(struct mpi3_scsi_io_request, sgl)) / sizeof(struct mpi3_sge_common); 3470 3471 if (scsiio_req->sgl[0].eedp.flags == 3472 MPI3_SGE_FLAGS_ELEMENT_TYPE_EXTENDED && !meta_sg) { 3473 sg_local += sizeof(struct mpi3_sge_common); 3474 sges_in_segment--; 3475 /* Reserve 1st segment (scsiio_req->sgl[0]) for eedp */ 3476 } 3477 3478 if (scsiio_req->msg_flags == 3479 MPI3_SCSIIO_MSGFLAGS_METASGL_VALID && !meta_sg) { 3480 sges_in_segment--; 3481 /* Reserve last segment (scsiio_req->sgl[3]) for meta sg */ 3482 } 3483 3484 if (meta_sg) 3485 sges_in_segment = 1; 3486 3487 if (sges_left <= sges_in_segment) 3488 goto fill_in_last_segment; 3489 3490 /* fill in main message segment when there is a chain following */ 3491 while (sges_in_segment > 1) { 3492 mpi3mr_add_sg_single(sg_local, simple_sgl_flags, 3493 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd)); 3494 sg_scmd = sg_next(sg_scmd); 3495 sg_local += sizeof(struct mpi3_sge_common); 3496 sges_left--; 3497 sges_in_segment--; 3498 } 3499 3500 chain_idx = mpi3mr_get_chain_idx(mrioc); 3501 if (chain_idx < 0) 3502 return -1; 3503 chain_req = &mrioc->chain_sgl_list[chain_idx]; 3504 if (meta_sg) 3505 priv->meta_chain_idx = chain_idx; 3506 else 3507 priv->chain_idx = chain_idx; 3508 3509 chain = chain_req->addr; 3510 chain_dma = chain_req->dma_addr; 3511 sges_in_segment = sges_left; 3512 chain_length = sges_in_segment * sizeof(struct mpi3_sge_common); 3513 3514 mpi3mr_add_sg_single(sg_local, last_chain_sgl_flags, 3515 chain_length, chain_dma); 3516 3517 sg_local = chain; 3518 3519 fill_in_last_segment: 
3520 while (sges_left > 0) {
3521 if (sges_left == 1)
3522 mpi3mr_add_sg_single(sg_local,
3523 simple_sgl_flags_last, sg_dma_len(sg_scmd),
3524 sg_dma_address(sg_scmd));
3525 else
3526 mpi3mr_add_sg_single(sg_local, simple_sgl_flags,
3527 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
3528 sg_scmd = sg_next(sg_scmd);
3529 sg_local += sizeof(struct mpi3_sge_common);
3530 sges_left--;
3531 }
3532
3533 return 0;
3534 }
3535
3536 /**
3537 * mpi3mr_build_sg_scmd - build scatter gather list for SCSI IO
3538 * @mrioc: Adapter instance reference
3539 * @scmd: SCSI command reference
3540 * @scsiio_req: MPI3 SCSI IO request
3541 *
3542 * This function calls mpi3mr_prepare_sg_scmd for constructing
3543 * both data SGEs and protection information SGEs in the MPI
3544 * format from the SCSI Command as appropriate.
3545 *
3546 * Return: return value of mpi3mr_prepare_sg_scmd.
3547 */
3548 static int mpi3mr_build_sg_scmd(struct mpi3mr_ioc *mrioc,
3549 struct scsi_cmnd *scmd, struct mpi3_scsi_io_request *scsiio_req)
3550 {
3551 int ret;
3552
3553 ret = mpi3mr_prepare_sg_scmd(mrioc, scmd, scsiio_req);
3554 if (ret)
3555 return ret;
3556
3557 if (scsiio_req->msg_flags == MPI3_SCSIIO_MSGFLAGS_METASGL_VALID) {
3558 /* There is a valid meta sg */
3559 scsiio_req->flags |=
3560 cpu_to_le32(MPI3_SCSIIO_FLAGS_DMAOPERATION_HOST_PI);
3561 ret = mpi3mr_prepare_sg_scmd(mrioc, scmd, scsiio_req);
3562 }
3563
3564 return ret;
3565 }
3566
3567 /**
3568 * mpi3mr_tm_response_name - get TM response as a string
3569 * @resp_code: TM response code
3570 *
3571 * Converts a known task management response code to a readable
3572 * string.
3573 *
3574 * Return: response code string.
3575 */
3576 static const char *mpi3mr_tm_response_name(u8 resp_code)
3577 {
3578 char *desc;
3579
3580 switch (resp_code) {
3581 case MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE:
3582 desc = "task management request completed";
3583 break;
3584 case MPI3_SCSITASKMGMT_RSPCODE_INVALID_FRAME:
3585 desc = "invalid frame";
3586 break;
3587 case MPI3_SCSITASKMGMT_RSPCODE_TM_FUNCTION_NOT_SUPPORTED:
3588 desc = "task management request not supported";
3589 break;
3590 case MPI3_SCSITASKMGMT_RSPCODE_TM_FAILED:
3591 desc = "task management request failed";
3592 break;
3593 case MPI3_SCSITASKMGMT_RSPCODE_TM_SUCCEEDED:
3594 desc = "task management request succeeded";
3595 break;
3596 case MPI3_SCSITASKMGMT_RSPCODE_TM_INVALID_LUN:
3597 desc = "invalid LUN";
3598 break;
3599 case MPI3_SCSITASKMGMT_RSPCODE_TM_OVERLAPPED_TAG:
3600 desc = "overlapped tag attempted";
3601 break;
3602 case MPI3_SCSITASKMGMT_RSPCODE_IO_QUEUED_ON_IOC:
3603 desc = "task queued, however not sent to target";
3604 break;
3605 case MPI3_SCSITASKMGMT_RSPCODE_TM_NVME_DENIED:
3606 desc = "task management request denied by NVMe device";
3607 break;
3608 default:
3609 desc = "unknown";
3610 break;
3611 }
3612
3613 return desc;
3614 }
3615
3616 inline void mpi3mr_poll_pend_io_completions(struct mpi3mr_ioc *mrioc)
3617 {
3618 int i;
3619 int num_of_reply_queues =
3620 mrioc->num_op_reply_q + mrioc->op_reply_q_offset;
3621
3622 for (i = mrioc->op_reply_q_offset; i < num_of_reply_queues; i++)
3623 mpi3mr_process_op_reply_q(mrioc,
3624 mrioc->intr_info[i].op_reply_q);
3625 }
3626
3627 /**
3628 * mpi3mr_issue_tm - Issue Task Management request
3629 * @mrioc: Adapter instance reference
3630 * @tm_type: Task Management type
3631 * @handle: Device handle
3632 * @lun: lun ID
3633 * @htag: Host tag of the TM request
3634 * @timeout: TM timeout value
3635 * @drv_cmd: Internal command tracker
3636 * @resp_code:
Response code placeholder
3637  * @scmd: SCSI command
3638  *
3639  * Issues a Task Management Request to the controller for the
3640  * specified target, lun and command, waits for its completion,
3641  * and checks the TM response. Recovers from a TM timeout by
3642  * issuing a controller reset.
3643  *
3644  * Return: 0 on success, non-zero on errors
3645  */
3646 int mpi3mr_issue_tm(struct mpi3mr_ioc *mrioc, u8 tm_type,
3647 	u16 handle, uint lun, u16 htag, ulong timeout,
3648 	struct mpi3mr_drv_cmd *drv_cmd,
3649 	u8 *resp_code, struct scsi_cmnd *scmd)
3650 {
3651 	struct mpi3_scsi_task_mgmt_request tm_req;
3652 	struct mpi3_scsi_task_mgmt_reply *tm_reply = NULL;
3653 	int retval = 0;
3654 	struct mpi3mr_tgt_dev *tgtdev = NULL;
3655 	struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL;
3656 	struct scmd_priv *cmd_priv = NULL;
3657 	struct scsi_device *sdev = NULL;
3658 	struct mpi3mr_sdev_priv_data *sdev_priv_data = NULL;
3659 
3660 	ioc_info(mrioc, "%s :Issue TM: TM type (0x%x) for devhandle 0x%04x\n",
3661 	    __func__, tm_type, handle);
3662 	if (mrioc->unrecoverable) {
3663 		retval = -1;
3664 		ioc_err(mrioc, "%s :Issue TM: Unrecoverable controller\n",
3665 		    __func__);
3666 		goto out;
3667 	}
3668 
3669 	memset(&tm_req, 0, sizeof(tm_req));
3670 	mutex_lock(&drv_cmd->mutex);
3671 	if (drv_cmd->state & MPI3MR_CMD_PENDING) {
3672 		retval = -1;
3673 		ioc_err(mrioc, "%s :Issue TM: Command is in use\n", __func__);
3674 		mutex_unlock(&drv_cmd->mutex);
3675 		goto out;
3676 	}
3677 	if (mrioc->reset_in_progress) {
3678 		retval = -1;
3679 		ioc_err(mrioc, "%s :Issue TM: Reset in progress\n", __func__);
3680 		mutex_unlock(&drv_cmd->mutex);
3681 		goto out;
3682 	}
3683 
3684 	drv_cmd->state = MPI3MR_CMD_PENDING;
3685 	drv_cmd->is_waiting = 1;
3686 	drv_cmd->callback = NULL;
3687 	tm_req.dev_handle = cpu_to_le16(handle);
3688 	tm_req.task_type = tm_type;
3689 	tm_req.host_tag = cpu_to_le16(htag);
3690 
3691 	int_to_scsilun(lun, (struct scsi_lun *)tm_req.lun);
3692 	tm_req.function = MPI3_FUNCTION_SCSI_TASK_MGMT;
3693 
3694 	tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);
3695 
3696 	if (scmd) {
3697 		sdev = scmd->device;
3698 		sdev_priv_data = sdev->hostdata;
3699 		scsi_tgt_priv_data = ((sdev_priv_data) ?
3700 sdev_priv_data->tgt_priv_data : NULL); 3701 } else { 3702 if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata) 3703 scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *) 3704 tgtdev->starget->hostdata; 3705 } 3706 3707 if (scsi_tgt_priv_data) 3708 atomic_inc(&scsi_tgt_priv_data->block_io); 3709 3710 if (tgtdev && (tgtdev->dev_type == MPI3_DEVICE_DEVFORM_PCIE)) { 3711 if (cmd_priv && tgtdev->dev_spec.pcie_inf.abort_to) 3712 timeout = tgtdev->dev_spec.pcie_inf.abort_to; 3713 else if (!cmd_priv && tgtdev->dev_spec.pcie_inf.reset_to) 3714 timeout = tgtdev->dev_spec.pcie_inf.reset_to; 3715 } 3716 3717 init_completion(&drv_cmd->done); 3718 retval = mpi3mr_admin_request_post(mrioc, &tm_req, sizeof(tm_req), 1); 3719 if (retval) { 3720 ioc_err(mrioc, "%s :Issue TM: Admin Post failed\n", __func__); 3721 goto out_unlock; 3722 } 3723 wait_for_completion_timeout(&drv_cmd->done, (timeout * HZ)); 3724 3725 if (!(drv_cmd->state & MPI3MR_CMD_COMPLETE)) { 3726 drv_cmd->is_waiting = 0; 3727 retval = -1; 3728 if (!(drv_cmd->state & MPI3MR_CMD_RESET)) { 3729 dprint_tm(mrioc, 3730 "task management request timed out after %ld seconds\n", 3731 timeout); 3732 if (mrioc->logging_level & MPI3_DEBUG_TM) 3733 dprint_dump_req(&tm_req, sizeof(tm_req)/4); 3734 mpi3mr_soft_reset_handler(mrioc, 3735 MPI3MR_RESET_FROM_TM_TIMEOUT, 1); 3736 } 3737 goto out_unlock; 3738 } 3739 3740 if (!(drv_cmd->state & MPI3MR_CMD_REPLY_VALID)) { 3741 dprint_tm(mrioc, "invalid task management reply message\n"); 3742 retval = -1; 3743 goto out_unlock; 3744 } 3745 3746 tm_reply = (struct mpi3_scsi_task_mgmt_reply *)drv_cmd->reply; 3747 3748 switch (drv_cmd->ioc_status) { 3749 case MPI3_IOCSTATUS_SUCCESS: 3750 *resp_code = le32_to_cpu(tm_reply->response_data) & 3751 MPI3MR_RI_MASK_RESPCODE; 3752 break; 3753 case MPI3_IOCSTATUS_SCSI_IOC_TERMINATED: 3754 *resp_code = MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE; 3755 break; 3756 default: 3757 dprint_tm(mrioc, 3758 "task management request to handle(0x%04x) is failed with ioc_status(0x%04x) log_info(0x%08x)\n", 3759 handle, drv_cmd->ioc_status, drv_cmd->ioc_loginfo); 3760 retval = -1; 3761 goto out_unlock; 3762 } 3763 3764 switch (*resp_code) { 3765 case MPI3_SCSITASKMGMT_RSPCODE_TM_SUCCEEDED: 3766 case MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE: 3767 break; 3768 case MPI3_SCSITASKMGMT_RSPCODE_IO_QUEUED_ON_IOC: 3769 if (tm_type != MPI3_SCSITASKMGMT_TASKTYPE_QUERY_TASK) 3770 retval = -1; 3771 break; 3772 default: 3773 retval = -1; 3774 break; 3775 } 3776 3777 dprint_tm(mrioc, 3778 "task management request type(%d) completed for handle(0x%04x) with ioc_status(0x%04x), log_info(0x%08x), termination_count(%d), response:%s(0x%x)\n", 3779 tm_type, handle, drv_cmd->ioc_status, drv_cmd->ioc_loginfo, 3780 le32_to_cpu(tm_reply->termination_count), 3781 mpi3mr_tm_response_name(*resp_code), *resp_code); 3782 3783 if (!retval) { 3784 mpi3mr_ioc_disable_intr(mrioc); 3785 mpi3mr_poll_pend_io_completions(mrioc); 3786 mpi3mr_ioc_enable_intr(mrioc); 3787 mpi3mr_poll_pend_io_completions(mrioc); 3788 mpi3mr_process_admin_reply_q(mrioc); 3789 } 3790 switch (tm_type) { 3791 case MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET: 3792 if (!scsi_tgt_priv_data) 3793 break; 3794 scsi_tgt_priv_data->pend_count = 0; 3795 blk_mq_tagset_busy_iter(&mrioc->shost->tag_set, 3796 mpi3mr_count_tgt_pending, 3797 (void *)scsi_tgt_priv_data->starget); 3798 break; 3799 case MPI3_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET: 3800 if (!sdev_priv_data) 3801 break; 3802 sdev_priv_data->pend_count = 0; 3803 blk_mq_tagset_busy_iter(&mrioc->shost->tag_set, 
3804 		    mpi3mr_count_dev_pending, (void *)sdev);
3805 		break;
3806 	default:
3807 		break;
3808 	}
3809 
3810 out_unlock:
3811 	drv_cmd->state = MPI3MR_CMD_NOTUSED;
3812 	mutex_unlock(&drv_cmd->mutex);
3813 	if (scsi_tgt_priv_data)
3814 		atomic_dec_if_positive(&scsi_tgt_priv_data->block_io);
3815 	if (tgtdev)
3816 		mpi3mr_tgtdev_put(tgtdev);
3817 out:
3818 	return retval;
3819 }
3820 
3821 /**
3822  * mpi3mr_bios_param - BIOS param callback
3823  * @sdev: SCSI device reference
3824  * @bdev: Block device reference
3825  * @capacity: Capacity in logical sectors
3826  * @params: Parameter array
3827  *
3828  * Computes the disk geometry parameters: heads, sectors and cylinders.
3829  *
3830  * Return: 0 always
3831  */
3832 static int mpi3mr_bios_param(struct scsi_device *sdev,
3833 	struct block_device *bdev, sector_t capacity, int params[])
3834 {
3835 	int heads;
3836 	int sectors;
3837 	sector_t cylinders;
3838 	ulong dummy;
3839 
3840 	heads = 64;
3841 	sectors = 32;
3842 
3843 	dummy = heads * sectors;
3844 	cylinders = capacity;
3845 	sector_div(cylinders, dummy);
3846 
3847 	if ((ulong)capacity >= 0x200000) {
3848 		heads = 255;
3849 		sectors = 63;
3850 		dummy = heads * sectors;
3851 		cylinders = capacity;
3852 		sector_div(cylinders, dummy);
3853 	}
3854 
3855 	params[0] = heads;
3856 	params[1] = sectors;
3857 	params[2] = cylinders;
3858 	return 0;
3859 }
3860 
3861 /**
3862  * mpi3mr_map_queues - Map queues callback handler
3863  * @shost: SCSI host reference
3864  *
3865  * Maps default and poll queues.
3866  *
3867  * Return: Nothing.
3868  */
3869 static void mpi3mr_map_queues(struct Scsi_Host *shost)
3870 {
3871 	struct mpi3mr_ioc *mrioc = shost_priv(shost);
3872 	int i, qoff, offset;
3873 	struct blk_mq_queue_map *map = NULL;
3874 
3875 	offset = mrioc->op_reply_q_offset;
3876 
3877 	for (i = 0, qoff = 0; i < HCTX_MAX_TYPES; i++) {
3878 		map = &shost->tag_set.map[i];
3879 
3880 		map->nr_queues = 0;
3881 
3882 		if (i == HCTX_TYPE_DEFAULT)
3883 			map->nr_queues = mrioc->default_qcount;
3884 		else if (i == HCTX_TYPE_POLL)
3885 			map->nr_queues = mrioc->active_poll_qcount;
3886 
3887 		if (!map->nr_queues) {
3888 			BUG_ON(i == HCTX_TYPE_DEFAULT);
3889 			continue;
3890 		}
3891 
3892 		/*
3893 		 * The poll queue(s) don't have an IRQ (and hence IRQ
3894 		 * affinity), so use the regular blk-mq cpu mapping
3895 		 */
3896 		map->queue_offset = qoff;
3897 		if (i != HCTX_TYPE_POLL)
3898 			blk_mq_pci_map_queues(map, mrioc->pdev, offset);
3899 		else
3900 			blk_mq_map_queues(map);
3901 
3902 		qoff += map->nr_queues;
3903 		offset += map->nr_queues;
3904 	}
3905 }
3906 
3907 /**
3908  * mpi3mr_get_fw_pending_ios - Calculate pending I/O count
3909  * @mrioc: Adapter instance reference
3910  *
3911  * Calculate the pending I/Os for the controller and return.
3912  *
3913  * Return: Number of pending I/Os
3914  */
3915 static inline int mpi3mr_get_fw_pending_ios(struct mpi3mr_ioc *mrioc)
3916 {
3917 	u16 i;
3918 	uint pend_ios = 0;
3919 
3920 	for (i = 0; i < mrioc->num_op_reply_q; i++)
3921 		pend_ios += atomic_read(&mrioc->op_reply_qinfo[i].pend_ios);
3922 	return pend_ios;
3923 }
3924 
3925 /**
3926  * mpi3mr_print_pending_host_io - print pending I/Os
3927  * @mrioc: Adapter instance reference
3928  *
3929  * Prints the number of pending I/Os and the details of each I/O
3930  * prior to reset, for debugging purposes.
3931  *
3932  * Return: Nothing
3933  */
3934 static void mpi3mr_print_pending_host_io(struct mpi3mr_ioc *mrioc)
3935 {
3936 	struct Scsi_Host *shost = mrioc->shost;
3937 
3938 	ioc_info(mrioc, "%s :Pending commands prior to reset: %d\n",
3939 	    __func__, mpi3mr_get_fw_pending_ios(mrioc));
3940 	blk_mq_tagset_busy_iter(&shost->tag_set,
3941 	    mpi3mr_print_scmd, (void *)mrioc);
3942 }
3943 
3944 /**
3945  * mpi3mr_wait_for_host_io - block for I/Os to complete
3946  * @mrioc: Adapter instance reference
3947  * @timeout: timeout in seconds
3948  *
3949  * Waits for pending I/Os for the given adapter to complete or to hit the timeout.
3950  *
3951  * Return: Nothing
3952  */
3953 void mpi3mr_wait_for_host_io(struct mpi3mr_ioc *mrioc, u32 timeout)
3954 {
3955 	enum mpi3mr_iocstate iocstate;
3956 	int i = 0;
3957 
3958 	iocstate = mpi3mr_get_iocstate(mrioc);
3959 	if (iocstate != MRIOC_STATE_READY)
3960 		return;
3961 
3962 	if (!mpi3mr_get_fw_pending_ios(mrioc))
3963 		return;
3964 	ioc_info(mrioc,
3965 	    "%s :Waiting for %d seconds prior to reset for %d I/O\n",
3966 	    __func__, timeout, mpi3mr_get_fw_pending_ios(mrioc));
3967 
3968 	for (i = 0; i < timeout; i++) {
3969 		if (!mpi3mr_get_fw_pending_ios(mrioc))
3970 			break;
3971 		iocstate = mpi3mr_get_iocstate(mrioc);
3972 		if (iocstate != MRIOC_STATE_READY)
3973 			break;
3974 		msleep(1000);
3975 	}
3976 
3977 	ioc_info(mrioc, "%s :Pending I/Os after wait is: %d\n", __func__,
3978 	    mpi3mr_get_fw_pending_ios(mrioc));
3979 }
3980 
3981 /**
3982  * mpi3mr_setup_divert_ws - Setup Divert IO flag for write same
3983  * @mrioc: Adapter instance reference
3984  * @scmd: SCSI command reference
3985  * @scsiio_req: MPI3 SCSI IO request
3986  * @scsiio_flags: Pointer to MPI3 SCSI IO Flags
3987  * @wslen: write same max length
3988  *
3989  * Gets the unmap bit, the ndob bit and the number of blocks from
3990  * a write same SCSI IO and, based on these values, sets the divert
3991  * IO flag and the reason for diverting the IO to firmware.
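 * For example, a WRITE SAME (16) with both the UNMAP and NDOB bits
 * set and a number of blocks greater than the write same max length
 * (@wslen) is diverted to the firmware.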
3992  *
3993  * Return: Nothing
3994  */
3995 static inline void mpi3mr_setup_divert_ws(struct mpi3mr_ioc *mrioc,
3996 	struct scsi_cmnd *scmd, struct mpi3_scsi_io_request *scsiio_req,
3997 	u32 *scsiio_flags, u16 wslen)
3998 {
3999 	u8 unmap = 0, ndob = 0;
4000 	u8 opcode = scmd->cmnd[0];
4001 	u32 num_blocks = 0;
4002 	u16 sa = (scmd->cmnd[8] << 8) | (scmd->cmnd[9]);
4003 
4004 	if (opcode == WRITE_SAME_16) {
4005 		unmap = scmd->cmnd[1] & 0x08;
4006 		ndob = scmd->cmnd[1] & 0x01;
4007 		num_blocks = get_unaligned_be32(scmd->cmnd + 10);
4008 	} else if ((opcode == VARIABLE_LENGTH_CMD) && (sa == WRITE_SAME_32)) {
4009 		unmap = scmd->cmnd[10] & 0x08;
4010 		ndob = scmd->cmnd[10] & 0x01;
4011 		num_blocks = get_unaligned_be32(scmd->cmnd + 28);
4012 	} else
4013 		return;
4014 
4015 	if ((unmap) && (ndob) && (num_blocks > wslen)) {
4016 		scsiio_req->msg_flags |=
4017 		    MPI3_SCSIIO_MSGFLAGS_DIVERT_TO_FIRMWARE;
4018 		*scsiio_flags |=
4019 		    MPI3_SCSIIO_FLAGS_DIVERT_REASON_WRITE_SAME_TOO_LARGE;
4020 	}
4021 }
4022 
4023 /**
4024  * mpi3mr_eh_host_reset - Host reset error handling callback
4025  * @scmd: SCSI command reference
4026  *
4027  * Issues a controller reset.
4028  *
4029  * Return: SUCCESS on successful reset, else FAILED
4030  */
4031 static int mpi3mr_eh_host_reset(struct scsi_cmnd *scmd)
4032 {
4033 	struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host);
4034 	int retval = FAILED, ret;
4035 
4036 	ret = mpi3mr_soft_reset_handler(mrioc,
4037 	    MPI3MR_RESET_FROM_EH_HOS, 1);
4038 	if (ret)
4039 		goto out;
4040 
4041 	retval = SUCCESS;
4042 out:
4043 	sdev_printk(KERN_INFO, scmd->device,
4044 	    "Host reset is %s for scmd(%p)\n",
4045 	    ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
4046 
4047 	return retval;
4048 }
4049 
4050 /**
4051  * mpi3mr_eh_bus_reset - Bus reset error handling callback
4052  * @scmd: SCSI command reference
4053  *
4054  * Checks whether pending I/Os are present for the RAID volume;
4055  * if not, there is no need to reset the adapter.
4056  *
4057  * Return: SUCCESS on successful reset, else FAILED
4058  */
4059 static int mpi3mr_eh_bus_reset(struct scsi_cmnd *scmd)
4060 {
4061 	struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host);
4062 	struct mpi3mr_stgt_priv_data *stgt_priv_data;
4063 	struct mpi3mr_sdev_priv_data *sdev_priv_data;
4064 	u8 dev_type = MPI3_DEVICE_DEVFORM_VD;
4065 	int retval = FAILED;
4066 
4067 	sdev_priv_data = scmd->device->hostdata;
4068 	if (sdev_priv_data && sdev_priv_data->tgt_priv_data) {
4069 		stgt_priv_data = sdev_priv_data->tgt_priv_data;
4070 		dev_type = stgt_priv_data->dev_type;
4071 	}
4072 
4073 	if (dev_type == MPI3_DEVICE_DEVFORM_VD) {
4074 		mpi3mr_wait_for_host_io(mrioc,
4075 		    MPI3MR_RAID_ERRREC_RESET_TIMEOUT);
4076 		if (!mpi3mr_get_fw_pending_ios(mrioc))
4077 			retval = SUCCESS;
4078 	}
4079 	if (retval == FAILED)
4080 		mpi3mr_print_pending_host_io(mrioc);
4081 
4082 	sdev_printk(KERN_INFO, scmd->device,
4083 	    "Bus reset is %s for scmd(%p)\n",
4084 	    ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
4085 	return retval;
4086 }
4087 
4088 /**
4089  * mpi3mr_eh_target_reset - Target reset error handling callback
4090  * @scmd: SCSI command reference
4091  *
4092  * Issues a Target Reset Task Management request, verifies the
4093  * scmd is terminated successfully and returns status accordingly.
4094  *
4095  * Return: SUCCESS on successful termination of the scmd, else
4096  * FAILED
4097  */
4098 static int mpi3mr_eh_target_reset(struct scsi_cmnd *scmd)
4099 {
4100 	struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host);
4101 	struct mpi3mr_stgt_priv_data *stgt_priv_data;
4102 	struct mpi3mr_sdev_priv_data *sdev_priv_data;
4103 	u16 dev_handle;
4104 	u8 resp_code = 0;
4105 	int retval = FAILED, ret = 0;
4106 
4107 	sdev_printk(KERN_INFO, scmd->device,
4108 	    "Attempting Target Reset! scmd(%p)\n", scmd);
4109 	scsi_print_command(scmd);
4110 
4111 	sdev_priv_data = scmd->device->hostdata;
4112 	if (!sdev_priv_data || !sdev_priv_data->tgt_priv_data) {
4113 		sdev_printk(KERN_INFO, scmd->device,
4114 		    "SCSI device is not available\n");
4115 		retval = SUCCESS;
4116 		goto out;
4117 	}
4118 
4119 	stgt_priv_data = sdev_priv_data->tgt_priv_data;
4120 	dev_handle = stgt_priv_data->dev_handle;
4121 	if (stgt_priv_data->dev_removed) {
4122 		struct scmd_priv *cmd_priv = scsi_cmd_priv(scmd);
4123 		sdev_printk(KERN_INFO, scmd->device,
4124 		    "%s:target(handle = 0x%04x) is removed, target reset is not issued\n",
4125 		    mrioc->name, dev_handle);
4126 		if (!cmd_priv->in_lld_scope || cmd_priv->host_tag == MPI3MR_HOSTTAG_INVALID)
4127 			retval = SUCCESS;
4128 		else
4129 			retval = FAILED;
4130 		goto out;
4131 	}
4132 	sdev_printk(KERN_INFO, scmd->device,
4133 	    "Target Reset is issued to handle(0x%04x)\n",
4134 	    dev_handle);
4135 
4136 	ret = mpi3mr_issue_tm(mrioc,
4137 	    MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET, dev_handle,
4138 	    sdev_priv_data->lun_id, MPI3MR_HOSTTAG_BLK_TMS,
4139 	    MPI3MR_RESETTM_TIMEOUT, &mrioc->host_tm_cmds, &resp_code, scmd);
4140 
4141 	if (ret)
4142 		goto out;
4143 
4144 	if (stgt_priv_data->pend_count) {
4145 		sdev_printk(KERN_INFO, scmd->device,
4146 		    "%s: target has %d pending commands, target reset failed\n",
4147 		    mrioc->name, stgt_priv_data->pend_count);
4148 		goto out;
4149 	}
4150 
4151 	retval = SUCCESS;
4152 out:
4153 	sdev_printk(KERN_INFO, scmd->device,
4154 	    "%s: target reset is %s for scmd(%p)\n", mrioc->name,
4155 	    ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
4156 
4157 	return retval;
4158 }
4159 
4160 /**
4161  * mpi3mr_eh_dev_reset - Device reset error handling callback
4162  * @scmd: SCSI command reference
4163  *
4164  * Issues a LUN Reset Task Management request, verifies the scmd
4165  * is terminated successfully and returns status accordingly.
4166  *
4167  * Return: SUCCESS on successful termination of the scmd, else
4168  * FAILED
4169  */
4170 static int mpi3mr_eh_dev_reset(struct scsi_cmnd *scmd)
4171 {
4172 	struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host);
4173 	struct mpi3mr_stgt_priv_data *stgt_priv_data;
4174 	struct mpi3mr_sdev_priv_data *sdev_priv_data;
4175 	u16 dev_handle;
4176 	u8 resp_code = 0;
4177 	int retval = FAILED, ret = 0;
4178 
4179 	sdev_printk(KERN_INFO, scmd->device,
4180 	    "Attempting Device(lun) Reset!
scmd(%p)\n", scmd);
4181 	scsi_print_command(scmd);
4182 
4183 	sdev_priv_data = scmd->device->hostdata;
4184 	if (!sdev_priv_data || !sdev_priv_data->tgt_priv_data) {
4185 		sdev_printk(KERN_INFO, scmd->device,
4186 		    "SCSI device is not available\n");
4187 		retval = SUCCESS;
4188 		goto out;
4189 	}
4190 
4191 	stgt_priv_data = sdev_priv_data->tgt_priv_data;
4192 	dev_handle = stgt_priv_data->dev_handle;
4193 	if (stgt_priv_data->dev_removed) {
4194 		struct scmd_priv *cmd_priv = scsi_cmd_priv(scmd);
4195 		sdev_printk(KERN_INFO, scmd->device,
4196 		    "%s: device(handle = 0x%04x) is removed, device(LUN) reset is not issued\n",
4197 		    mrioc->name, dev_handle);
4198 		if (!cmd_priv->in_lld_scope || cmd_priv->host_tag == MPI3MR_HOSTTAG_INVALID)
4199 			retval = SUCCESS;
4200 		else
4201 			retval = FAILED;
4202 		goto out;
4203 	}
4204 	sdev_printk(KERN_INFO, scmd->device,
4205 	    "Device(lun) Reset is issued to handle(0x%04x)\n", dev_handle);
4206 
4207 	ret = mpi3mr_issue_tm(mrioc,
4208 	    MPI3_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, dev_handle,
4209 	    sdev_priv_data->lun_id, MPI3MR_HOSTTAG_BLK_TMS,
4210 	    MPI3MR_RESETTM_TIMEOUT, &mrioc->host_tm_cmds, &resp_code, scmd);
4211 
4212 	if (ret)
4213 		goto out;
4214 
4215 	if (sdev_priv_data->pend_count) {
4216 		sdev_printk(KERN_INFO, scmd->device,
4217 		    "%s: device has %d pending commands, device(LUN) reset failed\n",
4218 		    mrioc->name, sdev_priv_data->pend_count);
4219 		goto out;
4220 	}
4221 	retval = SUCCESS;
4222 out:
4223 	sdev_printk(KERN_INFO, scmd->device,
4224 	    "%s: device(LUN) reset is %s for scmd(%p)\n", mrioc->name,
4225 	    ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
4226 
4227 	return retval;
4228 }
4229 
4230 /**
4231  * mpi3mr_scan_start - Scan start callback handler
4232  * @shost: SCSI host reference
4233  *
4234  * Issues a port enable request asynchronously.
4235  *
4236  * Return: Nothing
4237  */
4238 static void mpi3mr_scan_start(struct Scsi_Host *shost)
4239 {
4240 	struct mpi3mr_ioc *mrioc = shost_priv(shost);
4241 
4242 	mrioc->scan_started = 1;
4243 	ioc_info(mrioc, "%s :Issuing Port Enable\n", __func__);
4244 	if (mpi3mr_issue_port_enable(mrioc, 1)) {
4245 		ioc_err(mrioc, "%s :Issuing port enable failed\n", __func__);
4246 		mrioc->scan_started = 0;
4247 		mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR;
4248 	}
4249 }
4250 
4251 /**
4252  * mpi3mr_scan_finished - Scan finished callback handler
4253  * @shost: SCSI host reference
4254  * @time: Jiffies from the scan start
4255  *
4256  * Checks whether the port enable has completed, timed out or
4257  * failed, and sets the scan status accordingly after taking any
4258  * required recovery action.
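 * The SCSI midlayer keeps polling this handler until it returns 1.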
4259 * 4260 * Return: 1 on scan finished or timed out, 0 for in progress 4261 */ 4262 static int mpi3mr_scan_finished(struct Scsi_Host *shost, 4263 unsigned long time) 4264 { 4265 struct mpi3mr_ioc *mrioc = shost_priv(shost); 4266 u32 pe_timeout = MPI3MR_PORTENABLE_TIMEOUT; 4267 u32 ioc_status = readl(&mrioc->sysif_regs->ioc_status); 4268 4269 if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) || 4270 (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) { 4271 ioc_err(mrioc, "port enable failed due to fault or reset\n"); 4272 mpi3mr_print_fault_info(mrioc); 4273 mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR; 4274 mrioc->scan_started = 0; 4275 mrioc->init_cmds.is_waiting = 0; 4276 mrioc->init_cmds.callback = NULL; 4277 mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; 4278 } 4279 4280 if (time >= (pe_timeout * HZ)) { 4281 ioc_err(mrioc, "port enable failed due to time out\n"); 4282 mpi3mr_check_rh_fault_ioc(mrioc, 4283 MPI3MR_RESET_FROM_PE_TIMEOUT); 4284 mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR; 4285 mrioc->scan_started = 0; 4286 mrioc->init_cmds.is_waiting = 0; 4287 mrioc->init_cmds.callback = NULL; 4288 mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; 4289 } 4290 4291 if (mrioc->scan_started) 4292 return 0; 4293 4294 if (mrioc->scan_failed) { 4295 ioc_err(mrioc, 4296 "port enable failed with status=0x%04x\n", 4297 mrioc->scan_failed); 4298 } else 4299 ioc_info(mrioc, "port enable is successfully completed\n"); 4300 4301 mpi3mr_start_watchdog(mrioc); 4302 mrioc->is_driver_loading = 0; 4303 mrioc->stop_bsgs = 0; 4304 return 1; 4305 } 4306 4307 /** 4308 * mpi3mr_slave_destroy - Slave destroy callback handler 4309 * @sdev: SCSI device reference 4310 * 4311 * Cleanup and free per device(lun) private data. 4312 * 4313 * Return: Nothing. 4314 */ 4315 static void mpi3mr_slave_destroy(struct scsi_device *sdev) 4316 { 4317 struct Scsi_Host *shost; 4318 struct mpi3mr_ioc *mrioc; 4319 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data; 4320 struct mpi3mr_tgt_dev *tgt_dev = NULL; 4321 unsigned long flags; 4322 struct scsi_target *starget; 4323 struct sas_rphy *rphy = NULL; 4324 4325 if (!sdev->hostdata) 4326 return; 4327 4328 starget = scsi_target(sdev); 4329 shost = dev_to_shost(&starget->dev); 4330 mrioc = shost_priv(shost); 4331 scsi_tgt_priv_data = starget->hostdata; 4332 4333 scsi_tgt_priv_data->num_luns--; 4334 4335 spin_lock_irqsave(&mrioc->tgtdev_lock, flags); 4336 if (starget->channel == mrioc->scsi_device_channel) 4337 tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id); 4338 else if (mrioc->sas_transport_enabled && !starget->channel) { 4339 rphy = dev_to_rphy(starget->dev.parent); 4340 tgt_dev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc, 4341 rphy->identify.sas_address, rphy); 4342 } 4343 4344 if (tgt_dev && (!scsi_tgt_priv_data->num_luns)) 4345 tgt_dev->starget = NULL; 4346 if (tgt_dev) 4347 mpi3mr_tgtdev_put(tgt_dev); 4348 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 4349 4350 kfree(sdev->hostdata); 4351 sdev->hostdata = NULL; 4352 } 4353 4354 /** 4355 * mpi3mr_target_destroy - Target destroy callback handler 4356 * @starget: SCSI target reference 4357 * 4358 * Cleanup and free per target private data. 4359 * 4360 * Return: Nothing. 
4361 */ 4362 static void mpi3mr_target_destroy(struct scsi_target *starget) 4363 { 4364 struct Scsi_Host *shost; 4365 struct mpi3mr_ioc *mrioc; 4366 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data; 4367 struct mpi3mr_tgt_dev *tgt_dev; 4368 unsigned long flags; 4369 4370 if (!starget->hostdata) 4371 return; 4372 4373 shost = dev_to_shost(&starget->dev); 4374 mrioc = shost_priv(shost); 4375 scsi_tgt_priv_data = starget->hostdata; 4376 4377 spin_lock_irqsave(&mrioc->tgtdev_lock, flags); 4378 tgt_dev = __mpi3mr_get_tgtdev_from_tgtpriv(mrioc, scsi_tgt_priv_data); 4379 if (tgt_dev && (tgt_dev->starget == starget) && 4380 (tgt_dev->perst_id == starget->id)) 4381 tgt_dev->starget = NULL; 4382 if (tgt_dev) { 4383 scsi_tgt_priv_data->tgt_dev = NULL; 4384 scsi_tgt_priv_data->perst_id = 0; 4385 mpi3mr_tgtdev_put(tgt_dev); 4386 mpi3mr_tgtdev_put(tgt_dev); 4387 } 4388 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 4389 4390 kfree(starget->hostdata); 4391 starget->hostdata = NULL; 4392 } 4393 4394 /** 4395 * mpi3mr_slave_configure - Slave configure callback handler 4396 * @sdev: SCSI device reference 4397 * 4398 * Configure queue depth, max hardware sectors and virt boundary 4399 * as required 4400 * 4401 * Return: 0 always. 4402 */ 4403 static int mpi3mr_slave_configure(struct scsi_device *sdev) 4404 { 4405 struct scsi_target *starget; 4406 struct Scsi_Host *shost; 4407 struct mpi3mr_ioc *mrioc; 4408 struct mpi3mr_tgt_dev *tgt_dev = NULL; 4409 unsigned long flags; 4410 int retval = 0; 4411 struct sas_rphy *rphy = NULL; 4412 4413 starget = scsi_target(sdev); 4414 shost = dev_to_shost(&starget->dev); 4415 mrioc = shost_priv(shost); 4416 4417 spin_lock_irqsave(&mrioc->tgtdev_lock, flags); 4418 if (starget->channel == mrioc->scsi_device_channel) 4419 tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id); 4420 else if (mrioc->sas_transport_enabled && !starget->channel) { 4421 rphy = dev_to_rphy(starget->dev.parent); 4422 tgt_dev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc, 4423 rphy->identify.sas_address, rphy); 4424 } 4425 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 4426 if (!tgt_dev) 4427 return -ENXIO; 4428 4429 mpi3mr_change_queue_depth(sdev, tgt_dev->q_depth); 4430 4431 sdev->eh_timeout = MPI3MR_EH_SCMD_TIMEOUT; 4432 blk_queue_rq_timeout(sdev->request_queue, MPI3MR_SCMD_TIMEOUT); 4433 4434 switch (tgt_dev->dev_type) { 4435 case MPI3_DEVICE_DEVFORM_PCIE: 4436 /*The block layer hw sector size = 512*/ 4437 if ((tgt_dev->dev_spec.pcie_inf.dev_info & 4438 MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) == 4439 MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) { 4440 blk_queue_max_hw_sectors(sdev->request_queue, 4441 tgt_dev->dev_spec.pcie_inf.mdts / 512); 4442 if (tgt_dev->dev_spec.pcie_inf.pgsz == 0) 4443 blk_queue_virt_boundary(sdev->request_queue, 4444 ((1 << MPI3MR_DEFAULT_PGSZEXP) - 1)); 4445 else 4446 blk_queue_virt_boundary(sdev->request_queue, 4447 ((1 << tgt_dev->dev_spec.pcie_inf.pgsz) - 1)); 4448 } 4449 break; 4450 default: 4451 break; 4452 } 4453 4454 mpi3mr_tgtdev_put(tgt_dev); 4455 4456 return retval; 4457 } 4458 4459 /** 4460 * mpi3mr_slave_alloc -Slave alloc callback handler 4461 * @sdev: SCSI device reference 4462 * 4463 * Allocate per device(lun) private data and initialize it. 4464 * 4465 * Return: 0 on success -ENOMEM on memory allocation failure. 
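 * -ENXIO is returned when no matching target device is found.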
4466 */ 4467 static int mpi3mr_slave_alloc(struct scsi_device *sdev) 4468 { 4469 struct Scsi_Host *shost; 4470 struct mpi3mr_ioc *mrioc; 4471 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data; 4472 struct mpi3mr_tgt_dev *tgt_dev = NULL; 4473 struct mpi3mr_sdev_priv_data *scsi_dev_priv_data; 4474 unsigned long flags; 4475 struct scsi_target *starget; 4476 int retval = 0; 4477 struct sas_rphy *rphy = NULL; 4478 4479 starget = scsi_target(sdev); 4480 shost = dev_to_shost(&starget->dev); 4481 mrioc = shost_priv(shost); 4482 scsi_tgt_priv_data = starget->hostdata; 4483 4484 spin_lock_irqsave(&mrioc->tgtdev_lock, flags); 4485 4486 if (starget->channel == mrioc->scsi_device_channel) 4487 tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id); 4488 else if (mrioc->sas_transport_enabled && !starget->channel) { 4489 rphy = dev_to_rphy(starget->dev.parent); 4490 tgt_dev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc, 4491 rphy->identify.sas_address, rphy); 4492 } 4493 4494 if (tgt_dev) { 4495 if (tgt_dev->starget == NULL) 4496 tgt_dev->starget = starget; 4497 mpi3mr_tgtdev_put(tgt_dev); 4498 retval = 0; 4499 } else { 4500 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 4501 return -ENXIO; 4502 } 4503 4504 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 4505 4506 scsi_dev_priv_data = kzalloc(sizeof(*scsi_dev_priv_data), GFP_KERNEL); 4507 if (!scsi_dev_priv_data) 4508 return -ENOMEM; 4509 4510 scsi_dev_priv_data->lun_id = sdev->lun; 4511 scsi_dev_priv_data->tgt_priv_data = scsi_tgt_priv_data; 4512 sdev->hostdata = scsi_dev_priv_data; 4513 4514 scsi_tgt_priv_data->num_luns++; 4515 4516 return retval; 4517 } 4518 4519 /** 4520 * mpi3mr_target_alloc - Target alloc callback handler 4521 * @starget: SCSI target reference 4522 * 4523 * Allocate per target private data and initialize it. 4524 * 4525 * Return: 0 on success -ENOMEM on memory allocation failure. 
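 * -ENXIO is returned when no matching, exposed target device is found.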
4526  */
4527 static int mpi3mr_target_alloc(struct scsi_target *starget)
4528 {
4529 	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4530 	struct mpi3mr_ioc *mrioc = shost_priv(shost);
4531 	struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data;
4532 	struct mpi3mr_tgt_dev *tgt_dev;
4533 	unsigned long flags;
4534 	int retval = 0;
4535 	struct sas_rphy *rphy = NULL;
4536 
4537 	scsi_tgt_priv_data = kzalloc(sizeof(*scsi_tgt_priv_data), GFP_KERNEL);
4538 	if (!scsi_tgt_priv_data)
4539 		return -ENOMEM;
4540 
4541 	starget->hostdata = scsi_tgt_priv_data;
4542 
4543 	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
4544 	if (starget->channel == mrioc->scsi_device_channel) {
4545 		tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id);
4546 		if (tgt_dev && !tgt_dev->is_hidden) {
4547 			scsi_tgt_priv_data->starget = starget;
4548 			scsi_tgt_priv_data->dev_handle = tgt_dev->dev_handle;
4549 			scsi_tgt_priv_data->perst_id = tgt_dev->perst_id;
4550 			scsi_tgt_priv_data->dev_type = tgt_dev->dev_type;
4551 			scsi_tgt_priv_data->tgt_dev = tgt_dev;
4552 			tgt_dev->starget = starget;
4553 			atomic_set(&scsi_tgt_priv_data->block_io, 0);
4554 			retval = 0;
4555 			if ((tgt_dev->dev_type == MPI3_DEVICE_DEVFORM_PCIE) &&
4556 			    ((tgt_dev->dev_spec.pcie_inf.dev_info &
4557 			    MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) ==
4558 			    MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) &&
4559 			    ((tgt_dev->dev_spec.pcie_inf.dev_info &
4560 			    MPI3_DEVICE0_PCIE_DEVICE_INFO_PITYPE_MASK) !=
4561 			    MPI3_DEVICE0_PCIE_DEVICE_INFO_PITYPE_0))
4562 				scsi_tgt_priv_data->dev_nvme_dif = 1;
4563 			scsi_tgt_priv_data->io_throttle_enabled = tgt_dev->io_throttle_enabled;
4564 			scsi_tgt_priv_data->wslen = tgt_dev->wslen;
4565 			if (tgt_dev->dev_type == MPI3_DEVICE_DEVFORM_VD)
4566 				scsi_tgt_priv_data->throttle_group = tgt_dev->dev_spec.vd_inf.tg;
4567 		} else
4568 			retval = -ENXIO;
4569 	} else if (mrioc->sas_transport_enabled && !starget->channel) {
4570 		rphy = dev_to_rphy(starget->dev.parent);
4571 		tgt_dev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc,
4572 		    rphy->identify.sas_address, rphy);
4573 		if (tgt_dev && !tgt_dev->is_hidden && !tgt_dev->non_stl &&
4574 		    (tgt_dev->dev_type == MPI3_DEVICE_DEVFORM_SAS_SATA)) {
4575 			scsi_tgt_priv_data->starget = starget;
4576 			scsi_tgt_priv_data->dev_handle = tgt_dev->dev_handle;
4577 			scsi_tgt_priv_data->perst_id = tgt_dev->perst_id;
4578 			scsi_tgt_priv_data->dev_type = tgt_dev->dev_type;
4579 			scsi_tgt_priv_data->tgt_dev = tgt_dev;
4580 			scsi_tgt_priv_data->io_throttle_enabled = tgt_dev->io_throttle_enabled;
4581 			scsi_tgt_priv_data->wslen = tgt_dev->wslen;
4582 			tgt_dev->starget = starget;
4583 			atomic_set(&scsi_tgt_priv_data->block_io, 0);
4584 			retval = 0;
4585 		} else
4586 			retval = -ENXIO;
4587 	}
4588 	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
4589 
4590 	return retval;
4591 }
4592 
4593 /**
4594  * mpi3mr_check_return_unmap - Whether an unmap is allowed
4595  * @mrioc: Adapter instance reference
4596  * @scmd: SCSI Command reference
4597  *
4598  * The controller hardware cannot handle certain unmap commands
4599  * for NVMe drives; this routine checks for those, completes the
4600  * SCSI command with the proper status and sense data, and
4601  * returns true.
4602  *
4603  * Return: TRUE if the unmap is not allowed, FALSE otherwise.
4604 */ 4605 static bool mpi3mr_check_return_unmap(struct mpi3mr_ioc *mrioc, 4606 struct scsi_cmnd *scmd) 4607 { 4608 unsigned char *buf; 4609 u16 param_len, desc_len, trunc_param_len; 4610 4611 trunc_param_len = param_len = get_unaligned_be16(scmd->cmnd + 7); 4612 4613 if (mrioc->pdev->revision) { 4614 if ((param_len > 24) && ((param_len - 8) & 0xF)) { 4615 trunc_param_len -= (param_len - 8) & 0xF; 4616 dprint_scsi_command(mrioc, scmd, MPI3_DEBUG_SCSI_ERROR); 4617 dprint_scsi_err(mrioc, 4618 "truncating param_len from (%d) to (%d)\n", 4619 param_len, trunc_param_len); 4620 put_unaligned_be16(trunc_param_len, scmd->cmnd + 7); 4621 dprint_scsi_command(mrioc, scmd, MPI3_DEBUG_SCSI_ERROR); 4622 } 4623 return false; 4624 } 4625 4626 if (!param_len) { 4627 ioc_warn(mrioc, 4628 "%s: cdb received with zero parameter length\n", 4629 __func__); 4630 scsi_print_command(scmd); 4631 scmd->result = DID_OK << 16; 4632 scsi_done(scmd); 4633 return true; 4634 } 4635 4636 if (param_len < 24) { 4637 ioc_warn(mrioc, 4638 "%s: cdb received with invalid param_len: %d\n", 4639 __func__, param_len); 4640 scsi_print_command(scmd); 4641 scmd->result = SAM_STAT_CHECK_CONDITION; 4642 scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 4643 0x1A, 0); 4644 scsi_done(scmd); 4645 return true; 4646 } 4647 if (param_len != scsi_bufflen(scmd)) { 4648 ioc_warn(mrioc, 4649 "%s: cdb received with param_len: %d bufflen: %d\n", 4650 __func__, param_len, scsi_bufflen(scmd)); 4651 scsi_print_command(scmd); 4652 scmd->result = SAM_STAT_CHECK_CONDITION; 4653 scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 4654 0x1A, 0); 4655 scsi_done(scmd); 4656 return true; 4657 } 4658 buf = kzalloc(scsi_bufflen(scmd), GFP_ATOMIC); 4659 if (!buf) { 4660 scsi_print_command(scmd); 4661 scmd->result = SAM_STAT_CHECK_CONDITION; 4662 scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 4663 0x55, 0x03); 4664 scsi_done(scmd); 4665 return true; 4666 } 4667 scsi_sg_copy_to_buffer(scmd, buf, scsi_bufflen(scmd)); 4668 desc_len = get_unaligned_be16(&buf[2]); 4669 4670 if (desc_len < 16) { 4671 ioc_warn(mrioc, 4672 "%s: Invalid descriptor length in param list: %d\n", 4673 __func__, desc_len); 4674 scsi_print_command(scmd); 4675 scmd->result = SAM_STAT_CHECK_CONDITION; 4676 scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 4677 0x26, 0); 4678 scsi_done(scmd); 4679 kfree(buf); 4680 return true; 4681 } 4682 4683 if (param_len > (desc_len + 8)) { 4684 trunc_param_len = desc_len + 8; 4685 scsi_print_command(scmd); 4686 dprint_scsi_err(mrioc, 4687 "truncating param_len(%d) to desc_len+8(%d)\n", 4688 param_len, trunc_param_len); 4689 put_unaligned_be16(trunc_param_len, scmd->cmnd + 7); 4690 scsi_print_command(scmd); 4691 } 4692 4693 kfree(buf); 4694 return false; 4695 } 4696 4697 /** 4698 * mpi3mr_allow_scmd_to_fw - Command is allowed during shutdown 4699 * @scmd: SCSI Command reference 4700 * 4701 * Checks whether a cdb is allowed during shutdown or not. 4702 * 4703 * Return: TRUE for allowed commands, FALSE otherwise. 4704 */ 4705 4706 inline bool mpi3mr_allow_scmd_to_fw(struct scsi_cmnd *scmd) 4707 { 4708 switch (scmd->cmnd[0]) { 4709 case SYNCHRONIZE_CACHE: 4710 case START_STOP: 4711 return true; 4712 default: 4713 return false; 4714 } 4715 } 4716 4717 /** 4718 * mpi3mr_qcmd - I/O request despatcher 4719 * @shost: SCSI Host reference 4720 * @scmd: SCSI Command reference 4721 * 4722 * Issues the SCSI Command as an MPI3 request. 
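 * The routine validates the device state, builds the MPI3 SCSI IO
 * request frame (CDB, flags, EEDP and SGL), performs large I/O
 * throttling accounting and posts the request to an operational
 * request queue.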
4723 * 4724 * Return: 0 on successful queueing of the request or if the 4725 * request is completed with failure. 4726 * SCSI_MLQUEUE_DEVICE_BUSY when the device is busy. 4727 * SCSI_MLQUEUE_HOST_BUSY when the host queue is full. 4728 */ 4729 static int mpi3mr_qcmd(struct Scsi_Host *shost, 4730 struct scsi_cmnd *scmd) 4731 { 4732 struct mpi3mr_ioc *mrioc = shost_priv(shost); 4733 struct mpi3mr_stgt_priv_data *stgt_priv_data; 4734 struct mpi3mr_sdev_priv_data *sdev_priv_data; 4735 struct scmd_priv *scmd_priv_data = NULL; 4736 struct mpi3_scsi_io_request *scsiio_req = NULL; 4737 struct op_req_qinfo *op_req_q = NULL; 4738 int retval = 0; 4739 u16 dev_handle; 4740 u16 host_tag; 4741 u32 scsiio_flags = 0, data_len_blks = 0; 4742 struct request *rq = scsi_cmd_to_rq(scmd); 4743 int iprio_class; 4744 u8 is_pcie_dev = 0; 4745 u32 tracked_io_sz = 0; 4746 u32 ioc_pend_data_len = 0, tg_pend_data_len = 0; 4747 struct mpi3mr_throttle_group_info *tg = NULL; 4748 4749 if (mrioc->unrecoverable) { 4750 scmd->result = DID_ERROR << 16; 4751 scsi_done(scmd); 4752 goto out; 4753 } 4754 4755 sdev_priv_data = scmd->device->hostdata; 4756 if (!sdev_priv_data || !sdev_priv_data->tgt_priv_data) { 4757 scmd->result = DID_NO_CONNECT << 16; 4758 scsi_done(scmd); 4759 goto out; 4760 } 4761 4762 if (mrioc->stop_drv_processing && 4763 !(mpi3mr_allow_scmd_to_fw(scmd))) { 4764 scmd->result = DID_NO_CONNECT << 16; 4765 scsi_done(scmd); 4766 goto out; 4767 } 4768 4769 stgt_priv_data = sdev_priv_data->tgt_priv_data; 4770 dev_handle = stgt_priv_data->dev_handle; 4771 4772 /* Avoid error handling escalation when device is removed or blocked */ 4773 4774 if (scmd->device->host->shost_state == SHOST_RECOVERY && 4775 scmd->cmnd[0] == TEST_UNIT_READY && 4776 (stgt_priv_data->dev_removed || (dev_handle == MPI3MR_INVALID_DEV_HANDLE))) { 4777 scsi_build_sense(scmd, 0, UNIT_ATTENTION, 0x29, 0x07); 4778 scsi_done(scmd); 4779 goto out; 4780 } 4781 4782 if (mrioc->reset_in_progress) { 4783 retval = SCSI_MLQUEUE_HOST_BUSY; 4784 goto out; 4785 } 4786 4787 if (atomic_read(&stgt_priv_data->block_io)) { 4788 if (mrioc->stop_drv_processing) { 4789 scmd->result = DID_NO_CONNECT << 16; 4790 scsi_done(scmd); 4791 goto out; 4792 } 4793 retval = SCSI_MLQUEUE_DEVICE_BUSY; 4794 goto out; 4795 } 4796 4797 if (dev_handle == MPI3MR_INVALID_DEV_HANDLE) { 4798 scmd->result = DID_NO_CONNECT << 16; 4799 scsi_done(scmd); 4800 goto out; 4801 } 4802 if (stgt_priv_data->dev_removed) { 4803 scmd->result = DID_NO_CONNECT << 16; 4804 scsi_done(scmd); 4805 goto out; 4806 } 4807 4808 if (stgt_priv_data->dev_type == MPI3_DEVICE_DEVFORM_PCIE) 4809 is_pcie_dev = 1; 4810 if ((scmd->cmnd[0] == UNMAP) && is_pcie_dev && 4811 (mrioc->pdev->device == MPI3_MFGPAGE_DEVID_SAS4116) && 4812 mpi3mr_check_return_unmap(mrioc, scmd)) 4813 goto out; 4814 4815 host_tag = mpi3mr_host_tag_for_scmd(mrioc, scmd); 4816 if (host_tag == MPI3MR_HOSTTAG_INVALID) { 4817 scmd->result = DID_ERROR << 16; 4818 scsi_done(scmd); 4819 goto out; 4820 } 4821 4822 if (scmd->sc_data_direction == DMA_FROM_DEVICE) 4823 scsiio_flags = MPI3_SCSIIO_FLAGS_DATADIRECTION_READ; 4824 else if (scmd->sc_data_direction == DMA_TO_DEVICE) 4825 scsiio_flags = MPI3_SCSIIO_FLAGS_DATADIRECTION_WRITE; 4826 else 4827 scsiio_flags = MPI3_SCSIIO_FLAGS_DATADIRECTION_NO_DATA_TRANSFER; 4828 4829 scsiio_flags |= MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_SIMPLEQ; 4830 4831 if (sdev_priv_data->ncq_prio_enable) { 4832 iprio_class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq)); 4833 if (iprio_class == IOPRIO_CLASS_RT) 4834 scsiio_flags |= 1 << 
MPI3_SCSIIO_FLAGS_CMDPRI_SHIFT; 4835 } 4836 4837 if (scmd->cmd_len > 16) 4838 scsiio_flags |= MPI3_SCSIIO_FLAGS_CDB_GREATER_THAN_16; 4839 4840 scmd_priv_data = scsi_cmd_priv(scmd); 4841 memset(scmd_priv_data->mpi3mr_scsiio_req, 0, MPI3MR_ADMIN_REQ_FRAME_SZ); 4842 scsiio_req = (struct mpi3_scsi_io_request *)scmd_priv_data->mpi3mr_scsiio_req; 4843 scsiio_req->function = MPI3_FUNCTION_SCSI_IO; 4844 scsiio_req->host_tag = cpu_to_le16(host_tag); 4845 4846 mpi3mr_setup_eedp(mrioc, scmd, scsiio_req); 4847 4848 if (stgt_priv_data->wslen) 4849 mpi3mr_setup_divert_ws(mrioc, scmd, scsiio_req, &scsiio_flags, 4850 stgt_priv_data->wslen); 4851 4852 memcpy(scsiio_req->cdb.cdb32, scmd->cmnd, scmd->cmd_len); 4853 scsiio_req->data_length = cpu_to_le32(scsi_bufflen(scmd)); 4854 scsiio_req->dev_handle = cpu_to_le16(dev_handle); 4855 scsiio_req->flags = cpu_to_le32(scsiio_flags); 4856 int_to_scsilun(sdev_priv_data->lun_id, 4857 (struct scsi_lun *)scsiio_req->lun); 4858 4859 if (mpi3mr_build_sg_scmd(mrioc, scmd, scsiio_req)) { 4860 mpi3mr_clear_scmd_priv(mrioc, scmd); 4861 retval = SCSI_MLQUEUE_HOST_BUSY; 4862 goto out; 4863 } 4864 op_req_q = &mrioc->req_qinfo[scmd_priv_data->req_q_idx]; 4865 data_len_blks = scsi_bufflen(scmd) >> 9; 4866 if ((data_len_blks >= mrioc->io_throttle_data_length) && 4867 stgt_priv_data->io_throttle_enabled) { 4868 tracked_io_sz = data_len_blks; 4869 tg = stgt_priv_data->throttle_group; 4870 if (tg) { 4871 ioc_pend_data_len = atomic_add_return(data_len_blks, 4872 &mrioc->pend_large_data_sz); 4873 tg_pend_data_len = atomic_add_return(data_len_blks, 4874 &tg->pend_large_data_sz); 4875 if (!tg->io_divert && ((ioc_pend_data_len >= 4876 mrioc->io_throttle_high) || 4877 (tg_pend_data_len >= tg->high))) { 4878 tg->io_divert = 1; 4879 tg->need_qd_reduction = 1; 4880 mpi3mr_set_io_divert_for_all_vd_in_tg(mrioc, 4881 tg, 1); 4882 mpi3mr_queue_qd_reduction_event(mrioc, tg); 4883 } 4884 } else { 4885 ioc_pend_data_len = atomic_add_return(data_len_blks, 4886 &mrioc->pend_large_data_sz); 4887 if (ioc_pend_data_len >= mrioc->io_throttle_high) 4888 stgt_priv_data->io_divert = 1; 4889 } 4890 } 4891 4892 if (stgt_priv_data->io_divert) { 4893 scsiio_req->msg_flags |= 4894 MPI3_SCSIIO_MSGFLAGS_DIVERT_TO_FIRMWARE; 4895 scsiio_flags |= MPI3_SCSIIO_FLAGS_DIVERT_REASON_IO_THROTTLING; 4896 } 4897 scsiio_req->flags = cpu_to_le32(scsiio_flags); 4898 4899 if (mpi3mr_op_request_post(mrioc, op_req_q, 4900 scmd_priv_data->mpi3mr_scsiio_req)) { 4901 mpi3mr_clear_scmd_priv(mrioc, scmd); 4902 retval = SCSI_MLQUEUE_HOST_BUSY; 4903 if (tracked_io_sz) { 4904 atomic_sub(tracked_io_sz, &mrioc->pend_large_data_sz); 4905 if (tg) 4906 atomic_sub(tracked_io_sz, 4907 &tg->pend_large_data_sz); 4908 } 4909 goto out; 4910 } 4911 4912 out: 4913 return retval; 4914 } 4915 4916 static const struct scsi_host_template mpi3mr_driver_template = { 4917 .module = THIS_MODULE, 4918 .name = "MPI3 Storage Controller", 4919 .proc_name = MPI3MR_DRIVER_NAME, 4920 .queuecommand = mpi3mr_qcmd, 4921 .target_alloc = mpi3mr_target_alloc, 4922 .slave_alloc = mpi3mr_slave_alloc, 4923 .slave_configure = mpi3mr_slave_configure, 4924 .target_destroy = mpi3mr_target_destroy, 4925 .slave_destroy = mpi3mr_slave_destroy, 4926 .scan_finished = mpi3mr_scan_finished, 4927 .scan_start = mpi3mr_scan_start, 4928 .change_queue_depth = mpi3mr_change_queue_depth, 4929 .eh_device_reset_handler = mpi3mr_eh_dev_reset, 4930 .eh_target_reset_handler = mpi3mr_eh_target_reset, 4931 .eh_bus_reset_handler = mpi3mr_eh_bus_reset, 4932 .eh_host_reset_handler = 
mpi3mr_eh_host_reset,
4933 	.bios_param = mpi3mr_bios_param,
4934 	.map_queues = mpi3mr_map_queues,
4935 	.mq_poll = mpi3mr_blk_mq_poll,
4936 	.no_write_same = 1,
4937 	.can_queue = 1,
4938 	.this_id = -1,
4939 	.sg_tablesize = MPI3MR_DEFAULT_SGL_ENTRIES,
4940 	/* max xfer supported is 1M (2K in 512 byte sized sectors)
4941 	 */
4942 	.max_sectors = (MPI3MR_DEFAULT_MAX_IO_SIZE / 512),
4943 	.cmd_per_lun = MPI3MR_MAX_CMDS_LUN,
4944 	.max_segment_size = 0xffffffff,
4945 	.track_queue_depth = 1,
4946 	.cmd_size = sizeof(struct scmd_priv),
4947 	.shost_groups = mpi3mr_host_groups,
4948 	.sdev_groups = mpi3mr_dev_groups,
4949 };
4950 
4951 /**
4952  * mpi3mr_init_drv_cmd - Initialize internal command tracker
4953  * @cmdptr: Internal command tracker
4954  * @host_tag: Host tag used for the specific command
4955  *
4956  * Initialize the internal command tracker structure with
4957  * specified host tag.
4958  *
4959  * Return: Nothing.
4960  */
4961 static inline void mpi3mr_init_drv_cmd(struct mpi3mr_drv_cmd *cmdptr,
4962 	u16 host_tag)
4963 {
4964 	mutex_init(&cmdptr->mutex);
4965 	cmdptr->reply = NULL;
4966 	cmdptr->state = MPI3MR_CMD_NOTUSED;
4967 	cmdptr->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
4968 	cmdptr->host_tag = host_tag;
4969 }
4970 
4971 /**
4972  * osintfc_mrioc_security_status - Check controller security status
4973  * @pdev: PCI device instance
4974  *
4975  * Read the Device Serial Number capability from PCI config
4976  * space and decide whether the controller is secure or not.
4977  *
4978  * Return: 0 on success, non-zero on failure.
4979  */
4980 static int
4981 osintfc_mrioc_security_status(struct pci_dev *pdev)
4982 {
4983 	u32 cap_data;
4984 	int base;
4985 	u32 ctlr_status;
4986 	u32 debug_status;
4987 	int retval = 0;
4988 
4989 	base = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN);
4990 	if (!base) {
4991 		dev_err(&pdev->dev,
4992 		    "%s: PCI_EXT_CAP_ID_DSN is not supported\n", __func__);
4993 		return -1;
4994 	}
4995 
4996 	pci_read_config_dword(pdev, base + 4, &cap_data);
4997 
4998 	debug_status = cap_data & MPI3MR_CTLR_SECURE_DBG_STATUS_MASK;
4999 	ctlr_status = cap_data & MPI3MR_CTLR_SECURITY_STATUS_MASK;
5000 
5001 	switch (ctlr_status) {
5002 	case MPI3MR_INVALID_DEVICE:
5003 		dev_err(&pdev->dev,
5004 		    "%s: Non secure ctlr (Invalid) is detected: DID: 0x%x: SVID: 0x%x: SDID: 0x%x\n",
5005 		    __func__, pdev->device, pdev->subsystem_vendor,
5006 		    pdev->subsystem_device);
5007 		retval = -1;
5008 		break;
5009 	case MPI3MR_CONFIG_SECURE_DEVICE:
5010 		if (!debug_status)
5011 			dev_info(&pdev->dev,
5012 			    "%s: Config secure ctlr is detected\n",
5013 			    __func__);
5014 		break;
5015 	case MPI3MR_HARD_SECURE_DEVICE:
5016 		break;
5017 	case MPI3MR_TAMPERED_DEVICE:
5018 		dev_err(&pdev->dev,
5019 		    "%s: Non secure ctlr (Tampered) is detected: DID: 0x%x: SVID: 0x%x: SDID: 0x%x\n",
5020 		    __func__, pdev->device, pdev->subsystem_vendor,
5021 		    pdev->subsystem_device);
5022 		retval = -1;
5023 		break;
5024 	default:
5025 		retval = -1;
5026 		break;
5027 	}
5028 
5029 	if (!retval && debug_status) {
5030 		dev_err(&pdev->dev,
5031 		    "%s: Non secure ctlr (Secure Dbg) is detected: DID: 0x%x: SVID: 0x%x: SDID: 0x%x\n",
5032 		    __func__, pdev->device, pdev->subsystem_vendor,
5033 		    pdev->subsystem_device);
5034 		retval = -1;
5035 	}
5036 
5037 	return retval;
5038 }
5039 
5040 /**
5041  * mpi3mr_probe - PCI probe callback
5042  * @pdev: PCI device instance
5043  * @id: PCI device ID details
5044  *
5045  * Controller initialization routine. Checks the security status
5046  * of the controller and, if it is invalid or tampered, returns
5047  * from the probe without initializing the controller.
Otherwise,
5048  * allocates the per-adapter instance through shost_priv,
5049  * initializes controller-specific data structures, initializes
5050  * the controller hardware and adds the shost to the SCSI subsystem.
5051  *
5052  * Return: 0 on success, non-zero on failure.
5053  */
5054 
5055 static int
5056 mpi3mr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
5057 {
5058 	struct mpi3mr_ioc *mrioc = NULL;
5059 	struct Scsi_Host *shost = NULL;
5060 	int retval = 0, i;
5061 
5062 	if (osintfc_mrioc_security_status(pdev)) {
5063 		warn_non_secure_ctlr = 1;
5064 		return 1; /* For Invalid and Tampered device */
5065 	}
5066 
5067 	shost = scsi_host_alloc(&mpi3mr_driver_template,
5068 	    sizeof(struct mpi3mr_ioc));
5069 	if (!shost) {
5070 		retval = -ENODEV;
5071 		goto shost_failed;
5072 	}
5073 
5074 	mrioc = shost_priv(shost);
5075 	mrioc->id = mrioc_ids++;
5076 	sprintf(mrioc->driver_name, "%s", MPI3MR_DRIVER_NAME);
5077 	sprintf(mrioc->name, "%s%d", mrioc->driver_name, mrioc->id);
5078 	INIT_LIST_HEAD(&mrioc->list);
5079 	spin_lock(&mrioc_list_lock);
5080 	list_add_tail(&mrioc->list, &mrioc_list);
5081 	spin_unlock(&mrioc_list_lock);
5082 
5083 	spin_lock_init(&mrioc->admin_req_lock);
5084 	spin_lock_init(&mrioc->reply_free_queue_lock);
5085 	spin_lock_init(&mrioc->sbq_lock);
5086 	spin_lock_init(&mrioc->fwevt_lock);
5087 	spin_lock_init(&mrioc->tgtdev_lock);
5088 	spin_lock_init(&mrioc->watchdog_lock);
5089 	spin_lock_init(&mrioc->chain_buf_lock);
5090 	spin_lock_init(&mrioc->sas_node_lock);
5091 
5092 	INIT_LIST_HEAD(&mrioc->fwevt_list);
5093 	INIT_LIST_HEAD(&mrioc->tgtdev_list);
5094 	INIT_LIST_HEAD(&mrioc->delayed_rmhs_list);
5095 	INIT_LIST_HEAD(&mrioc->delayed_evtack_cmds_list);
5096 	INIT_LIST_HEAD(&mrioc->sas_expander_list);
5097 	INIT_LIST_HEAD(&mrioc->hba_port_table_list);
5098 	INIT_LIST_HEAD(&mrioc->enclosure_list);
5099 
5100 	mutex_init(&mrioc->reset_mutex);
5101 	mpi3mr_init_drv_cmd(&mrioc->init_cmds, MPI3MR_HOSTTAG_INITCMDS);
5102 	mpi3mr_init_drv_cmd(&mrioc->host_tm_cmds, MPI3MR_HOSTTAG_BLK_TMS);
5103 	mpi3mr_init_drv_cmd(&mrioc->bsg_cmds, MPI3MR_HOSTTAG_BSG_CMDS);
5104 	mpi3mr_init_drv_cmd(&mrioc->cfg_cmds, MPI3MR_HOSTTAG_CFG_CMDS);
5105 	mpi3mr_init_drv_cmd(&mrioc->transport_cmds,
5106 	    MPI3MR_HOSTTAG_TRANSPORT_CMDS);
5107 
5108 	for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++)
5109 		mpi3mr_init_drv_cmd(&mrioc->dev_rmhs_cmds[i],
5110 		    MPI3MR_HOSTTAG_DEVRMCMD_MIN + i);
5111 
5112 	for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++)
5113 		mpi3mr_init_drv_cmd(&mrioc->evtack_cmds[i],
5114 		    MPI3MR_HOSTTAG_EVTACKCMD_MIN + i);
5115 
5116 	if ((pdev->device == MPI3_MFGPAGE_DEVID_SAS4116) &&
5117 	    !pdev->revision)
5118 		mrioc->enable_segqueue = false;
5119 	else
5120 		mrioc->enable_segqueue = true;
5121 
5122 	init_waitqueue_head(&mrioc->reset_waitq);
5123 	mrioc->logging_level = logging_level;
5124 	mrioc->shost = shost;
5125 	mrioc->pdev = pdev;
5126 	mrioc->stop_bsgs = 1;
5127 
5128 	mrioc->max_sgl_entries = max_sgl_entries;
5129 	if (max_sgl_entries > MPI3MR_MAX_SGL_ENTRIES)
5130 		mrioc->max_sgl_entries = MPI3MR_MAX_SGL_ENTRIES;
5131 	else if (max_sgl_entries < MPI3MR_DEFAULT_SGL_ENTRIES)
5132 		mrioc->max_sgl_entries = MPI3MR_DEFAULT_SGL_ENTRIES;
5133 	else { /* round down to a multiple of MPI3MR_DEFAULT_SGL_ENTRIES */
5134 		mrioc->max_sgl_entries /= MPI3MR_DEFAULT_SGL_ENTRIES;
5135 		mrioc->max_sgl_entries *= MPI3MR_DEFAULT_SGL_ENTRIES;
5136 	}
5137 
5138 	/* init shost parameters */
5139 	shost->max_cmd_len = MPI3MR_MAX_CDB_LENGTH;
5140 	shost->max_lun = -1;
5141 	shost->unique_id = mrioc->id;
5142 
5143 	shost->max_channel = 0;
5144 	shost->max_id = 0xFFFFFFFF;
5145 
5146 	shost->host_tagset = 1;
5147 
5148 	if (prot_mask >= 0)
5149
		scsi_host_set_prot(shost, prot_mask);
5150 	else {
5151 		prot_mask = SHOST_DIF_TYPE1_PROTECTION
5152 		    | SHOST_DIF_TYPE2_PROTECTION
5153 		    | SHOST_DIF_TYPE3_PROTECTION;
5154 		scsi_host_set_prot(shost, prot_mask);
5155 	}
5156 
5157 	ioc_info(mrioc,
5158 	    "%s :host protection capabilities enabled %s%s%s%s%s%s%s\n",
5159 	    __func__,
5160 	    (prot_mask & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
5161 	    (prot_mask & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
5162 	    (prot_mask & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
5163 	    (prot_mask & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
5164 	    (prot_mask & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
5165 	    (prot_mask & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
5166 	    (prot_mask & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
5167 
5168 	if (prot_guard_mask)
5169 		scsi_host_set_guard(shost, (prot_guard_mask & 3));
5170 	else
5171 		scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
5172 
5173 	snprintf(mrioc->fwevt_worker_name, sizeof(mrioc->fwevt_worker_name),
5174 	    "%s%d_fwevt_wrkr", mrioc->driver_name, mrioc->id);
5175 	mrioc->fwevt_worker_thread = alloc_ordered_workqueue(
5176 	    mrioc->fwevt_worker_name, 0);
5177 	if (!mrioc->fwevt_worker_thread) {
5178 		ioc_err(mrioc, "failure at %s:%d/%s()!\n",
5179 		    __FILE__, __LINE__, __func__);
5180 		retval = -ENODEV;
5181 		goto fwevtthread_failed;
5182 	}
5183 
5184 	mrioc->is_driver_loading = 1;
5185 	mrioc->cpu_count = num_online_cpus();
5186 	if (mpi3mr_setup_resources(mrioc)) {
5187 		ioc_err(mrioc, "setup resources failed\n");
5188 		retval = -ENODEV;
5189 		goto resource_alloc_failed;
5190 	}
5191 	if (mpi3mr_init_ioc(mrioc)) {
5192 		ioc_err(mrioc, "initializing IOC failed\n");
5193 		retval = -ENODEV;
5194 		goto init_ioc_failed;
5195 	}
5196 
5197 	shost->nr_hw_queues = mrioc->num_op_reply_q;
5198 	if (mrioc->active_poll_qcount)
5199 		shost->nr_maps = 3;
5200 
5201 	shost->can_queue = mrioc->max_host_ios;
5202 	shost->sg_tablesize = mrioc->max_sgl_entries;
5203 	shost->max_id = mrioc->facts.max_perids + 1;
5204 
5205 	retval = scsi_add_host(shost, &pdev->dev);
5206 	if (retval) {
5207 		ioc_err(mrioc, "failure at %s:%d/%s()!\n",
5208 		    __FILE__, __LINE__, __func__);
5209 		goto addhost_failed;
5210 	}
5211 
5212 	scsi_scan_host(shost);
5213 	mpi3mr_bsg_init(mrioc);
5214 	return retval;
5215 
5216 addhost_failed:
5217 	mpi3mr_stop_watchdog(mrioc);
5218 	mpi3mr_cleanup_ioc(mrioc);
5219 init_ioc_failed:
5220 	mpi3mr_free_mem(mrioc);
5221 	mpi3mr_cleanup_resources(mrioc);
5222 resource_alloc_failed:
5223 	destroy_workqueue(mrioc->fwevt_worker_thread);
5224 fwevtthread_failed:
5225 	spin_lock(&mrioc_list_lock);
5226 	list_del(&mrioc->list);
5227 	spin_unlock(&mrioc_list_lock);
5228 	scsi_host_put(shost);
5229 shost_failed:
5230 	return retval;
5231 }
5232 
5233 /**
5234  * mpi3mr_remove - PCI remove callback
5235  * @pdev: PCI device instance
5236  *
5237  * Cleans up the IOC by issuing MUR and shutdown notification.
5238  * Frees up all memory and resources associated with the
5239  * controller and target devices, and unregisters the shost.
5240  *
5241  * Return: Nothing.
5242 */ 5243 static void mpi3mr_remove(struct pci_dev *pdev) 5244 { 5245 struct Scsi_Host *shost = pci_get_drvdata(pdev); 5246 struct mpi3mr_ioc *mrioc; 5247 struct workqueue_struct *wq; 5248 unsigned long flags; 5249 struct mpi3mr_tgt_dev *tgtdev, *tgtdev_next; 5250 struct mpi3mr_hba_port *port, *hba_port_next; 5251 struct mpi3mr_sas_node *sas_expander, *sas_expander_next; 5252 5253 if (!shost) 5254 return; 5255 5256 mrioc = shost_priv(shost); 5257 while (mrioc->reset_in_progress || mrioc->is_driver_loading) 5258 ssleep(1); 5259 5260 if (!pci_device_is_present(mrioc->pdev)) { 5261 mrioc->unrecoverable = 1; 5262 mpi3mr_flush_cmds_for_unrecovered_controller(mrioc); 5263 } 5264 5265 mpi3mr_bsg_exit(mrioc); 5266 mrioc->stop_drv_processing = 1; 5267 mpi3mr_cleanup_fwevt_list(mrioc); 5268 spin_lock_irqsave(&mrioc->fwevt_lock, flags); 5269 wq = mrioc->fwevt_worker_thread; 5270 mrioc->fwevt_worker_thread = NULL; 5271 spin_unlock_irqrestore(&mrioc->fwevt_lock, flags); 5272 if (wq) 5273 destroy_workqueue(wq); 5274 5275 if (mrioc->sas_transport_enabled) 5276 sas_remove_host(shost); 5277 else 5278 scsi_remove_host(shost); 5279 5280 list_for_each_entry_safe(tgtdev, tgtdev_next, &mrioc->tgtdev_list, 5281 list) { 5282 mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev); 5283 mpi3mr_tgtdev_del_from_list(mrioc, tgtdev, true); 5284 mpi3mr_tgtdev_put(tgtdev); 5285 } 5286 mpi3mr_stop_watchdog(mrioc); 5287 mpi3mr_cleanup_ioc(mrioc); 5288 mpi3mr_free_mem(mrioc); 5289 mpi3mr_cleanup_resources(mrioc); 5290 5291 spin_lock_irqsave(&mrioc->sas_node_lock, flags); 5292 list_for_each_entry_safe_reverse(sas_expander, sas_expander_next, 5293 &mrioc->sas_expander_list, list) { 5294 spin_unlock_irqrestore(&mrioc->sas_node_lock, flags); 5295 mpi3mr_expander_node_remove(mrioc, sas_expander); 5296 spin_lock_irqsave(&mrioc->sas_node_lock, flags); 5297 } 5298 list_for_each_entry_safe(port, hba_port_next, &mrioc->hba_port_table_list, list) { 5299 ioc_info(mrioc, 5300 "removing hba_port entry: %p port: %d from hba_port list\n", 5301 port, port->port_id); 5302 list_del(&port->list); 5303 kfree(port); 5304 } 5305 spin_unlock_irqrestore(&mrioc->sas_node_lock, flags); 5306 5307 if (mrioc->sas_hba.num_phys) { 5308 kfree(mrioc->sas_hba.phy); 5309 mrioc->sas_hba.phy = NULL; 5310 mrioc->sas_hba.num_phys = 0; 5311 } 5312 5313 spin_lock(&mrioc_list_lock); 5314 list_del(&mrioc->list); 5315 spin_unlock(&mrioc_list_lock); 5316 5317 scsi_host_put(shost); 5318 } 5319 5320 /** 5321 * mpi3mr_shutdown - PCI shutdown callback 5322 * @pdev: PCI device instance 5323 * 5324 * Free up all memory and resources associated with the 5325 * controller 5326 * 5327 * Return: Nothing. 
5328 */ 5329 static void mpi3mr_shutdown(struct pci_dev *pdev) 5330 { 5331 struct Scsi_Host *shost = pci_get_drvdata(pdev); 5332 struct mpi3mr_ioc *mrioc; 5333 struct workqueue_struct *wq; 5334 unsigned long flags; 5335 5336 if (!shost) 5337 return; 5338 5339 mrioc = shost_priv(shost); 5340 while (mrioc->reset_in_progress || mrioc->is_driver_loading) 5341 ssleep(1); 5342 5343 mrioc->stop_drv_processing = 1; 5344 mpi3mr_cleanup_fwevt_list(mrioc); 5345 spin_lock_irqsave(&mrioc->fwevt_lock, flags); 5346 wq = mrioc->fwevt_worker_thread; 5347 mrioc->fwevt_worker_thread = NULL; 5348 spin_unlock_irqrestore(&mrioc->fwevt_lock, flags); 5349 if (wq) 5350 destroy_workqueue(wq); 5351 5352 mpi3mr_stop_watchdog(mrioc); 5353 mpi3mr_cleanup_ioc(mrioc); 5354 mpi3mr_cleanup_resources(mrioc); 5355 } 5356 5357 /** 5358 * mpi3mr_suspend - PCI power management suspend callback 5359 * @dev: Device struct 5360 * 5361 * Change the power state to the given value and cleanup the IOC 5362 * by issuing MUR and shutdown notification 5363 * 5364 * Return: 0 always. 5365 */ 5366 static int __maybe_unused 5367 mpi3mr_suspend(struct device *dev) 5368 { 5369 struct pci_dev *pdev = to_pci_dev(dev); 5370 struct Scsi_Host *shost = pci_get_drvdata(pdev); 5371 struct mpi3mr_ioc *mrioc; 5372 5373 if (!shost) 5374 return 0; 5375 5376 mrioc = shost_priv(shost); 5377 while (mrioc->reset_in_progress || mrioc->is_driver_loading) 5378 ssleep(1); 5379 mrioc->stop_drv_processing = 1; 5380 mpi3mr_cleanup_fwevt_list(mrioc); 5381 scsi_block_requests(shost); 5382 mpi3mr_stop_watchdog(mrioc); 5383 mpi3mr_cleanup_ioc(mrioc); 5384 5385 ioc_info(mrioc, "pdev=0x%p, slot=%s, entering operating state\n", 5386 pdev, pci_name(pdev)); 5387 mpi3mr_cleanup_resources(mrioc); 5388 5389 return 0; 5390 } 5391 5392 /** 5393 * mpi3mr_resume - PCI power management resume callback 5394 * @dev: Device struct 5395 * 5396 * Restore the power state to D0 and reinitialize the controller 5397 * and resume I/O operations to the target devices 5398 * 5399 * Return: 0 on success, non-zero on failure 5400 */ 5401 static int __maybe_unused 5402 mpi3mr_resume(struct device *dev) 5403 { 5404 struct pci_dev *pdev = to_pci_dev(dev); 5405 struct Scsi_Host *shost = pci_get_drvdata(pdev); 5406 struct mpi3mr_ioc *mrioc; 5407 pci_power_t device_state = pdev->current_state; 5408 int r; 5409 5410 if (!shost) 5411 return 0; 5412 5413 mrioc = shost_priv(shost); 5414 5415 ioc_info(mrioc, "pdev=0x%p, slot=%s, previous operating state [D%d]\n", 5416 pdev, pci_name(pdev), device_state); 5417 mrioc->pdev = pdev; 5418 mrioc->cpu_count = num_online_cpus(); 5419 r = mpi3mr_setup_resources(mrioc); 5420 if (r) { 5421 ioc_info(mrioc, "%s: Setup resources failed[%d]\n", 5422 __func__, r); 5423 return r; 5424 } 5425 5426 mrioc->stop_drv_processing = 0; 5427 mpi3mr_invalidate_devhandles(mrioc); 5428 mpi3mr_free_enclosure_list(mrioc); 5429 mpi3mr_memset_buffers(mrioc); 5430 r = mpi3mr_reinit_ioc(mrioc, 1); 5431 if (r) { 5432 ioc_err(mrioc, "resuming controller failed[%d]\n", r); 5433 return r; 5434 } 5435 ssleep(MPI3MR_RESET_TOPOLOGY_SETTLE_TIME); 5436 scsi_unblock_requests(shost); 5437 mrioc->device_refresh_on = 0; 5438 mpi3mr_start_watchdog(mrioc); 5439 5440 return 0; 5441 } 5442 5443 static const struct pci_device_id mpi3mr_pci_id_table[] = { 5444 { 5445 PCI_DEVICE_SUB(MPI3_MFGPAGE_VENDORID_BROADCOM, 5446 MPI3_MFGPAGE_DEVID_SAS4116, PCI_ANY_ID, PCI_ANY_ID) 5447 }, 5448 { 5449 PCI_DEVICE_SUB(MPI3_MFGPAGE_VENDORID_BROADCOM, 5450 MPI3_MFGPAGE_DEVID_SAS5116_MPI, PCI_ANY_ID, PCI_ANY_ID) 5451 }, 5452 { 
5453 PCI_DEVICE_SUB(MPI3_MFGPAGE_VENDORID_BROADCOM, 5454 MPI3_MFGPAGE_DEVID_SAS5116_MPI_MGMT, PCI_ANY_ID, PCI_ANY_ID) 5455 }, 5456 { 0 } 5457 }; 5458 MODULE_DEVICE_TABLE(pci, mpi3mr_pci_id_table); 5459 5460 static SIMPLE_DEV_PM_OPS(mpi3mr_pm_ops, mpi3mr_suspend, mpi3mr_resume); 5461 5462 static struct pci_driver mpi3mr_pci_driver = { 5463 .name = MPI3MR_DRIVER_NAME, 5464 .id_table = mpi3mr_pci_id_table, 5465 .probe = mpi3mr_probe, 5466 .remove = mpi3mr_remove, 5467 .shutdown = mpi3mr_shutdown, 5468 .driver.pm = &mpi3mr_pm_ops, 5469 }; 5470 5471 static ssize_t event_counter_show(struct device_driver *dd, char *buf) 5472 { 5473 return sprintf(buf, "%llu\n", atomic64_read(&event_counter)); 5474 } 5475 static DRIVER_ATTR_RO(event_counter); 5476 5477 static int __init mpi3mr_init(void) 5478 { 5479 int ret_val; 5480 5481 pr_info("Loading %s version %s\n", MPI3MR_DRIVER_NAME, 5482 MPI3MR_DRIVER_VERSION); 5483 5484 mpi3mr_transport_template = 5485 sas_attach_transport(&mpi3mr_transport_functions); 5486 if (!mpi3mr_transport_template) { 5487 pr_err("%s failed to load due to sas transport attach failure\n", 5488 MPI3MR_DRIVER_NAME); 5489 return -ENODEV; 5490 } 5491 5492 ret_val = pci_register_driver(&mpi3mr_pci_driver); 5493 if (ret_val) { 5494 pr_err("%s failed to load due to pci register driver failure\n", 5495 MPI3MR_DRIVER_NAME); 5496 goto err_pci_reg_fail; 5497 } 5498 5499 ret_val = driver_create_file(&mpi3mr_pci_driver.driver, 5500 &driver_attr_event_counter); 5501 if (ret_val) 5502 goto err_event_counter; 5503 5504 return ret_val; 5505 5506 err_event_counter: 5507 pci_unregister_driver(&mpi3mr_pci_driver); 5508 5509 err_pci_reg_fail: 5510 sas_release_transport(mpi3mr_transport_template); 5511 return ret_val; 5512 } 5513 5514 static void __exit mpi3mr_exit(void) 5515 { 5516 if (warn_non_secure_ctlr) 5517 pr_warn( 5518 "Unloading %s version %s while managing a non secure controller\n", 5519 MPI3MR_DRIVER_NAME, MPI3MR_DRIVER_VERSION); 5520 else 5521 pr_info("Unloading %s version %s\n", MPI3MR_DRIVER_NAME, 5522 MPI3MR_DRIVER_VERSION); 5523 5524 driver_remove_file(&mpi3mr_pci_driver.driver, 5525 &driver_attr_event_counter); 5526 pci_unregister_driver(&mpi3mr_pci_driver); 5527 sas_release_transport(mpi3mr_transport_template); 5528 } 5529 5530 module_init(mpi3mr_init); 5531 module_exit(mpi3mr_exit); 5532