// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Driver for Broadcom MPI3 Storage Controllers
 *
 * Copyright (C) 2017-2023 Broadcom Inc.
 *  (mailto: mpi3mr-linuxdrv.pdl@broadcom.com)
 *
 */

#include "mpi3mr.h"
#include <linux/idr.h>

/* global driver scope variables */
LIST_HEAD(mrioc_list);
DEFINE_SPINLOCK(mrioc_list_lock);
static DEFINE_IDA(mrioc_ida);
static int warn_non_secure_ctlr;
atomic64_t event_counter;

MODULE_AUTHOR(MPI3MR_DRIVER_AUTHOR);
MODULE_DESCRIPTION(MPI3MR_DRIVER_DESC);
MODULE_LICENSE(MPI3MR_DRIVER_LICENSE);
MODULE_VERSION(MPI3MR_DRIVER_VERSION);

/* Module parameters */
int prot_mask = -1;
module_param(prot_mask, int, 0);
MODULE_PARM_DESC(prot_mask, "Host protection capabilities mask, def=0x07");

static int prot_guard_mask = 3;
module_param(prot_guard_mask, int, 0);
MODULE_PARM_DESC(prot_guard_mask, "Host protection guard mask, def=3");
static int logging_level;
module_param(logging_level, int, 0);
MODULE_PARM_DESC(logging_level,
	"bits for enabling additional logging info (default=0)");
static int max_sgl_entries = MPI3MR_DEFAULT_SGL_ENTRIES;
module_param(max_sgl_entries, int, 0444);
MODULE_PARM_DESC(max_sgl_entries,
	"Preferred max number of SG entries to be used for a single I/O\n"
	"The actual value will be determined by the driver\n"
	"(Minimum=256, Maximum=2048, default=256)");

/* Forward declarations */
static void mpi3mr_send_event_ack(struct mpi3mr_ioc *mrioc, u8 event,
	struct mpi3mr_drv_cmd *cmdparam, u32 event_ctx);

#define MPI3MR_DRIVER_EVENT_TG_QD_REDUCTION	(0xFFFF)

#define MPI3_EVENT_WAIT_FOR_DEVICES_TO_REFRESH	(0xFFFE)

/**
 * mpi3mr_host_tag_for_scmd - Get host tag for a scmd
 * @mrioc: Adapter instance reference
 * @scmd: SCSI command reference
 *
 * Calculate the host tag based on block tag for a given scmd.
 *
 * Return: Valid host tag or MPI3MR_HOSTTAG_INVALID.
 */
static u16 mpi3mr_host_tag_for_scmd(struct mpi3mr_ioc *mrioc,
	struct scsi_cmnd *scmd)
{
	struct scmd_priv *priv = NULL;
	u32 unique_tag;
	u16 host_tag, hw_queue;

	unique_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));

	hw_queue = blk_mq_unique_tag_to_hwq(unique_tag);
	if (hw_queue >= mrioc->num_op_reply_q)
		return MPI3MR_HOSTTAG_INVALID;
	host_tag = blk_mq_unique_tag_to_tag(unique_tag);

	if (WARN_ON(host_tag >= mrioc->max_host_ios))
		return MPI3MR_HOSTTAG_INVALID;

	priv = scsi_cmd_priv(scmd);
	/* host_tag 0 is invalid hence incrementing by 1 */
	priv->host_tag = host_tag + 1;
	priv->scmd = scmd;
	priv->in_lld_scope = 1;
	priv->req_q_idx = hw_queue;
	priv->meta_chain_idx = -1;
	priv->chain_idx = -1;
	priv->meta_sg_valid = 0;
	return priv->host_tag;
}
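/*
 * Illustrative tag round-trip (values assumed, not from any specific
 * setup): blk_mq_unique_tag() packs the hardware queue index in the
 * upper 16 bits and the per-queue block tag in the lower 16 bits
 * (BLK_MQ_UNIQUE_TAG_BITS == 16). A unique tag of 0x0002001f thus
 * decodes to hw_queue 2 and block tag 0x1f, and the cached host tag
 * becomes 0x20 because host tag 0 is reserved as invalid; the inverse
 * mapping is performed by mpi3mr_scmd_from_host_tag() below.
 */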
/**
 * mpi3mr_scmd_from_host_tag - Get SCSI command from host tag
 * @mrioc: Adapter instance reference
 * @host_tag: Host tag
 * @qidx: Operational queue index
 *
 * Identify the block tag from the host tag and queue index and
 * retrieve associated scsi command using scsi_host_find_tag().
 *
 * Return: SCSI command reference or NULL.
 */
static struct scsi_cmnd *mpi3mr_scmd_from_host_tag(
	struct mpi3mr_ioc *mrioc, u16 host_tag, u16 qidx)
{
	struct scsi_cmnd *scmd = NULL;
	struct scmd_priv *priv = NULL;
	u32 unique_tag = host_tag - 1;

	if (WARN_ON(host_tag > mrioc->max_host_ios))
		goto out;

	unique_tag |= (qidx << BLK_MQ_UNIQUE_TAG_BITS);

	scmd = scsi_host_find_tag(mrioc->shost, unique_tag);
	if (scmd) {
		priv = scsi_cmd_priv(scmd);
		if (!priv->in_lld_scope)
			scmd = NULL;
	}
out:
	return scmd;
}

/**
 * mpi3mr_clear_scmd_priv - Cleanup SCSI command private data
 * @mrioc: Adapter instance reference
 * @scmd: SCSI command reference
 *
 * Invalidate the SCSI command private data to mark the command
 * is not in LLD scope anymore.
 *
 * Return: Nothing.
 */
static void mpi3mr_clear_scmd_priv(struct mpi3mr_ioc *mrioc,
	struct scsi_cmnd *scmd)
{
	struct scmd_priv *priv = NULL;

	priv = scsi_cmd_priv(scmd);

	if (WARN_ON(priv->in_lld_scope == 0))
		return;
	priv->host_tag = MPI3MR_HOSTTAG_INVALID;
	priv->req_q_idx = 0xFFFF;
	priv->scmd = NULL;
	priv->in_lld_scope = 0;
	priv->meta_sg_valid = 0;
	if (priv->chain_idx >= 0) {
		clear_bit(priv->chain_idx, mrioc->chain_bitmap);
		priv->chain_idx = -1;
	}
	if (priv->meta_chain_idx >= 0) {
		clear_bit(priv->meta_chain_idx, mrioc->chain_bitmap);
		priv->meta_chain_idx = -1;
	}
}

static void mpi3mr_dev_rmhs_send_tm(struct mpi3mr_ioc *mrioc, u16 handle,
	struct mpi3mr_drv_cmd *cmdparam, u8 iou_rc);
static void mpi3mr_fwevt_worker(struct work_struct *work);

/**
 * mpi3mr_fwevt_free - firmware event memory deallocator
 * @r: k reference pointer of the firmware event
 *
 * Free firmware event memory when no reference.
 */
static void mpi3mr_fwevt_free(struct kref *r)
{
	kfree(container_of(r, struct mpi3mr_fwevt, ref_count));
}

/**
 * mpi3mr_fwevt_get - k reference incrementor
 * @fwevt: Firmware event reference
 *
 * Increment firmware event reference count.
 */
static void mpi3mr_fwevt_get(struct mpi3mr_fwevt *fwevt)
{
	kref_get(&fwevt->ref_count);
}

/**
 * mpi3mr_fwevt_put - k reference decrementor
 * @fwevt: Firmware event reference
 *
 * Decrement firmware event reference count.
 */
static void mpi3mr_fwevt_put(struct mpi3mr_fwevt *fwevt)
{
	kref_put(&fwevt->ref_count, mpi3mr_fwevt_free);
}

/**
 * mpi3mr_alloc_fwevt - Allocate firmware event
 * @len: length of firmware event data to allocate
 *
 * Allocate firmware event with required length and initialize
 * the reference counter.
 *
 * Return: firmware event reference.
 */
static struct mpi3mr_fwevt *mpi3mr_alloc_fwevt(int len)
{
	struct mpi3mr_fwevt *fwevt;

	fwevt = kzalloc(sizeof(*fwevt) + len, GFP_ATOMIC);
	if (!fwevt)
		return NULL;

	kref_init(&fwevt->ref_count);
	return fwevt;
}
/**
 * mpi3mr_fwevt_add_to_list - Add firmware event to the list
 * @mrioc: Adapter instance reference
 * @fwevt: Firmware event reference
 *
 * Add the given firmware event to the firmware event list.
 *
 * Return: Nothing.
 */
static void mpi3mr_fwevt_add_to_list(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_fwevt *fwevt)
{
	unsigned long flags;

	if (!mrioc->fwevt_worker_thread)
		return;

	spin_lock_irqsave(&mrioc->fwevt_lock, flags);
	/* get fwevt reference count while adding it to fwevt_list */
	mpi3mr_fwevt_get(fwevt);
	INIT_LIST_HEAD(&fwevt->list);
	list_add_tail(&fwevt->list, &mrioc->fwevt_list);
	INIT_WORK(&fwevt->work, mpi3mr_fwevt_worker);
	/* get fwevt reference count while enqueueing it to worker queue */
	mpi3mr_fwevt_get(fwevt);
	queue_work(mrioc->fwevt_worker_thread, &fwevt->work);
	spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);
}

/**
 * mpi3mr_hdb_trigger_data_event - Add hdb trigger data event to
 * the list
 * @mrioc: Adapter instance reference
 * @event_data: Event data
 *
 * Add the given hdb trigger data event to the firmware event
 * list.
 *
 * Return: Nothing.
 */
void mpi3mr_hdb_trigger_data_event(struct mpi3mr_ioc *mrioc,
	struct trigger_event_data *event_data)
{
	struct mpi3mr_fwevt *fwevt;
	u16 sz = sizeof(*event_data);

	fwevt = mpi3mr_alloc_fwevt(sz);
	if (!fwevt) {
		ioc_warn(mrioc, "failed to queue hdb trigger data event\n");
		return;
	}

	fwevt->mrioc = mrioc;
	fwevt->event_id = MPI3MR_DRIVER_EVENT_PROCESS_TRIGGER;
	fwevt->send_ack = 0;
	fwevt->process_evt = 1;
	fwevt->evt_ctx = 0;
	fwevt->event_data_size = sz;
	memcpy(fwevt->event_data, event_data, sz);

	mpi3mr_fwevt_add_to_list(mrioc, fwevt);
}

/**
 * mpi3mr_fwevt_del_from_list - Delete firmware event from list
 * @mrioc: Adapter instance reference
 * @fwevt: Firmware event reference
 *
 * Delete the given firmware event from the firmware event list.
 *
 * Return: Nothing.
 */
static void mpi3mr_fwevt_del_from_list(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_fwevt *fwevt)
{
	unsigned long flags;

	spin_lock_irqsave(&mrioc->fwevt_lock, flags);
	if (!list_empty(&fwevt->list)) {
		list_del_init(&fwevt->list);
		/*
		 * Put fwevt reference count after
		 * removing it from fwevt_list
		 */
		mpi3mr_fwevt_put(fwevt);
	}
	spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);
}

/**
 * mpi3mr_dequeue_fwevt - Dequeue firmware event from the list
 * @mrioc: Adapter instance reference
 *
 * Dequeue a firmware event from the firmware event list.
 *
 * Return: firmware event.
 */
static struct mpi3mr_fwevt *mpi3mr_dequeue_fwevt(
	struct mpi3mr_ioc *mrioc)
{
	unsigned long flags;
	struct mpi3mr_fwevt *fwevt = NULL;

	spin_lock_irqsave(&mrioc->fwevt_lock, flags);
	if (!list_empty(&mrioc->fwevt_list)) {
		fwevt = list_first_entry(&mrioc->fwevt_list,
		    struct mpi3mr_fwevt, list);
		list_del_init(&fwevt->list);
		/*
		 * Put fwevt reference count after
		 * removing it from fwevt_list
		 */
		mpi3mr_fwevt_put(fwevt);
	}
	spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);

	return fwevt;
}
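/*
 * Reference counting sketch for a firmware event, as implemented by
 * the helpers above: mpi3mr_alloc_fwevt() starts the kref at 1,
 * mpi3mr_fwevt_add_to_list() takes one reference for fwevt_list and
 * one for the worker queue (count 3 while queued). The list reference
 * drops on dequeue/delete, the worker reference drops when the work
 * runs or is cancelled, and the initial reference drops once event
 * processing completes, finally invoking mpi3mr_fwevt_free().
 */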
/**
 * mpi3mr_cancel_work - cancel firmware event
 * @fwevt: fwevt object which needs to be canceled
 *
 * Return: Nothing.
 */
static void mpi3mr_cancel_work(struct mpi3mr_fwevt *fwevt)
{
	/*
	 * Wait on the fwevt to complete. If this returns 1, then
	 * the event was never executed.
	 *
	 * If it did execute, we wait for it to finish, and the put will
	 * happen from mpi3mr_process_fwevt()
	 */
	if (cancel_work_sync(&fwevt->work)) {
		/*
		 * Put fwevt reference count after
		 * dequeuing it from worker queue
		 */
		mpi3mr_fwevt_put(fwevt);
		/*
		 * Put fwevt reference count to neutralize
		 * kref_init increment
		 */
		mpi3mr_fwevt_put(fwevt);
	}
}

/**
 * mpi3mr_cleanup_fwevt_list - Cleanup firmware event list
 * @mrioc: Adapter instance reference
 *
 * Flush all pending firmware events from the firmware event
 * list.
 *
 * Return: Nothing.
 */
void mpi3mr_cleanup_fwevt_list(struct mpi3mr_ioc *mrioc)
{
	struct mpi3mr_fwevt *fwevt = NULL;

	if ((list_empty(&mrioc->fwevt_list) && !mrioc->current_event) ||
	    !mrioc->fwevt_worker_thread)
		return;

	while ((fwevt = mpi3mr_dequeue_fwevt(mrioc)))
		mpi3mr_cancel_work(fwevt);

	if (mrioc->current_event) {
		fwevt = mrioc->current_event;
		/*
		 * Don't call cancel_work_sync() for the fwevt work if
		 * the controller reset is invoked as part of processing
		 * the same fwevt work (or) when the worker thread is
		 * waiting for device add/remove APIs to complete.
		 * Otherwise we will see a deadlock.
		 */
		if (current_work() == &fwevt->work || fwevt->pending_at_sml) {
			fwevt->discard = 1;
			return;
		}

		mpi3mr_cancel_work(fwevt);
	}
}

/**
 * mpi3mr_queue_qd_reduction_event - Queue TG QD reduction event
 * @mrioc: Adapter instance reference
 * @tg: Throttle group information pointer
 *
 * Accessor to queue a synthetically generated driver event to
 * the event worker thread, the driver event will be used to
 * reduce the QD of all VDs in the TG from the worker thread.
 *
 * Return: None.
 */
static void mpi3mr_queue_qd_reduction_event(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_throttle_group_info *tg)
{
	struct mpi3mr_fwevt *fwevt;
	u16 sz = sizeof(struct mpi3mr_throttle_group_info *);

	/*
	 * If the QD reduction event is already queued due to throttle and if
	 * the QD is not restored through device info change event
	 * then don't queue further reduction events
	 */
	if (tg->fw_qd != tg->modified_qd)
		return;

	fwevt = mpi3mr_alloc_fwevt(sz);
	if (!fwevt) {
		ioc_warn(mrioc, "failed to queue TG QD reduction event\n");
		return;
	}
	*(struct mpi3mr_throttle_group_info **)fwevt->event_data = tg;
	fwevt->mrioc = mrioc;
	fwevt->event_id = MPI3MR_DRIVER_EVENT_TG_QD_REDUCTION;
	fwevt->send_ack = 0;
	fwevt->process_evt = 1;
	fwevt->evt_ctx = 0;
	fwevt->event_data_size = sz;
	tg->modified_qd = max_t(u16, (tg->fw_qd * tg->qd_reduction) / 10, 8);

	dprint_event_bh(mrioc, "qd reduction event queued for tg_id(%d)\n",
	    tg->id);
	mpi3mr_fwevt_add_to_list(mrioc, fwevt);
}
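/*
 * Worked example for the modified_qd computation above (illustrative
 * numbers only): fw_qd = 128 and qd_reduction = 3 gives
 * max_t(u16, (128 * 3) / 10, 8) = max(38, 8) = 38, while fw_qd = 20
 * gives max(6, 8) = 8, so the floor of 8 bounds how far a throttle
 * group's queue depth can be reduced.
 */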
/**
 * mpi3mr_invalidate_devhandles - Invalidate device handles
 * @mrioc: Adapter instance reference
 *
 * Invalidate the device handles in the target device structures.
 * Called post reset prior to reinitializing the controller.
 *
 * Return: Nothing.
 */
void mpi3mr_invalidate_devhandles(struct mpi3mr_ioc *mrioc)
{
	struct mpi3mr_tgt_dev *tgtdev;
	struct mpi3mr_stgt_priv_data *tgt_priv;

	list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
		tgtdev->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
		if (tgtdev->starget && tgtdev->starget->hostdata) {
			tgt_priv = tgtdev->starget->hostdata;
			tgt_priv->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
			tgt_priv->io_throttle_enabled = 0;
			tgt_priv->io_divert = 0;
			tgt_priv->throttle_group = NULL;
			tgt_priv->wslen = 0;
			if (tgtdev->host_exposed)
				atomic_set(&tgt_priv->block_io, 1);
		}
	}
}

/**
 * mpi3mr_print_scmd - print individual SCSI command
 * @rq: Block request
 * @data: Adapter instance reference
 *
 * Print the SCSI command details if it is in LLD scope.
 *
 * Return: true always.
 */
static bool mpi3mr_print_scmd(struct request *rq, void *data)
{
	struct mpi3mr_ioc *mrioc = (struct mpi3mr_ioc *)data;
	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
	struct scmd_priv *priv = NULL;

	if (scmd) {
		priv = scsi_cmd_priv(scmd);
		if (!priv->in_lld_scope)
			goto out;

		ioc_info(mrioc, "%s :Host Tag = %d, qid = %d\n",
		    __func__, priv->host_tag, priv->req_q_idx + 1);
		scsi_print_command(scmd);
	}

out:
	return true;
}

/**
 * mpi3mr_flush_scmd - Flush individual SCSI command
 * @rq: Block request
 * @data: Adapter instance reference
 *
 * Return the SCSI command to the upper layers if it is in LLD
 * scope.
 *
 * Return: true always.
 */
static bool mpi3mr_flush_scmd(struct request *rq, void *data)
{
	struct mpi3mr_ioc *mrioc = (struct mpi3mr_ioc *)data;
	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
	struct scmd_priv *priv = NULL;

	if (scmd) {
		priv = scsi_cmd_priv(scmd);
		if (!priv->in_lld_scope)
			goto out;

		if (priv->meta_sg_valid)
			dma_unmap_sg(&mrioc->pdev->dev, scsi_prot_sglist(scmd),
			    scsi_prot_sg_count(scmd), scmd->sc_data_direction);
		mpi3mr_clear_scmd_priv(mrioc, scmd);
		scsi_dma_unmap(scmd);
		scmd->result = DID_RESET << 16;
		scsi_print_command(scmd);
		scsi_done(scmd);
		mrioc->flush_io_count++;
	}

out:
	return true;
}
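/*
 * Note on the completion status used in mpi3mr_flush_scmd() above:
 * DID_RESET << 16 places the host byte in scmd->result, signalling to
 * the SCSI midlayer that the command was terminated by a reset and is
 * safe to retry once the controller is reinitialized.
 */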
/**
 * mpi3mr_count_dev_pending - Count commands pending for a lun
 * @rq: Block request
 * @data: SCSI device reference
 *
 * This is an iterator function called for each SCSI command in
 * a host and if the command is pending in the LLD for the
 * specific device(lun) then device specific pending I/O counter
 * is updated in the device structure.
 *
 * Return: true always.
 */
static bool mpi3mr_count_dev_pending(struct request *rq, void *data)
{
	struct scsi_device *sdev = (struct scsi_device *)data;
	struct mpi3mr_sdev_priv_data *sdev_priv_data = sdev->hostdata;
	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
	struct scmd_priv *priv;

	if (scmd) {
		priv = scsi_cmd_priv(scmd);
		if (!priv->in_lld_scope)
			goto out;
		if (scmd->device == sdev)
			sdev_priv_data->pend_count++;
	}

out:
	return true;
}

/**
 * mpi3mr_count_tgt_pending - Count commands pending for target
 * @rq: Block request
 * @data: SCSI target reference
 *
 * This is an iterator function called for each SCSI command in
 * a host and if the command is pending in the LLD for the
 * specific target then target specific pending I/O counter is
 * updated in the target structure.
 *
 * Return: true always.
 */
static bool mpi3mr_count_tgt_pending(struct request *rq, void *data)
{
	struct scsi_target *starget = (struct scsi_target *)data;
	struct mpi3mr_stgt_priv_data *stgt_priv_data = starget->hostdata;
	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
	struct scmd_priv *priv;

	if (scmd) {
		priv = scsi_cmd_priv(scmd);
		if (!priv->in_lld_scope)
			goto out;
		if (scmd->device && (scsi_target(scmd->device) == starget))
			stgt_priv_data->pend_count++;
	}

out:
	return true;
}

/**
 * mpi3mr_flush_host_io - Flush host I/Os
 * @mrioc: Adapter instance reference
 *
 * Flush all of the pending I/Os by calling
 * blk_mq_tagset_busy_iter() for each possible tag. This is
 * executed post controller reset.
 *
 * Return: Nothing.
 */
void mpi3mr_flush_host_io(struct mpi3mr_ioc *mrioc)
{
	struct Scsi_Host *shost = mrioc->shost;

	mrioc->flush_io_count = 0;
	ioc_info(mrioc, "%s :Flushing Host I/O cmds post reset\n", __func__);
	blk_mq_tagset_busy_iter(&shost->tag_set,
	    mpi3mr_flush_scmd, (void *)mrioc);
	ioc_info(mrioc, "%s :Flushed %d Host I/O cmds\n", __func__,
	    mrioc->flush_io_count);
}

/**
 * mpi3mr_flush_cmds_for_unrecovered_controller - Flush all pending cmds
 * @mrioc: Adapter instance reference
 *
 * This function waits for currently running IO poll threads to
 * exit and then flushes all host I/Os and any internal pending
 * cmds. This is executed after the controller is marked as
 * unrecoverable.
 *
 * Return: Nothing.
 */
void mpi3mr_flush_cmds_for_unrecovered_controller(struct mpi3mr_ioc *mrioc)
{
	struct Scsi_Host *shost = mrioc->shost;
	int i;

	if (!mrioc->unrecoverable)
		return;

	if (mrioc->op_reply_qinfo) {
		for (i = 0; i < mrioc->num_queues; i++) {
			while (atomic_read(&mrioc->op_reply_qinfo[i].in_use))
				udelay(500);
			atomic_set(&mrioc->op_reply_qinfo[i].pend_ios, 0);
		}
	}
	mrioc->flush_io_count = 0;
	blk_mq_tagset_busy_iter(&shost->tag_set,
	    mpi3mr_flush_scmd, (void *)mrioc);
	mpi3mr_flush_delayed_cmd_lists(mrioc);
	mpi3mr_flush_drv_cmds(mrioc);
}
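/*
 * Both flush paths above run through blk_mq_tagset_busy_iter(), which
 * calls the supplied function once for every driver-owned request in
 * the tag set; returning true keeps the iteration going, which is why
 * the iterator callbacks in this file always return true.
 */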
/**
 * mpi3mr_alloc_tgtdev - target device allocator
 *
 * Allocate target device instance and initialize the reference
 * count.
 *
 * Return: target device instance.
 */
static struct mpi3mr_tgt_dev *mpi3mr_alloc_tgtdev(void)
{
	struct mpi3mr_tgt_dev *tgtdev;

	tgtdev = kzalloc(sizeof(*tgtdev), GFP_ATOMIC);
	if (!tgtdev)
		return NULL;
	kref_init(&tgtdev->ref_count);
	return tgtdev;
}

/**
 * mpi3mr_tgtdev_add_to_list - Add tgtdevice to the list
 * @mrioc: Adapter instance reference
 * @tgtdev: Target device
 *
 * Add the target device to the target device list.
 *
 * Return: Nothing.
 */
static void mpi3mr_tgtdev_add_to_list(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_tgt_dev *tgtdev)
{
	unsigned long flags;

	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
	mpi3mr_tgtdev_get(tgtdev);
	INIT_LIST_HEAD(&tgtdev->list);
	list_add_tail(&tgtdev->list, &mrioc->tgtdev_list);
	tgtdev->state = MPI3MR_DEV_CREATED;
	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
}

/**
 * mpi3mr_tgtdev_del_from_list - Delete tgtdevice from the list
 * @mrioc: Adapter instance reference
 * @tgtdev: Target device
 * @must_delete: Must delete the target device from the list irrespective
 * of the device state.
 *
 * Remove the target device from the target device list.
 *
 * Return: Nothing.
 */
static void mpi3mr_tgtdev_del_from_list(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_tgt_dev *tgtdev, bool must_delete)
{
	unsigned long flags;

	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
	if ((tgtdev->state == MPI3MR_DEV_REMOVE_HS_STARTED) || (must_delete == true)) {
		if (!list_empty(&tgtdev->list)) {
			list_del_init(&tgtdev->list);
			tgtdev->state = MPI3MR_DEV_DELETED;
			mpi3mr_tgtdev_put(tgtdev);
		}
	}
	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
}

/**
 * __mpi3mr_get_tgtdev_by_handle - Get tgtdev from device handle
 * @mrioc: Adapter instance reference
 * @handle: Device handle
 *
 * Accessor to retrieve target device from the device handle.
 * Non Lock version.
 *
 * Return: Target device reference.
 */
static struct mpi3mr_tgt_dev *__mpi3mr_get_tgtdev_by_handle(
	struct mpi3mr_ioc *mrioc, u16 handle)
{
	struct mpi3mr_tgt_dev *tgtdev;

	assert_spin_locked(&mrioc->tgtdev_lock);
	list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list)
		if (tgtdev->dev_handle == handle)
			goto found_tgtdev;
	return NULL;

found_tgtdev:
	mpi3mr_tgtdev_get(tgtdev);
	return tgtdev;
}

/**
 * mpi3mr_get_tgtdev_by_handle - Get tgtdev from device handle
 * @mrioc: Adapter instance reference
 * @handle: Device handle
 *
 * Accessor to retrieve target device from the device handle.
 * Lock version.
 *
 * Return: Target device reference.
 */
struct mpi3mr_tgt_dev *mpi3mr_get_tgtdev_by_handle(
	struct mpi3mr_ioc *mrioc, u16 handle)
{
	struct mpi3mr_tgt_dev *tgtdev;
	unsigned long flags;

	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
	tgtdev = __mpi3mr_get_tgtdev_by_handle(mrioc, handle);
	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
	return tgtdev;
}
/**
 * __mpi3mr_get_tgtdev_by_perst_id - Get tgtdev from persist ID
 * @mrioc: Adapter instance reference
 * @persist_id: Persistent ID
 *
 * Accessor to retrieve target device from the Persistent ID.
 * Non Lock version.
 *
 * Return: Target device reference.
 */
static struct mpi3mr_tgt_dev *__mpi3mr_get_tgtdev_by_perst_id(
	struct mpi3mr_ioc *mrioc, u16 persist_id)
{
	struct mpi3mr_tgt_dev *tgtdev;

	assert_spin_locked(&mrioc->tgtdev_lock);
	list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list)
		if (tgtdev->perst_id == persist_id)
			goto found_tgtdev;
	return NULL;

found_tgtdev:
	mpi3mr_tgtdev_get(tgtdev);
	return tgtdev;
}

/**
 * mpi3mr_get_tgtdev_by_perst_id - Get tgtdev from persistent ID
 * @mrioc: Adapter instance reference
 * @persist_id: Persistent ID
 *
 * Accessor to retrieve target device from the Persistent ID.
 * Lock version.
 *
 * Return: Target device reference.
 */
static struct mpi3mr_tgt_dev *mpi3mr_get_tgtdev_by_perst_id(
	struct mpi3mr_ioc *mrioc, u16 persist_id)
{
	struct mpi3mr_tgt_dev *tgtdev;
	unsigned long flags;

	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
	tgtdev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, persist_id);
	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
	return tgtdev;
}

/**
 * __mpi3mr_get_tgtdev_from_tgtpriv - Get tgtdev from tgt private
 * @mrioc: Adapter instance reference
 * @tgt_priv: Target private data
 *
 * Accessor to return target device from the target private
 * data. Non Lock version.
 *
 * Return: Target device reference.
 */
static struct mpi3mr_tgt_dev *__mpi3mr_get_tgtdev_from_tgtpriv(
	struct mpi3mr_ioc *mrioc, struct mpi3mr_stgt_priv_data *tgt_priv)
{
	struct mpi3mr_tgt_dev *tgtdev;

	assert_spin_locked(&mrioc->tgtdev_lock);
	tgtdev = tgt_priv->tgt_dev;
	if (tgtdev)
		mpi3mr_tgtdev_get(tgtdev);
	return tgtdev;
}

/**
 * mpi3mr_set_io_divert_for_all_vd_in_tg - set divert for TG VDs
 * @mrioc: Adapter instance reference
 * @tg: Throttle group information pointer
 * @divert_value: 1 or 0
 *
 * Accessor to set io_divert flag for each device associated
 * with the given throttle group with the given value.
 *
 * Return: None.
 */
static void mpi3mr_set_io_divert_for_all_vd_in_tg(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_throttle_group_info *tg, u8 divert_value)
{
	unsigned long flags;
	struct mpi3mr_tgt_dev *tgtdev;
	struct mpi3mr_stgt_priv_data *tgt_priv;

	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
	list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
		if (tgtdev->starget && tgtdev->starget->hostdata) {
			tgt_priv = tgtdev->starget->hostdata;
			if (tgt_priv->throttle_group == tg)
				tgt_priv->io_divert = divert_value;
		}
	}
	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
}
"addition" : "removal")); 889 ioc_notice(mrioc, "completed after reset, verify whether the exposed devices\n"); 890 ioc_notice(mrioc, "are matched with attached devices for correctness\n"); 891 } 892 893 /** 894 * mpi3mr_remove_tgtdev_from_host - Remove dev from upper layers 895 * @mrioc: Adapter instance reference 896 * @tgtdev: Target device structure 897 * 898 * Checks whether the device is exposed to upper layers and if it 899 * is then remove the device from upper layers by calling 900 * scsi_remove_target(). 901 * 902 * Return: 0 on success, non zero on failure. 903 */ 904 void mpi3mr_remove_tgtdev_from_host(struct mpi3mr_ioc *mrioc, 905 struct mpi3mr_tgt_dev *tgtdev) 906 { 907 struct mpi3mr_stgt_priv_data *tgt_priv; 908 909 ioc_info(mrioc, "%s :Removing handle(0x%04x), wwid(0x%016llx)\n", 910 __func__, tgtdev->dev_handle, (unsigned long long)tgtdev->wwid); 911 if (tgtdev->starget && tgtdev->starget->hostdata) { 912 tgt_priv = tgtdev->starget->hostdata; 913 atomic_set(&tgt_priv->block_io, 0); 914 tgt_priv->dev_handle = MPI3MR_INVALID_DEV_HANDLE; 915 } 916 917 if (!mrioc->sas_transport_enabled || (tgtdev->dev_type != 918 MPI3_DEVICE_DEVFORM_SAS_SATA) || tgtdev->non_stl) { 919 if (tgtdev->starget) { 920 if (mrioc->current_event) 921 mrioc->current_event->pending_at_sml = 1; 922 scsi_remove_target(&tgtdev->starget->dev); 923 tgtdev->host_exposed = 0; 924 if (mrioc->current_event) { 925 mrioc->current_event->pending_at_sml = 0; 926 if (mrioc->current_event->discard) { 927 mpi3mr_print_device_event_notice(mrioc, 928 false); 929 return; 930 } 931 } 932 } 933 } else 934 mpi3mr_remove_tgtdev_from_sas_transport(mrioc, tgtdev); 935 mpi3mr_global_trigger(mrioc, 936 MPI3_DRIVER2_GLOBALTRIGGER_DEVICE_REMOVAL_ENABLED); 937 938 ioc_info(mrioc, "%s :Removed handle(0x%04x), wwid(0x%016llx)\n", 939 __func__, tgtdev->dev_handle, (unsigned long long)tgtdev->wwid); 940 } 941 942 /** 943 * mpi3mr_report_tgtdev_to_host - Expose device to upper layers 944 * @mrioc: Adapter instance reference 945 * @perst_id: Persistent ID of the device 946 * 947 * Checks whether the device can be exposed to upper layers and 948 * if it is not then expose the device to upper layers by 949 * calling scsi_scan_target(). 950 * 951 * Return: 0 on success, non zero on failure. 
/**
 * mpi3mr_report_tgtdev_to_host - Expose device to upper layers
 * @mrioc: Adapter instance reference
 * @perst_id: Persistent ID of the device
 *
 * Checks whether the device can be exposed to the upper layers
 * and if it is not already exposed, exposes it to the upper
 * layers by calling scsi_scan_target().
 *
 * Return: 0 on success, non zero on failure.
 */
static int mpi3mr_report_tgtdev_to_host(struct mpi3mr_ioc *mrioc,
	u16 perst_id)
{
	int retval = 0;
	struct mpi3mr_tgt_dev *tgtdev;

	if (mrioc->reset_in_progress || mrioc->pci_err_recovery)
		return -1;

	tgtdev = mpi3mr_get_tgtdev_by_perst_id(mrioc, perst_id);
	if (!tgtdev) {
		retval = -1;
		goto out;
	}
	if (tgtdev->is_hidden || tgtdev->host_exposed) {
		retval = -1;
		goto out;
	}
	if (!mrioc->sas_transport_enabled || (tgtdev->dev_type !=
	    MPI3_DEVICE_DEVFORM_SAS_SATA) || tgtdev->non_stl) {
		tgtdev->host_exposed = 1;
		if (mrioc->current_event)
			mrioc->current_event->pending_at_sml = 1;
		scsi_scan_target(&mrioc->shost->shost_gendev,
		    mrioc->scsi_device_channel, tgtdev->perst_id,
		    SCAN_WILD_CARD, SCSI_SCAN_INITIAL);
		if (!tgtdev->starget)
			tgtdev->host_exposed = 0;
		if (mrioc->current_event) {
			mrioc->current_event->pending_at_sml = 0;
			if (mrioc->current_event->discard) {
				mpi3mr_print_device_event_notice(mrioc, true);
				goto out;
			}
		}
	} else
		mpi3mr_report_tgtdev_to_sas_transport(mrioc, tgtdev);
out:
	if (tgtdev)
		mpi3mr_tgtdev_put(tgtdev);

	return retval;
}

/**
 * mpi3mr_change_queue_depth - Change QD callback handler
 * @sdev: SCSI device reference
 * @q_depth: Queue depth
 *
 * Validate and limit QD and call scsi_change_queue_depth.
 *
 * Return: return value of scsi_change_queue_depth
 */
static int mpi3mr_change_queue_depth(struct scsi_device *sdev,
	int q_depth)
{
	struct scsi_target *starget = scsi_target(sdev);
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	int retval = 0;

	if (!sdev->tagged_supported)
		q_depth = 1;
	if (q_depth > shost->can_queue)
		q_depth = shost->can_queue;
	else if (!q_depth)
		q_depth = MPI3MR_DEFAULT_SDEV_QD;
	retval = scsi_change_queue_depth(sdev, q_depth);
	sdev->max_queue_depth = sdev->queue_depth;

	return retval;
}

static void mpi3mr_configure_nvme_dev(struct mpi3mr_tgt_dev *tgt_dev,
	struct queue_limits *lim)
{
	u8 pgsz = tgt_dev->dev_spec.pcie_inf.pgsz ? : MPI3MR_DEFAULT_PGSZEXP;

	lim->max_hw_sectors = tgt_dev->dev_spec.pcie_inf.mdts / 512;
	lim->virt_boundary_mask = (1 << pgsz) - 1;
}

static void mpi3mr_configure_tgt_dev(struct mpi3mr_tgt_dev *tgt_dev,
	struct queue_limits *lim)
{
	if (tgt_dev->dev_type == MPI3_DEVICE_DEVFORM_PCIE &&
	    (tgt_dev->dev_spec.pcie_inf.dev_info &
	     MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) ==
	     MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE)
		mpi3mr_configure_nvme_dev(tgt_dev, lim);
}
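/*
 * Illustrative numbers for mpi3mr_configure_nvme_dev() above (assumed,
 * not from any specific device): an mdts of 1 MiB maps to
 * max_hw_sectors = 1048576 / 512 = 2048, and a page size exponent of
 * 12 gives virt_boundary_mask = (1 << 12) - 1 = 0xfff, so
 * scatter/gather elements must line up on 4 KiB boundaries in line
 * with NVMe PRP alignment rules.
 */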
/**
 * mpi3mr_update_sdev - Update SCSI device information
 * @sdev: SCSI device reference
 * @data: target device reference
 *
 * This is an iterator function called for each SCSI device in a
 * target to update the target specific information into each
 * SCSI device.
 *
 * Return: Nothing.
 */
static void
mpi3mr_update_sdev(struct scsi_device *sdev, void *data)
{
	struct mpi3mr_tgt_dev *tgtdev;
	struct queue_limits lim;

	tgtdev = (struct mpi3mr_tgt_dev *)data;
	if (!tgtdev)
		return;

	mpi3mr_change_queue_depth(sdev, tgtdev->q_depth);

	lim = queue_limits_start_update(sdev->request_queue);
	mpi3mr_configure_tgt_dev(tgtdev, &lim);
	WARN_ON_ONCE(queue_limits_commit_update(sdev->request_queue, &lim));
}

/**
 * mpi3mr_refresh_tgtdevs - Refresh target device exposure
 * @mrioc: Adapter instance reference
 *
 * This is executed post controller reset to identify any
 * missing devices during reset and remove from the upper layers
 * or expose any newly detected device to the upper layers.
 *
 * Return: Nothing.
 */
static void mpi3mr_refresh_tgtdevs(struct mpi3mr_ioc *mrioc)
{
	struct mpi3mr_tgt_dev *tgtdev, *tgtdev_next;
	struct mpi3mr_stgt_priv_data *tgt_priv;

	dprint_reset(mrioc, "refresh target devices: check for removals\n");
	list_for_each_entry_safe(tgtdev, tgtdev_next, &mrioc->tgtdev_list,
	    list) {
		if (((tgtdev->dev_handle == MPI3MR_INVALID_DEV_HANDLE) ||
		     tgtdev->is_hidden) &&
		     tgtdev->host_exposed && tgtdev->starget &&
		     tgtdev->starget->hostdata) {
			tgt_priv = tgtdev->starget->hostdata;
			tgt_priv->dev_removed = 1;
			atomic_set(&tgt_priv->block_io, 0);
		}
	}

	list_for_each_entry_safe(tgtdev, tgtdev_next, &mrioc->tgtdev_list,
	    list) {
		if (tgtdev->dev_handle == MPI3MR_INVALID_DEV_HANDLE) {
			dprint_reset(mrioc, "removing target device with perst_id(%d)\n",
			    tgtdev->perst_id);
			if (tgtdev->host_exposed)
				mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
			mpi3mr_tgtdev_del_from_list(mrioc, tgtdev, true);
			mpi3mr_tgtdev_put(tgtdev);
		} else if (tgtdev->is_hidden && tgtdev->host_exposed) {
			dprint_reset(mrioc, "hiding target device with perst_id(%d)\n",
			    tgtdev->perst_id);
			mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
		}
	}

	tgtdev = NULL;
	list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
		if ((tgtdev->dev_handle != MPI3MR_INVALID_DEV_HANDLE) &&
		    !tgtdev->is_hidden) {
			if (!tgtdev->host_exposed)
				mpi3mr_report_tgtdev_to_host(mrioc,
				    tgtdev->perst_id);
			else if (tgtdev->starget)
				starget_for_each_device(tgtdev->starget,
				    (void *)tgtdev, mpi3mr_update_sdev);
		}
	}
}
/**
 * mpi3mr_update_tgtdev - Update cached target device information
 * @mrioc: Adapter instance reference
 * @tgtdev: Target device internal structure
 * @dev_pg0: New device page0
 * @is_added: Flag to indicate the device is just added
 *
 * Update the information from the device page0 into the driver
 * cached target device structure.
 *
 * Return: Nothing.
 */
static void mpi3mr_update_tgtdev(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_tgt_dev *tgtdev, struct mpi3_device_page0 *dev_pg0,
	bool is_added)
{
	u16 flags = 0;
	struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL;
	struct mpi3mr_enclosure_node *enclosure_dev = NULL;
	u8 prot_mask = 0;

	tgtdev->perst_id = le16_to_cpu(dev_pg0->persistent_id);
	tgtdev->dev_handle = le16_to_cpu(dev_pg0->dev_handle);
	tgtdev->dev_type = dev_pg0->device_form;
	tgtdev->io_unit_port = dev_pg0->io_unit_port;
	tgtdev->encl_handle = le16_to_cpu(dev_pg0->enclosure_handle);
	tgtdev->parent_handle = le16_to_cpu(dev_pg0->parent_dev_handle);
	tgtdev->slot = le16_to_cpu(dev_pg0->slot);
	tgtdev->q_depth = le16_to_cpu(dev_pg0->queue_depth);
	tgtdev->wwid = le64_to_cpu(dev_pg0->wwid);
	tgtdev->devpg0_flag = le16_to_cpu(dev_pg0->flags);

	if (tgtdev->encl_handle)
		enclosure_dev = mpi3mr_enclosure_find_by_handle(mrioc,
		    tgtdev->encl_handle);
	if (enclosure_dev)
		tgtdev->enclosure_logical_id = le64_to_cpu(
		    enclosure_dev->pg0.enclosure_logical_id);

	flags = tgtdev->devpg0_flag;

	tgtdev->is_hidden = (flags & MPI3_DEVICE0_FLAGS_HIDDEN);

	if (is_added == true)
		tgtdev->io_throttle_enabled =
		    (flags & MPI3_DEVICE0_FLAGS_IO_THROTTLING_REQUIRED) ? 1 : 0;

	switch (flags & MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_MASK) {
	case MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_256_LB:
		tgtdev->wslen = MPI3MR_WRITE_SAME_MAX_LEN_256_BLKS;
		break;
	case MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_2048_LB:
		tgtdev->wslen = MPI3MR_WRITE_SAME_MAX_LEN_2048_BLKS;
		break;
	case MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_NO_LIMIT:
	default:
		tgtdev->wslen = 0;
		break;
	}

	if (tgtdev->starget && tgtdev->starget->hostdata) {
		scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *)
		    tgtdev->starget->hostdata;
		scsi_tgt_priv_data->perst_id = tgtdev->perst_id;
		scsi_tgt_priv_data->dev_handle = tgtdev->dev_handle;
		scsi_tgt_priv_data->dev_type = tgtdev->dev_type;
		scsi_tgt_priv_data->io_throttle_enabled =
		    tgtdev->io_throttle_enabled;
		if (is_added == true)
			atomic_set(&scsi_tgt_priv_data->block_io, 0);
		scsi_tgt_priv_data->wslen = tgtdev->wslen;
	}

	switch (dev_pg0->access_status) {
	case MPI3_DEVICE0_ASTATUS_NO_ERRORS:
	case MPI3_DEVICE0_ASTATUS_PREPARE:
	case MPI3_DEVICE0_ASTATUS_NEEDS_INITIALIZATION:
	case MPI3_DEVICE0_ASTATUS_DEVICE_MISSING_DELAY:
		break;
	default:
		tgtdev->is_hidden = 1;
		break;
	}

	switch (tgtdev->dev_type) {
	case MPI3_DEVICE_DEVFORM_SAS_SATA:
	{
		struct mpi3_device0_sas_sata_format *sasinf =
		    &dev_pg0->device_specific.sas_sata_format;
		u16 dev_info = le16_to_cpu(sasinf->device_info);

		tgtdev->dev_spec.sas_sata_inf.dev_info = dev_info;
		tgtdev->dev_spec.sas_sata_inf.sas_address =
		    le64_to_cpu(sasinf->sas_address);
		tgtdev->dev_spec.sas_sata_inf.phy_id = sasinf->phy_num;
		tgtdev->dev_spec.sas_sata_inf.attached_phy_id =
		    sasinf->attached_phy_identifier;
		if ((dev_info & MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_MASK) !=
		    MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_END_DEVICE)
			tgtdev->is_hidden = 1;
		else if (!(dev_info & (MPI3_SAS_DEVICE_INFO_STP_SATA_TARGET |
		    MPI3_SAS_DEVICE_INFO_SSP_TARGET)))
			tgtdev->is_hidden = 1;
		if (((tgtdev->devpg0_flag &
		    MPI3_DEVICE0_FLAGS_ATT_METHOD_DIR_ATTACHED) &&
		    (tgtdev->devpg0_flag &
		    MPI3_DEVICE0_FLAGS_ATT_METHOD_VIRTUAL)) ||
		    (tgtdev->parent_handle == 0xFFFF))
			tgtdev->non_stl = 1;
		if (tgtdev->dev_spec.sas_sata_inf.hba_port)
			tgtdev->dev_spec.sas_sata_inf.hba_port->port_id =
			    dev_pg0->io_unit_port;
		break;
	}
	case MPI3_DEVICE_DEVFORM_PCIE:
	{
		struct mpi3_device0_pcie_format *pcieinf =
		    &dev_pg0->device_specific.pcie_format;
		u16 dev_info = le16_to_cpu(pcieinf->device_info);

		tgtdev->dev_spec.pcie_inf.dev_info = dev_info;
		tgtdev->dev_spec.pcie_inf.capb =
		    le32_to_cpu(pcieinf->capabilities);
		tgtdev->dev_spec.pcie_inf.mdts = MPI3MR_DEFAULT_MDTS;
		/* 2^12 = 4096 */
		tgtdev->dev_spec.pcie_inf.pgsz = 12;
		if (dev_pg0->access_status == MPI3_DEVICE0_ASTATUS_NO_ERRORS) {
			tgtdev->dev_spec.pcie_inf.mdts =
			    le32_to_cpu(pcieinf->maximum_data_transfer_size);
			tgtdev->dev_spec.pcie_inf.pgsz = pcieinf->page_size;
			tgtdev->dev_spec.pcie_inf.reset_to =
			    max_t(u8, pcieinf->controller_reset_to,
			    MPI3MR_INTADMCMD_TIMEOUT);
			tgtdev->dev_spec.pcie_inf.abort_to =
			    max_t(u8, pcieinf->nvme_abort_to,
			    MPI3MR_INTADMCMD_TIMEOUT);
		}
		if (tgtdev->dev_spec.pcie_inf.mdts > (1024 * 1024))
			tgtdev->dev_spec.pcie_inf.mdts = (1024 * 1024);
		if (((dev_info & MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) !=
		    MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) &&
		    ((dev_info & MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) !=
		    MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_SCSI_DEVICE))
			tgtdev->is_hidden = 1;
		tgtdev->non_stl = 1;
		if (!mrioc->shost)
			break;
		prot_mask = scsi_host_get_prot(mrioc->shost);
		if (prot_mask & SHOST_DIX_TYPE0_PROTECTION) {
			scsi_host_set_prot(mrioc->shost, prot_mask & 0x77);
			ioc_info(mrioc,
			    "%s : Disabling DIX0 prot capability\n", __func__);
			ioc_info(mrioc,
			    "because HBA does not support DIX0 operation on NVME drives\n");
		}
		break;
	}
	case MPI3_DEVICE_DEVFORM_VD:
	{
		struct mpi3_device0_vd_format *vdinf =
		    &dev_pg0->device_specific.vd_format;
		struct mpi3mr_throttle_group_info *tg = NULL;
		u16 vdinf_io_throttle_group =
		    le16_to_cpu(vdinf->io_throttle_group);

		tgtdev->dev_spec.vd_inf.state = vdinf->vd_state;
		if (vdinf->vd_state == MPI3_DEVICE0_VD_STATE_OFFLINE)
			tgtdev->is_hidden = 1;
		tgtdev->non_stl = 1;
		tgtdev->dev_spec.vd_inf.tg_id = vdinf_io_throttle_group;
		tgtdev->dev_spec.vd_inf.tg_high =
		    le16_to_cpu(vdinf->io_throttle_group_high) * 2048;
		tgtdev->dev_spec.vd_inf.tg_low =
		    le16_to_cpu(vdinf->io_throttle_group_low) * 2048;
		if (vdinf_io_throttle_group < mrioc->num_io_throttle_group) {
			tg = mrioc->throttle_groups + vdinf_io_throttle_group;
			tg->id = vdinf_io_throttle_group;
			tg->high = tgtdev->dev_spec.vd_inf.tg_high;
			tg->low = tgtdev->dev_spec.vd_inf.tg_low;
			tg->qd_reduction =
			    tgtdev->dev_spec.vd_inf.tg_qd_reduction;
			if (is_added == true)
				tg->fw_qd = tgtdev->q_depth;
			tg->modified_qd = tgtdev->q_depth;
		}
		tgtdev->dev_spec.vd_inf.tg = tg;
		if (scsi_tgt_priv_data)
			scsi_tgt_priv_data->throttle_group = tg;
		break;
	}
	default:
		break;
	}
}
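/*
 * Note on the tg_high/tg_low scaling in the VD case above: the
 * firmware-reported throttle group limits are multiplied by 2048,
 * which suggests the driver accounts throttling in 512-byte sector
 * units (one firmware unit == 1 MiB). Treat the unit interpretation
 * as an assumption; only the x2048 scaling itself is from the code.
 */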
/**
 * mpi3mr_devstatuschg_evt_bh - DevStatusChange evt bottomhalf
 * @mrioc: Adapter instance reference
 * @fwevt: Firmware event information.
 *
 * Process Device status Change event and based on device's new
 * information, either expose the device to the upper layers, or
 * remove the device from upper layers.
 *
 * Return: Nothing.
 */
static void mpi3mr_devstatuschg_evt_bh(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_fwevt *fwevt)
{
	u16 dev_handle = 0;
	u8 uhide = 0, delete = 0, cleanup = 0;
	struct mpi3mr_tgt_dev *tgtdev = NULL;
	struct mpi3_event_data_device_status_change *evtdata =
	    (struct mpi3_event_data_device_status_change *)fwevt->event_data;

	dev_handle = le16_to_cpu(evtdata->dev_handle);
	ioc_info(mrioc,
	    "%s :device status change: handle(0x%04x): reason code(0x%x)\n",
	    __func__, dev_handle, evtdata->reason_code);
	switch (evtdata->reason_code) {
	case MPI3_EVENT_DEV_STAT_RC_HIDDEN:
		delete = 1;
		break;
	case MPI3_EVENT_DEV_STAT_RC_NOT_HIDDEN:
		uhide = 1;
		break;
	case MPI3_EVENT_DEV_STAT_RC_VD_NOT_RESPONDING:
		delete = 1;
		cleanup = 1;
		break;
	default:
		ioc_info(mrioc, "%s :Unhandled reason code(0x%x)\n", __func__,
		    evtdata->reason_code);
		break;
	}

	tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle);
	if (!tgtdev)
		goto out;
	if (uhide) {
		tgtdev->is_hidden = 0;
		if (!tgtdev->host_exposed)
			mpi3mr_report_tgtdev_to_host(mrioc, tgtdev->perst_id);
	}

	if (delete)
		mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);

	if (cleanup) {
		mpi3mr_tgtdev_del_from_list(mrioc, tgtdev, false);
		mpi3mr_tgtdev_put(tgtdev);
	}

out:
	if (tgtdev)
		mpi3mr_tgtdev_put(tgtdev);
}

/**
 * mpi3mr_devinfochg_evt_bh - DeviceInfoChange evt bottomhalf
 * @mrioc: Adapter instance reference
 * @dev_pg0: New device page0
 *
 * Process Device Info Change event and based on device's new
 * information, either expose the device to the upper layers, or
 * remove the device from upper layers or update the details of
 * the device.
 *
 * Return: Nothing.
 */
static void mpi3mr_devinfochg_evt_bh(struct mpi3mr_ioc *mrioc,
	struct mpi3_device_page0 *dev_pg0)
{
	struct mpi3mr_tgt_dev *tgtdev = NULL;
	u16 dev_handle = 0, perst_id = 0;

	perst_id = le16_to_cpu(dev_pg0->persistent_id);
	dev_handle = le16_to_cpu(dev_pg0->dev_handle);
	ioc_info(mrioc,
	    "%s :Device info change: handle(0x%04x): persist_id(0x%x)\n",
	    __func__, dev_handle, perst_id);
	tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle);
	if (!tgtdev)
		goto out;
	mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0, false);
	if (!tgtdev->is_hidden && !tgtdev->host_exposed)
		mpi3mr_report_tgtdev_to_host(mrioc, perst_id);
	if (tgtdev->is_hidden && tgtdev->host_exposed)
		mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
	if (!tgtdev->is_hidden && tgtdev->host_exposed && tgtdev->starget)
		starget_for_each_device(tgtdev->starget, (void *)tgtdev,
		    mpi3mr_update_sdev);
out:
	if (tgtdev)
		mpi3mr_tgtdev_put(tgtdev);
}
/**
 * mpi3mr_free_enclosure_list - release enclosures
 * @mrioc: Adapter instance reference
 *
 * Free memory allocated during enclosure add.
 *
 * Return: Nothing.
 */
void mpi3mr_free_enclosure_list(struct mpi3mr_ioc *mrioc)
{
	struct mpi3mr_enclosure_node *enclosure_dev, *enclosure_dev_next;

	list_for_each_entry_safe(enclosure_dev,
	    enclosure_dev_next, &mrioc->enclosure_list, list) {
		list_del(&enclosure_dev->list);
		kfree(enclosure_dev);
	}
}

/**
 * mpi3mr_enclosure_find_by_handle - enclosure search by handle
 * @mrioc: Adapter instance reference
 * @handle: Firmware device handle of the enclosure
 *
 * This searches for enclosure device based on handle, then returns the
 * enclosure object.
 *
 * Return: Enclosure object reference or NULL
 */
struct mpi3mr_enclosure_node *mpi3mr_enclosure_find_by_handle(
	struct mpi3mr_ioc *mrioc, u16 handle)
{
	struct mpi3mr_enclosure_node *enclosure_dev, *r = NULL;

	list_for_each_entry(enclosure_dev, &mrioc->enclosure_list, list) {
		if (le16_to_cpu(enclosure_dev->pg0.enclosure_handle) != handle)
			continue;
		r = enclosure_dev;
		goto out;
	}
out:
	return r;
}

/**
 * mpi3mr_process_trigger_data_event_bh - Process trigger event
 * data
 * @mrioc: Adapter instance reference
 * @event_data: Event data
 *
 * This function releases diag buffers or issues a diag fault
 * based on trigger conditions.
 *
 * Return: Nothing
 */
static void mpi3mr_process_trigger_data_event_bh(struct mpi3mr_ioc *mrioc,
	struct trigger_event_data *event_data)
{
	struct diag_buffer_desc *trace_hdb = event_data->trace_hdb;
	struct diag_buffer_desc *fw_hdb = event_data->fw_hdb;
	unsigned long flags;
	int retval = 0;
	u8 trigger_type = event_data->trigger_type;
	union mpi3mr_trigger_data *trigger_data =
	    &event_data->trigger_specific_data;

	if (event_data->snapdump) {
		if (trace_hdb)
			mpi3mr_set_trigger_data_in_hdb(trace_hdb, trigger_type,
			    trigger_data, 1);
		if (fw_hdb)
			mpi3mr_set_trigger_data_in_hdb(fw_hdb, trigger_type,
			    trigger_data, 1);
		mpi3mr_soft_reset_handler(mrioc,
		    MPI3MR_RESET_FROM_TRIGGER, 1);
		return;
	}

	if (trace_hdb) {
		retval = mpi3mr_issue_diag_buf_release(mrioc, trace_hdb);
		if (!retval) {
			mpi3mr_set_trigger_data_in_hdb(trace_hdb, trigger_type,
			    trigger_data, 1);
		}
		spin_lock_irqsave(&mrioc->trigger_lock, flags);
		mrioc->trace_release_trigger_active = false;
		spin_unlock_irqrestore(&mrioc->trigger_lock, flags);
	}
	if (fw_hdb) {
		retval = mpi3mr_issue_diag_buf_release(mrioc, fw_hdb);
		if (!retval) {
			mpi3mr_set_trigger_data_in_hdb(fw_hdb, trigger_type,
			    trigger_data, 1);
		}
		spin_lock_irqsave(&mrioc->trigger_lock, flags);
		mrioc->fw_release_trigger_active = false;
		spin_unlock_irqrestore(&mrioc->trigger_lock, flags);
	}
}
/**
 * mpi3mr_encldev_add_chg_evt_debug - debug for enclosure event
 * @mrioc: Adapter instance reference
 * @encl_pg0: Enclosure page 0.
 * @is_added: Added event or not
 *
 * Return: Nothing.
 */
static void mpi3mr_encldev_add_chg_evt_debug(struct mpi3mr_ioc *mrioc,
	struct mpi3_enclosure_page0 *encl_pg0, u8 is_added)
{
	char *reason_str = NULL;

	if (!(mrioc->logging_level & MPI3_DEBUG_EVENT_WORK_TASK))
		return;

	if (is_added)
		reason_str = "enclosure added";
	else
		reason_str = "enclosure dev status changed";

	ioc_info(mrioc,
	    "%s: handle(0x%04x), enclosure logical id(0x%016llx)\n",
	    reason_str, le16_to_cpu(encl_pg0->enclosure_handle),
	    (unsigned long long)le64_to_cpu(encl_pg0->enclosure_logical_id));
	ioc_info(mrioc,
	    "number of slots(%d), port(%d), flags(0x%04x), present(%d)\n",
	    le16_to_cpu(encl_pg0->num_slots), encl_pg0->io_unit_port,
	    le16_to_cpu(encl_pg0->flags),
	    ((le16_to_cpu(encl_pg0->flags) &
	      MPI3_ENCLS0_FLAGS_ENCL_DEV_PRESENT_MASK) >> 4));
}

/**
 * mpi3mr_encldev_add_chg_evt_bh - Enclosure evt bottomhalf
 * @mrioc: Adapter instance reference
 * @fwevt: Firmware event reference
 *
 * Prints information about the Enclosure device status or
 * Enclosure add events if logging is enabled and add or remove
 * the enclosure from the controller's internal list of
 * enclosures.
 *
 * Return: Nothing.
 */
static void mpi3mr_encldev_add_chg_evt_bh(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_fwevt *fwevt)
{
	struct mpi3mr_enclosure_node *enclosure_dev = NULL;
	struct mpi3_enclosure_page0 *encl_pg0;
	u16 encl_handle;
	u8 added, present;

	encl_pg0 = (struct mpi3_enclosure_page0 *) fwevt->event_data;
	added = (fwevt->event_id == MPI3_EVENT_ENCL_DEVICE_ADDED) ? 1 : 0;
	mpi3mr_encldev_add_chg_evt_debug(mrioc, encl_pg0, added);

	encl_handle = le16_to_cpu(encl_pg0->enclosure_handle);
	present = ((le16_to_cpu(encl_pg0->flags) &
	    MPI3_ENCLS0_FLAGS_ENCL_DEV_PRESENT_MASK) >> 4);

	if (encl_handle)
		enclosure_dev = mpi3mr_enclosure_find_by_handle(mrioc,
		    encl_handle);
	if (!enclosure_dev && present) {
		enclosure_dev =
		    kzalloc(sizeof(struct mpi3mr_enclosure_node),
		    GFP_KERNEL);
		if (!enclosure_dev)
			return;
		list_add_tail(&enclosure_dev->list,
		    &mrioc->enclosure_list);
	}
	if (enclosure_dev) {
		if (!present) {
			list_del(&enclosure_dev->list);
			kfree(enclosure_dev);
		} else
			memcpy(&enclosure_dev->pg0, encl_pg0,
			    sizeof(enclosure_dev->pg0));
	}
}
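/*
 * Example of the present-bit decode used above, assuming
 * MPI3_ENCLS0_FLAGS_ENCL_DEV_PRESENT_MASK covers bit 4 (0x0010):
 * flags = 0x0010 yields present = 1 and a new enclosure node is
 * cached; with the bit clear, an existing cached node is freed.
 */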
/**
 * mpi3mr_sastopochg_evt_debug - SASTopoChange details
 * @mrioc: Adapter instance reference
 * @event_data: SAS topology change list event data
 *
 * Prints information about the SAS topology change event.
 *
 * Return: Nothing.
 */
static void
mpi3mr_sastopochg_evt_debug(struct mpi3mr_ioc *mrioc,
	struct mpi3_event_data_sas_topology_change_list *event_data)
{
	int i;
	u16 handle;
	u8 reason_code, phy_number;
	char *status_str = NULL;
	u8 link_rate, prev_link_rate;

	switch (event_data->exp_status) {
	case MPI3_EVENT_SAS_TOPO_ES_NOT_RESPONDING:
		status_str = "remove";
		break;
	case MPI3_EVENT_SAS_TOPO_ES_RESPONDING:
		status_str = "responding";
		break;
	case MPI3_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING:
		status_str = "remove delay";
		break;
	case MPI3_EVENT_SAS_TOPO_ES_NO_EXPANDER:
		status_str = "direct attached";
		break;
	default:
		status_str = "unknown status";
		break;
	}
	ioc_info(mrioc, "%s :sas topology change: (%s)\n",
	    __func__, status_str);
	ioc_info(mrioc,
	    "%s :\texpander_handle(0x%04x), port(%d), enclosure_handle(0x%04x) start_phy(%02d), num_entries(%d)\n",
	    __func__, le16_to_cpu(event_data->expander_dev_handle),
	    event_data->io_unit_port,
	    le16_to_cpu(event_data->enclosure_handle),
	    event_data->start_phy_num, event_data->num_entries);
	for (i = 0; i < event_data->num_entries; i++) {
		handle = le16_to_cpu(event_data->phy_entry[i].attached_dev_handle);
		if (!handle)
			continue;
		phy_number = event_data->start_phy_num + i;
		reason_code = event_data->phy_entry[i].status &
		    MPI3_EVENT_SAS_TOPO_PHY_RC_MASK;
		switch (reason_code) {
		case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING:
			status_str = "target remove";
			break;
		case MPI3_EVENT_SAS_TOPO_PHY_RC_DELAY_NOT_RESPONDING:
			status_str = "delay target remove";
			break;
		case MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED:
			status_str = "link status change";
			break;
		case MPI3_EVENT_SAS_TOPO_PHY_RC_NO_CHANGE:
			status_str = "link status no change";
			break;
		case MPI3_EVENT_SAS_TOPO_PHY_RC_RESPONDING:
			status_str = "target responding";
			break;
		default:
			status_str = "unknown";
			break;
		}
		link_rate = event_data->phy_entry[i].link_rate >> 4;
		prev_link_rate = event_data->phy_entry[i].link_rate & 0xF;
		ioc_info(mrioc,
		    "%s :\tphy(%02d), attached_handle(0x%04x): %s: link rate: new(0x%02x), old(0x%02x)\n",
		    __func__, phy_number, handle, status_str, link_rate,
		    prev_link_rate);
	}
}
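/*
 * The link_rate byte decoded above packs two rates per phy entry:
 * bits 7:4 hold the current negotiated rate and bits 3:0 the previous
 * one, so e.g. a raw value of 0xb9 reads as new rate 0xb, old rate 0x9.
 */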
/**
 * mpi3mr_sastopochg_evt_bh - SASTopologyChange evt bottomhalf
 * @mrioc: Adapter instance reference
 * @fwevt: Firmware event reference
 *
 * Prints information about the SAS topology change event and
 * for "not responding" event code, removes the device from the
 * upper layers.
 *
 * Return: Nothing.
 */
static void mpi3mr_sastopochg_evt_bh(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_fwevt *fwevt)
{
	struct mpi3_event_data_sas_topology_change_list *event_data =
	    (struct mpi3_event_data_sas_topology_change_list *)fwevt->event_data;
	int i;
	u16 handle;
	u8 reason_code;
	u64 exp_sas_address = 0, parent_sas_address = 0;
	struct mpi3mr_hba_port *hba_port = NULL;
	struct mpi3mr_tgt_dev *tgtdev = NULL;
	struct mpi3mr_sas_node *sas_expander = NULL;
	unsigned long flags;
	u8 link_rate, prev_link_rate, parent_phy_number;

	mpi3mr_sastopochg_evt_debug(mrioc, event_data);
	if (mrioc->sas_transport_enabled) {
		hba_port = mpi3mr_get_hba_port_by_id(mrioc,
		    event_data->io_unit_port);
		if (le16_to_cpu(event_data->expander_dev_handle)) {
			spin_lock_irqsave(&mrioc->sas_node_lock, flags);
			sas_expander = __mpi3mr_expander_find_by_handle(mrioc,
			    le16_to_cpu(event_data->expander_dev_handle));
			if (sas_expander) {
				exp_sas_address = sas_expander->sas_address;
				hba_port = sas_expander->hba_port;
			}
			spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
			parent_sas_address = exp_sas_address;
		} else
			parent_sas_address = mrioc->sas_hba.sas_address;
	}

	for (i = 0; i < event_data->num_entries; i++) {
		if (fwevt->discard)
			return;
		handle = le16_to_cpu(event_data->phy_entry[i].attached_dev_handle);
		if (!handle)
			continue;
		tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);
		if (!tgtdev)
			continue;

		reason_code = event_data->phy_entry[i].status &
		    MPI3_EVENT_SAS_TOPO_PHY_RC_MASK;

		switch (reason_code) {
		case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING:
			if (tgtdev->host_exposed)
				mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
			mpi3mr_tgtdev_del_from_list(mrioc, tgtdev, false);
			mpi3mr_tgtdev_put(tgtdev);
			break;
		case MPI3_EVENT_SAS_TOPO_PHY_RC_RESPONDING:
		case MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED:
		case MPI3_EVENT_SAS_TOPO_PHY_RC_NO_CHANGE:
		{
			if (!mrioc->sas_transport_enabled || tgtdev->non_stl
			    || tgtdev->is_hidden)
				break;
			link_rate = event_data->phy_entry[i].link_rate >> 4;
			prev_link_rate = event_data->phy_entry[i].link_rate & 0xF;
			if (link_rate == prev_link_rate)
				break;
			if (!parent_sas_address)
				break;
			parent_phy_number = event_data->start_phy_num + i;
			mpi3mr_update_links(mrioc, parent_sas_address, handle,
			    parent_phy_number, link_rate, hba_port);
			break;
		}
		default:
			break;
		}
		if (tgtdev)
			mpi3mr_tgtdev_put(tgtdev);
	}

	if (mrioc->sas_transport_enabled && (event_data->exp_status ==
	    MPI3_EVENT_SAS_TOPO_ES_NOT_RESPONDING)) {
		if (sas_expander)
			mpi3mr_expander_remove(mrioc, exp_sas_address,
			    hba_port);
	}
}
/**
 * mpi3mr_pcietopochg_evt_debug - PCIeTopoChange details
 * @mrioc: Adapter instance reference
 * @event_data: PCIe topology change list event data
 *
 * Prints information about the PCIe topology change event.
 *
 * Return: Nothing.
 */
static void
mpi3mr_pcietopochg_evt_debug(struct mpi3mr_ioc *mrioc,
	struct mpi3_event_data_pcie_topology_change_list *event_data)
{
	int i;
	u16 handle;
	u16 reason_code;
	u8 port_number;
	char *status_str = NULL;
	u8 link_rate, prev_link_rate;

	switch (event_data->switch_status) {
	case MPI3_EVENT_PCIE_TOPO_SS_NOT_RESPONDING:
		status_str = "remove";
		break;
	case MPI3_EVENT_PCIE_TOPO_SS_RESPONDING:
		status_str = "responding";
		break;
	case MPI3_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING:
		status_str = "remove delay";
		break;
	case MPI3_EVENT_PCIE_TOPO_SS_NO_PCIE_SWITCH:
		status_str = "direct attached";
		break;
	default:
		status_str = "unknown status";
		break;
	}
	ioc_info(mrioc, "%s :pcie topology change: (%s)\n",
	    __func__, status_str);
	ioc_info(mrioc,
	    "%s :\tswitch_handle(0x%04x), enclosure_handle(0x%04x) start_port(%02d), num_entries(%d)\n",
	    __func__, le16_to_cpu(event_data->switch_dev_handle),
	    le16_to_cpu(event_data->enclosure_handle),
	    event_data->start_port_num, event_data->num_entries);
	for (i = 0; i < event_data->num_entries; i++) {
		handle =
		    le16_to_cpu(event_data->port_entry[i].attached_dev_handle);
		if (!handle)
			continue;
		port_number = event_data->start_port_num + i;
		reason_code = event_data->port_entry[i].port_status;
		switch (reason_code) {
		case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
			status_str = "target remove";
			break;
		case MPI3_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING:
			status_str = "delay target remove";
			break;
		case MPI3_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
			status_str = "link status change";
			break;
		case MPI3_EVENT_PCIE_TOPO_PS_NO_CHANGE:
			status_str = "link status no change";
			break;
		case MPI3_EVENT_PCIE_TOPO_PS_RESPONDING:
			status_str = "target responding";
			break;
		default:
			status_str = "unknown";
			break;
		}
		link_rate = event_data->port_entry[i].current_port_info &
		    MPI3_EVENT_PCIE_TOPO_PI_RATE_MASK;
		prev_link_rate = event_data->port_entry[i].previous_port_info &
		    MPI3_EVENT_PCIE_TOPO_PI_RATE_MASK;
		ioc_info(mrioc,
		    "%s :\tport(%02d), attached_handle(0x%04x): %s: link rate: new(0x%02x), old(0x%02x)\n",
		    __func__, port_number, handle, status_str, link_rate,
		    prev_link_rate);
	}
}
 */
static void mpi3mr_pcietopochg_evt_bh(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_fwevt *fwevt)
{
	struct mpi3_event_data_pcie_topology_change_list *event_data =
	    (struct mpi3_event_data_pcie_topology_change_list *)fwevt->event_data;
	int i;
	u16 handle;
	u8 reason_code;
	struct mpi3mr_tgt_dev *tgtdev = NULL;

	mpi3mr_pcietopochg_evt_debug(mrioc, event_data);

	for (i = 0; i < event_data->num_entries; i++) {
		if (fwevt->discard)
			return;
		handle =
		    le16_to_cpu(event_data->port_entry[i].attached_dev_handle);
		if (!handle)
			continue;
		tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);
		if (!tgtdev)
			continue;

		reason_code = event_data->port_entry[i].port_status;

		switch (reason_code) {
		case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
			if (tgtdev->host_exposed)
				mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
			mpi3mr_tgtdev_del_from_list(mrioc, tgtdev, false);
			mpi3mr_tgtdev_put(tgtdev);
			break;
		default:
			break;
		}
		if (tgtdev)
			mpi3mr_tgtdev_put(tgtdev);
	}
}

/**
 * mpi3mr_logdata_evt_bh - Log data event bottomhalf
 * @mrioc: Adapter instance reference
 * @fwevt: Firmware event reference
 *
 * Extracts the event data and calls application interfacing
 * function to process the event further.
 *
 * Return: Nothing.
 */
static void mpi3mr_logdata_evt_bh(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_fwevt *fwevt)
{
	mpi3mr_app_save_logdata(mrioc, fwevt->event_data,
	    fwevt->event_data_size);
}

/**
 * mpi3mr_update_sdev_qd - Update SCSI device queue depth
 * @sdev: SCSI device reference
 * @data: Queue depth reference
 *
 * This is an iterator function called for each SCSI device in a
 * target to update the QD of each SCSI device.
 *
 * Return: Nothing.
 */
static void mpi3mr_update_sdev_qd(struct scsi_device *sdev, void *data)
{
	u16 *q_depth = (u16 *)data;

	scsi_change_queue_depth(sdev, (int)*q_depth);
	sdev->max_queue_depth = sdev->queue_depth;
}

/**
 * mpi3mr_set_qd_for_all_vd_in_tg - set QD for TG VDs
 * @mrioc: Adapter instance reference
 * @tg: Throttle group information pointer
 *
 * Accessor to reduce QD for each device associated with the
 * given throttle group.
 *
 * Return: None.
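 *
 * Usage sketch: the SCSI midlayer passes the opaque @data cookie
 * through to the per-device callback, so the update reduces to
 *
 *   starget_for_each_device(tgtdev->starget,
 *       (void *)&tg->modified_qd, mpi3mr_update_sdev_qd);
 *
 * with mpi3mr_update_sdev_qd() casting the cookie back to a u16 and
 * applying it via scsi_change_queue_depth().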
 */
static void mpi3mr_set_qd_for_all_vd_in_tg(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_throttle_group_info *tg)
{
	unsigned long flags;
	struct mpi3mr_tgt_dev *tgtdev;
	struct mpi3mr_stgt_priv_data *tgt_priv;

	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
	list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
		if (tgtdev->starget && tgtdev->starget->hostdata) {
			tgt_priv = tgtdev->starget->hostdata;
			if (tgt_priv->throttle_group == tg) {
				dprint_event_bh(mrioc,
				    "updating qd due to throttling for persist_id(%d) original_qd(%d), reduced_qd (%d)\n",
				    tgt_priv->perst_id, tgtdev->q_depth,
				    tg->modified_qd);
				starget_for_each_device(tgtdev->starget,
				    (void *)&tg->modified_qd,
				    mpi3mr_update_sdev_qd);
			}
		}
	}
	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
}

/**
 * mpi3mr_fwevt_bh - Firmware event bottomhalf handler
 * @mrioc: Adapter instance reference
 * @fwevt: Firmware event reference
 *
 * Identifies the firmware event and calls the corresponding bottom
 * half handler and sends event acknowledgment if required.
 *
 * Return: Nothing.
 */
static void mpi3mr_fwevt_bh(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_fwevt *fwevt)
{
	struct mpi3_device_page0 *dev_pg0 = NULL;
	u16 perst_id, handle, dev_info;
	struct mpi3_device0_sas_sata_format *sasinf = NULL;
	unsigned int timeout;

	mpi3mr_fwevt_del_from_list(mrioc, fwevt);
	mrioc->current_event = fwevt;

	if (mrioc->stop_drv_processing)
		goto out;

	if (mrioc->unrecoverable) {
		dprint_event_bh(mrioc,
		    "ignoring event(0x%02x) in bottom half handler due to unrecoverable controller\n",
		    fwevt->event_id);
		goto out;
	}

	if (!fwevt->process_evt)
		goto evt_ack;

	switch (fwevt->event_id) {
	case MPI3_EVENT_DEVICE_ADDED:
	{
		dev_pg0 = (struct mpi3_device_page0 *)fwevt->event_data;
		perst_id = le16_to_cpu(dev_pg0->persistent_id);
		handle = le16_to_cpu(dev_pg0->dev_handle);
		if (perst_id != MPI3_DEVICE0_PERSISTENTID_INVALID)
			mpi3mr_report_tgtdev_to_host(mrioc, perst_id);
		else if (mrioc->sas_transport_enabled &&
		    (dev_pg0->device_form == MPI3_DEVICE_DEVFORM_SAS_SATA)) {
			sasinf = &dev_pg0->device_specific.sas_sata_format;
			dev_info = le16_to_cpu(sasinf->device_info);
			if (!mrioc->sas_hba.num_phys)
				mpi3mr_sas_host_add(mrioc);
			else
				mpi3mr_sas_host_refresh(mrioc);

			if (mpi3mr_is_expander_device(dev_info))
				mpi3mr_expander_add(mrioc, handle);
		}
		break;
	}
	case MPI3_EVENT_DEVICE_INFO_CHANGED:
	{
		dev_pg0 = (struct mpi3_device_page0 *)fwevt->event_data;
		perst_id = le16_to_cpu(dev_pg0->persistent_id);
		if (perst_id != MPI3_DEVICE0_PERSISTENTID_INVALID)
			mpi3mr_devinfochg_evt_bh(mrioc, dev_pg0);
		break;
	}
	case MPI3_EVENT_DEVICE_STATUS_CHANGE:
	{
		mpi3mr_devstatuschg_evt_bh(mrioc, fwevt);
		break;
	}
	case MPI3_EVENT_ENCL_DEVICE_ADDED:
	case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE:
	{
		mpi3mr_encldev_add_chg_evt_bh(mrioc, fwevt);
		break;
	}

	case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
	{
		mpi3mr_sastopochg_evt_bh(mrioc, fwevt);
		break;
	}
	case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
	{
		mpi3mr_pcietopochg_evt_bh(mrioc, fwevt);
		break;
	}
	case MPI3_EVENT_LOG_DATA:
	{
mpi3mr_logdata_evt_bh(mrioc, fwevt); 2083 break; 2084 } 2085 case MPI3MR_DRIVER_EVENT_TG_QD_REDUCTION: 2086 { 2087 struct mpi3mr_throttle_group_info *tg; 2088 2089 tg = *(struct mpi3mr_throttle_group_info **)fwevt->event_data; 2090 dprint_event_bh(mrioc, 2091 "qd reduction event processed for tg_id(%d) reduction_needed(%d)\n", 2092 tg->id, tg->need_qd_reduction); 2093 if (tg->need_qd_reduction) { 2094 mpi3mr_set_qd_for_all_vd_in_tg(mrioc, tg); 2095 tg->need_qd_reduction = 0; 2096 } 2097 break; 2098 } 2099 case MPI3_EVENT_WAIT_FOR_DEVICES_TO_REFRESH: 2100 { 2101 timeout = MPI3MR_RESET_TIMEOUT * 2; 2102 while ((mrioc->device_refresh_on || mrioc->block_on_pci_err) && 2103 !mrioc->unrecoverable && !mrioc->pci_err_recovery) { 2104 msleep(500); 2105 if (!timeout--) { 2106 mrioc->unrecoverable = 1; 2107 break; 2108 } 2109 } 2110 2111 if (mrioc->unrecoverable || mrioc->pci_err_recovery) 2112 break; 2113 2114 dprint_event_bh(mrioc, 2115 "scan for non responding and newly added devices after soft reset started\n"); 2116 if (mrioc->sas_transport_enabled) { 2117 mpi3mr_refresh_sas_ports(mrioc); 2118 mpi3mr_refresh_expanders(mrioc); 2119 } 2120 mpi3mr_refresh_tgtdevs(mrioc); 2121 ioc_info(mrioc, 2122 "scan for non responding and newly added devices after soft reset completed\n"); 2123 break; 2124 } 2125 case MPI3MR_DRIVER_EVENT_PROCESS_TRIGGER: 2126 { 2127 mpi3mr_process_trigger_data_event_bh(mrioc, 2128 (struct trigger_event_data *)fwevt->event_data); 2129 break; 2130 } 2131 default: 2132 break; 2133 } 2134 2135 evt_ack: 2136 if (fwevt->send_ack) 2137 mpi3mr_process_event_ack(mrioc, fwevt->event_id, 2138 fwevt->evt_ctx); 2139 out: 2140 /* Put fwevt reference count to neutralize kref_init increment */ 2141 mpi3mr_fwevt_put(fwevt); 2142 mrioc->current_event = NULL; 2143 } 2144 2145 /** 2146 * mpi3mr_fwevt_worker - Firmware event worker 2147 * @work: Work struct containing firmware event 2148 * 2149 * Extracts the firmware event and calls mpi3mr_fwevt_bh. 2150 * 2151 * Return: Nothing. 2152 */ 2153 static void mpi3mr_fwevt_worker(struct work_struct *work) 2154 { 2155 struct mpi3mr_fwevt *fwevt = container_of(work, struct mpi3mr_fwevt, 2156 work); 2157 mpi3mr_fwevt_bh(fwevt->mrioc, fwevt); 2158 /* 2159 * Put fwevt reference count after 2160 * dequeuing it from worker queue 2161 */ 2162 mpi3mr_fwevt_put(fwevt); 2163 } 2164 2165 /** 2166 * mpi3mr_create_tgtdev - Create and add a target device 2167 * @mrioc: Adapter instance reference 2168 * @dev_pg0: Device Page 0 data 2169 * 2170 * If the device specified by the device page 0 data is not 2171 * present in the driver's internal list, allocate the memory 2172 * for the device, populate the data and add to the list, else 2173 * update the device data. The key is persistent ID. 
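 *
 * Control flow sketch of the create-or-update path (lookup is done
 * under tgtdev_lock, the update outside of it):
 *
 *   tgtdev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, perst_id);
 *   if (tgtdev)
 *       update the existing entry and drop the lookup reference
 *   else
 *       mpi3mr_alloc_tgtdev(), update, mpi3mr_tgtdev_add_to_list()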
 *
 * Return: 0 on success, -ENOMEM on memory allocation failure
 */
static int mpi3mr_create_tgtdev(struct mpi3mr_ioc *mrioc,
	struct mpi3_device_page0 *dev_pg0)
{
	int retval = 0;
	struct mpi3mr_tgt_dev *tgtdev = NULL;
	u16 perst_id = 0;
	unsigned long flags;

	perst_id = le16_to_cpu(dev_pg0->persistent_id);
	if (perst_id == MPI3_DEVICE0_PERSISTENTID_INVALID)
		return retval;

	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
	tgtdev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, perst_id);
	if (tgtdev)
		tgtdev->state = MPI3MR_DEV_CREATED;
	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);

	if (tgtdev) {
		mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0, true);
		mpi3mr_tgtdev_put(tgtdev);
	} else {
		tgtdev = mpi3mr_alloc_tgtdev();
		if (!tgtdev)
			return -ENOMEM;
		mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0, true);
		mpi3mr_tgtdev_add_to_list(mrioc, tgtdev);
	}

	return retval;
}

/**
 * mpi3mr_flush_delayed_cmd_lists - Flush pending commands
 * @mrioc: Adapter instance reference
 *
 * Flush pending commands in the delayed lists due to a
 * controller reset or driver removal as a cleanup.
 *
 * Return: Nothing
 */
void mpi3mr_flush_delayed_cmd_lists(struct mpi3mr_ioc *mrioc)
{
	struct delayed_dev_rmhs_node *_rmhs_node;
	struct delayed_evt_ack_node *_evtack_node;

	dprint_reset(mrioc, "flushing delayed dev_remove_hs commands\n");
	while (!list_empty(&mrioc->delayed_rmhs_list)) {
		_rmhs_node = list_entry(mrioc->delayed_rmhs_list.next,
		    struct delayed_dev_rmhs_node, list);
		list_del(&_rmhs_node->list);
		kfree(_rmhs_node);
	}
	dprint_reset(mrioc, "flushing delayed event ack commands\n");
	while (!list_empty(&mrioc->delayed_evtack_cmds_list)) {
		_evtack_node = list_entry(mrioc->delayed_evtack_cmds_list.next,
		    struct delayed_evt_ack_node, list);
		list_del(&_evtack_node->list);
		kfree(_evtack_node);
	}
}

/**
 * mpi3mr_dev_rmhs_complete_iou - Device removal IOUC completion
 * @mrioc: Adapter instance reference
 * @drv_cmd: Internal command tracker
 *
 * Issues a target reset TM to the firmware from the device
 * removal TM pend list or retries the removal handshake sequence
 * based on the IOU control request IOC status.
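 *
 * The full removal handshake, as implemented in this file, is a
 * two-step sequence per device handle:
 *
 *   mpi3mr_dev_rmhs_send_tm()            target reset TM
 *     -> mpi3mr_dev_rmhs_complete_tm()   IO unit control (remove/hide)
 *       -> mpi3mr_dev_rmhs_complete_iou()  retry, or pull the next
 *                                          delayed handle
 *
 * with up to MPI3MR_DEV_RMHS_RETRY_COUNT retries on IOU failure.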
 *
 * Return: Nothing
 */
static void mpi3mr_dev_rmhs_complete_iou(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_drv_cmd *drv_cmd)
{
	u16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
	struct delayed_dev_rmhs_node *delayed_dev_rmhs = NULL;

	if (drv_cmd->state & MPI3MR_CMD_RESET)
		goto clear_drv_cmd;

	ioc_info(mrioc,
	    "%s :dev_rmhs_iouctrl_complete:handle(0x%04x), ioc_status(0x%04x), loginfo(0x%08x)\n",
	    __func__, drv_cmd->dev_handle, drv_cmd->ioc_status,
	    drv_cmd->ioc_loginfo);
	if (drv_cmd->ioc_status != MPI3_IOCSTATUS_SUCCESS) {
		if (drv_cmd->retry_count < MPI3MR_DEV_RMHS_RETRY_COUNT) {
			drv_cmd->retry_count++;
			ioc_info(mrioc,
			    "%s :dev_rmhs_iouctrl_complete: handle(0x%04x)retrying handshake retry=%d\n",
			    __func__, drv_cmd->dev_handle,
			    drv_cmd->retry_count);
			mpi3mr_dev_rmhs_send_tm(mrioc, drv_cmd->dev_handle,
			    drv_cmd, drv_cmd->iou_rc);
			return;
		}
		ioc_err(mrioc,
		    "%s :dev removal handshake failed after all retries: handle(0x%04x)\n",
		    __func__, drv_cmd->dev_handle);
	} else {
		ioc_info(mrioc,
		    "%s :dev removal handshake completed successfully: handle(0x%04x)\n",
		    __func__, drv_cmd->dev_handle);
		clear_bit(drv_cmd->dev_handle, mrioc->removepend_bitmap);
	}

	if (!list_empty(&mrioc->delayed_rmhs_list)) {
		delayed_dev_rmhs = list_entry(mrioc->delayed_rmhs_list.next,
		    struct delayed_dev_rmhs_node, list);
		drv_cmd->dev_handle = delayed_dev_rmhs->handle;
		drv_cmd->retry_count = 0;
		drv_cmd->iou_rc = delayed_dev_rmhs->iou_rc;
		ioc_info(mrioc,
		    "%s :dev_rmhs_iouctrl_complete: processing delayed TM: handle(0x%04x)\n",
		    __func__, drv_cmd->dev_handle);
		mpi3mr_dev_rmhs_send_tm(mrioc, drv_cmd->dev_handle, drv_cmd,
		    drv_cmd->iou_rc);
		list_del(&delayed_dev_rmhs->list);
		kfree(delayed_dev_rmhs);
		return;
	}

clear_drv_cmd:
	drv_cmd->state = MPI3MR_CMD_NOTUSED;
	drv_cmd->callback = NULL;
	drv_cmd->retry_count = 0;
	drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
	clear_bit(cmd_idx, mrioc->devrem_bitmap);
}

/**
 * mpi3mr_dev_rmhs_complete_tm - Device removal TM completion
 * @mrioc: Adapter instance reference
 * @drv_cmd: Internal command tracker
 *
 * Issues a target reset TM to the firmware from the device
 * removal TM pend list or issues an IO unit control request as
 * part of the device removal or hidden acknowledgment handshake.
 *
 * Return: Nothing
 */
static void mpi3mr_dev_rmhs_complete_tm(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_drv_cmd *drv_cmd)
{
	struct mpi3_iounit_control_request iou_ctrl;
	u16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
	struct mpi3_scsi_task_mgmt_reply *tm_reply = NULL;
	int retval;

	if (drv_cmd->state & MPI3MR_CMD_RESET)
		goto clear_drv_cmd;

	if (drv_cmd->state & MPI3MR_CMD_REPLY_VALID)
		tm_reply = (struct mpi3_scsi_task_mgmt_reply *)drv_cmd->reply;

	if (tm_reply)
		pr_info(IOCNAME
		    "dev_rmhs_tr_complete:handle(0x%04x), ioc_status(0x%04x), loginfo(0x%08x), term_count(%d)\n",
		    mrioc->name, drv_cmd->dev_handle, drv_cmd->ioc_status,
		    drv_cmd->ioc_loginfo,
		    le32_to_cpu(tm_reply->termination_count));

	pr_info(IOCNAME "Issuing IOU CTL: handle(0x%04x) dev_rmhs idx(%d)\n",
	    mrioc->name, drv_cmd->dev_handle, cmd_idx);

	memset(&iou_ctrl, 0, sizeof(iou_ctrl));

	drv_cmd->state = MPI3MR_CMD_PENDING;
	drv_cmd->is_waiting = 0;
	drv_cmd->callback = mpi3mr_dev_rmhs_complete_iou;
	iou_ctrl.operation = drv_cmd->iou_rc;
	iou_ctrl.param16[0] = cpu_to_le16(drv_cmd->dev_handle);
	iou_ctrl.host_tag = cpu_to_le16(drv_cmd->host_tag);
	iou_ctrl.function = MPI3_FUNCTION_IO_UNIT_CONTROL;

	retval = mpi3mr_admin_request_post(mrioc, &iou_ctrl, sizeof(iou_ctrl),
	    1);
	if (retval) {
		pr_err(IOCNAME "Issue DevRmHsTMIOUCTL: Admin post failed\n",
		    mrioc->name);
		goto clear_drv_cmd;
	}

	return;
clear_drv_cmd:
	drv_cmd->state = MPI3MR_CMD_NOTUSED;
	drv_cmd->callback = NULL;
	drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
	drv_cmd->retry_count = 0;
	clear_bit(cmd_idx, mrioc->devrem_bitmap);
}

/**
 * mpi3mr_dev_rmhs_send_tm - Issue TM for device removal
 * @mrioc: Adapter instance reference
 * @handle: Device handle
 * @cmdparam: Internal command tracker
 * @iou_rc: IO unit reason code
 *
 * Issues a target reset TM to the firmware or adds it to a pend
 * list as part of the device removal or hidden acknowledgment
 * handshake.
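 *
 * Slot claiming is a bounded lock-free loop over the devrem bitmap;
 * a failed test_and_set_bit() means another context raced to the same
 * bit, so the search is retried:
 *
 *   cmd_idx = find_first_zero_bit(mrioc->devrem_bitmap,
 *       MPI3MR_NUM_DEVRMCMD);
 *   if (cmd_idx < MPI3MR_NUM_DEVRMCMD &&
 *       !test_and_set_bit(cmd_idx, mrioc->devrem_bitmap))
 *       the slot is owned by this caller
 *
 * When no slot can be claimed the request is queued on
 * delayed_rmhs_list and replayed from the IOU completion path.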
 *
 * Return: Nothing
 */
static void mpi3mr_dev_rmhs_send_tm(struct mpi3mr_ioc *mrioc, u16 handle,
	struct mpi3mr_drv_cmd *cmdparam, u8 iou_rc)
{
	struct mpi3_scsi_task_mgmt_request tm_req;
	int retval = 0;
	u16 cmd_idx = MPI3MR_NUM_DEVRMCMD;
	u8 retrycount = 5;
	struct mpi3mr_drv_cmd *drv_cmd = cmdparam;
	struct delayed_dev_rmhs_node *delayed_dev_rmhs = NULL;
	struct mpi3mr_tgt_dev *tgtdev = NULL;
	unsigned long flags;

	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
	tgtdev = __mpi3mr_get_tgtdev_by_handle(mrioc, handle);
	if (tgtdev && (iou_rc == MPI3_CTRL_OP_REMOVE_DEVICE))
		tgtdev->state = MPI3MR_DEV_REMOVE_HS_STARTED;
	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);

	if (drv_cmd)
		goto issue_cmd;
	do {
		cmd_idx = find_first_zero_bit(mrioc->devrem_bitmap,
		    MPI3MR_NUM_DEVRMCMD);
		if (cmd_idx < MPI3MR_NUM_DEVRMCMD) {
			if (!test_and_set_bit(cmd_idx, mrioc->devrem_bitmap))
				break;
			cmd_idx = MPI3MR_NUM_DEVRMCMD;
		}
	} while (retrycount--);

	if (cmd_idx >= MPI3MR_NUM_DEVRMCMD) {
		delayed_dev_rmhs = kzalloc(sizeof(*delayed_dev_rmhs),
		    GFP_ATOMIC);
		if (!delayed_dev_rmhs)
			return;
		INIT_LIST_HEAD(&delayed_dev_rmhs->list);
		delayed_dev_rmhs->handle = handle;
		delayed_dev_rmhs->iou_rc = iou_rc;
		list_add_tail(&delayed_dev_rmhs->list,
		    &mrioc->delayed_rmhs_list);
		ioc_info(mrioc, "%s :DevRmHs: tr:handle(0x%04x) is postponed\n",
		    __func__, handle);
		return;
	}
	drv_cmd = &mrioc->dev_rmhs_cmds[cmd_idx];

issue_cmd:
	cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
	ioc_info(mrioc,
	    "%s :Issuing TR TM: for devhandle 0x%04x with dev_rmhs %d\n",
	    __func__, handle, cmd_idx);

	memset(&tm_req, 0, sizeof(tm_req));
	if (drv_cmd->state & MPI3MR_CMD_PENDING) {
		ioc_err(mrioc, "%s :Issue TM: Command is in use\n", __func__);
		goto out;
	}
	drv_cmd->state = MPI3MR_CMD_PENDING;
	drv_cmd->is_waiting = 0;
	drv_cmd->callback = mpi3mr_dev_rmhs_complete_tm;
	drv_cmd->dev_handle = handle;
	drv_cmd->iou_rc = iou_rc;
	tm_req.dev_handle = cpu_to_le16(handle);
	tm_req.task_type = MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
	tm_req.host_tag = cpu_to_le16(drv_cmd->host_tag);
	tm_req.task_host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INVALID);
	tm_req.function = MPI3_FUNCTION_SCSI_TASK_MGMT;

	set_bit(handle, mrioc->removepend_bitmap);
	retval = mpi3mr_admin_request_post(mrioc, &tm_req, sizeof(tm_req), 1);
	if (retval) {
		ioc_err(mrioc, "%s :Issue DevRmHsTM: Admin Post failed\n",
		    __func__);
		goto out_failed;
	}
out:
	return;
out_failed:
	drv_cmd->state = MPI3MR_CMD_NOTUSED;
	drv_cmd->callback = NULL;
	drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
	drv_cmd->retry_count = 0;
	clear_bit(cmd_idx, mrioc->devrem_bitmap);
}

/**
 * mpi3mr_complete_evt_ack - event ack request completion
 * @mrioc: Adapter instance reference
 * @drv_cmd: Internal command tracker
 *
 * This is the completion handler for the non-blocking event
 * acknowledgment sent to the firmware; it issues any pending
 * event acknowledgment request.
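 *
 * When delayed acks are pending, the just-completed tracker is reused
 * directly (mpi3mr_send_event_ack() is invoked with @drv_cmd before
 * the slot bit is cleared), so one firmware completion drains one
 * queued ack without re-running the bitmap search.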
 *
 * Return: Nothing
 */
static void mpi3mr_complete_evt_ack(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_drv_cmd *drv_cmd)
{
	u16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN;
	struct delayed_evt_ack_node *delayed_evtack = NULL;

	if (drv_cmd->state & MPI3MR_CMD_RESET)
		goto clear_drv_cmd;

	if (drv_cmd->ioc_status != MPI3_IOCSTATUS_SUCCESS) {
		dprint_event_th(mrioc,
		    "immediate event ack failed with ioc_status(0x%04x) log_info(0x%08x)\n",
		    (drv_cmd->ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    drv_cmd->ioc_loginfo);
	}

	if (!list_empty(&mrioc->delayed_evtack_cmds_list)) {
		delayed_evtack =
		    list_entry(mrioc->delayed_evtack_cmds_list.next,
		    struct delayed_evt_ack_node, list);
		mpi3mr_send_event_ack(mrioc, delayed_evtack->event, drv_cmd,
		    delayed_evtack->event_ctx);
		list_del(&delayed_evtack->list);
		kfree(delayed_evtack);
		return;
	}
clear_drv_cmd:
	drv_cmd->state = MPI3MR_CMD_NOTUSED;
	drv_cmd->callback = NULL;
	clear_bit(cmd_idx, mrioc->evtack_cmds_bitmap);
}

/**
 * mpi3mr_send_event_ack - Issue event acknowledgment request
 * @mrioc: Adapter instance reference
 * @event: MPI3 event id
 * @cmdparam: Internal command tracker
 * @event_ctx: event context
 *
 * Issues an event acknowledgment request to the firmware if there
 * is a free command to send the event ack, else adds it to a pend
 * list so that it will be processed on completion of a prior
 * event acknowledgment.
 *
 * Return: Nothing
 */
static void mpi3mr_send_event_ack(struct mpi3mr_ioc *mrioc, u8 event,
	struct mpi3mr_drv_cmd *cmdparam, u32 event_ctx)
{
	struct mpi3_event_ack_request evtack_req;
	int retval = 0;
	u8 retrycount = 5;
	u16 cmd_idx = MPI3MR_NUM_EVTACKCMD;
	struct mpi3mr_drv_cmd *drv_cmd = cmdparam;
	struct delayed_evt_ack_node *delayed_evtack = NULL;

	if (drv_cmd) {
		dprint_event_th(mrioc,
		    "sending delayed event ack in the top half for event(0x%02x), event_ctx(0x%08x)\n",
		    event, event_ctx);
		goto issue_cmd;
	}
	dprint_event_th(mrioc,
	    "sending event ack in the top half for event(0x%02x), event_ctx(0x%08x)\n",
	    event, event_ctx);
	do {
		cmd_idx = find_first_zero_bit(mrioc->evtack_cmds_bitmap,
		    MPI3MR_NUM_EVTACKCMD);
		if (cmd_idx < MPI3MR_NUM_EVTACKCMD) {
			if (!test_and_set_bit(cmd_idx,
			    mrioc->evtack_cmds_bitmap))
				break;
			cmd_idx = MPI3MR_NUM_EVTACKCMD;
		}
	} while (retrycount--);

	if (cmd_idx >= MPI3MR_NUM_EVTACKCMD) {
		delayed_evtack = kzalloc(sizeof(*delayed_evtack),
		    GFP_ATOMIC);
		if (!delayed_evtack)
			return;
		INIT_LIST_HEAD(&delayed_evtack->list);
		delayed_evtack->event = event;
		delayed_evtack->event_ctx = event_ctx;
		list_add_tail(&delayed_evtack->list,
		    &mrioc->delayed_evtack_cmds_list);
		dprint_event_th(mrioc,
		    "event ack in the top half for event(0x%02x), event_ctx(0x%08x) is postponed\n",
		    event, event_ctx);
		return;
	}
	drv_cmd = &mrioc->evtack_cmds[cmd_idx];

issue_cmd:
	cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN;

	memset(&evtack_req, 0, sizeof(evtack_req));
	if (drv_cmd->state & MPI3MR_CMD_PENDING) {
		dprint_event_th(mrioc,
		    "sending event ack failed due to command in use\n");
		goto out;
	}
	drv_cmd->state = MPI3MR_CMD_PENDING;
drv_cmd->is_waiting = 0; 2583 drv_cmd->callback = mpi3mr_complete_evt_ack; 2584 evtack_req.host_tag = cpu_to_le16(drv_cmd->host_tag); 2585 evtack_req.function = MPI3_FUNCTION_EVENT_ACK; 2586 evtack_req.event = event; 2587 evtack_req.event_context = cpu_to_le32(event_ctx); 2588 retval = mpi3mr_admin_request_post(mrioc, &evtack_req, 2589 sizeof(evtack_req), 1); 2590 if (retval) { 2591 dprint_event_th(mrioc, 2592 "posting event ack request is failed\n"); 2593 goto out_failed; 2594 } 2595 2596 dprint_event_th(mrioc, 2597 "event ack in the top half for event(0x%02x), event_ctx(0x%08x) is posted\n", 2598 event, event_ctx); 2599 out: 2600 return; 2601 out_failed: 2602 drv_cmd->state = MPI3MR_CMD_NOTUSED; 2603 drv_cmd->callback = NULL; 2604 clear_bit(cmd_idx, mrioc->evtack_cmds_bitmap); 2605 } 2606 2607 /** 2608 * mpi3mr_pcietopochg_evt_th - PCIETopologyChange evt tophalf 2609 * @mrioc: Adapter instance reference 2610 * @event_reply: event data 2611 * 2612 * Checks for the reason code and based on that either block I/O 2613 * to device, or unblock I/O to the device, or start the device 2614 * removal handshake with reason as remove with the firmware for 2615 * PCIe devices. 2616 * 2617 * Return: Nothing 2618 */ 2619 static void mpi3mr_pcietopochg_evt_th(struct mpi3mr_ioc *mrioc, 2620 struct mpi3_event_notification_reply *event_reply) 2621 { 2622 struct mpi3_event_data_pcie_topology_change_list *topo_evt = 2623 (struct mpi3_event_data_pcie_topology_change_list *)event_reply->event_data; 2624 int i; 2625 u16 handle; 2626 u8 reason_code; 2627 struct mpi3mr_tgt_dev *tgtdev = NULL; 2628 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL; 2629 2630 for (i = 0; i < topo_evt->num_entries; i++) { 2631 handle = le16_to_cpu(topo_evt->port_entry[i].attached_dev_handle); 2632 if (!handle) 2633 continue; 2634 reason_code = topo_evt->port_entry[i].port_status; 2635 scsi_tgt_priv_data = NULL; 2636 tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle); 2637 if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata) 2638 scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *) 2639 tgtdev->starget->hostdata; 2640 switch (reason_code) { 2641 case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING: 2642 if (scsi_tgt_priv_data) { 2643 scsi_tgt_priv_data->dev_removed = 1; 2644 scsi_tgt_priv_data->dev_removedelay = 0; 2645 atomic_set(&scsi_tgt_priv_data->block_io, 0); 2646 } 2647 mpi3mr_dev_rmhs_send_tm(mrioc, handle, NULL, 2648 MPI3_CTRL_OP_REMOVE_DEVICE); 2649 break; 2650 case MPI3_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING: 2651 if (scsi_tgt_priv_data) { 2652 scsi_tgt_priv_data->dev_removedelay = 1; 2653 atomic_inc(&scsi_tgt_priv_data->block_io); 2654 } 2655 break; 2656 case MPI3_EVENT_PCIE_TOPO_PS_RESPONDING: 2657 if (scsi_tgt_priv_data && 2658 scsi_tgt_priv_data->dev_removedelay) { 2659 scsi_tgt_priv_data->dev_removedelay = 0; 2660 atomic_dec_if_positive 2661 (&scsi_tgt_priv_data->block_io); 2662 } 2663 break; 2664 case MPI3_EVENT_PCIE_TOPO_PS_PORT_CHANGED: 2665 default: 2666 break; 2667 } 2668 if (tgtdev) 2669 mpi3mr_tgtdev_put(tgtdev); 2670 } 2671 } 2672 2673 /** 2674 * mpi3mr_sastopochg_evt_th - SASTopologyChange evt tophalf 2675 * @mrioc: Adapter instance reference 2676 * @event_reply: event data 2677 * 2678 * Checks for the reason code and based on that either block I/O 2679 * to device, or unblock I/O to the device, or start the device 2680 * removal handshake with reason as remove with the firmware for 2681 * SAS/SATA devices. 
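 *
 * Note that I/O blocking is a counter, not a flag: "delay not
 * responding" does atomic_inc(&block_io) and a later "responding"
 * undoes it with atomic_dec_if_positive(), while "not responding"
 * force-clears it with atomic_set(..., 0) before the removal
 * handshake is started.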
2682 * 2683 * Return: Nothing 2684 */ 2685 static void mpi3mr_sastopochg_evt_th(struct mpi3mr_ioc *mrioc, 2686 struct mpi3_event_notification_reply *event_reply) 2687 { 2688 struct mpi3_event_data_sas_topology_change_list *topo_evt = 2689 (struct mpi3_event_data_sas_topology_change_list *)event_reply->event_data; 2690 int i; 2691 u16 handle; 2692 u8 reason_code; 2693 struct mpi3mr_tgt_dev *tgtdev = NULL; 2694 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL; 2695 2696 for (i = 0; i < topo_evt->num_entries; i++) { 2697 handle = le16_to_cpu(topo_evt->phy_entry[i].attached_dev_handle); 2698 if (!handle) 2699 continue; 2700 reason_code = topo_evt->phy_entry[i].status & 2701 MPI3_EVENT_SAS_TOPO_PHY_RC_MASK; 2702 scsi_tgt_priv_data = NULL; 2703 tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle); 2704 if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata) 2705 scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *) 2706 tgtdev->starget->hostdata; 2707 switch (reason_code) { 2708 case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING: 2709 if (scsi_tgt_priv_data) { 2710 scsi_tgt_priv_data->dev_removed = 1; 2711 scsi_tgt_priv_data->dev_removedelay = 0; 2712 atomic_set(&scsi_tgt_priv_data->block_io, 0); 2713 } 2714 mpi3mr_dev_rmhs_send_tm(mrioc, handle, NULL, 2715 MPI3_CTRL_OP_REMOVE_DEVICE); 2716 break; 2717 case MPI3_EVENT_SAS_TOPO_PHY_RC_DELAY_NOT_RESPONDING: 2718 if (scsi_tgt_priv_data) { 2719 scsi_tgt_priv_data->dev_removedelay = 1; 2720 atomic_inc(&scsi_tgt_priv_data->block_io); 2721 } 2722 break; 2723 case MPI3_EVENT_SAS_TOPO_PHY_RC_RESPONDING: 2724 if (scsi_tgt_priv_data && 2725 scsi_tgt_priv_data->dev_removedelay) { 2726 scsi_tgt_priv_data->dev_removedelay = 0; 2727 atomic_dec_if_positive 2728 (&scsi_tgt_priv_data->block_io); 2729 } 2730 break; 2731 case MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED: 2732 default: 2733 break; 2734 } 2735 if (tgtdev) 2736 mpi3mr_tgtdev_put(tgtdev); 2737 } 2738 } 2739 2740 /** 2741 * mpi3mr_devstatuschg_evt_th - DeviceStatusChange evt tophalf 2742 * @mrioc: Adapter instance reference 2743 * @event_reply: event data 2744 * 2745 * Checks for the reason code and based on that either block I/O 2746 * to device, or unblock I/O to the device, or start the device 2747 * removal handshake with reason as remove/hide acknowledgment 2748 * with the firmware. 
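 *
 * Reason code to action mapping implemented below:
 *
 *   INT_DEVICE_RESET_STRT / INT_IT_NEXUS_RESET_STRT -> block
 *   INT_DEVICE_RESET_CMP  / INT_IT_NEXUS_RESET_CMP  -> unblock
 *   HIDDEN                                          -> delete + hide ack
 *   VD_NOT_RESPONDING                               -> delete + remove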
 *
 * Return: Nothing
 */
static void mpi3mr_devstatuschg_evt_th(struct mpi3mr_ioc *mrioc,
	struct mpi3_event_notification_reply *event_reply)
{
	u16 dev_handle = 0;
	u8 ublock = 0, block = 0, hide = 0, delete = 0, remove = 0;
	struct mpi3mr_tgt_dev *tgtdev = NULL;
	struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL;
	struct mpi3_event_data_device_status_change *evtdata =
	    (struct mpi3_event_data_device_status_change *)event_reply->event_data;

	if (mrioc->stop_drv_processing)
		goto out;

	dev_handle = le16_to_cpu(evtdata->dev_handle);

	switch (evtdata->reason_code) {
	case MPI3_EVENT_DEV_STAT_RC_INT_DEVICE_RESET_STRT:
	case MPI3_EVENT_DEV_STAT_RC_INT_IT_NEXUS_RESET_STRT:
		block = 1;
		break;
	case MPI3_EVENT_DEV_STAT_RC_HIDDEN:
		delete = 1;
		hide = 1;
		break;
	case MPI3_EVENT_DEV_STAT_RC_VD_NOT_RESPONDING:
		delete = 1;
		remove = 1;
		break;
	case MPI3_EVENT_DEV_STAT_RC_INT_DEVICE_RESET_CMP:
	case MPI3_EVENT_DEV_STAT_RC_INT_IT_NEXUS_RESET_CMP:
		ublock = 1;
		break;
	default:
		break;
	}

	tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle);
	if (!tgtdev)
		goto out;
	if (hide)
		tgtdev->is_hidden = hide;
	if (tgtdev->starget && tgtdev->starget->hostdata) {
		scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *)
		    tgtdev->starget->hostdata;
		if (block)
			atomic_inc(&scsi_tgt_priv_data->block_io);
		if (delete)
			scsi_tgt_priv_data->dev_removed = 1;
		if (ublock)
			atomic_dec_if_positive(&scsi_tgt_priv_data->block_io);
	}
	if (remove)
		mpi3mr_dev_rmhs_send_tm(mrioc, dev_handle, NULL,
		    MPI3_CTRL_OP_REMOVE_DEVICE);
	if (hide)
		mpi3mr_dev_rmhs_send_tm(mrioc, dev_handle, NULL,
		    MPI3_CTRL_OP_HIDDEN_ACK);

out:
	if (tgtdev)
		mpi3mr_tgtdev_put(tgtdev);
}

/**
 * mpi3mr_preparereset_evt_th - Prepare for reset event tophalf
 * @mrioc: Adapter instance reference
 * @event_reply: event data
 *
 * Blocks and unblocks host level I/O based on the reason code.
 *
 * Return: Nothing
 */
static void mpi3mr_preparereset_evt_th(struct mpi3mr_ioc *mrioc,
	struct mpi3_event_notification_reply *event_reply)
{
	struct mpi3_event_data_prepare_for_reset *evtdata =
	    (struct mpi3_event_data_prepare_for_reset *)event_reply->event_data;

	if (evtdata->reason_code == MPI3_EVENT_PREPARE_RESET_RC_START) {
		dprint_event_th(mrioc,
		    "prepare for reset event top half with rc=start\n");
		if (mrioc->prepare_for_reset)
			return;
		mrioc->prepare_for_reset = 1;
		mrioc->prepare_for_reset_timeout_counter = 0;
	} else if (evtdata->reason_code == MPI3_EVENT_PREPARE_RESET_RC_ABORT) {
		dprint_event_th(mrioc,
		    "prepare for reset top half with rc=abort\n");
		mrioc->prepare_for_reset = 0;
		mrioc->prepare_for_reset_timeout_counter = 0;
	}
	if ((event_reply->msg_flags & MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_MASK)
	    == MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_REQUIRED)
		mpi3mr_send_event_ack(mrioc, event_reply->event, NULL,
		    le32_to_cpu(event_reply->event_context));
}

/**
 * mpi3mr_energypackchg_evt_th - Energy pack change evt tophalf
 * @mrioc: Adapter instance reference
 * @event_reply: event data
 *
 * Identifies the new shutdown timeout value and updates it.
 *
 * Return: Nothing
 */
static void mpi3mr_energypackchg_evt_th(struct mpi3mr_ioc *mrioc,
	struct mpi3_event_notification_reply *event_reply)
{
	struct mpi3_event_data_energy_pack_change *evtdata =
	    (struct mpi3_event_data_energy_pack_change *)event_reply->event_data;
	u16 shutdown_timeout = le16_to_cpu(evtdata->shutdown_timeout);

	if (shutdown_timeout <= 0) {
		ioc_warn(mrioc,
		    "%s :Invalid Shutdown Timeout received = %d\n",
		    __func__, shutdown_timeout);
		return;
	}

	ioc_info(mrioc,
	    "%s :Previous Shutdown Timeout Value = %d New Shutdown Timeout Value = %d\n",
	    __func__, mrioc->facts.shutdown_timeout, shutdown_timeout);
	mrioc->facts.shutdown_timeout = shutdown_timeout;
}

/**
 * mpi3mr_cablemgmt_evt_th - Cable management event tophalf
 * @mrioc: Adapter instance reference
 * @event_reply: event data
 *
 * Displays cable management event details.
 *
 * Return: Nothing
 */
static void mpi3mr_cablemgmt_evt_th(struct mpi3mr_ioc *mrioc,
	struct mpi3_event_notification_reply *event_reply)
{
	struct mpi3_event_data_cable_management *evtdata =
	    (struct mpi3_event_data_cable_management *)event_reply->event_data;

	switch (evtdata->status) {
	case MPI3_EVENT_CABLE_MGMT_STATUS_INSUFFICIENT_POWER:
	{
		ioc_info(mrioc, "An active cable with receptacle_id %d cannot be powered.\n"
		    "Devices connected to this cable are not detected.\n"
		    "This cable requires %d mW of power.\n",
		    evtdata->receptacle_id,
		    le32_to_cpu(evtdata->active_cable_power_requirement));
		break;
	}
	case MPI3_EVENT_CABLE_MGMT_STATUS_DEGRADED:
	{
		ioc_info(mrioc, "A cable with receptacle_id %d is not running at optimal speed\n",
		    evtdata->receptacle_id);
		break;
	}
	default:
		break;
	}
}

/**
 * mpi3mr_add_event_wait_for_device_refresh - Add Wait for Device Refresh Event
 * @mrioc: Adapter instance reference
 *
 * Add driver specific event to make sure that the driver won't process the
 * events until all the devices are refreshed during soft reset.
 *
 * Return: Nothing
 */
void mpi3mr_add_event_wait_for_device_refresh(struct mpi3mr_ioc *mrioc)
{
	struct mpi3mr_fwevt *fwevt = NULL;

	fwevt = mpi3mr_alloc_fwevt(0);
	if (!fwevt) {
		dprint_event_th(mrioc,
		    "failed to schedule bottom half handler for event(0x%02x)\n",
		    MPI3_EVENT_WAIT_FOR_DEVICES_TO_REFRESH);
		return;
	}
	fwevt->mrioc = mrioc;
	fwevt->event_id = MPI3_EVENT_WAIT_FOR_DEVICES_TO_REFRESH;
	fwevt->send_ack = 0;
	fwevt->process_evt = 1;
	fwevt->evt_ctx = 0;
	fwevt->event_data_size = 0;
	mpi3mr_fwevt_add_to_list(mrioc, fwevt);
}

/**
 * mpi3mr_os_handle_events - Firmware event handler
 * @mrioc: Adapter instance reference
 * @event_reply: event data
 *
 * Identifies whether the event has to be handled and acknowledged,
 * and either processes the event in the top half and/or schedules a
 * bottom half through mpi3mr_fwevt_worker().
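 *
 * Sizing note for the bottom-half hand-off below: event_data_length
 * is in 4-byte units, so the copy into the allocated fwevt is
 *
 *   sz = event_reply->event_data_length * 4;
 *   fwevt = mpi3mr_alloc_fwevt(sz);
 *   memcpy(fwevt->event_data, event_reply->event_data, sz);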
2951 * 2952 * Return: Nothing 2953 */ 2954 void mpi3mr_os_handle_events(struct mpi3mr_ioc *mrioc, 2955 struct mpi3_event_notification_reply *event_reply) 2956 { 2957 u16 evt_type, sz; 2958 struct mpi3mr_fwevt *fwevt = NULL; 2959 bool ack_req = 0, process_evt_bh = 0; 2960 2961 if (mrioc->stop_drv_processing) 2962 return; 2963 2964 if ((event_reply->msg_flags & MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_MASK) 2965 == MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_REQUIRED) 2966 ack_req = 1; 2967 2968 evt_type = event_reply->event; 2969 mpi3mr_event_trigger(mrioc, event_reply->event); 2970 2971 switch (evt_type) { 2972 case MPI3_EVENT_DEVICE_ADDED: 2973 { 2974 struct mpi3_device_page0 *dev_pg0 = 2975 (struct mpi3_device_page0 *)event_reply->event_data; 2976 if (mpi3mr_create_tgtdev(mrioc, dev_pg0)) 2977 ioc_err(mrioc, 2978 "%s :Failed to add device in the device add event\n", 2979 __func__); 2980 else 2981 process_evt_bh = 1; 2982 break; 2983 } 2984 case MPI3_EVENT_DEVICE_STATUS_CHANGE: 2985 { 2986 process_evt_bh = 1; 2987 mpi3mr_devstatuschg_evt_th(mrioc, event_reply); 2988 break; 2989 } 2990 case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST: 2991 { 2992 process_evt_bh = 1; 2993 mpi3mr_sastopochg_evt_th(mrioc, event_reply); 2994 break; 2995 } 2996 case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST: 2997 { 2998 process_evt_bh = 1; 2999 mpi3mr_pcietopochg_evt_th(mrioc, event_reply); 3000 break; 3001 } 3002 case MPI3_EVENT_PREPARE_FOR_RESET: 3003 { 3004 mpi3mr_preparereset_evt_th(mrioc, event_reply); 3005 ack_req = 0; 3006 break; 3007 } 3008 case MPI3_EVENT_DIAGNOSTIC_BUFFER_STATUS_CHANGE: 3009 { 3010 mpi3mr_hdbstatuschg_evt_th(mrioc, event_reply); 3011 break; 3012 } 3013 case MPI3_EVENT_DEVICE_INFO_CHANGED: 3014 case MPI3_EVENT_LOG_DATA: 3015 case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE: 3016 case MPI3_EVENT_ENCL_DEVICE_ADDED: 3017 { 3018 process_evt_bh = 1; 3019 break; 3020 } 3021 case MPI3_EVENT_ENERGY_PACK_CHANGE: 3022 { 3023 mpi3mr_energypackchg_evt_th(mrioc, event_reply); 3024 break; 3025 } 3026 case MPI3_EVENT_CABLE_MGMT: 3027 { 3028 mpi3mr_cablemgmt_evt_th(mrioc, event_reply); 3029 break; 3030 } 3031 case MPI3_EVENT_SAS_DISCOVERY: 3032 case MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR: 3033 case MPI3_EVENT_SAS_BROADCAST_PRIMITIVE: 3034 case MPI3_EVENT_PCIE_ENUMERATION: 3035 break; 3036 default: 3037 ioc_info(mrioc, "%s :event 0x%02x is not handled\n", 3038 __func__, evt_type); 3039 break; 3040 } 3041 if (process_evt_bh || ack_req) { 3042 sz = event_reply->event_data_length * 4; 3043 fwevt = mpi3mr_alloc_fwevt(sz); 3044 if (!fwevt) { 3045 ioc_info(mrioc, "%s :failure at %s:%d/%s()!\n", 3046 __func__, __FILE__, __LINE__, __func__); 3047 return; 3048 } 3049 3050 memcpy(fwevt->event_data, event_reply->event_data, sz); 3051 fwevt->mrioc = mrioc; 3052 fwevt->event_id = evt_type; 3053 fwevt->send_ack = ack_req; 3054 fwevt->process_evt = process_evt_bh; 3055 fwevt->evt_ctx = le32_to_cpu(event_reply->event_context); 3056 mpi3mr_fwevt_add_to_list(mrioc, fwevt); 3057 } 3058 } 3059 3060 /** 3061 * mpi3mr_setup_eedp - Setup EEDP information in MPI3 SCSI IO 3062 * @mrioc: Adapter instance reference 3063 * @scmd: SCSI command reference 3064 * @scsiio_req: MPI3 SCSI IO request 3065 * 3066 * Identifies the protection information flags from the SCSI 3067 * command and set appropriate flags in the MPI3 SCSI IO 3068 * request. 
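 *
 * Worked example (flag names as used below): a SCSI_PROT_WRITE_PASS
 * command without IP checksum, with guard and reference checking
 * enabled and a 512-byte interval, ends up with
 *
 *   eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK |
 *       MPI3_EEDPFLAGS_CHK_GUARD |
 *       MPI3_EEDPFLAGS_CHK_REF_TAG |
 *       MPI3_EEDPFLAGS_INCR_PRI_REF_TAG |
 *       MPI3_EEDPFLAGS_ESC_MODE_APPTAG_DISABLE;
 *   scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_512;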
3069 * 3070 * Return: Nothing 3071 */ 3072 static void mpi3mr_setup_eedp(struct mpi3mr_ioc *mrioc, 3073 struct scsi_cmnd *scmd, struct mpi3_scsi_io_request *scsiio_req) 3074 { 3075 u16 eedp_flags = 0; 3076 unsigned char prot_op = scsi_get_prot_op(scmd); 3077 3078 switch (prot_op) { 3079 case SCSI_PROT_NORMAL: 3080 return; 3081 case SCSI_PROT_READ_STRIP: 3082 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK_REMOVE; 3083 break; 3084 case SCSI_PROT_WRITE_INSERT: 3085 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_INSERT; 3086 break; 3087 case SCSI_PROT_READ_INSERT: 3088 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_INSERT; 3089 scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID; 3090 break; 3091 case SCSI_PROT_WRITE_STRIP: 3092 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK_REMOVE; 3093 scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID; 3094 break; 3095 case SCSI_PROT_READ_PASS: 3096 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK; 3097 scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID; 3098 break; 3099 case SCSI_PROT_WRITE_PASS: 3100 if (scmd->prot_flags & SCSI_PROT_IP_CHECKSUM) { 3101 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK_REGEN; 3102 scsiio_req->sgl[0].eedp.application_tag_translation_mask = 3103 0xffff; 3104 } else 3105 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK; 3106 3107 scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID; 3108 break; 3109 default: 3110 return; 3111 } 3112 3113 if (scmd->prot_flags & SCSI_PROT_GUARD_CHECK) 3114 eedp_flags |= MPI3_EEDPFLAGS_CHK_GUARD; 3115 3116 if (scmd->prot_flags & SCSI_PROT_IP_CHECKSUM) 3117 eedp_flags |= MPI3_EEDPFLAGS_HOST_GUARD_IP_CHKSUM; 3118 3119 if (scmd->prot_flags & SCSI_PROT_REF_CHECK) { 3120 eedp_flags |= MPI3_EEDPFLAGS_CHK_REF_TAG | 3121 MPI3_EEDPFLAGS_INCR_PRI_REF_TAG; 3122 scsiio_req->cdb.eedp32.primary_reference_tag = 3123 cpu_to_be32(scsi_prot_ref_tag(scmd)); 3124 } 3125 3126 if (scmd->prot_flags & SCSI_PROT_REF_INCREMENT) 3127 eedp_flags |= MPI3_EEDPFLAGS_INCR_PRI_REF_TAG; 3128 3129 eedp_flags |= MPI3_EEDPFLAGS_ESC_MODE_APPTAG_DISABLE; 3130 3131 switch (scsi_prot_interval(scmd)) { 3132 case 512: 3133 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_512; 3134 break; 3135 case 520: 3136 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_520; 3137 break; 3138 case 4080: 3139 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4080; 3140 break; 3141 case 4088: 3142 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4088; 3143 break; 3144 case 4096: 3145 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4096; 3146 break; 3147 case 4104: 3148 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4104; 3149 break; 3150 case 4160: 3151 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4160; 3152 break; 3153 default: 3154 break; 3155 } 3156 3157 scsiio_req->sgl[0].eedp.eedp_flags = cpu_to_le16(eedp_flags); 3158 scsiio_req->sgl[0].eedp.flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_EXTENDED; 3159 } 3160 3161 /** 3162 * mpi3mr_build_sense_buffer - Map sense information 3163 * @desc: Sense type 3164 * @buf: Sense buffer to populate 3165 * @key: Sense key 3166 * @asc: Additional sense code 3167 * @ascq: Additional sense code qualifier 3168 * 3169 * Maps the given sense information into either descriptor or 3170 * fixed format sense data. 
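 *
 * For example, the EEDP guard error mapping below produces fixed
 * format sense ILLEGAL_REQUEST/0x10/0x01, laid out as
 *
 *   buf[0]  = 0x70;            fixed format, current error
 *   buf[2]  = ILLEGAL_REQUEST; sense key
 *   buf[7]  = 0xa;             additional sense length
 *   buf[12] = 0x10;            ASC
 *   buf[13] = 0x01;            ASCQ (logical block guard check failed)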
3171 * 3172 * Return: Nothing 3173 */ 3174 static inline void mpi3mr_build_sense_buffer(int desc, u8 *buf, u8 key, 3175 u8 asc, u8 ascq) 3176 { 3177 if (desc) { 3178 buf[0] = 0x72; /* descriptor, current */ 3179 buf[1] = key; 3180 buf[2] = asc; 3181 buf[3] = ascq; 3182 buf[7] = 0; 3183 } else { 3184 buf[0] = 0x70; /* fixed, current */ 3185 buf[2] = key; 3186 buf[7] = 0xa; 3187 buf[12] = asc; 3188 buf[13] = ascq; 3189 } 3190 } 3191 3192 /** 3193 * mpi3mr_map_eedp_error - Map EEDP errors from IOC status 3194 * @scmd: SCSI command reference 3195 * @ioc_status: status of MPI3 request 3196 * 3197 * Maps the EEDP error status of the SCSI IO request to sense 3198 * data. 3199 * 3200 * Return: Nothing 3201 */ 3202 static void mpi3mr_map_eedp_error(struct scsi_cmnd *scmd, 3203 u16 ioc_status) 3204 { 3205 u8 ascq = 0; 3206 3207 switch (ioc_status) { 3208 case MPI3_IOCSTATUS_EEDP_GUARD_ERROR: 3209 ascq = 0x01; 3210 break; 3211 case MPI3_IOCSTATUS_EEDP_APP_TAG_ERROR: 3212 ascq = 0x02; 3213 break; 3214 case MPI3_IOCSTATUS_EEDP_REF_TAG_ERROR: 3215 ascq = 0x03; 3216 break; 3217 default: 3218 ascq = 0x00; 3219 break; 3220 } 3221 3222 mpi3mr_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 3223 0x10, ascq); 3224 scmd->result = (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION; 3225 } 3226 3227 /** 3228 * mpi3mr_process_op_reply_desc - reply descriptor handler 3229 * @mrioc: Adapter instance reference 3230 * @reply_desc: Operational reply descriptor 3231 * @reply_dma: place holder for reply DMA address 3232 * @qidx: Operational queue index 3233 * 3234 * Process the operational reply descriptor and identifies the 3235 * descriptor type. Based on the descriptor map the MPI3 request 3236 * status to a SCSI command status and calls scsi_done call 3237 * back. 3238 * 3239 * Return: Nothing 3240 */ 3241 void mpi3mr_process_op_reply_desc(struct mpi3mr_ioc *mrioc, 3242 struct mpi3_default_reply_descriptor *reply_desc, u64 *reply_dma, u16 qidx) 3243 { 3244 u16 reply_desc_type, host_tag = 0; 3245 u16 ioc_status = MPI3_IOCSTATUS_SUCCESS; 3246 u32 ioc_loginfo = 0; 3247 struct mpi3_status_reply_descriptor *status_desc = NULL; 3248 struct mpi3_address_reply_descriptor *addr_desc = NULL; 3249 struct mpi3_success_reply_descriptor *success_desc = NULL; 3250 struct mpi3_scsi_io_reply *scsi_reply = NULL; 3251 struct scsi_cmnd *scmd = NULL; 3252 struct scmd_priv *priv = NULL; 3253 u8 *sense_buf = NULL; 3254 u8 scsi_state = 0, scsi_status = 0, sense_state = 0; 3255 u32 xfer_count = 0, sense_count = 0, resp_data = 0; 3256 u16 dev_handle = 0xFFFF; 3257 struct scsi_sense_hdr sshdr; 3258 struct mpi3mr_stgt_priv_data *stgt_priv_data = NULL; 3259 struct mpi3mr_sdev_priv_data *sdev_priv_data = NULL; 3260 u32 ioc_pend_data_len = 0, tg_pend_data_len = 0, data_len_blks = 0; 3261 struct mpi3mr_throttle_group_info *tg = NULL; 3262 u8 throttle_enabled_dev = 0; 3263 3264 *reply_dma = 0; 3265 reply_desc_type = le16_to_cpu(reply_desc->reply_flags) & 3266 MPI3_REPLY_DESCRIPT_FLAGS_TYPE_MASK; 3267 switch (reply_desc_type) { 3268 case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_STATUS: 3269 status_desc = (struct mpi3_status_reply_descriptor *)reply_desc; 3270 host_tag = le16_to_cpu(status_desc->host_tag); 3271 ioc_status = le16_to_cpu(status_desc->ioc_status); 3272 if (ioc_status & 3273 MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL) 3274 ioc_loginfo = le32_to_cpu(status_desc->ioc_log_info); 3275 ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK; 3276 mpi3mr_reply_trigger(mrioc, ioc_status, ioc_loginfo); 3277 break; 3278 case 
MPI3_REPLY_DESCRIPT_FLAGS_TYPE_ADDRESS_REPLY: 3279 addr_desc = (struct mpi3_address_reply_descriptor *)reply_desc; 3280 *reply_dma = le64_to_cpu(addr_desc->reply_frame_address); 3281 scsi_reply = mpi3mr_get_reply_virt_addr(mrioc, 3282 *reply_dma); 3283 if (!scsi_reply) { 3284 panic("%s: scsi_reply is NULL, this shouldn't happen\n", 3285 mrioc->name); 3286 goto out; 3287 } 3288 host_tag = le16_to_cpu(scsi_reply->host_tag); 3289 ioc_status = le16_to_cpu(scsi_reply->ioc_status); 3290 scsi_status = scsi_reply->scsi_status; 3291 scsi_state = scsi_reply->scsi_state; 3292 dev_handle = le16_to_cpu(scsi_reply->dev_handle); 3293 sense_state = (scsi_state & MPI3_SCSI_STATE_SENSE_MASK); 3294 xfer_count = le32_to_cpu(scsi_reply->transfer_count); 3295 sense_count = le32_to_cpu(scsi_reply->sense_count); 3296 resp_data = le32_to_cpu(scsi_reply->response_data); 3297 sense_buf = mpi3mr_get_sensebuf_virt_addr(mrioc, 3298 le64_to_cpu(scsi_reply->sense_data_buffer_address)); 3299 if (ioc_status & 3300 MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL) 3301 ioc_loginfo = le32_to_cpu(scsi_reply->ioc_log_info); 3302 ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK; 3303 if (sense_state == MPI3_SCSI_STATE_SENSE_BUFF_Q_EMPTY) 3304 panic("%s: Ran out of sense buffers\n", mrioc->name); 3305 if (sense_buf) { 3306 scsi_normalize_sense(sense_buf, sense_count, &sshdr); 3307 mpi3mr_scsisense_trigger(mrioc, sshdr.sense_key, 3308 sshdr.asc, sshdr.ascq); 3309 } 3310 mpi3mr_reply_trigger(mrioc, ioc_status, ioc_loginfo); 3311 break; 3312 case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SUCCESS: 3313 success_desc = (struct mpi3_success_reply_descriptor *)reply_desc; 3314 host_tag = le16_to_cpu(success_desc->host_tag); 3315 break; 3316 default: 3317 break; 3318 } 3319 scmd = mpi3mr_scmd_from_host_tag(mrioc, host_tag, qidx); 3320 if (!scmd) { 3321 panic("%s: Cannot Identify scmd for host_tag 0x%x\n", 3322 mrioc->name, host_tag); 3323 goto out; 3324 } 3325 priv = scsi_cmd_priv(scmd); 3326 3327 data_len_blks = scsi_bufflen(scmd) >> 9; 3328 sdev_priv_data = scmd->device->hostdata; 3329 if (sdev_priv_data) { 3330 stgt_priv_data = sdev_priv_data->tgt_priv_data; 3331 if (stgt_priv_data) { 3332 tg = stgt_priv_data->throttle_group; 3333 throttle_enabled_dev = 3334 stgt_priv_data->io_throttle_enabled; 3335 dev_handle = stgt_priv_data->dev_handle; 3336 } 3337 } 3338 if (unlikely((data_len_blks >= mrioc->io_throttle_data_length) && 3339 throttle_enabled_dev)) { 3340 ioc_pend_data_len = atomic_sub_return(data_len_blks, 3341 &mrioc->pend_large_data_sz); 3342 if (tg) { 3343 tg_pend_data_len = atomic_sub_return(data_len_blks, 3344 &tg->pend_large_data_sz); 3345 if (tg->io_divert && ((ioc_pend_data_len <= 3346 mrioc->io_throttle_low) && 3347 (tg_pend_data_len <= tg->low))) { 3348 tg->io_divert = 0; 3349 mpi3mr_set_io_divert_for_all_vd_in_tg( 3350 mrioc, tg, 0); 3351 } 3352 } else { 3353 if (ioc_pend_data_len <= mrioc->io_throttle_low) 3354 stgt_priv_data->io_divert = 0; 3355 } 3356 } else if (unlikely((stgt_priv_data && stgt_priv_data->io_divert))) { 3357 ioc_pend_data_len = atomic_read(&mrioc->pend_large_data_sz); 3358 if (!tg) { 3359 if (ioc_pend_data_len <= mrioc->io_throttle_low) 3360 stgt_priv_data->io_divert = 0; 3361 3362 } else if (ioc_pend_data_len <= mrioc->io_throttle_low) { 3363 tg_pend_data_len = atomic_read(&tg->pend_large_data_sz); 3364 if (tg->io_divert && (tg_pend_data_len <= tg->low)) { 3365 tg->io_divert = 0; 3366 mpi3mr_set_io_divert_for_all_vd_in_tg( 3367 mrioc, tg, 0); 3368 } 3369 } 3370 } 3371 3372 if (success_desc) { 
3373 scmd->result = DID_OK << 16; 3374 goto out_success; 3375 } 3376 3377 scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_count); 3378 if (ioc_status == MPI3_IOCSTATUS_SCSI_DATA_UNDERRUN && 3379 xfer_count == 0 && (scsi_status == MPI3_SCSI_STATUS_BUSY || 3380 scsi_status == MPI3_SCSI_STATUS_RESERVATION_CONFLICT || 3381 scsi_status == MPI3_SCSI_STATUS_TASK_SET_FULL)) 3382 ioc_status = MPI3_IOCSTATUS_SUCCESS; 3383 3384 if ((sense_state == MPI3_SCSI_STATE_SENSE_VALID) && sense_count && 3385 sense_buf) { 3386 u32 sz = min_t(u32, SCSI_SENSE_BUFFERSIZE, sense_count); 3387 3388 memcpy(scmd->sense_buffer, sense_buf, sz); 3389 } 3390 3391 switch (ioc_status) { 3392 case MPI3_IOCSTATUS_BUSY: 3393 case MPI3_IOCSTATUS_INSUFFICIENT_RESOURCES: 3394 scmd->result = SAM_STAT_BUSY; 3395 break; 3396 case MPI3_IOCSTATUS_SCSI_DEVICE_NOT_THERE: 3397 scmd->result = DID_NO_CONNECT << 16; 3398 break; 3399 case MPI3_IOCSTATUS_SCSI_IOC_TERMINATED: 3400 scmd->result = DID_SOFT_ERROR << 16; 3401 break; 3402 case MPI3_IOCSTATUS_SCSI_TASK_TERMINATED: 3403 case MPI3_IOCSTATUS_SCSI_EXT_TERMINATED: 3404 scmd->result = DID_RESET << 16; 3405 break; 3406 case MPI3_IOCSTATUS_SCSI_RESIDUAL_MISMATCH: 3407 if ((xfer_count == 0) || (scmd->underflow > xfer_count)) 3408 scmd->result = DID_SOFT_ERROR << 16; 3409 else 3410 scmd->result = (DID_OK << 16) | scsi_status; 3411 break; 3412 case MPI3_IOCSTATUS_SCSI_DATA_UNDERRUN: 3413 scmd->result = (DID_OK << 16) | scsi_status; 3414 if (sense_state == MPI3_SCSI_STATE_SENSE_VALID) 3415 break; 3416 if (xfer_count < scmd->underflow) { 3417 if (scsi_status == SAM_STAT_BUSY) 3418 scmd->result = SAM_STAT_BUSY; 3419 else 3420 scmd->result = DID_SOFT_ERROR << 16; 3421 } else if ((scsi_state & (MPI3_SCSI_STATE_NO_SCSI_STATUS)) || 3422 (sense_state != MPI3_SCSI_STATE_SENSE_NOT_AVAILABLE)) 3423 scmd->result = DID_SOFT_ERROR << 16; 3424 else if (scsi_state & MPI3_SCSI_STATE_TERMINATED) 3425 scmd->result = DID_RESET << 16; 3426 break; 3427 case MPI3_IOCSTATUS_SCSI_DATA_OVERRUN: 3428 scsi_set_resid(scmd, 0); 3429 fallthrough; 3430 case MPI3_IOCSTATUS_SCSI_RECOVERED_ERROR: 3431 case MPI3_IOCSTATUS_SUCCESS: 3432 scmd->result = (DID_OK << 16) | scsi_status; 3433 if ((scsi_state & (MPI3_SCSI_STATE_NO_SCSI_STATUS)) || 3434 (sense_state == MPI3_SCSI_STATE_SENSE_FAILED) || 3435 (sense_state == MPI3_SCSI_STATE_SENSE_BUFF_Q_EMPTY)) 3436 scmd->result = DID_SOFT_ERROR << 16; 3437 else if (scsi_state & MPI3_SCSI_STATE_TERMINATED) 3438 scmd->result = DID_RESET << 16; 3439 break; 3440 case MPI3_IOCSTATUS_EEDP_GUARD_ERROR: 3441 case MPI3_IOCSTATUS_EEDP_REF_TAG_ERROR: 3442 case MPI3_IOCSTATUS_EEDP_APP_TAG_ERROR: 3443 mpi3mr_map_eedp_error(scmd, ioc_status); 3444 break; 3445 case MPI3_IOCSTATUS_SCSI_PROTOCOL_ERROR: 3446 case MPI3_IOCSTATUS_INVALID_FUNCTION: 3447 case MPI3_IOCSTATUS_INVALID_SGL: 3448 case MPI3_IOCSTATUS_INTERNAL_ERROR: 3449 case MPI3_IOCSTATUS_INVALID_FIELD: 3450 case MPI3_IOCSTATUS_INVALID_STATE: 3451 case MPI3_IOCSTATUS_SCSI_IO_DATA_ERROR: 3452 case MPI3_IOCSTATUS_SCSI_TASK_MGMT_FAILED: 3453 case MPI3_IOCSTATUS_INSUFFICIENT_POWER: 3454 default: 3455 scmd->result = DID_SOFT_ERROR << 16; 3456 break; 3457 } 3458 3459 if (scmd->result != (DID_OK << 16) && (scmd->cmnd[0] != ATA_12) && 3460 (scmd->cmnd[0] != ATA_16) && 3461 mrioc->logging_level & MPI3_DEBUG_SCSI_ERROR) { 3462 ioc_info(mrioc, "%s :scmd->result 0x%x\n", __func__, 3463 scmd->result); 3464 scsi_print_command(scmd); 3465 ioc_info(mrioc, 3466 "%s :Command issued to handle 0x%02x returned with error 0x%04x loginfo 0x%08x, qid %d\n", 3467 __func__, 
dev_handle, ioc_status, ioc_loginfo, 3468 priv->req_q_idx + 1); 3469 ioc_info(mrioc, 3470 " host_tag %d scsi_state 0x%02x scsi_status 0x%02x, xfer_cnt %d resp_data 0x%x\n", 3471 host_tag, scsi_state, scsi_status, xfer_count, resp_data); 3472 if (sense_buf) { 3473 scsi_normalize_sense(sense_buf, sense_count, &sshdr); 3474 ioc_info(mrioc, 3475 "%s :sense_count 0x%x, sense_key 0x%x ASC 0x%x, ASCQ 0x%x\n", 3476 __func__, sense_count, sshdr.sense_key, 3477 sshdr.asc, sshdr.ascq); 3478 } 3479 } 3480 out_success: 3481 if (priv->meta_sg_valid) { 3482 dma_unmap_sg(&mrioc->pdev->dev, scsi_prot_sglist(scmd), 3483 scsi_prot_sg_count(scmd), scmd->sc_data_direction); 3484 } 3485 mpi3mr_clear_scmd_priv(mrioc, scmd); 3486 scsi_dma_unmap(scmd); 3487 scsi_done(scmd); 3488 out: 3489 if (sense_buf) 3490 mpi3mr_repost_sense_buf(mrioc, 3491 le64_to_cpu(scsi_reply->sense_data_buffer_address)); 3492 } 3493 3494 /** 3495 * mpi3mr_get_chain_idx - get free chain buffer index 3496 * @mrioc: Adapter instance reference 3497 * 3498 * Try to get a free chain buffer index from the free pool. 3499 * 3500 * Return: -1 on failure or the free chain buffer index 3501 */ 3502 static int mpi3mr_get_chain_idx(struct mpi3mr_ioc *mrioc) 3503 { 3504 u8 retry_count = 5; 3505 int cmd_idx = -1; 3506 unsigned long flags; 3507 3508 spin_lock_irqsave(&mrioc->chain_buf_lock, flags); 3509 do { 3510 cmd_idx = find_first_zero_bit(mrioc->chain_bitmap, 3511 mrioc->chain_buf_count); 3512 if (cmd_idx < mrioc->chain_buf_count) { 3513 set_bit(cmd_idx, mrioc->chain_bitmap); 3514 break; 3515 } 3516 cmd_idx = -1; 3517 } while (retry_count--); 3518 spin_unlock_irqrestore(&mrioc->chain_buf_lock, flags); 3519 return cmd_idx; 3520 } 3521 3522 /** 3523 * mpi3mr_prepare_sg_scmd - build scatter gather list 3524 * @mrioc: Adapter instance reference 3525 * @scmd: SCSI command reference 3526 * @scsiio_req: MPI3 SCSI IO request 3527 * 3528 * This function maps SCSI command's data and protection SGEs to 3529 * MPI request SGEs. If required additional 4K chain buffer is 3530 * used to send the SGEs. 
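 *
 * Capacity sketch: the number of simple SGEs that fit in the main
 * request frame is derived from the operational request size,
 *
 *   sges_in_segment = (mrioc->facts.op_req_sz -
 *       offsetof(struct mpi3_scsi_io_request, sgl)) /
 *       sizeof(struct mpi3_sge_common);
 *
 * one SGE is then reserved when EEDP and/or the meta SG pointer is
 * present, and anything that does not fit spills into a single
 * last-chain segment taken from the chain buffer pool.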
3531 * 3532 * Return: 0 on success, -ENOMEM on dma_map_sg failure 3533 */ 3534 static int mpi3mr_prepare_sg_scmd(struct mpi3mr_ioc *mrioc, 3535 struct scsi_cmnd *scmd, struct mpi3_scsi_io_request *scsiio_req) 3536 { 3537 dma_addr_t chain_dma; 3538 struct scatterlist *sg_scmd; 3539 void *sg_local, *chain; 3540 u32 chain_length; 3541 int sges_left, chain_idx; 3542 u32 sges_in_segment; 3543 u8 simple_sgl_flags; 3544 u8 simple_sgl_flags_last; 3545 u8 last_chain_sgl_flags; 3546 struct chain_element *chain_req; 3547 struct scmd_priv *priv = NULL; 3548 u32 meta_sg = le32_to_cpu(scsiio_req->flags) & 3549 MPI3_SCSIIO_FLAGS_DMAOPERATION_HOST_PI; 3550 3551 priv = scsi_cmd_priv(scmd); 3552 3553 simple_sgl_flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_SIMPLE | 3554 MPI3_SGE_FLAGS_DLAS_SYSTEM; 3555 simple_sgl_flags_last = simple_sgl_flags | 3556 MPI3_SGE_FLAGS_END_OF_LIST; 3557 last_chain_sgl_flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_LAST_CHAIN | 3558 MPI3_SGE_FLAGS_DLAS_SYSTEM; 3559 3560 if (meta_sg) 3561 sg_local = &scsiio_req->sgl[MPI3_SCSIIO_METASGL_INDEX]; 3562 else 3563 sg_local = &scsiio_req->sgl; 3564 3565 if (!scsiio_req->data_length && !meta_sg) { 3566 mpi3mr_build_zero_len_sge(sg_local); 3567 return 0; 3568 } 3569 3570 if (meta_sg) { 3571 sg_scmd = scsi_prot_sglist(scmd); 3572 sges_left = dma_map_sg(&mrioc->pdev->dev, 3573 scsi_prot_sglist(scmd), 3574 scsi_prot_sg_count(scmd), 3575 scmd->sc_data_direction); 3576 priv->meta_sg_valid = 1; /* To unmap meta sg DMA */ 3577 } else { 3578 /* 3579 * Some firmware versions byte-swap the REPORT ZONES command 3580 * reply from ATA-ZAC devices by directly accessing in the host 3581 * buffer. This does not respect the default command DMA 3582 * direction and causes IOMMU page faults on some architectures 3583 * with an IOMMU enforcing write mappings (e.g. AMD hosts). 3584 * Avoid such issue by making the REPORT ZONES buffer mapping 3585 * bi-directional. 
		 */
		if (scmd->cmnd[0] == ZBC_IN && scmd->cmnd[1] == ZI_REPORT_ZONES)
			scmd->sc_data_direction = DMA_BIDIRECTIONAL;
		sg_scmd = scsi_sglist(scmd);
		sges_left = scsi_dma_map(scmd);
	}

	if (sges_left < 0) {
		sdev_printk(KERN_ERR, scmd->device,
		    "scsi_dma_map failed: request for %d bytes!\n",
		    scsi_bufflen(scmd));
		return -ENOMEM;
	}
	if (sges_left > mrioc->max_sgl_entries) {
		sdev_printk(KERN_ERR, scmd->device,
		    "scsi_dma_map returned unsupported sge count %d!\n",
		    sges_left);
		return -ENOMEM;
	}

	sges_in_segment = (mrioc->facts.op_req_sz -
	    offsetof(struct mpi3_scsi_io_request, sgl)) / sizeof(struct mpi3_sge_common);

	if (scsiio_req->sgl[0].eedp.flags ==
	    MPI3_SGE_FLAGS_ELEMENT_TYPE_EXTENDED && !meta_sg) {
		sg_local += sizeof(struct mpi3_sge_common);
		sges_in_segment--;
		/* Reserve 1st segment (scsiio_req->sgl[0]) for eedp */
	}

	if (scsiio_req->msg_flags ==
	    MPI3_SCSIIO_MSGFLAGS_METASGL_VALID && !meta_sg) {
		sges_in_segment--;
		/* Reserve last segment (scsiio_req->sgl[3]) for meta sg */
	}

	if (meta_sg)
		sges_in_segment = 1;

	if (sges_left <= sges_in_segment)
		goto fill_in_last_segment;

	/* fill in main message segment when there is a chain following */
	while (sges_in_segment > 1) {
		mpi3mr_add_sg_single(sg_local, simple_sgl_flags,
		    sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
		sg_scmd = sg_next(sg_scmd);
		sg_local += sizeof(struct mpi3_sge_common);
		sges_left--;
		sges_in_segment--;
	}

	chain_idx = mpi3mr_get_chain_idx(mrioc);
	if (chain_idx < 0)
		return -1;
	chain_req = &mrioc->chain_sgl_list[chain_idx];
	if (meta_sg)
		priv->meta_chain_idx = chain_idx;
	else
		priv->chain_idx = chain_idx;

	chain = chain_req->addr;
	chain_dma = chain_req->dma_addr;
	sges_in_segment = sges_left;
	chain_length = sges_in_segment * sizeof(struct mpi3_sge_common);

	mpi3mr_add_sg_single(sg_local, last_chain_sgl_flags,
	    chain_length, chain_dma);

	sg_local = chain;

fill_in_last_segment:
	while (sges_left > 0) {
		if (sges_left == 1)
			mpi3mr_add_sg_single(sg_local,
			    simple_sgl_flags_last, sg_dma_len(sg_scmd),
			    sg_dma_address(sg_scmd));
		else
			mpi3mr_add_sg_single(sg_local, simple_sgl_flags,
			    sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
		sg_scmd = sg_next(sg_scmd);
		sg_local += sizeof(struct mpi3_sge_common);
		sges_left--;
	}

	return 0;
}

/**
 * mpi3mr_build_sg_scmd - build scatter gather list for SCSI IO
 * @mrioc: Adapter instance reference
 * @scmd: SCSI command reference
 * @scsiio_req: MPI3 SCSI IO request
 *
 * This function calls mpi3mr_prepare_sg_scmd for constructing
 * both data SGEs and protection information SGEs in the MPI
 * format from the SCSI command as appropriate.
 *
 * Return: return value of mpi3mr_prepare_sg_scmd.
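 *
 * Call pattern: the helper runs once for the data SGL and, when
 * MPI3_SCSIIO_MSGFLAGS_METASGL_VALID is set, a second time with
 * MPI3_SCSIIO_FLAGS_DMAOPERATION_HOST_PI in scsiio_req->flags so the
 * same code path fills the protection information SGE instead of the
 * data SGL.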
 */
static int mpi3mr_build_sg_scmd(struct mpi3mr_ioc *mrioc,
	struct scsi_cmnd *scmd, struct mpi3_scsi_io_request *scsiio_req)
{
	int ret;

	ret = mpi3mr_prepare_sg_scmd(mrioc, scmd, scsiio_req);
	if (ret)
		return ret;

	if (scsiio_req->msg_flags == MPI3_SCSIIO_MSGFLAGS_METASGL_VALID) {
		/* There is a valid meta sg */
		scsiio_req->flags |=
		    cpu_to_le32(MPI3_SCSIIO_FLAGS_DMAOPERATION_HOST_PI);
		ret = mpi3mr_prepare_sg_scmd(mrioc, scmd, scsiio_req);
	}

	return ret;
}

/**
 * mpi3mr_tm_response_name - get TM response as a string
 * @resp_code: TM response code
 *
 * Convert a known task management response code to a readable
 * string.
 *
 * Return: response code string.
 */
static const char *mpi3mr_tm_response_name(u8 resp_code)
{
	const char *desc;

	switch (resp_code) {
	case MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE:
		desc = "task management request completed";
		break;
	case MPI3_SCSITASKMGMT_RSPCODE_INVALID_FRAME:
		desc = "invalid frame";
		break;
	case MPI3_SCSITASKMGMT_RSPCODE_TM_FUNCTION_NOT_SUPPORTED:
		desc = "task management request not supported";
		break;
	case MPI3_SCSITASKMGMT_RSPCODE_TM_FAILED:
		desc = "task management request failed";
		break;
	case MPI3_SCSITASKMGMT_RSPCODE_TM_SUCCEEDED:
		desc = "task management request succeeded";
		break;
	case MPI3_SCSITASKMGMT_RSPCODE_TM_INVALID_LUN:
		desc = "invalid LUN";
		break;
	case MPI3_SCSITASKMGMT_RSPCODE_TM_OVERLAPPED_TAG:
		desc = "overlapped tag attempted";
		break;
	case MPI3_SCSITASKMGMT_RSPCODE_IO_QUEUED_ON_IOC:
		desc = "task queued, however not sent to target";
		break;
	case MPI3_SCSITASKMGMT_RSPCODE_TM_NVME_DENIED:
		desc = "task management request denied by NVMe device";
		break;
	default:
		desc = "unknown";
		break;
	}

	return desc;
}

/**
 * mpi3mr_poll_pend_io_completions - poll for pending I/O completions
 * @mrioc: Adapter instance reference
 *
 * Walk all operational reply queues once and process any pending
 * I/O completions found on them.
 */
inline void mpi3mr_poll_pend_io_completions(struct mpi3mr_ioc *mrioc)
{
	int i;
	int num_of_reply_queues =
	    mrioc->num_op_reply_q + mrioc->op_reply_q_offset;

	for (i = mrioc->op_reply_q_offset; i < num_of_reply_queues; i++)
		mpi3mr_process_op_reply_q(mrioc,
		    mrioc->intr_info[i].op_reply_q);
}

/**
 * mpi3mr_issue_tm - Issue Task Management request
 * @mrioc: Adapter instance reference
 * @tm_type: Task Management type
 * @handle: Device handle
 * @lun: lun ID
 * @htag: Host tag of the TM request
 * @timeout: TM timeout value
 * @drv_cmd: Internal command tracker
 * @resp_code: Response code place holder
 * @scmd: SCSI command
 *
 * Issues a Task Management request to the controller for the
 * specified target, LUN and command, waits for its completion and
 * checks the TM response. Recovers from a TM timeout by issuing a
 * controller reset.
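 *
 * Example (mirroring the error-handling callers further below):
 *
 *   ret = mpi3mr_issue_tm(mrioc,
 *       MPI3_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, dev_handle,
 *       sdev_priv_data->lun_id, MPI3MR_HOSTTAG_BLK_TMS,
 *       MPI3MR_RESETTM_TIMEOUT, &mrioc->host_tm_cmds, &resp_code, scmd);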
 *
 * Return: 0 on success, non-zero on errors
 */
int mpi3mr_issue_tm(struct mpi3mr_ioc *mrioc, u8 tm_type,
	u16 handle, uint lun, u16 htag, ulong timeout,
	struct mpi3mr_drv_cmd *drv_cmd,
	u8 *resp_code, struct scsi_cmnd *scmd)
{
	struct mpi3_scsi_task_mgmt_request tm_req;
	struct mpi3_scsi_task_mgmt_reply *tm_reply = NULL;
	int retval = 0;
	struct mpi3mr_tgt_dev *tgtdev = NULL;
	struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL;
	struct scmd_priv *cmd_priv = NULL;
	struct scsi_device *sdev = NULL;
	struct mpi3mr_sdev_priv_data *sdev_priv_data = NULL;

	ioc_info(mrioc, "%s :Issue TM: TM type (0x%x) for devhandle 0x%04x\n",
	    __func__, tm_type, handle);
	if (mrioc->unrecoverable) {
		retval = -1;
		ioc_err(mrioc, "%s :Issue TM: Unrecoverable controller\n",
		    __func__);
		goto out;
	}

	memset(&tm_req, 0, sizeof(tm_req));
	mutex_lock(&drv_cmd->mutex);
	if (drv_cmd->state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "%s :Issue TM: Command is in use\n", __func__);
		mutex_unlock(&drv_cmd->mutex);
		goto out;
	}
	if (mrioc->reset_in_progress) {
		retval = -1;
		ioc_err(mrioc, "%s :Issue TM: Reset in progress\n", __func__);
		mutex_unlock(&drv_cmd->mutex);
		goto out;
	}
	if (mrioc->block_on_pci_err) {
		retval = -1;
		dprint_tm(mrioc,
		    "sending task management failed due to pci error recovery in progress\n");
		mutex_unlock(&drv_cmd->mutex);
		goto out;
	}

	drv_cmd->state = MPI3MR_CMD_PENDING;
	drv_cmd->is_waiting = 1;
	drv_cmd->callback = NULL;
	tm_req.dev_handle = cpu_to_le16(handle);
	tm_req.task_type = tm_type;
	tm_req.host_tag = cpu_to_le16(htag);

	int_to_scsilun(lun, (struct scsi_lun *)tm_req.lun);
	tm_req.function = MPI3_FUNCTION_SCSI_TASK_MGMT;

	tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);

	if (scmd) {
		if (tm_type == MPI3_SCSITASKMGMT_TASKTYPE_ABORT_TASK) {
			struct op_req_qinfo *op_req_q;

			cmd_priv = scsi_cmd_priv(scmd);
			if (!cmd_priv)
				goto out_unlock;

			op_req_q = &mrioc->req_qinfo[cmd_priv->req_q_idx];
			tm_req.task_host_tag = cpu_to_le16(cmd_priv->host_tag);
			tm_req.task_request_queue_id =
			    cpu_to_le16(op_req_q->qid);
		}
		sdev = scmd->device;
		sdev_priv_data = sdev->hostdata;
		scsi_tgt_priv_data = ((sdev_priv_data) ?
		    sdev_priv_data->tgt_priv_data : NULL);
	} else {
		if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata)
			scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *)
			    tgtdev->starget->hostdata;
	}

	if (scsi_tgt_priv_data)
		atomic_inc(&scsi_tgt_priv_data->block_io);

	if (tgtdev && (tgtdev->dev_type == MPI3_DEVICE_DEVFORM_PCIE)) {
		if (cmd_priv && tgtdev->dev_spec.pcie_inf.abort_to)
			timeout = tgtdev->dev_spec.pcie_inf.abort_to;
		else if (!cmd_priv && tgtdev->dev_spec.pcie_inf.reset_to)
			timeout = tgtdev->dev_spec.pcie_inf.reset_to;
	}

	init_completion(&drv_cmd->done);
	retval = mpi3mr_admin_request_post(mrioc, &tm_req, sizeof(tm_req), 1);
	if (retval) {
		ioc_err(mrioc, "%s :Issue TM: Admin Post failed\n", __func__);
		goto out_unlock;
	}
	wait_for_completion_timeout(&drv_cmd->done, (timeout * HZ));

	if (!(drv_cmd->state & MPI3MR_CMD_COMPLETE)) {
		drv_cmd->is_waiting = 0;
		retval = -1;
		if (!(drv_cmd->state & MPI3MR_CMD_RESET)) {
			dprint_tm(mrioc,
			    "task management request timed out after %lu seconds\n",
			    timeout);
			if (mrioc->logging_level & MPI3_DEBUG_TM)
				dprint_dump_req(&tm_req, sizeof(tm_req)/4);
			mpi3mr_soft_reset_handler(mrioc,
			    MPI3MR_RESET_FROM_TM_TIMEOUT, 1);
		}
		goto out_unlock;
	}

	if (!(drv_cmd->state & MPI3MR_CMD_REPLY_VALID)) {
		dprint_tm(mrioc, "invalid task management reply message\n");
		retval = -1;
		goto out_unlock;
	}

	tm_reply = (struct mpi3_scsi_task_mgmt_reply *)drv_cmd->reply;

	switch (drv_cmd->ioc_status) {
	case MPI3_IOCSTATUS_SUCCESS:
		*resp_code = le32_to_cpu(tm_reply->response_data) &
		    MPI3MR_RI_MASK_RESPCODE;
		break;
	case MPI3_IOCSTATUS_SCSI_IOC_TERMINATED:
		*resp_code = MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE;
		break;
	default:
		dprint_tm(mrioc,
		    "task management request to handle(0x%04x) failed with ioc_status(0x%04x) log_info(0x%08x)\n",
		    handle, drv_cmd->ioc_status, drv_cmd->ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}

	switch (*resp_code) {
	case MPI3_SCSITASKMGMT_RSPCODE_TM_SUCCEEDED:
	case MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE:
		break;
	case MPI3_SCSITASKMGMT_RSPCODE_IO_QUEUED_ON_IOC:
		if (tm_type != MPI3_SCSITASKMGMT_TASKTYPE_QUERY_TASK)
			retval = -1;
		break;
	default:
		retval = -1;
		break;
	}

	dprint_tm(mrioc,
	    "task management request type(%d) completed for handle(0x%04x) with ioc_status(0x%04x), log_info(0x%08x), termination_count(%d), response:%s(0x%x)\n",
	    tm_type, handle, drv_cmd->ioc_status, drv_cmd->ioc_loginfo,
	    le32_to_cpu(tm_reply->termination_count),
	    mpi3mr_tm_response_name(*resp_code), *resp_code);

	if (!retval) {
		mpi3mr_ioc_disable_intr(mrioc);
		mpi3mr_poll_pend_io_completions(mrioc);
		mpi3mr_ioc_enable_intr(mrioc);
		mpi3mr_poll_pend_io_completions(mrioc);
		mpi3mr_process_admin_reply_q(mrioc);
	}
	switch (tm_type) {
	case MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
		if (!scsi_tgt_priv_data)
			break;
		scsi_tgt_priv_data->pend_count = 0;
		blk_mq_tagset_busy_iter(&mrioc->shost->tag_set,
		    mpi3mr_count_tgt_pending,
		    (void *)scsi_tgt_priv_data->starget);
		break;
	case MPI3_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
		if (!sdev_priv_data)
			break;
		sdev_priv_data->pend_count = 0;
		blk_mq_tagset_busy_iter(&mrioc->shost->tag_set,
		    mpi3mr_count_dev_pending, (void *)sdev);
		break;
	default:
		break;
	}
	mpi3mr_global_trigger(mrioc,
	    MPI3_DRIVER2_GLOBALTRIGGER_TASK_MANAGEMENT_ENABLED);

out_unlock:
	drv_cmd->state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&drv_cmd->mutex);
	if (scsi_tgt_priv_data)
		atomic_dec_if_positive(&scsi_tgt_priv_data->block_io);
	if (tgtdev)
		mpi3mr_tgtdev_put(tgtdev);
out:
	return retval;
}

/**
 * mpi3mr_bios_param - BIOS param callback
 * @sdev: SCSI device reference
 * @bdev: Block device reference
 * @capacity: Capacity in logical sectors
 * @params: Parameter array
 *
 * Set the heads/sectors/cylinders parameters for the given capacity.
 *
 * Return: 0 always
 */
static int mpi3mr_bios_param(struct scsi_device *sdev,
	struct block_device *bdev, sector_t capacity, int params[])
{
	int heads;
	int sectors;
	sector_t cylinders;
	ulong dummy;

	heads = 64;
	sectors = 32;

	dummy = heads * sectors;
	cylinders = capacity;
	sector_div(cylinders, dummy);

	if ((ulong)capacity >= 0x200000) {
		heads = 255;
		sectors = 63;
		dummy = heads * sectors;
		cylinders = capacity;
		sector_div(cylinders, dummy);
	}

	params[0] = heads;
	params[1] = sectors;
	params[2] = cylinders;
	return 0;
}

/**
 * mpi3mr_map_queues - Map queues callback handler
 * @shost: SCSI host reference
 *
 * Maps default and poll queues.
 *
 * Return: Nothing.
 */
static void mpi3mr_map_queues(struct Scsi_Host *shost)
{
	struct mpi3mr_ioc *mrioc = shost_priv(shost);
	int i, qoff, offset;
	struct blk_mq_queue_map *map = NULL;

	offset = mrioc->op_reply_q_offset;

	for (i = 0, qoff = 0; i < HCTX_MAX_TYPES; i++) {
		map = &shost->tag_set.map[i];

		map->nr_queues = 0;

		if (i == HCTX_TYPE_DEFAULT)
			map->nr_queues = mrioc->default_qcount;
		else if (i == HCTX_TYPE_POLL)
			map->nr_queues = mrioc->active_poll_qcount;

		if (!map->nr_queues) {
			BUG_ON(i == HCTX_TYPE_DEFAULT);
			continue;
		}

		/*
		 * The poll queue(s) doesn't have an IRQ (and hence IRQ
		 * affinity), so use the regular blk-mq cpu mapping
		 */
		map->queue_offset = qoff;
		if (i != HCTX_TYPE_POLL)
			blk_mq_map_hw_queues(map, &mrioc->pdev->dev, offset);
		else
			blk_mq_map_queues(map);

		qoff += map->nr_queues;
		offset += map->nr_queues;
	}
}

/**
 * mpi3mr_get_fw_pending_ios - Calculate pending I/O count
 * @mrioc: Adapter instance reference
 *
 * Calculate the pending I/Os for the controller and return.
 *
 * Return: Number of pending I/Os
 */
static inline int mpi3mr_get_fw_pending_ios(struct mpi3mr_ioc *mrioc)
{
	u16 i;
	uint pend_ios = 0;

	for (i = 0; i < mrioc->num_op_reply_q; i++)
		pend_ios += atomic_read(&mrioc->op_reply_qinfo[i].pend_ios);
	return pend_ios;
}

/**
 * mpi3mr_print_pending_host_io - print pending I/Os
 * @mrioc: Adapter instance reference
 *
 * Print the number of pending I/Os, and the details of each, prior
 * to a reset, for debugging.
 *
 * Return: Nothing
 */
static void mpi3mr_print_pending_host_io(struct mpi3mr_ioc *mrioc)
{
	struct Scsi_Host *shost = mrioc->shost;

	ioc_info(mrioc, "%s :Pending commands prior to reset: %d\n",
	    __func__, mpi3mr_get_fw_pending_ios(mrioc));
	blk_mq_tagset_busy_iter(&shost->tag_set,
	    mpi3mr_print_scmd, (void *)mrioc);
}

/**
 * mpi3mr_wait_for_host_io - block for I/Os to complete
 * @mrioc: Adapter instance reference
 * @timeout: time out in seconds
 *
 * Waits for pending I/Os for the given adapter to complete or
 * for the timeout to expire.
 *
 * Return: Nothing
 */
void mpi3mr_wait_for_host_io(struct mpi3mr_ioc *mrioc, u32 timeout)
{
	enum mpi3mr_iocstate iocstate;
	int i = 0;

	iocstate = mpi3mr_get_iocstate(mrioc);
	if (iocstate != MRIOC_STATE_READY)
		return;

	if (!mpi3mr_get_fw_pending_ios(mrioc))
		return;
	ioc_info(mrioc,
	    "%s :Waiting for %d seconds prior to reset for %d I/Os\n",
	    __func__, timeout, mpi3mr_get_fw_pending_ios(mrioc));

	for (i = 0; i < timeout; i++) {
		if (!mpi3mr_get_fw_pending_ios(mrioc))
			break;
		iocstate = mpi3mr_get_iocstate(mrioc);
		if (iocstate != MRIOC_STATE_READY)
			break;
		msleep(1000);
	}

	ioc_info(mrioc, "%s :Pending I/Os after wait: %d\n", __func__,
	    mpi3mr_get_fw_pending_ios(mrioc));
}

/**
 * mpi3mr_setup_divert_ws - Setup Divert IO flag for write same
 * @mrioc: Adapter instance reference
 * @scmd: SCSI command reference
 * @scsiio_req: MPI3 SCSI IO request
 * @scsiio_flags: Pointer to MPI3 SCSI IO Flags
 * @wslen: write same max length
 *
 * Reads the UNMAP and NDOB bits and the number of blocks from a
 * WRITE SAME SCSI IO and, based on these values, sets the divert IO
 * flag and the reason for diverting the IO to firmware.
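 *
 * Example (decoding the checks below): a WRITE SAME(16) CDB with the
 * UNMAP (byte 1, bit 3) and NDOB (byte 1, bit 0) bits set and a
 * NUMBER OF BLOCKS field larger than @wslen is marked for firmware
 * divert.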
 *
 * Return: Nothing
 */
static inline void mpi3mr_setup_divert_ws(struct mpi3mr_ioc *mrioc,
	struct scsi_cmnd *scmd, struct mpi3_scsi_io_request *scsiio_req,
	u32 *scsiio_flags, u16 wslen)
{
	u8 unmap = 0, ndob = 0;
	u8 opcode = scmd->cmnd[0];
	u32 num_blocks = 0;
	u16 sa = (scmd->cmnd[8] << 8) | (scmd->cmnd[9]);

	if (opcode == WRITE_SAME_16) {
		unmap = scmd->cmnd[1] & 0x08;
		ndob = scmd->cmnd[1] & 0x01;
		num_blocks = get_unaligned_be32(scmd->cmnd + 10);
	} else if ((opcode == VARIABLE_LENGTH_CMD) && (sa == WRITE_SAME_32)) {
		unmap = scmd->cmnd[10] & 0x08;
		ndob = scmd->cmnd[10] & 0x01;
		num_blocks = get_unaligned_be32(scmd->cmnd + 28);
	} else
		return;

	if ((unmap) && (ndob) && (num_blocks > wslen)) {
		scsiio_req->msg_flags |=
		    MPI3_SCSIIO_MSGFLAGS_DIVERT_TO_FIRMWARE;
		*scsiio_flags |=
		    MPI3_SCSIIO_FLAGS_DIVERT_REASON_WRITE_SAME_TOO_LARGE;
	}
}

/**
 * mpi3mr_eh_host_reset - Host reset error handling callback
 * @scmd: SCSI command reference
 *
 * Issues a controller reset.
 *
 * Return: SUCCESS on a successful reset, else FAILED
 */
static int mpi3mr_eh_host_reset(struct scsi_cmnd *scmd)
{
	struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host);
	int retval = FAILED, ret;

	ret = mpi3mr_soft_reset_handler(mrioc,
	    MPI3MR_RESET_FROM_EH_HOS, 1);
	if (ret)
		goto out;

	retval = SUCCESS;
out:
	sdev_printk(KERN_INFO, scmd->device,
	    "Host reset is %s for scmd(%p)\n",
	    ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);

	return retval;
}

/**
 * mpi3mr_eh_bus_reset - Bus reset error handling callback
 * @scmd: SCSI command reference
 *
 * Checks whether pending I/Os are present for the RAID volume;
 * if not, there is no need to reset the adapter.
 *
 * Return: SUCCESS on a successful reset, else FAILED
 */
static int mpi3mr_eh_bus_reset(struct scsi_cmnd *scmd)
{
	struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host);
	struct mpi3mr_stgt_priv_data *stgt_priv_data;
	struct mpi3mr_sdev_priv_data *sdev_priv_data;
	u8 dev_type = MPI3_DEVICE_DEVFORM_VD;
	int retval = FAILED;
	unsigned int timeout = MPI3MR_RESET_TIMEOUT;

	sdev_priv_data = scmd->device->hostdata;
	if (sdev_priv_data && sdev_priv_data->tgt_priv_data) {
		stgt_priv_data = sdev_priv_data->tgt_priv_data;
		dev_type = stgt_priv_data->dev_type;
	}

	if (dev_type == MPI3_DEVICE_DEVFORM_VD) {
		mpi3mr_wait_for_host_io(mrioc,
		    MPI3MR_RAID_ERRREC_RESET_TIMEOUT);
		if (!mpi3mr_get_fw_pending_ios(mrioc)) {
			while (mrioc->reset_in_progress ||
			    mrioc->prepare_for_reset ||
			    mrioc->block_on_pci_err) {
				ssleep(1);
				if (!timeout--) {
					retval = FAILED;
					goto out;
				}
			}
			retval = SUCCESS;
			goto out;
		}
	}
	if (retval == FAILED)
		mpi3mr_print_pending_host_io(mrioc);

out:
	sdev_printk(KERN_INFO, scmd->device,
	    "Bus reset is %s for scmd(%p)\n",
	    ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
	return retval;
}

/**
 * mpi3mr_eh_target_reset - Target reset error handling callback
 * @scmd: SCSI command reference
 *
 * Issues a Target Reset Task Management request and verifies that
 * the scmd is terminated successfully, returning status accordingly.
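 * The scmd is treated as terminated only when the target's pending
 * command count drops to zero after the TM completes.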
 *
 * Return: SUCCESS on successful termination of the scmd, else
 * FAILED
 */
static int mpi3mr_eh_target_reset(struct scsi_cmnd *scmd)
{
	struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host);
	struct mpi3mr_stgt_priv_data *stgt_priv_data;
	struct mpi3mr_sdev_priv_data *sdev_priv_data;
	u16 dev_handle;
	u8 resp_code = 0;
	int retval = FAILED, ret = 0;

	sdev_printk(KERN_INFO, scmd->device,
	    "Attempting Target Reset! scmd(%p)\n", scmd);
	scsi_print_command(scmd);

	sdev_priv_data = scmd->device->hostdata;
	if (!sdev_priv_data || !sdev_priv_data->tgt_priv_data) {
		sdev_printk(KERN_INFO, scmd->device,
		    "SCSI device is not available\n");
		retval = SUCCESS;
		goto out;
	}

	stgt_priv_data = sdev_priv_data->tgt_priv_data;
	dev_handle = stgt_priv_data->dev_handle;
	if (stgt_priv_data->dev_removed) {
		struct scmd_priv *cmd_priv = scsi_cmd_priv(scmd);

		sdev_printk(KERN_INFO, scmd->device,
		    "%s: target(handle = 0x%04x) is removed, target reset is not issued\n",
		    mrioc->name, dev_handle);
		if (!cmd_priv->in_lld_scope || cmd_priv->host_tag == MPI3MR_HOSTTAG_INVALID)
			retval = SUCCESS;
		else
			retval = FAILED;
		goto out;
	}
	sdev_printk(KERN_INFO, scmd->device,
	    "Target Reset is issued to handle(0x%04x)\n",
	    dev_handle);

	ret = mpi3mr_issue_tm(mrioc,
	    MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET, dev_handle,
	    sdev_priv_data->lun_id, MPI3MR_HOSTTAG_BLK_TMS,
	    MPI3MR_RESETTM_TIMEOUT, &mrioc->host_tm_cmds, &resp_code, scmd);

	if (ret)
		goto out;

	if (stgt_priv_data->pend_count) {
		sdev_printk(KERN_INFO, scmd->device,
		    "%s: target has %d pending commands, target reset failed\n",
		    mrioc->name, stgt_priv_data->pend_count);
		goto out;
	}

	retval = SUCCESS;
out:
	sdev_printk(KERN_INFO, scmd->device,
	    "%s: target reset is %s for scmd(%p)\n", mrioc->name,
	    ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);

	return retval;
}

/**
 * mpi3mr_eh_dev_reset - Device reset error handling callback
 * @scmd: SCSI command reference
 *
 * Issues a LUN Reset Task Management request and verifies that the
 * scmd is terminated successfully, returning status accordingly.
 *
 * Return: SUCCESS on successful termination of the scmd, else
 * FAILED
 */
static int mpi3mr_eh_dev_reset(struct scsi_cmnd *scmd)
{
	struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host);
	struct mpi3mr_stgt_priv_data *stgt_priv_data;
	struct mpi3mr_sdev_priv_data *sdev_priv_data;
	u16 dev_handle;
	u8 resp_code = 0;
	int retval = FAILED, ret = 0;

	sdev_printk(KERN_INFO, scmd->device,
	    "Attempting Device(lun) Reset! scmd(%p)\n", scmd);
	scsi_print_command(scmd);

	sdev_priv_data = scmd->device->hostdata;
	if (!sdev_priv_data || !sdev_priv_data->tgt_priv_data) {
		sdev_printk(KERN_INFO, scmd->device,
		    "SCSI device is not available\n");
		retval = SUCCESS;
		goto out;
	}

	stgt_priv_data = sdev_priv_data->tgt_priv_data;
	dev_handle = stgt_priv_data->dev_handle;
	if (stgt_priv_data->dev_removed) {
		struct scmd_priv *cmd_priv = scsi_cmd_priv(scmd);

		sdev_printk(KERN_INFO, scmd->device,
		    "%s: device(handle = 0x%04x) is removed, device(LUN) reset is not issued\n",
		    mrioc->name, dev_handle);
		if (!cmd_priv->in_lld_scope || cmd_priv->host_tag == MPI3MR_HOSTTAG_INVALID)
			retval = SUCCESS;
		else
			retval = FAILED;
		goto out;
	}
	sdev_printk(KERN_INFO, scmd->device,
	    "Device(lun) Reset is issued to handle(0x%04x)\n", dev_handle);

	ret = mpi3mr_issue_tm(mrioc,
	    MPI3_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, dev_handle,
	    sdev_priv_data->lun_id, MPI3MR_HOSTTAG_BLK_TMS,
	    MPI3MR_RESETTM_TIMEOUT, &mrioc->host_tm_cmds, &resp_code, scmd);

	if (ret)
		goto out;

	if (sdev_priv_data->pend_count) {
		sdev_printk(KERN_INFO, scmd->device,
		    "%s: device has %d pending commands, device(LUN) reset failed\n",
		    mrioc->name, sdev_priv_data->pend_count);
		goto out;
	}
	retval = SUCCESS;
out:
	sdev_printk(KERN_INFO, scmd->device,
	    "%s: device(LUN) reset is %s for scmd(%p)\n", mrioc->name,
	    ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);

	return retval;
}

/**
 * mpi3mr_eh_abort - Callback function for abort error handling
 * @scmd: SCSI command reference
 *
 * Issues an Abort Task Management request if the command is in LLD
 * scope, verifies that it was aborted successfully and returns status
 * accordingly.
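 * The command's age, timeout and retry counts are logged below to
 * aid triage; the abort itself is sent with the reserved
 * MPI3MR_HOSTTAG_BLK_TMS host tag.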
 *
 * Return: SUCCESS if the abort was successful, otherwise FAILED
 */
static int mpi3mr_eh_abort(struct scsi_cmnd *scmd)
{
	struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host);
	struct mpi3mr_stgt_priv_data *stgt_priv_data;
	struct mpi3mr_sdev_priv_data *sdev_priv_data;
	struct scmd_priv *cmd_priv;
	u16 dev_handle, timeout = MPI3MR_ABORTTM_TIMEOUT;
	u8 resp_code = 0;
	int retval = FAILED, ret = 0;
	struct request *rq = scsi_cmd_to_rq(scmd);
	unsigned long scmd_age_ms =
	    jiffies_to_msecs(jiffies - scmd->jiffies_at_alloc);
	unsigned long scmd_age_sec = scmd_age_ms / 1000;

	sdev_printk(KERN_INFO, scmd->device,
	    "%s: attempting abort task for scmd(%p)\n", mrioc->name, scmd);

	sdev_printk(KERN_INFO, scmd->device,
	    "%s: scmd(0x%p) is outstanding for %lus %lums, timeout %us, retries %d, allowed %d\n",
	    mrioc->name, scmd, scmd_age_sec, scmd_age_ms % 1000,
	    rq->timeout / HZ, scmd->retries, scmd->allowed);

	scsi_print_command(scmd);

	sdev_priv_data = scmd->device->hostdata;
	if (!sdev_priv_data || !sdev_priv_data->tgt_priv_data) {
		sdev_printk(KERN_INFO, scmd->device,
		    "%s: Device not available, Skip issuing abort task\n",
		    mrioc->name);
		retval = SUCCESS;
		goto out;
	}

	stgt_priv_data = sdev_priv_data->tgt_priv_data;
	dev_handle = stgt_priv_data->dev_handle;

	cmd_priv = scsi_cmd_priv(scmd);
	if (!cmd_priv->in_lld_scope ||
	    cmd_priv->host_tag == MPI3MR_HOSTTAG_INVALID) {
		sdev_printk(KERN_INFO, scmd->device,
		    "%s: scmd (0x%p) not in LLD scope, Skip issuing Abort Task\n",
		    mrioc->name, scmd);
		retval = SUCCESS;
		goto out;
	}

	if (stgt_priv_data->dev_removed) {
		sdev_printk(KERN_INFO, scmd->device,
		    "%s: Device (handle = 0x%04x) removed, Skip issuing Abort Task\n",
		    mrioc->name, dev_handle);
		retval = FAILED;
		goto out;
	}

	ret = mpi3mr_issue_tm(mrioc, MPI3_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
	    dev_handle, sdev_priv_data->lun_id, MPI3MR_HOSTTAG_BLK_TMS,
	    timeout, &mrioc->host_tm_cmds, &resp_code, scmd);

	if (ret)
		goto out;

	if (cmd_priv->in_lld_scope) {
		sdev_printk(KERN_INFO, scmd->device,
		    "%s: Abort task failed. scmd (0x%p) was not terminated\n",
		    mrioc->name, scmd);
		goto out;
	}

	retval = SUCCESS;
out:
	sdev_printk(KERN_INFO, scmd->device,
	    "%s: Abort Task %s for scmd (0x%p)\n", mrioc->name,
	    ((retval == SUCCESS) ? "SUCCEEDED" : "FAILED"), scmd);

	return retval;
}

/**
 * mpi3mr_scan_start - Scan start callback handler
 * @shost: SCSI host reference
 *
 * Issue port enable request asynchronously.
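 * Completion is reported later through mpi3mr_scan_finished(), which
 * the SCSI midlayer polls until it returns 1.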
 *
 * Return: Nothing
 */
static void mpi3mr_scan_start(struct Scsi_Host *shost)
{
	struct mpi3mr_ioc *mrioc = shost_priv(shost);

	mrioc->scan_started = 1;
	ioc_info(mrioc, "%s :Issuing Port Enable\n", __func__);
	if (mpi3mr_issue_port_enable(mrioc, 1)) {
		ioc_err(mrioc, "%s :Issuing port enable failed\n", __func__);
		mrioc->scan_started = 0;
		mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR;
	}
}

/**
 * mpi3mr_scan_finished - Scan finished callback handler
 * @shost: SCSI host reference
 * @time: Jiffies from the scan start
 *
 * Checks whether the port enable completed, timed out or failed,
 * and sets the scan status accordingly after taking any required
 * recovery action.
 *
 * Return: 1 on scan finished or timed out, 0 for in progress
 */
static int mpi3mr_scan_finished(struct Scsi_Host *shost,
	unsigned long time)
{
	struct mpi3mr_ioc *mrioc = shost_priv(shost);
	u32 pe_timeout = MPI3MR_PORTENABLE_TIMEOUT;
	u32 ioc_status = readl(&mrioc->sysif_regs->ioc_status);

	if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) ||
	    (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) {
		ioc_err(mrioc, "port enable failed due to fault or reset\n");
		mpi3mr_print_fault_info(mrioc);
		mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR;
		mrioc->scan_started = 0;
		mrioc->init_cmds.is_waiting = 0;
		mrioc->init_cmds.callback = NULL;
		mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	}

	if (time >= (pe_timeout * HZ)) {
		ioc_err(mrioc, "port enable failed due to timeout\n");
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_PE_TIMEOUT);
		mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR;
		mrioc->scan_started = 0;
		mrioc->init_cmds.is_waiting = 0;
		mrioc->init_cmds.callback = NULL;
		mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	}

	if (mrioc->scan_started)
		return 0;

	if (mrioc->scan_failed) {
		ioc_err(mrioc,
		    "port enable failed with status=0x%04x\n",
		    mrioc->scan_failed);
	} else
		ioc_info(mrioc, "port enable completed successfully\n");

	mpi3mr_start_watchdog(mrioc);
	mrioc->is_driver_loading = 0;
	mrioc->stop_bsgs = 0;
	return 1;
}

/**
 * mpi3mr_sdev_destroy - Slave destroy callback handler
 * @sdev: SCSI device reference
 *
 * Cleanup and free per device(lun) private data.
4572 */ 4573 static void mpi3mr_sdev_destroy(struct scsi_device *sdev) 4574 { 4575 struct Scsi_Host *shost; 4576 struct mpi3mr_ioc *mrioc; 4577 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data; 4578 struct mpi3mr_tgt_dev *tgt_dev = NULL; 4579 unsigned long flags; 4580 struct scsi_target *starget; 4581 struct sas_rphy *rphy = NULL; 4582 4583 if (!sdev->hostdata) 4584 return; 4585 4586 starget = scsi_target(sdev); 4587 shost = dev_to_shost(&starget->dev); 4588 mrioc = shost_priv(shost); 4589 scsi_tgt_priv_data = starget->hostdata; 4590 4591 scsi_tgt_priv_data->num_luns--; 4592 4593 spin_lock_irqsave(&mrioc->tgtdev_lock, flags); 4594 if (starget->channel == mrioc->scsi_device_channel) 4595 tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id); 4596 else if (mrioc->sas_transport_enabled && !starget->channel) { 4597 rphy = dev_to_rphy(starget->dev.parent); 4598 tgt_dev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc, 4599 rphy->identify.sas_address, rphy); 4600 } 4601 4602 if (tgt_dev && (!scsi_tgt_priv_data->num_luns)) 4603 tgt_dev->starget = NULL; 4604 if (tgt_dev) 4605 mpi3mr_tgtdev_put(tgt_dev); 4606 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 4607 4608 kfree(sdev->hostdata); 4609 sdev->hostdata = NULL; 4610 } 4611 4612 /** 4613 * mpi3mr_target_destroy - Target destroy callback handler 4614 * @starget: SCSI target reference 4615 * 4616 * Cleanup and free per target private data. 4617 * 4618 * Return: Nothing. 4619 */ 4620 static void mpi3mr_target_destroy(struct scsi_target *starget) 4621 { 4622 struct Scsi_Host *shost; 4623 struct mpi3mr_ioc *mrioc; 4624 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data; 4625 struct mpi3mr_tgt_dev *tgt_dev; 4626 unsigned long flags; 4627 4628 if (!starget->hostdata) 4629 return; 4630 4631 shost = dev_to_shost(&starget->dev); 4632 mrioc = shost_priv(shost); 4633 scsi_tgt_priv_data = starget->hostdata; 4634 4635 spin_lock_irqsave(&mrioc->tgtdev_lock, flags); 4636 tgt_dev = __mpi3mr_get_tgtdev_from_tgtpriv(mrioc, scsi_tgt_priv_data); 4637 if (tgt_dev && (tgt_dev->starget == starget) && 4638 (tgt_dev->perst_id == starget->id)) 4639 tgt_dev->starget = NULL; 4640 if (tgt_dev) { 4641 scsi_tgt_priv_data->tgt_dev = NULL; 4642 scsi_tgt_priv_data->perst_id = 0; 4643 mpi3mr_tgtdev_put(tgt_dev); 4644 mpi3mr_tgtdev_put(tgt_dev); 4645 } 4646 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 4647 4648 kfree(starget->hostdata); 4649 starget->hostdata = NULL; 4650 } 4651 4652 /** 4653 * mpi3mr_sdev_configure - Slave configure callback handler 4654 * @sdev: SCSI device reference 4655 * @lim: queue limits 4656 * 4657 * Configure queue depth, max hardware sectors and virt boundary 4658 * as required 4659 * 4660 * Return: 0 always. 
4661 */ 4662 static int mpi3mr_sdev_configure(struct scsi_device *sdev, 4663 struct queue_limits *lim) 4664 { 4665 struct scsi_target *starget; 4666 struct Scsi_Host *shost; 4667 struct mpi3mr_ioc *mrioc; 4668 struct mpi3mr_tgt_dev *tgt_dev = NULL; 4669 unsigned long flags; 4670 int retval = 0; 4671 struct sas_rphy *rphy = NULL; 4672 4673 starget = scsi_target(sdev); 4674 shost = dev_to_shost(&starget->dev); 4675 mrioc = shost_priv(shost); 4676 4677 spin_lock_irqsave(&mrioc->tgtdev_lock, flags); 4678 if (starget->channel == mrioc->scsi_device_channel) 4679 tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id); 4680 else if (mrioc->sas_transport_enabled && !starget->channel) { 4681 rphy = dev_to_rphy(starget->dev.parent); 4682 tgt_dev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc, 4683 rphy->identify.sas_address, rphy); 4684 } 4685 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 4686 if (!tgt_dev) 4687 return -ENXIO; 4688 4689 mpi3mr_change_queue_depth(sdev, tgt_dev->q_depth); 4690 4691 sdev->eh_timeout = MPI3MR_EH_SCMD_TIMEOUT; 4692 blk_queue_rq_timeout(sdev->request_queue, MPI3MR_SCMD_TIMEOUT); 4693 4694 mpi3mr_configure_tgt_dev(tgt_dev, lim); 4695 mpi3mr_tgtdev_put(tgt_dev); 4696 return retval; 4697 } 4698 4699 /** 4700 * mpi3mr_sdev_init -Slave alloc callback handler 4701 * @sdev: SCSI device reference 4702 * 4703 * Allocate per device(lun) private data and initialize it. 4704 * 4705 * Return: 0 on success -ENOMEM on memory allocation failure. 4706 */ 4707 static int mpi3mr_sdev_init(struct scsi_device *sdev) 4708 { 4709 struct Scsi_Host *shost; 4710 struct mpi3mr_ioc *mrioc; 4711 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data; 4712 struct mpi3mr_tgt_dev *tgt_dev = NULL; 4713 struct mpi3mr_sdev_priv_data *scsi_dev_priv_data; 4714 unsigned long flags; 4715 struct scsi_target *starget; 4716 int retval = 0; 4717 struct sas_rphy *rphy = NULL; 4718 4719 starget = scsi_target(sdev); 4720 shost = dev_to_shost(&starget->dev); 4721 mrioc = shost_priv(shost); 4722 scsi_tgt_priv_data = starget->hostdata; 4723 4724 spin_lock_irqsave(&mrioc->tgtdev_lock, flags); 4725 4726 if (starget->channel == mrioc->scsi_device_channel) 4727 tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id); 4728 else if (mrioc->sas_transport_enabled && !starget->channel) { 4729 rphy = dev_to_rphy(starget->dev.parent); 4730 tgt_dev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc, 4731 rphy->identify.sas_address, rphy); 4732 } 4733 4734 if (tgt_dev) { 4735 if (tgt_dev->starget == NULL) 4736 tgt_dev->starget = starget; 4737 mpi3mr_tgtdev_put(tgt_dev); 4738 retval = 0; 4739 } else { 4740 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 4741 return -ENXIO; 4742 } 4743 4744 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 4745 4746 scsi_dev_priv_data = kzalloc(sizeof(*scsi_dev_priv_data), GFP_KERNEL); 4747 if (!scsi_dev_priv_data) 4748 return -ENOMEM; 4749 4750 scsi_dev_priv_data->lun_id = sdev->lun; 4751 scsi_dev_priv_data->tgt_priv_data = scsi_tgt_priv_data; 4752 sdev->hostdata = scsi_dev_priv_data; 4753 4754 scsi_tgt_priv_data->num_luns++; 4755 4756 return retval; 4757 } 4758 4759 /** 4760 * mpi3mr_target_alloc - Target alloc callback handler 4761 * @starget: SCSI target reference 4762 * 4763 * Allocate per target private data and initialize it. 4764 * 4765 * Return: 0 on success -ENOMEM on memory allocation failure. 
 */
static int mpi3mr_target_alloc(struct scsi_target *starget)
{
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	struct mpi3mr_ioc *mrioc = shost_priv(shost);
	struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data;
	struct mpi3mr_tgt_dev *tgt_dev;
	unsigned long flags;
	int retval = 0;
	struct sas_rphy *rphy = NULL;

	scsi_tgt_priv_data = kzalloc(sizeof(*scsi_tgt_priv_data), GFP_KERNEL);
	if (!scsi_tgt_priv_data)
		return -ENOMEM;

	starget->hostdata = scsi_tgt_priv_data;

	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
	if (starget->channel == mrioc->scsi_device_channel) {
		tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id);
		if (tgt_dev && !tgt_dev->is_hidden) {
			scsi_tgt_priv_data->starget = starget;
			scsi_tgt_priv_data->dev_handle = tgt_dev->dev_handle;
			scsi_tgt_priv_data->perst_id = tgt_dev->perst_id;
			scsi_tgt_priv_data->dev_type = tgt_dev->dev_type;
			scsi_tgt_priv_data->tgt_dev = tgt_dev;
			tgt_dev->starget = starget;
			atomic_set(&scsi_tgt_priv_data->block_io, 0);
			retval = 0;
			if ((tgt_dev->dev_type == MPI3_DEVICE_DEVFORM_PCIE) &&
			    ((tgt_dev->dev_spec.pcie_inf.dev_info &
			    MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) ==
			    MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) &&
			    ((tgt_dev->dev_spec.pcie_inf.dev_info &
			    MPI3_DEVICE0_PCIE_DEVICE_INFO_PITYPE_MASK) !=
			    MPI3_DEVICE0_PCIE_DEVICE_INFO_PITYPE_0))
				scsi_tgt_priv_data->dev_nvme_dif = 1;
			scsi_tgt_priv_data->io_throttle_enabled =
			    tgt_dev->io_throttle_enabled;
			scsi_tgt_priv_data->wslen = tgt_dev->wslen;
			if (tgt_dev->dev_type == MPI3_DEVICE_DEVFORM_VD)
				scsi_tgt_priv_data->throttle_group =
				    tgt_dev->dev_spec.vd_inf.tg;
		} else
			retval = -ENXIO;
	} else if (mrioc->sas_transport_enabled && !starget->channel) {
		rphy = dev_to_rphy(starget->dev.parent);
		tgt_dev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc,
		    rphy->identify.sas_address, rphy);
		if (tgt_dev && !tgt_dev->is_hidden && !tgt_dev->non_stl &&
		    (tgt_dev->dev_type == MPI3_DEVICE_DEVFORM_SAS_SATA)) {
			scsi_tgt_priv_data->starget = starget;
			scsi_tgt_priv_data->dev_handle = tgt_dev->dev_handle;
			scsi_tgt_priv_data->perst_id = tgt_dev->perst_id;
			scsi_tgt_priv_data->dev_type = tgt_dev->dev_type;
			scsi_tgt_priv_data->tgt_dev = tgt_dev;
			scsi_tgt_priv_data->io_throttle_enabled =
			    tgt_dev->io_throttle_enabled;
			scsi_tgt_priv_data->wslen = tgt_dev->wslen;
			tgt_dev->starget = starget;
			atomic_set(&scsi_tgt_priv_data->block_io, 0);
			retval = 0;
		} else
			retval = -ENXIO;
	}
	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);

	return retval;
}

/**
 * mpi3mr_check_return_unmap - Whether an unmap is allowed
 * @mrioc: Adapter instance reference
 * @scmd: SCSI Command reference
 *
 * The controller hardware cannot handle certain UNMAP commands for
 * NVMe drives; this routine checks for those, completes the SCSI
 * command with the proper status and sense data, and returns true.
 *
 * Return: true for a disallowed unmap, false otherwise.
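 *
 * Worked example (hypothetical sizes, matching the truncation logic
 * below): on a controller with a non-zero PCI revision, an UNMAP
 * parameter list of 30 bytes has (30 - 8) & 0xF == 6 trailing bytes,
 * so param_len is truncated to 24 before the command is sent on.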
 */
static bool mpi3mr_check_return_unmap(struct mpi3mr_ioc *mrioc,
	struct scsi_cmnd *scmd)
{
	unsigned char *buf;
	u16 param_len, desc_len, trunc_param_len;

	trunc_param_len = param_len = get_unaligned_be16(scmd->cmnd + 7);

	if (mrioc->pdev->revision) {
		if ((param_len > 24) && ((param_len - 8) & 0xF)) {
			trunc_param_len -= (param_len - 8) & 0xF;
			dprint_scsi_command(mrioc, scmd, MPI3_DEBUG_SCSI_ERROR);
			dprint_scsi_err(mrioc,
			    "truncating param_len from (%d) to (%d)\n",
			    param_len, trunc_param_len);
			put_unaligned_be16(trunc_param_len, scmd->cmnd + 7);
			dprint_scsi_command(mrioc, scmd, MPI3_DEBUG_SCSI_ERROR);
		}
		return false;
	}

	if (!param_len) {
		ioc_warn(mrioc,
		    "%s: cdb received with zero parameter length\n",
		    __func__);
		scsi_print_command(scmd);
		scmd->result = DID_OK << 16;
		scsi_done(scmd);
		return true;
	}

	if (param_len < 24) {
		ioc_warn(mrioc,
		    "%s: cdb received with invalid param_len: %d\n",
		    __func__, param_len);
		scsi_print_command(scmd);
		scmd->result = SAM_STAT_CHECK_CONDITION;
		scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
		    0x1A, 0);
		scsi_done(scmd);
		return true;
	}
	if (param_len != scsi_bufflen(scmd)) {
		ioc_warn(mrioc,
		    "%s: cdb received with param_len: %d bufflen: %d\n",
		    __func__, param_len, scsi_bufflen(scmd));
		scsi_print_command(scmd);
		scmd->result = SAM_STAT_CHECK_CONDITION;
		scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
		    0x1A, 0);
		scsi_done(scmd);
		return true;
	}
	buf = kzalloc(scsi_bufflen(scmd), GFP_ATOMIC);
	if (!buf) {
		scsi_print_command(scmd);
		scmd->result = SAM_STAT_CHECK_CONDITION;
		scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
		    0x55, 0x03);
		scsi_done(scmd);
		return true;
	}
	scsi_sg_copy_to_buffer(scmd, buf, scsi_bufflen(scmd));
	desc_len = get_unaligned_be16(&buf[2]);

	if (desc_len < 16) {
		ioc_warn(mrioc,
		    "%s: Invalid descriptor length in param list: %d\n",
		    __func__, desc_len);
		scsi_print_command(scmd);
		scmd->result = SAM_STAT_CHECK_CONDITION;
		scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
		    0x26, 0);
		scsi_done(scmd);
		kfree(buf);
		return true;
	}

	if (param_len > (desc_len + 8)) {
		trunc_param_len = desc_len + 8;
		scsi_print_command(scmd);
		dprint_scsi_err(mrioc,
		    "truncating param_len(%d) to desc_len+8(%d)\n",
		    param_len, trunc_param_len);
		put_unaligned_be16(trunc_param_len, scmd->cmnd + 7);
		scsi_print_command(scmd);
	}

	kfree(buf);
	return false;
}

/**
 * mpi3mr_allow_scmd_to_fw - Command is allowed during shutdown
 * @scmd: SCSI Command reference
 *
 * Checks whether a cdb is allowed during shutdown or not.
 *
 * Return: TRUE for allowed commands, FALSE otherwise.
 */
inline bool mpi3mr_allow_scmd_to_fw(struct scsi_cmnd *scmd)
{
	switch (scmd->cmnd[0]) {
	case SYNCHRONIZE_CACHE:
	case START_STOP:
		return true;
	default:
		return false;
	}
}

/**
 * mpi3mr_qcmd - I/O request dispatcher
 * @shost: SCSI Host reference
 * @scmd: SCSI Command reference
 *
 * Issues the SCSI Command as an MPI3 request.
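 * The submission path below validates the device state, builds the
 * MPI3 SCSI IO request (CDB, flags, EEDP and SGLs), applies large-I/O
 * throttling accounting and posts the request to the selected
 * operational request queue.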
4963 * 4964 * Return: 0 on successful queueing of the request or if the 4965 * request is completed with failure. 4966 * SCSI_MLQUEUE_DEVICE_BUSY when the device is busy. 4967 * SCSI_MLQUEUE_HOST_BUSY when the host queue is full. 4968 */ 4969 static int mpi3mr_qcmd(struct Scsi_Host *shost, 4970 struct scsi_cmnd *scmd) 4971 { 4972 struct mpi3mr_ioc *mrioc = shost_priv(shost); 4973 struct mpi3mr_stgt_priv_data *stgt_priv_data; 4974 struct mpi3mr_sdev_priv_data *sdev_priv_data; 4975 struct scmd_priv *scmd_priv_data = NULL; 4976 struct mpi3_scsi_io_request *scsiio_req = NULL; 4977 struct op_req_qinfo *op_req_q = NULL; 4978 int retval = 0; 4979 u16 dev_handle; 4980 u16 host_tag; 4981 u32 scsiio_flags = 0, data_len_blks = 0; 4982 struct request *rq = scsi_cmd_to_rq(scmd); 4983 int iprio_class; 4984 u8 is_pcie_dev = 0; 4985 u32 tracked_io_sz = 0; 4986 u32 ioc_pend_data_len = 0, tg_pend_data_len = 0; 4987 struct mpi3mr_throttle_group_info *tg = NULL; 4988 4989 if (mrioc->unrecoverable) { 4990 scmd->result = DID_ERROR << 16; 4991 scsi_done(scmd); 4992 goto out; 4993 } 4994 4995 sdev_priv_data = scmd->device->hostdata; 4996 if (!sdev_priv_data || !sdev_priv_data->tgt_priv_data) { 4997 scmd->result = DID_NO_CONNECT << 16; 4998 scsi_done(scmd); 4999 goto out; 5000 } 5001 5002 if (mrioc->stop_drv_processing && 5003 !(mpi3mr_allow_scmd_to_fw(scmd))) { 5004 scmd->result = DID_NO_CONNECT << 16; 5005 scsi_done(scmd); 5006 goto out; 5007 } 5008 5009 stgt_priv_data = sdev_priv_data->tgt_priv_data; 5010 dev_handle = stgt_priv_data->dev_handle; 5011 5012 /* Avoid error handling escalation when device is removed or blocked */ 5013 5014 if (scmd->device->host->shost_state == SHOST_RECOVERY && 5015 scmd->cmnd[0] == TEST_UNIT_READY && 5016 (stgt_priv_data->dev_removed || (dev_handle == MPI3MR_INVALID_DEV_HANDLE))) { 5017 scsi_build_sense(scmd, 0, UNIT_ATTENTION, 0x29, 0x07); 5018 scsi_done(scmd); 5019 goto out; 5020 } 5021 5022 if (mrioc->reset_in_progress || mrioc->prepare_for_reset 5023 || mrioc->block_on_pci_err) { 5024 retval = SCSI_MLQUEUE_HOST_BUSY; 5025 goto out; 5026 } 5027 5028 if (atomic_read(&stgt_priv_data->block_io)) { 5029 if (mrioc->stop_drv_processing) { 5030 scmd->result = DID_NO_CONNECT << 16; 5031 scsi_done(scmd); 5032 goto out; 5033 } 5034 retval = SCSI_MLQUEUE_DEVICE_BUSY; 5035 goto out; 5036 } 5037 5038 if (dev_handle == MPI3MR_INVALID_DEV_HANDLE) { 5039 scmd->result = DID_NO_CONNECT << 16; 5040 scsi_done(scmd); 5041 goto out; 5042 } 5043 if (stgt_priv_data->dev_removed) { 5044 scmd->result = DID_NO_CONNECT << 16; 5045 scsi_done(scmd); 5046 goto out; 5047 } 5048 5049 if (stgt_priv_data->dev_type == MPI3_DEVICE_DEVFORM_PCIE) 5050 is_pcie_dev = 1; 5051 if ((scmd->cmnd[0] == UNMAP) && is_pcie_dev && 5052 (mrioc->pdev->device == MPI3_MFGPAGE_DEVID_SAS4116) && 5053 mpi3mr_check_return_unmap(mrioc, scmd)) 5054 goto out; 5055 5056 host_tag = mpi3mr_host_tag_for_scmd(mrioc, scmd); 5057 if (host_tag == MPI3MR_HOSTTAG_INVALID) { 5058 scmd->result = DID_ERROR << 16; 5059 scsi_done(scmd); 5060 goto out; 5061 } 5062 5063 if (scmd->sc_data_direction == DMA_FROM_DEVICE) 5064 scsiio_flags = MPI3_SCSIIO_FLAGS_DATADIRECTION_READ; 5065 else if (scmd->sc_data_direction == DMA_TO_DEVICE) 5066 scsiio_flags = MPI3_SCSIIO_FLAGS_DATADIRECTION_WRITE; 5067 else 5068 scsiio_flags = MPI3_SCSIIO_FLAGS_DATADIRECTION_NO_DATA_TRANSFER; 5069 5070 scsiio_flags |= MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_SIMPLEQ; 5071 5072 if (sdev_priv_data->ncq_prio_enable) { 5073 iprio_class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq)); 5074 if 
(iprio_class == IOPRIO_CLASS_RT) 5075 scsiio_flags |= 1 << MPI3_SCSIIO_FLAGS_CMDPRI_SHIFT; 5076 } 5077 5078 if (scmd->cmd_len > 16) 5079 scsiio_flags |= MPI3_SCSIIO_FLAGS_CDB_GREATER_THAN_16; 5080 5081 scmd_priv_data = scsi_cmd_priv(scmd); 5082 memset(scmd_priv_data->mpi3mr_scsiio_req, 0, MPI3MR_ADMIN_REQ_FRAME_SZ); 5083 scsiio_req = (struct mpi3_scsi_io_request *)scmd_priv_data->mpi3mr_scsiio_req; 5084 scsiio_req->function = MPI3_FUNCTION_SCSI_IO; 5085 scsiio_req->host_tag = cpu_to_le16(host_tag); 5086 5087 mpi3mr_setup_eedp(mrioc, scmd, scsiio_req); 5088 5089 if (stgt_priv_data->wslen) 5090 mpi3mr_setup_divert_ws(mrioc, scmd, scsiio_req, &scsiio_flags, 5091 stgt_priv_data->wslen); 5092 5093 memcpy(scsiio_req->cdb.cdb32, scmd->cmnd, scmd->cmd_len); 5094 scsiio_req->data_length = cpu_to_le32(scsi_bufflen(scmd)); 5095 scsiio_req->dev_handle = cpu_to_le16(dev_handle); 5096 scsiio_req->flags = cpu_to_le32(scsiio_flags); 5097 int_to_scsilun(sdev_priv_data->lun_id, 5098 (struct scsi_lun *)scsiio_req->lun); 5099 5100 if (mpi3mr_build_sg_scmd(mrioc, scmd, scsiio_req)) { 5101 mpi3mr_clear_scmd_priv(mrioc, scmd); 5102 retval = SCSI_MLQUEUE_HOST_BUSY; 5103 goto out; 5104 } 5105 op_req_q = &mrioc->req_qinfo[scmd_priv_data->req_q_idx]; 5106 data_len_blks = scsi_bufflen(scmd) >> 9; 5107 if ((data_len_blks >= mrioc->io_throttle_data_length) && 5108 stgt_priv_data->io_throttle_enabled) { 5109 tracked_io_sz = data_len_blks; 5110 tg = stgt_priv_data->throttle_group; 5111 if (tg) { 5112 ioc_pend_data_len = atomic_add_return(data_len_blks, 5113 &mrioc->pend_large_data_sz); 5114 tg_pend_data_len = atomic_add_return(data_len_blks, 5115 &tg->pend_large_data_sz); 5116 if (!tg->io_divert && ((ioc_pend_data_len >= 5117 mrioc->io_throttle_high) || 5118 (tg_pend_data_len >= tg->high))) { 5119 tg->io_divert = 1; 5120 tg->need_qd_reduction = 1; 5121 mpi3mr_set_io_divert_for_all_vd_in_tg(mrioc, 5122 tg, 1); 5123 mpi3mr_queue_qd_reduction_event(mrioc, tg); 5124 } 5125 } else { 5126 ioc_pend_data_len = atomic_add_return(data_len_blks, 5127 &mrioc->pend_large_data_sz); 5128 if (ioc_pend_data_len >= mrioc->io_throttle_high) 5129 stgt_priv_data->io_divert = 1; 5130 } 5131 } 5132 5133 if (stgt_priv_data->io_divert) { 5134 scsiio_req->msg_flags |= 5135 MPI3_SCSIIO_MSGFLAGS_DIVERT_TO_FIRMWARE; 5136 scsiio_flags |= MPI3_SCSIIO_FLAGS_DIVERT_REASON_IO_THROTTLING; 5137 } 5138 scsiio_req->flags |= cpu_to_le32(scsiio_flags); 5139 5140 if (mpi3mr_op_request_post(mrioc, op_req_q, 5141 scmd_priv_data->mpi3mr_scsiio_req)) { 5142 mpi3mr_clear_scmd_priv(mrioc, scmd); 5143 retval = SCSI_MLQUEUE_HOST_BUSY; 5144 if (tracked_io_sz) { 5145 atomic_sub(tracked_io_sz, &mrioc->pend_large_data_sz); 5146 if (tg) 5147 atomic_sub(tracked_io_sz, 5148 &tg->pend_large_data_sz); 5149 } 5150 goto out; 5151 } 5152 5153 out: 5154 return retval; 5155 } 5156 5157 static const struct scsi_host_template mpi3mr_driver_template = { 5158 .module = THIS_MODULE, 5159 .name = "MPI3 Storage Controller", 5160 .proc_name = MPI3MR_DRIVER_NAME, 5161 .queuecommand = mpi3mr_qcmd, 5162 .target_alloc = mpi3mr_target_alloc, 5163 .sdev_init = mpi3mr_sdev_init, 5164 .sdev_configure = mpi3mr_sdev_configure, 5165 .target_destroy = mpi3mr_target_destroy, 5166 .sdev_destroy = mpi3mr_sdev_destroy, 5167 .scan_finished = mpi3mr_scan_finished, 5168 .scan_start = mpi3mr_scan_start, 5169 .change_queue_depth = mpi3mr_change_queue_depth, 5170 .eh_abort_handler = mpi3mr_eh_abort, 5171 .eh_device_reset_handler = mpi3mr_eh_dev_reset, 5172 .eh_target_reset_handler = mpi3mr_eh_target_reset, 5173 
.eh_bus_reset_handler = mpi3mr_eh_bus_reset, 5174 .eh_host_reset_handler = mpi3mr_eh_host_reset, 5175 .bios_param = mpi3mr_bios_param, 5176 .map_queues = mpi3mr_map_queues, 5177 .mq_poll = mpi3mr_blk_mq_poll, 5178 .no_write_same = 1, 5179 .can_queue = 1, 5180 .this_id = -1, 5181 .sg_tablesize = MPI3MR_DEFAULT_SGL_ENTRIES, 5182 /* max xfer supported is 1M (2K in 512 byte sized sectors) 5183 */ 5184 .max_sectors = (MPI3MR_DEFAULT_MAX_IO_SIZE / 512), 5185 .cmd_per_lun = MPI3MR_MAX_CMDS_LUN, 5186 .max_segment_size = 0xffffffff, 5187 .track_queue_depth = 1, 5188 .cmd_size = sizeof(struct scmd_priv), 5189 .shost_groups = mpi3mr_host_groups, 5190 .sdev_groups = mpi3mr_dev_groups, 5191 }; 5192 5193 /** 5194 * mpi3mr_init_drv_cmd - Initialize internal command tracker 5195 * @cmdptr: Internal command tracker 5196 * @host_tag: Host tag used for the specific command 5197 * 5198 * Initialize the internal command tracker structure with 5199 * specified host tag. 5200 * 5201 * Return: Nothing. 5202 */ 5203 static inline void mpi3mr_init_drv_cmd(struct mpi3mr_drv_cmd *cmdptr, 5204 u16 host_tag) 5205 { 5206 mutex_init(&cmdptr->mutex); 5207 cmdptr->reply = NULL; 5208 cmdptr->state = MPI3MR_CMD_NOTUSED; 5209 cmdptr->dev_handle = MPI3MR_INVALID_DEV_HANDLE; 5210 cmdptr->host_tag = host_tag; 5211 } 5212 5213 /** 5214 * osintfc_mrioc_security_status -Check controller secure status 5215 * @pdev: PCI device instance 5216 * 5217 * Read the Device Serial Number capability from PCI config 5218 * space and decide whether the controller is secure or not. 5219 * 5220 * Return: 0 on success, non-zero on failure. 5221 */ 5222 static int 5223 osintfc_mrioc_security_status(struct pci_dev *pdev) 5224 { 5225 u32 cap_data; 5226 int base; 5227 u32 ctlr_status; 5228 u32 debug_status; 5229 int retval = 0; 5230 5231 base = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN); 5232 if (!base) { 5233 dev_err(&pdev->dev, 5234 "%s: PCI_EXT_CAP_ID_DSN is not supported\n", __func__); 5235 return -1; 5236 } 5237 5238 pci_read_config_dword(pdev, base + 4, &cap_data); 5239 5240 debug_status = cap_data & MPI3MR_CTLR_SECURE_DBG_STATUS_MASK; 5241 ctlr_status = cap_data & MPI3MR_CTLR_SECURITY_STATUS_MASK; 5242 5243 switch (ctlr_status) { 5244 case MPI3MR_INVALID_DEVICE: 5245 dev_err(&pdev->dev, 5246 "%s: Non secure ctlr (Invalid) is detected: DID: 0x%x: SVID: 0x%x: SDID: 0x%x\n", 5247 __func__, pdev->device, pdev->subsystem_vendor, 5248 pdev->subsystem_device); 5249 retval = -1; 5250 break; 5251 case MPI3MR_CONFIG_SECURE_DEVICE: 5252 if (!debug_status) 5253 dev_info(&pdev->dev, 5254 "%s: Config secure ctlr is detected\n", 5255 __func__); 5256 break; 5257 case MPI3MR_HARD_SECURE_DEVICE: 5258 break; 5259 case MPI3MR_TAMPERED_DEVICE: 5260 dev_err(&pdev->dev, 5261 "%s: Non secure ctlr (Tampered) is detected: DID: 0x%x: SVID: 0x%x: SDID: 0x%x\n", 5262 __func__, pdev->device, pdev->subsystem_vendor, 5263 pdev->subsystem_device); 5264 retval = -1; 5265 break; 5266 default: 5267 retval = -1; 5268 break; 5269 } 5270 5271 if (!retval && debug_status) { 5272 dev_err(&pdev->dev, 5273 "%s: Non secure ctlr (Secure Dbg) is detected: DID: 0x%x: SVID: 0x%x: SDID: 0x%x\n", 5274 __func__, pdev->device, pdev->subsystem_vendor, 5275 pdev->subsystem_device); 5276 retval = -1; 5277 } 5278 5279 return retval; 5280 } 5281 5282 /** 5283 * mpi3mr_probe - PCI probe callback 5284 * @pdev: PCI device instance 5285 * @id: PCI device ID details 5286 * 5287 * controller initialization routine. 
Checks the security status
 * of the controller; if it is invalid or tampered, returns from the
 * probe without initializing the controller. Otherwise, allocates
 * the per adapter instance through shost_priv(), initializes
 * controller specific data structures, initializes the controller
 * hardware and adds the shost to the SCSI subsystem.
 *
 * Return: 0 on success, non-zero on failure.
 */
static int
mpi3mr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct mpi3mr_ioc *mrioc = NULL;
	struct Scsi_Host *shost = NULL;
	int retval = 0, i;

	if (osintfc_mrioc_security_status(pdev)) {
		warn_non_secure_ctlr = 1;
		return 1; /* For Invalid and Tampered device */
	}

	shost = scsi_host_alloc(&mpi3mr_driver_template,
	    sizeof(struct mpi3mr_ioc));
	if (!shost) {
		retval = -ENODEV;
		goto shost_failed;
	}

	mrioc = shost_priv(shost);
	retval = ida_alloc_range(&mrioc_ida, 0, U8_MAX, GFP_KERNEL);
	if (retval < 0)
		goto id_alloc_failed;
	mrioc->id = (u8)retval;
	sprintf(mrioc->driver_name, "%s", MPI3MR_DRIVER_NAME);
	sprintf(mrioc->name, "%s%d", mrioc->driver_name, mrioc->id);
	INIT_LIST_HEAD(&mrioc->list);
	spin_lock(&mrioc_list_lock);
	list_add_tail(&mrioc->list, &mrioc_list);
	spin_unlock(&mrioc_list_lock);

	spin_lock_init(&mrioc->admin_req_lock);
	spin_lock_init(&mrioc->reply_free_queue_lock);
	spin_lock_init(&mrioc->sbq_lock);
	spin_lock_init(&mrioc->fwevt_lock);
	spin_lock_init(&mrioc->tgtdev_lock);
	spin_lock_init(&mrioc->watchdog_lock);
	spin_lock_init(&mrioc->chain_buf_lock);
	spin_lock_init(&mrioc->sas_node_lock);
	spin_lock_init(&mrioc->trigger_lock);

	INIT_LIST_HEAD(&mrioc->fwevt_list);
	INIT_LIST_HEAD(&mrioc->tgtdev_list);
	INIT_LIST_HEAD(&mrioc->delayed_rmhs_list);
	INIT_LIST_HEAD(&mrioc->delayed_evtack_cmds_list);
	INIT_LIST_HEAD(&mrioc->sas_expander_list);
	INIT_LIST_HEAD(&mrioc->hba_port_table_list);
	INIT_LIST_HEAD(&mrioc->enclosure_list);

	mutex_init(&mrioc->reset_mutex);
	mpi3mr_init_drv_cmd(&mrioc->init_cmds, MPI3MR_HOSTTAG_INITCMDS);
	mpi3mr_init_drv_cmd(&mrioc->host_tm_cmds, MPI3MR_HOSTTAG_BLK_TMS);
	mpi3mr_init_drv_cmd(&mrioc->bsg_cmds, MPI3MR_HOSTTAG_BSG_CMDS);
	mpi3mr_init_drv_cmd(&mrioc->cfg_cmds, MPI3MR_HOSTTAG_CFG_CMDS);
	mpi3mr_init_drv_cmd(&mrioc->transport_cmds,
	    MPI3MR_HOSTTAG_TRANSPORT_CMDS);

	for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++)
		mpi3mr_init_drv_cmd(&mrioc->dev_rmhs_cmds[i],
		    MPI3MR_HOSTTAG_DEVRMCMD_MIN + i);

	for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++)
		mpi3mr_init_drv_cmd(&mrioc->evtack_cmds[i],
		    MPI3MR_HOSTTAG_EVTACKCMD_MIN + i);

	if ((pdev->device == MPI3_MFGPAGE_DEVID_SAS4116) &&
	    !pdev->revision)
		mrioc->enable_segqueue = false;
	else
		mrioc->enable_segqueue = true;

	init_waitqueue_head(&mrioc->reset_waitq);
	mrioc->logging_level = logging_level;
	mrioc->shost = shost;
	mrioc->pdev = pdev;
	mrioc->stop_bsgs = 1;

	mrioc->max_sgl_entries = max_sgl_entries;
	if (max_sgl_entries > MPI3MR_MAX_SGL_ENTRIES)
		mrioc->max_sgl_entries = MPI3MR_MAX_SGL_ENTRIES;
	else if (max_sgl_entries < MPI3MR_DEFAULT_SGL_ENTRIES)
		mrioc->max_sgl_entries = MPI3MR_DEFAULT_SGL_ENTRIES;
	else {
		mrioc->max_sgl_entries /= MPI3MR_DEFAULT_SGL_ENTRIES;
		mrioc->max_sgl_entries *= MPI3MR_DEFAULT_SGL_ENTRIES;
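		/*
		 * The divide-then-multiply above rounds the module parameter
		 * down to a multiple of MPI3MR_DEFAULT_SGL_ENTRIES.
		 */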
	}

	/* init shost parameters */
	shost->max_cmd_len = MPI3MR_MAX_CDB_LENGTH;
	shost->max_lun = -1;
	shost->unique_id = mrioc->id;

	shost->max_channel = 0;
	shost->max_id = 0xFFFFFFFF;

	shost->host_tagset = 1;

	if (prot_mask >= 0)
		scsi_host_set_prot(shost, prot_mask);
	else {
		prot_mask = SHOST_DIF_TYPE1_PROTECTION
		    | SHOST_DIF_TYPE2_PROTECTION
		    | SHOST_DIF_TYPE3_PROTECTION;
		scsi_host_set_prot(shost, prot_mask);
	}

	ioc_info(mrioc,
	    "%s :host protection capabilities enabled %s%s%s%s%s%s%s\n",
	    __func__,
	    (prot_mask & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
	    (prot_mask & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
	    (prot_mask & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
	    (prot_mask & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
	    (prot_mask & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
	    (prot_mask & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
	    (prot_mask & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");

	if (prot_guard_mask)
		scsi_host_set_guard(shost, (prot_guard_mask & 3));
	else
		scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);

	mrioc->fwevt_worker_thread = alloc_ordered_workqueue(
	    "%s%d_fwevt_wrkr", 0, mrioc->driver_name, mrioc->id);
	if (!mrioc->fwevt_worker_thread) {
		ioc_err(mrioc, "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		retval = -ENODEV;
		goto fwevtthread_failed;
	}

	mrioc->is_driver_loading = 1;
	mrioc->cpu_count = num_online_cpus();
	if (mpi3mr_setup_resources(mrioc)) {
		ioc_err(mrioc, "setup resources failed\n");
		retval = -ENODEV;
		goto resource_alloc_failed;
	}
	if (mpi3mr_init_ioc(mrioc)) {
		ioc_err(mrioc, "initializing IOC failed\n");
		retval = -ENODEV;
		goto init_ioc_failed;
	}

	shost->nr_hw_queues = mrioc->num_op_reply_q;
	if (mrioc->active_poll_qcount)
		shost->nr_maps = 3;

	shost->can_queue = mrioc->max_host_ios;
	shost->sg_tablesize = mrioc->max_sgl_entries;
	shost->max_id = mrioc->facts.max_perids + 1;

	retval = scsi_add_host(shost, &pdev->dev);
	if (retval) {
		ioc_err(mrioc, "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		goto addhost_failed;
	}

	scsi_scan_host(shost);
	mpi3mr_bsg_init(mrioc);
	return retval;

addhost_failed:
	mpi3mr_stop_watchdog(mrioc);
	mpi3mr_cleanup_ioc(mrioc);
init_ioc_failed:
	mpi3mr_free_mem(mrioc);
	mpi3mr_cleanup_resources(mrioc);
resource_alloc_failed:
	destroy_workqueue(mrioc->fwevt_worker_thread);
fwevtthread_failed:
	ida_free(&mrioc_ida, mrioc->id);
	spin_lock(&mrioc_list_lock);
	list_del(&mrioc->list);
	spin_unlock(&mrioc_list_lock);
id_alloc_failed:
	scsi_host_put(shost);
shost_failed:
	return retval;
}

/**
 * mpi3mr_remove - PCI remove callback
 * @pdev: PCI device instance
 *
 * Cleanup the IOC by issuing MUR and shutdown notification.
 * Free up all memory and resources associated with the
 * controller and target devices, and unregister the shost.
 */
static void mpi3mr_remove(struct pci_dev *pdev)
{
        struct Scsi_Host *shost = pci_get_drvdata(pdev);
        struct mpi3mr_ioc *mrioc;
        struct workqueue_struct *wq;
        unsigned long flags;
        struct mpi3mr_tgt_dev *tgtdev, *tgtdev_next;
        struct mpi3mr_hba_port *port, *hba_port_next;
        struct mpi3mr_sas_node *sas_expander, *sas_expander_next;

        if (!shost)
                return;

        mrioc = shost_priv(shost);
        while (mrioc->reset_in_progress || mrioc->is_driver_loading)
                ssleep(1);

        if (mrioc->block_on_pci_err) {
                mrioc->block_on_pci_err = false;
                scsi_unblock_requests(shost);
                mrioc->unrecoverable = 1;
        }

        if (!pci_device_is_present(mrioc->pdev) ||
            mrioc->pci_err_recovery) {
                mrioc->unrecoverable = 1;
                mpi3mr_flush_cmds_for_unrecovered_controller(mrioc);
        }

        mpi3mr_bsg_exit(mrioc);
        mrioc->stop_drv_processing = 1;
        mpi3mr_cleanup_fwevt_list(mrioc);
        spin_lock_irqsave(&mrioc->fwevt_lock, flags);
        wq = mrioc->fwevt_worker_thread;
        mrioc->fwevt_worker_thread = NULL;
        spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);
        if (wq)
                destroy_workqueue(wq);

        if (mrioc->sas_transport_enabled)
                sas_remove_host(shost);
        else
                scsi_remove_host(shost);

        list_for_each_entry_safe(tgtdev, tgtdev_next, &mrioc->tgtdev_list,
            list) {
                mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
                mpi3mr_tgtdev_del_from_list(mrioc, tgtdev, true);
                mpi3mr_tgtdev_put(tgtdev);
        }
        mpi3mr_stop_watchdog(mrioc);
        mpi3mr_cleanup_ioc(mrioc);
        mpi3mr_free_mem(mrioc);
        mpi3mr_cleanup_resources(mrioc);

        spin_lock_irqsave(&mrioc->sas_node_lock, flags);
        list_for_each_entry_safe_reverse(sas_expander, sas_expander_next,
            &mrioc->sas_expander_list, list) {
                spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
                mpi3mr_expander_node_remove(mrioc, sas_expander);
                spin_lock_irqsave(&mrioc->sas_node_lock, flags);
        }
        list_for_each_entry_safe(port, hba_port_next, &mrioc->hba_port_table_list, list) {
                ioc_info(mrioc,
                    "removing hba_port entry: %p port: %d from hba_port list\n",
                    port, port->port_id);
                list_del(&port->list);
                kfree(port);
        }
        spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);

        if (mrioc->sas_hba.num_phys) {
                kfree(mrioc->sas_hba.phy);
                mrioc->sas_hba.phy = NULL;
                mrioc->sas_hba.num_phys = 0;
        }

        ida_free(&mrioc_ida, mrioc->id);
        spin_lock(&mrioc_list_lock);
        list_del(&mrioc->list);
        spin_unlock(&mrioc_list_lock);

        scsi_host_put(shost);
}

/**
 * mpi3mr_shutdown - PCI shutdown callback
 * @pdev: PCI device instance
 *
 * Free up all memory and resources associated with the
 * controller.
 *
 * Return: Nothing.
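 *
 * Unlike mpi3mr_remove(), this callback runs at system shutdown,
 * reboot or kexec; it only quiesces the controller and releases
 * its resources, without unregistering the shost.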
 */
static void mpi3mr_shutdown(struct pci_dev *pdev)
{
        struct Scsi_Host *shost = pci_get_drvdata(pdev);
        struct mpi3mr_ioc *mrioc;
        struct workqueue_struct *wq;
        unsigned long flags;

        if (!shost)
                return;

        mrioc = shost_priv(shost);
        while (mrioc->reset_in_progress || mrioc->is_driver_loading)
                ssleep(1);

        mrioc->stop_drv_processing = 1;
        mpi3mr_cleanup_fwevt_list(mrioc);
        spin_lock_irqsave(&mrioc->fwevt_lock, flags);
        wq = mrioc->fwevt_worker_thread;
        mrioc->fwevt_worker_thread = NULL;
        spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);
        if (wq)
                destroy_workqueue(wq);

        mpi3mr_stop_watchdog(mrioc);
        mpi3mr_cleanup_ioc(mrioc);
        mpi3mr_cleanup_resources(mrioc);
}

/**
 * mpi3mr_suspend - PCI power management suspend callback
 * @dev: Device struct
 *
 * Cleanup the IOC by issuing MUR and shutdown notification
 * before the device is transitioned to a low power state.
 *
 * Return: 0 always.
 */
static int __maybe_unused
mpi3mr_suspend(struct device *dev)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        struct Scsi_Host *shost = pci_get_drvdata(pdev);
        struct mpi3mr_ioc *mrioc;

        if (!shost)
                return 0;

        mrioc = shost_priv(shost);
        while (mrioc->reset_in_progress || mrioc->is_driver_loading)
                ssleep(1);
        mrioc->stop_drv_processing = 1;
        mpi3mr_cleanup_fwevt_list(mrioc);
        scsi_block_requests(shost);
        mpi3mr_stop_watchdog(mrioc);
        mpi3mr_cleanup_ioc(mrioc);

        ioc_info(mrioc, "pdev=0x%p, slot=%s, entering operating state\n",
            pdev, pci_name(pdev));
        mpi3mr_cleanup_resources(mrioc);

        return 0;
}

/**
 * mpi3mr_resume - PCI power management resume callback
 * @dev: Device struct
 *
 * Restore the power state to D0, reinitialize the controller,
 * and resume I/O operations to the target devices.
 *
 * Return: 0 on success, non-zero on failure.
 */
static int __maybe_unused
mpi3mr_resume(struct device *dev)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        struct Scsi_Host *shost = pci_get_drvdata(pdev);
        struct mpi3mr_ioc *mrioc;
        pci_power_t device_state = pdev->current_state;
        int r;

        if (!shost)
                return 0;

        mrioc = shost_priv(shost);

        ioc_info(mrioc, "pdev=0x%p, slot=%s, previous operating state [D%d]\n",
            pdev, pci_name(pdev), device_state);
        mrioc->pdev = pdev;
        mrioc->cpu_count = num_online_cpus();
        r = mpi3mr_setup_resources(mrioc);
        if (r) {
                ioc_info(mrioc, "%s: Setup resources failed[%d]\n",
                    __func__, r);
                return r;
        }

        mrioc->stop_drv_processing = 0;
        mpi3mr_invalidate_devhandles(mrioc);
        mpi3mr_free_enclosure_list(mrioc);
        mpi3mr_memset_buffers(mrioc);
        r = mpi3mr_reinit_ioc(mrioc, 1);
        if (r) {
                ioc_err(mrioc, "resuming controller failed[%d]\n", r);
                return r;
        }
        ssleep(MPI3MR_RESET_TOPOLOGY_SETTLE_TIME);
        scsi_unblock_requests(shost);
        mrioc->device_refresh_on = 0;
        mpi3mr_start_watchdog(mrioc);

        return 0;
}

/**
 * mpi3mr_pcierr_error_detected - PCI error detected callback
 * @pdev: PCI device instance
 * @state: channel state
 *
 * This function is called by the PCI error recovery driver and,
 * based on the state passed in, decides what action to
 * recommend back to the PCI driver.
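 *
 * In summary, the switch below maps channel states to results:
 *
 *   pci_channel_io_normal       -> PCI_ERS_RESULT_CAN_RECOVER
 *   pci_channel_io_frozen       -> PCI_ERS_RESULT_NEED_RESET, or
 *                                  PCI_ERS_RESULT_DISCONNECT if a pending
 *                                  reset does not complete in time
 *   pci_channel_io_perm_failure -> PCI_ERS_RESULT_DISCONNECT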
 *
 * For all of the states, if there is no valid mrioc or SCSI host
 * reference in the PCI device, then this function returns the
 * result as disconnect.
 *
 * For the normal state, this function returns the result as can
 * recover.
 *
 * For the frozen state, this function blocks for any pending
 * controller initialization or re-initialization to complete,
 * stops any new interactions with the controller and returns
 * status as reset required.
 *
 * For the permanent failure state, this function marks the
 * controller as unrecoverable and returns status as disconnect.
 *
 * Return: PCI_ERS_RESULT_NEED_RESET or CAN_RECOVER or
 * DISCONNECT based on the controller state.
 */
static pci_ers_result_t
mpi3mr_pcierr_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
        struct Scsi_Host *shost;
        struct mpi3mr_ioc *mrioc;
        unsigned int timeout = MPI3MR_RESET_TIMEOUT;

        dev_info(&pdev->dev, "%s: callback invoked state(%d)\n", __func__,
            state);

        shost = pci_get_drvdata(pdev);
        mrioc = shost_priv(shost);

        switch (state) {
        case pci_channel_io_normal:
                return PCI_ERS_RESULT_CAN_RECOVER;
        case pci_channel_io_frozen:
                mrioc->pci_err_recovery = true;
                mrioc->block_on_pci_err = true;
                do {
                        if (mrioc->reset_in_progress || mrioc->is_driver_loading)
                                ssleep(1);
                        else
                                break;
                } while (--timeout);

                if (!timeout) {
                        mrioc->pci_err_recovery = true;
                        mrioc->block_on_pci_err = true;
                        mrioc->unrecoverable = 1;
                        mpi3mr_stop_watchdog(mrioc);
                        mpi3mr_flush_cmds_for_unrecovered_controller(mrioc);
                        return PCI_ERS_RESULT_DISCONNECT;
                }

                scsi_block_requests(mrioc->shost);
                mpi3mr_stop_watchdog(mrioc);
                mpi3mr_cleanup_resources(mrioc);
                return PCI_ERS_RESULT_NEED_RESET;
        case pci_channel_io_perm_failure:
                mrioc->pci_err_recovery = true;
                mrioc->block_on_pci_err = true;
                mrioc->unrecoverable = 1;
                mpi3mr_stop_watchdog(mrioc);
                mpi3mr_flush_cmds_for_unrecovered_controller(mrioc);
                return PCI_ERS_RESULT_DISCONNECT;
        default:
                return PCI_ERS_RESULT_DISCONNECT;
        }
}

/**
 * mpi3mr_pcierr_slot_reset - Post slot reset callback
 * @pdev: PCI device instance
 *
 * This function is called by the PCI error recovery driver
 * after it issues a slot or link reset for the recovery; the
 * driver is expected to bring back the controller and
 * initialize it.
 *
 * This function restores the PCI state and reinitializes the
 * controller resources and the controller, blocking for any
 * pending reset to complete.
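 *
 * In a typical AER-driven recovery (per the PCI error recovery
 * model), this runs after mpi3mr_pcierr_error_detected() has
 * returned PCI_ERS_RESULT_NEED_RESET and the core has reset the
 * slot or link; on success, mpi3mr_pcierr_resume() follows.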
 *
 * Return: PCI_ERS_RESULT_DISCONNECT on failure or
 * PCI_ERS_RESULT_RECOVERED.
 */
static pci_ers_result_t mpi3mr_pcierr_slot_reset(struct pci_dev *pdev)
{
        struct Scsi_Host *shost;
        struct mpi3mr_ioc *mrioc;
        unsigned int timeout = MPI3MR_RESET_TIMEOUT;

        dev_info(&pdev->dev, "%s: callback invoked\n", __func__);

        shost = pci_get_drvdata(pdev);
        mrioc = shost_priv(shost);

        do {
                if (mrioc->reset_in_progress)
                        ssleep(1);
                else
                        break;
        } while (--timeout);

        if (!timeout)
                goto out_failed;

        pci_restore_state(pdev);

        if (mpi3mr_setup_resources(mrioc)) {
                ioc_err(mrioc, "setup resources failed\n");
                goto out_failed;
        }
        mrioc->unrecoverable = 0;
        mrioc->pci_err_recovery = false;

        if (mpi3mr_soft_reset_handler(mrioc, MPI3MR_RESET_FROM_FIRMWARE, 0))
                goto out_failed;

        return PCI_ERS_RESULT_RECOVERED;

out_failed:
        mrioc->unrecoverable = 1;
        mrioc->block_on_pci_err = false;
        scsi_unblock_requests(shost);
        mpi3mr_start_watchdog(mrioc);
        return PCI_ERS_RESULT_DISCONNECT;
}

/**
 * mpi3mr_pcierr_resume - PCI error recovery resume callback
 * @pdev: PCI device instance
 *
 * This function enables all I/O and IOCTLs after the reset
 * issued as part of the PCI error recovery.
 *
 * Return: Nothing.
 */
static void mpi3mr_pcierr_resume(struct pci_dev *pdev)
{
        struct Scsi_Host *shost;
        struct mpi3mr_ioc *mrioc;

        dev_info(&pdev->dev, "%s: callback invoked\n", __func__);

        shost = pci_get_drvdata(pdev);
        mrioc = shost_priv(shost);

        if (mrioc->block_on_pci_err) {
                mrioc->block_on_pci_err = false;
                scsi_unblock_requests(shost);
                mpi3mr_start_watchdog(mrioc);
        }
}

/**
 * mpi3mr_pcierr_mmio_enabled - PCI error recovery callback
 * @pdev: PCI device instance
 *
 * This is called only if mpi3mr_pcierr_error_detected returns
 * PCI_ERS_RESULT_CAN_RECOVER.
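 * That is, the reported channel state was pci_channel_io_normal
 * and, per the PCI error recovery model, MMIO access to the
 * device has been re-enabled.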
 *
 * Return: PCI_ERS_RESULT_DISCONNECT when the controller is
 * unrecoverable or when the shost/mrioc reference cannot be
 * found, else PCI_ERS_RESULT_RECOVERED.
 */
static pci_ers_result_t mpi3mr_pcierr_mmio_enabled(struct pci_dev *pdev)
{
        struct Scsi_Host *shost;
        struct mpi3mr_ioc *mrioc;

        dev_info(&pdev->dev, "%s: callback invoked\n", __func__);

        shost = pci_get_drvdata(pdev);
        mrioc = shost_priv(shost);

        if (mrioc->unrecoverable)
                return PCI_ERS_RESULT_DISCONNECT;

        return PCI_ERS_RESULT_RECOVERED;
}

static const struct pci_device_id mpi3mr_pci_id_table[] = {
        {
                PCI_DEVICE_SUB(MPI3_MFGPAGE_VENDORID_BROADCOM,
                    MPI3_MFGPAGE_DEVID_SAS4116, PCI_ANY_ID, PCI_ANY_ID)
        },
        {
                PCI_DEVICE_SUB(MPI3_MFGPAGE_VENDORID_BROADCOM,
                    MPI3_MFGPAGE_DEVID_SAS5116_MPI, PCI_ANY_ID, PCI_ANY_ID)
        },
        {
                PCI_DEVICE_SUB(MPI3_MFGPAGE_VENDORID_BROADCOM,
                    MPI3_MFGPAGE_DEVID_SAS5116_MPI_MGMT, PCI_ANY_ID, PCI_ANY_ID)
        },
        { 0 }
};
MODULE_DEVICE_TABLE(pci, mpi3mr_pci_id_table);

static const struct pci_error_handlers mpi3mr_err_handler = {
        .error_detected = mpi3mr_pcierr_error_detected,
        .mmio_enabled = mpi3mr_pcierr_mmio_enabled,
        .slot_reset = mpi3mr_pcierr_slot_reset,
        .resume = mpi3mr_pcierr_resume,
};

static SIMPLE_DEV_PM_OPS(mpi3mr_pm_ops, mpi3mr_suspend, mpi3mr_resume);

static struct pci_driver mpi3mr_pci_driver = {
        .name = MPI3MR_DRIVER_NAME,
        .id_table = mpi3mr_pci_id_table,
        .probe = mpi3mr_probe,
        .remove = mpi3mr_remove,
        .shutdown = mpi3mr_shutdown,
        .err_handler = &mpi3mr_err_handler,
        .driver.pm = &mpi3mr_pm_ops,
};

static ssize_t event_counter_show(struct device_driver *dd, char *buf)
{
        return sprintf(buf, "%llu\n", atomic64_read(&event_counter));
}
static DRIVER_ATTR_RO(event_counter);

static int __init mpi3mr_init(void)
{
        int ret_val;

        pr_info("Loading %s version %s\n", MPI3MR_DRIVER_NAME,
            MPI3MR_DRIVER_VERSION);

        mpi3mr_transport_template =
            sas_attach_transport(&mpi3mr_transport_functions);
        if (!mpi3mr_transport_template) {
                pr_err("%s failed to load due to sas transport attach failure\n",
                    MPI3MR_DRIVER_NAME);
                return -ENODEV;
        }

        ret_val = pci_register_driver(&mpi3mr_pci_driver);
        if (ret_val) {
                pr_err("%s failed to load due to pci register driver failure\n",
                    MPI3MR_DRIVER_NAME);
                goto err_pci_reg_fail;
        }

        ret_val = driver_create_file(&mpi3mr_pci_driver.driver,
            &driver_attr_event_counter);
        if (ret_val)
                goto err_event_counter;

        return ret_val;

err_event_counter:
        pci_unregister_driver(&mpi3mr_pci_driver);

err_pci_reg_fail:
        sas_release_transport(mpi3mr_transport_template);
        return ret_val;
}

static void __exit mpi3mr_exit(void)
{
        if (warn_non_secure_ctlr)
                pr_warn(
                    "Unloading %s version %s while managing a non secure controller\n",
                    MPI3MR_DRIVER_NAME, MPI3MR_DRIVER_VERSION);
        else
                pr_info("Unloading %s version %s\n", MPI3MR_DRIVER_NAME,
                    MPI3MR_DRIVER_VERSION);

        driver_remove_file(&mpi3mr_pci_driver.driver,
            &driver_attr_event_counter);
        pci_unregister_driver(&mpi3mr_pci_driver);
        sas_release_transport(mpi3mr_transport_template);
        ida_destroy(&mrioc_ida);
}
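/*
 * Reading the driver-level event counter registered in mpi3mr_init();
 * a minimal userspace sketch (the sysfs path follows from the driver
 * name registered above and is an assumption of this example):
 *
 *   cat /sys/bus/pci/drivers/mpi3mr/event_counter
 */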
module_init(mpi3mr_init);
module_exit(mpi3mr_exit);
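
/*
 * Example module load using the parameters consumed in mpi3mr_probe()
 * above; the values here are illustrative only:
 *
 *   modprobe mpi3mr max_sgl_entries=512 logging_level=1
 */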