// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Driver for Broadcom MPI3 Storage Controllers
 *
 * Copyright (C) 2017-2023 Broadcom Inc.
 *  (mailto: mpi3mr-linuxdrv.pdl@broadcom.com)
 *
 */

#include "mpi3mr.h"
#include <linux/idr.h>

/* global driver scope variables */
LIST_HEAD(mrioc_list);
DEFINE_SPINLOCK(mrioc_list_lock);
static DEFINE_IDA(mrioc_ida);
static int warn_non_secure_ctlr;
atomic64_t event_counter;

MODULE_AUTHOR(MPI3MR_DRIVER_AUTHOR);
MODULE_DESCRIPTION(MPI3MR_DRIVER_DESC);
MODULE_LICENSE(MPI3MR_DRIVER_LICENSE);
MODULE_VERSION(MPI3MR_DRIVER_VERSION);

/* Module parameters */
int prot_mask = -1;
module_param(prot_mask, int, 0);
MODULE_PARM_DESC(prot_mask, "Host protection capabilities mask, def=0x07");

static int prot_guard_mask = 3;
module_param(prot_guard_mask, int, 0);
MODULE_PARM_DESC(prot_guard_mask, "Host protection guard mask, def=3");
static int logging_level;
module_param(logging_level, int, 0);
MODULE_PARM_DESC(logging_level,
	"bits for enabling additional logging info (default=0)");
static int max_sgl_entries = MPI3MR_DEFAULT_SGL_ENTRIES;
module_param(max_sgl_entries, int, 0444);
MODULE_PARM_DESC(max_sgl_entries,
	"Preferred max number of SG entries to be used for a single I/O\n"
	"The actual value will be determined by the driver\n"
	"(Minimum=256, Maximum=2048, default=256)");

/* Forward declarations */
static void mpi3mr_send_event_ack(struct mpi3mr_ioc *mrioc, u8 event,
	struct mpi3mr_drv_cmd *cmdparam, u32 event_ctx);

#define MPI3MR_DRIVER_EVENT_TG_QD_REDUCTION	(0xFFFF)

#define MPI3_EVENT_WAIT_FOR_DEVICES_TO_REFRESH	(0xFFFE)

/*
 * SAS Log info code for a NCQ collateral abort after an NCQ error:
 * IOC_LOGINFO_PREFIX_PL | PL_LOGINFO_CODE_SATA_NCQ_FAIL_ALL_CMDS_AFTR_ERR
 * See: drivers/message/fusion/lsi/mpi_log_sas.h
 */
#define IOC_LOGINFO_SATA_NCQ_FAIL_AFTER_ERR	0x31080000

/**
 * mpi3mr_host_tag_for_scmd - Get host tag for a scmd
 * @mrioc: Adapter instance reference
 * @scmd: SCSI command reference
 *
 * Calculate the host tag based on the block tag for a given scmd.
 *
 * Return: Valid host tag or MPI3MR_HOSTTAG_INVALID.
 */
static u16 mpi3mr_host_tag_for_scmd(struct mpi3mr_ioc *mrioc,
	struct scsi_cmnd *scmd)
{
	struct scmd_priv *priv = NULL;
	u32 unique_tag;
	u16 host_tag, hw_queue;

	unique_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));

	hw_queue = blk_mq_unique_tag_to_hwq(unique_tag);
	if (hw_queue >= mrioc->num_op_reply_q)
		return MPI3MR_HOSTTAG_INVALID;
	host_tag = blk_mq_unique_tag_to_tag(unique_tag);

	if (WARN_ON(host_tag >= mrioc->max_host_ios))
		return MPI3MR_HOSTTAG_INVALID;

	priv = scsi_cmd_priv(scmd);
	/* host_tag 0 is invalid hence incrementing by 1 */
	priv->host_tag = host_tag + 1;
	priv->scmd = scmd;
	priv->in_lld_scope = 1;
	priv->req_q_idx = hw_queue;
	priv->meta_chain_idx = -1;
	priv->chain_idx = -1;
	priv->meta_sg_valid = 0;
	return priv->host_tag;
}
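/*
 * Note on the tag scheme: the driver's host tag is the block layer tag
 * plus one (host tag 0 is reserved as invalid), and the operational
 * request queue index is carried alongside it in the command private
 * data. A sketch of the reverse mapping performed by
 * mpi3mr_scmd_from_host_tag() below:
 *
 *	unique_tag = (qidx << BLK_MQ_UNIQUE_TAG_BITS) | (host_tag - 1);
 *	scmd = scsi_host_find_tag(shost, unique_tag);
 */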
/**
 * mpi3mr_scmd_from_host_tag - Get SCSI command from host tag
 * @mrioc: Adapter instance reference
 * @host_tag: Host tag
 * @qidx: Operational queue index
 *
 * Identify the block tag from the host tag and queue index and
 * retrieve the associated SCSI command using scsi_host_find_tag().
 *
 * Return: SCSI command reference or NULL.
 */
static struct scsi_cmnd *mpi3mr_scmd_from_host_tag(
	struct mpi3mr_ioc *mrioc, u16 host_tag, u16 qidx)
{
	struct scsi_cmnd *scmd = NULL;
	struct scmd_priv *priv = NULL;
	u32 unique_tag = host_tag - 1;

	if (WARN_ON(host_tag > mrioc->max_host_ios))
		goto out;

	unique_tag |= (qidx << BLK_MQ_UNIQUE_TAG_BITS);

	scmd = scsi_host_find_tag(mrioc->shost, unique_tag);
	if (scmd) {
		priv = scsi_cmd_priv(scmd);
		if (!priv->in_lld_scope)
			scmd = NULL;
	}
out:
	return scmd;
}

/**
 * mpi3mr_clear_scmd_priv - Cleanup SCSI command private data
 * @mrioc: Adapter instance reference
 * @scmd: SCSI command reference
 *
 * Invalidate the SCSI command private data to mark the command
 * as no longer in LLD scope.
 *
 * Return: Nothing.
 */
static void mpi3mr_clear_scmd_priv(struct mpi3mr_ioc *mrioc,
	struct scsi_cmnd *scmd)
{
	struct scmd_priv *priv = NULL;

	priv = scsi_cmd_priv(scmd);

	if (WARN_ON(priv->in_lld_scope == 0))
		return;
	priv->host_tag = MPI3MR_HOSTTAG_INVALID;
	priv->req_q_idx = 0xFFFF;
	priv->scmd = NULL;
	priv->in_lld_scope = 0;
	priv->meta_sg_valid = 0;
	if (priv->chain_idx >= 0) {
		clear_bit(priv->chain_idx, mrioc->chain_bitmap);
		priv->chain_idx = -1;
	}
	if (priv->meta_chain_idx >= 0) {
		clear_bit(priv->meta_chain_idx, mrioc->chain_bitmap);
		priv->meta_chain_idx = -1;
	}
}

static void mpi3mr_dev_rmhs_send_tm(struct mpi3mr_ioc *mrioc, u16 handle,
	struct mpi3mr_drv_cmd *cmdparam, u8 iou_rc);
static void mpi3mr_fwevt_worker(struct work_struct *work);

/**
 * mpi3mr_fwevt_free - firmware event memory deallocator
 * @r: k reference pointer of the firmware event
 *
 * Free firmware event memory when no reference.
 */
static void mpi3mr_fwevt_free(struct kref *r)
{
	kfree(container_of(r, struct mpi3mr_fwevt, ref_count));
}

/**
 * mpi3mr_fwevt_get - k reference incrementor
 * @fwevt: Firmware event reference
 *
 * Increment firmware event reference count.
 */
static void mpi3mr_fwevt_get(struct mpi3mr_fwevt *fwevt)
{
	kref_get(&fwevt->ref_count);
}

/**
 * mpi3mr_fwevt_put - k reference decrementor
 * @fwevt: Firmware event reference
 *
 * Decrement firmware event reference count.
 */
static void mpi3mr_fwevt_put(struct mpi3mr_fwevt *fwevt)
{
	kref_put(&fwevt->ref_count, mpi3mr_fwevt_free);
}

/**
 * mpi3mr_alloc_fwevt - Allocate firmware event
 * @len: length of firmware event data to allocate
 *
 * Allocate firmware event with required length and initialize
 * the reference counter.
 *
 * Return: firmware event reference.
 */
static struct mpi3mr_fwevt *mpi3mr_alloc_fwevt(int len)
{
	struct mpi3mr_fwevt *fwevt;

	fwevt = kzalloc(sizeof(*fwevt) + len, GFP_ATOMIC);
	if (!fwevt)
		return NULL;

	kref_init(&fwevt->ref_count);
	return fwevt;
}
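/*
 * Firmware events are allocated with a caller-sized trailing buffer
 * (fwevt->event_data) and GFP_ATOMIC, presumably because events may be
 * queued from atomic (reply processing) context. kref_init() starts the
 * reference count at 1; that initial reference is dropped once event
 * processing completes (or in mpi3mr_cancel_work() below).
 */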
/**
 * mpi3mr_fwevt_add_to_list - Add firmware event to the list
 * @mrioc: Adapter instance reference
 * @fwevt: Firmware event reference
 *
 * Add the given firmware event to the firmware event list.
 *
 * Return: Nothing.
 */
static void mpi3mr_fwevt_add_to_list(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_fwevt *fwevt)
{
	unsigned long flags;

	if (!mrioc->fwevt_worker_thread)
		return;

	spin_lock_irqsave(&mrioc->fwevt_lock, flags);
	/* get fwevt reference count while adding it to fwevt_list */
	mpi3mr_fwevt_get(fwevt);
	INIT_LIST_HEAD(&fwevt->list);
	list_add_tail(&fwevt->list, &mrioc->fwevt_list);
	INIT_WORK(&fwevt->work, mpi3mr_fwevt_worker);
	/* get fwevt reference count while enqueueing it to worker queue */
	mpi3mr_fwevt_get(fwevt);
	queue_work(mrioc->fwevt_worker_thread, &fwevt->work);
	spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);
}

/**
 * mpi3mr_hdb_trigger_data_event - Add hdb trigger data event to
 * the list
 * @mrioc: Adapter instance reference
 * @event_data: Event data
 *
 * Add the given hdb trigger data event to the firmware event
 * list.
 *
 * Return: Nothing.
 */
void mpi3mr_hdb_trigger_data_event(struct mpi3mr_ioc *mrioc,
	struct trigger_event_data *event_data)
{
	struct mpi3mr_fwevt *fwevt;
	u16 sz = sizeof(*event_data);

	fwevt = mpi3mr_alloc_fwevt(sz);
	if (!fwevt) {
		ioc_warn(mrioc, "failed to queue hdb trigger data event\n");
		return;
	}

	fwevt->mrioc = mrioc;
	fwevt->event_id = MPI3MR_DRIVER_EVENT_PROCESS_TRIGGER;
	fwevt->send_ack = 0;
	fwevt->process_evt = 1;
	fwevt->evt_ctx = 0;
	fwevt->event_data_size = sz;
	memcpy(fwevt->event_data, event_data, sz);

	mpi3mr_fwevt_add_to_list(mrioc, fwevt);
}

/**
 * mpi3mr_fwevt_del_from_list - Delete firmware event from list
 * @mrioc: Adapter instance reference
 * @fwevt: Firmware event reference
 *
 * Delete the given firmware event from the firmware event list.
 *
 * Return: Nothing.
 */
static void mpi3mr_fwevt_del_from_list(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_fwevt *fwevt)
{
	unsigned long flags;

	spin_lock_irqsave(&mrioc->fwevt_lock, flags);
	if (!list_empty(&fwevt->list)) {
		list_del_init(&fwevt->list);
		/*
		 * Put fwevt reference count after
		 * removing it from fwevt_list
		 */
		mpi3mr_fwevt_put(fwevt);
	}
	spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);
}

/**
 * mpi3mr_dequeue_fwevt - Dequeue firmware event from the list
 * @mrioc: Adapter instance reference
 *
 * Dequeue a firmware event from the firmware event list.
 *
 * Return: firmware event.
 */
static struct mpi3mr_fwevt *mpi3mr_dequeue_fwevt(
	struct mpi3mr_ioc *mrioc)
{
	unsigned long flags;
	struct mpi3mr_fwevt *fwevt = NULL;

	spin_lock_irqsave(&mrioc->fwevt_lock, flags);
	if (!list_empty(&mrioc->fwevt_list)) {
		fwevt = list_first_entry(&mrioc->fwevt_list,
		    struct mpi3mr_fwevt, list);
		list_del_init(&fwevt->list);
		/*
		 * Put fwevt reference count after
		 * removing it from fwevt_list
		 */
		mpi3mr_fwevt_put(fwevt);
	}
	spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);

	return fwevt;
}
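/*
 * Reference counting of a queued fwevt, as implemented above: one
 * reference from kref_init() at allocation, one taken when the event is
 * added to fwevt_list, and one taken when the work is queued to the
 * worker thread. The list and work references are dropped on
 * dequeue/cancel; the allocation reference is dropped when processing
 * of the event completes.
 */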
/**
 * mpi3mr_cancel_work - cancel firmware event
 * @fwevt: fwevt object which needs to be canceled
 *
 * Return: Nothing.
 */
static void mpi3mr_cancel_work(struct mpi3mr_fwevt *fwevt)
{
	/*
	 * Wait on the fwevt to complete. If this returns 1, then
	 * the event was never executed.
	 *
	 * If it did execute, we wait for it to finish, and the put will
	 * happen from mpi3mr_process_fwevt()
	 */
	if (cancel_work_sync(&fwevt->work)) {
		/*
		 * Put fwevt reference count after
		 * dequeuing it from worker queue
		 */
		mpi3mr_fwevt_put(fwevt);
		/*
		 * Put fwevt reference count to neutralize
		 * kref_init increment
		 */
		mpi3mr_fwevt_put(fwevt);
	}
}

/**
 * mpi3mr_cleanup_fwevt_list - Cleanup firmware event list
 * @mrioc: Adapter instance reference
 *
 * Flush all pending firmware events from the firmware event
 * list.
 *
 * Return: Nothing.
 */
void mpi3mr_cleanup_fwevt_list(struct mpi3mr_ioc *mrioc)
{
	struct mpi3mr_fwevt *fwevt = NULL;

	if ((list_empty(&mrioc->fwevt_list) && !mrioc->current_event) ||
	    !mrioc->fwevt_worker_thread)
		return;

	while ((fwevt = mpi3mr_dequeue_fwevt(mrioc)))
		mpi3mr_cancel_work(fwevt);

	if (mrioc->current_event) {
		fwevt = mrioc->current_event;
		/*
		 * Don't call cancel_work_sync() for the fwevt work if
		 * the controller reset is invoked as part of processing
		 * that same fwevt work, or when the worker thread is
		 * waiting for device add/remove APIs to complete;
		 * otherwise a deadlock will occur.
		 */
		if (current_work() == &fwevt->work || fwevt->pending_at_sml) {
			fwevt->discard = 1;
			return;
		}

		mpi3mr_cancel_work(fwevt);
	}
}

/**
 * mpi3mr_queue_qd_reduction_event - Queue TG QD reduction event
 * @mrioc: Adapter instance reference
 * @tg: Throttle group information pointer
 *
 * Accessor to queue a synthetically generated driver event to
 * the event worker thread; the driver event will be used to
 * reduce the QD of all VDs in the TG from the worker thread.
 *
 * Return: None.
 */
static void mpi3mr_queue_qd_reduction_event(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_throttle_group_info *tg)
{
	struct mpi3mr_fwevt *fwevt;
	u16 sz = sizeof(struct mpi3mr_throttle_group_info *);

	/*
	 * If the QD reduction event is already queued due to throttling
	 * and the QD has not been restored through a device info change
	 * event, then don't queue further reduction events.
	 */
	if (tg->fw_qd != tg->modified_qd)
		return;

	fwevt = mpi3mr_alloc_fwevt(sz);
	if (!fwevt) {
		ioc_warn(mrioc, "failed to queue TG QD reduction event\n");
		return;
	}
	*(struct mpi3mr_throttle_group_info **)fwevt->event_data = tg;
	fwevt->mrioc = mrioc;
	fwevt->event_id = MPI3MR_DRIVER_EVENT_TG_QD_REDUCTION;
	fwevt->send_ack = 0;
	fwevt->process_evt = 1;
	fwevt->evt_ctx = 0;
	fwevt->event_data_size = sz;
	tg->modified_qd = max_t(u16, (tg->fw_qd * tg->qd_reduction) / 10, 8);

	dprint_event_bh(mrioc, "qd reduction event queued for tg_id(%d)\n",
	    tg->id);
	mpi3mr_fwevt_add_to_list(mrioc, fwevt);
}
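/*
 * Worked example of the reduction above: with tg->fw_qd = 128 and
 * tg->qd_reduction = 3, modified_qd = max((128 * 3) / 10, 8) = 38.
 * The floor of 8 keeps the queue depth from collapsing entirely.
 */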
/**
 * mpi3mr_invalidate_devhandles - Invalidate device handles
 * @mrioc: Adapter instance reference
 *
 * Invalidate the device handles in the target device
 * structures. Called post reset prior to reinitializing the
 * controller.
 *
 * Return: Nothing.
 */
void mpi3mr_invalidate_devhandles(struct mpi3mr_ioc *mrioc)
{
	struct mpi3mr_tgt_dev *tgtdev;
	struct mpi3mr_stgt_priv_data *tgt_priv;

	list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
		tgtdev->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
		if (tgtdev->starget && tgtdev->starget->hostdata) {
			tgt_priv = tgtdev->starget->hostdata;
			tgt_priv->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
			tgt_priv->io_throttle_enabled = 0;
			tgt_priv->io_divert = 0;
			tgt_priv->throttle_group = NULL;
			tgt_priv->wslen = 0;
			if (tgtdev->host_exposed)
				atomic_set(&tgt_priv->block_io, 1);
		}
	}
}

/**
 * mpi3mr_print_scmd - print individual SCSI command
 * @rq: Block request
 * @data: Adapter instance reference
 *
 * Print the SCSI command details if it is in LLD scope.
 *
 * Return: true always.
 */
static bool mpi3mr_print_scmd(struct request *rq, void *data)
{
	struct mpi3mr_ioc *mrioc = (struct mpi3mr_ioc *)data;
	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
	struct scmd_priv *priv = NULL;

	if (scmd) {
		priv = scsi_cmd_priv(scmd);
		if (!priv->in_lld_scope)
			goto out;

		ioc_info(mrioc, "%s :Host Tag = %d, qid = %d\n",
		    __func__, priv->host_tag, priv->req_q_idx + 1);
		scsi_print_command(scmd);
	}

out:
	return true;
}

/**
 * mpi3mr_flush_scmd - Flush individual SCSI command
 * @rq: Block request
 * @data: Adapter instance reference
 *
 * Return the SCSI command to the upper layers if it is in LLD
 * scope.
 *
 * Return: true always.
 */
static bool mpi3mr_flush_scmd(struct request *rq, void *data)
{
	struct mpi3mr_ioc *mrioc = (struct mpi3mr_ioc *)data;
	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
	struct scmd_priv *priv = NULL;

	if (scmd) {
		priv = scsi_cmd_priv(scmd);
		if (!priv->in_lld_scope)
			goto out;

		if (priv->meta_sg_valid)
			dma_unmap_sg(&mrioc->pdev->dev, scsi_prot_sglist(scmd),
			    scsi_prot_sg_count(scmd), scmd->sc_data_direction);
		mpi3mr_clear_scmd_priv(mrioc, scmd);
		scsi_dma_unmap(scmd);
		scmd->result = DID_RESET << 16;
		scsi_print_command(scmd);
		scsi_done(scmd);
		mrioc->flush_io_count++;
	}

out:
	return true;
}
/**
 * mpi3mr_count_dev_pending - Count commands pending for a lun
 * @rq: Block request
 * @data: SCSI device reference
 *
 * This is an iterator function called for each SCSI command in
 * a host and if the command is pending in the LLD for the
 * specific device (LUN) then the device specific pending I/O
 * counter is updated in the device structure.
 *
 * Return: true always.
 */
static bool mpi3mr_count_dev_pending(struct request *rq, void *data)
{
	struct scsi_device *sdev = (struct scsi_device *)data;
	struct mpi3mr_sdev_priv_data *sdev_priv_data = sdev->hostdata;
	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
	struct scmd_priv *priv;

	if (scmd) {
		priv = scsi_cmd_priv(scmd);
		if (!priv->in_lld_scope)
			goto out;
		if (scmd->device == sdev)
			sdev_priv_data->pend_count++;
	}

out:
	return true;
}

/**
 * mpi3mr_count_tgt_pending - Count commands pending for target
 * @rq: Block request
 * @data: SCSI target reference
 *
 * This is an iterator function called for each SCSI command in
 * a host and if the command is pending in the LLD for the
 * specific target then the target specific pending I/O counter
 * is updated in the target structure.
 *
 * Return: true always.
 */
static bool mpi3mr_count_tgt_pending(struct request *rq, void *data)
{
	struct scsi_target *starget = (struct scsi_target *)data;
	struct mpi3mr_stgt_priv_data *stgt_priv_data = starget->hostdata;
	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
	struct scmd_priv *priv;

	if (scmd) {
		priv = scsi_cmd_priv(scmd);
		if (!priv->in_lld_scope)
			goto out;
		if (scmd->device && (scsi_target(scmd->device) == starget))
			stgt_priv_data->pend_count++;
	}

out:
	return true;
}

/**
 * mpi3mr_flush_host_io - Flush host I/Os
 * @mrioc: Adapter instance reference
 *
 * Flush all of the pending I/Os by calling
 * blk_mq_tagset_busy_iter() for each possible tag. This is
 * executed post controller reset.
 *
 * Return: Nothing.
 */
void mpi3mr_flush_host_io(struct mpi3mr_ioc *mrioc)
{
	struct Scsi_Host *shost = mrioc->shost;

	mrioc->flush_io_count = 0;
	ioc_info(mrioc, "%s :Flushing Host I/O cmds post reset\n", __func__);
	blk_mq_tagset_busy_iter(&shost->tag_set,
	    mpi3mr_flush_scmd, (void *)mrioc);
	ioc_info(mrioc, "%s :Flushed %d Host I/O cmds\n", __func__,
	    mrioc->flush_io_count);
}

/**
 * mpi3mr_flush_cmds_for_unrecovered_controller - Flush all pending cmds
 * @mrioc: Adapter instance reference
 *
 * This function waits for currently running IO poll threads to
 * exit and then flushes all host I/Os and any internal pending
 * cmds. This is executed after the controller is marked as
 * unrecoverable.
 *
 * Return: Nothing.
 */
void mpi3mr_flush_cmds_for_unrecovered_controller(struct mpi3mr_ioc *mrioc)
{
	struct Scsi_Host *shost = mrioc->shost;
	int i;

	if (!mrioc->unrecoverable)
		return;

	if (mrioc->op_reply_qinfo) {
		for (i = 0; i < mrioc->num_queues; i++) {
			while (atomic_read(&mrioc->op_reply_qinfo[i].in_use))
				udelay(500);
			atomic_set(&mrioc->op_reply_qinfo[i].pend_ios, 0);
		}
	}
	mrioc->flush_io_count = 0;
	blk_mq_tagset_busy_iter(&shost->tag_set,
	    mpi3mr_flush_scmd, (void *)mrioc);
	mpi3mr_flush_delayed_cmd_lists(mrioc);
	mpi3mr_flush_drv_cmds(mrioc);
}
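/*
 * mpi3mr_count_dev_pending() and mpi3mr_count_tgt_pending() above are
 * meant to be driven through blk_mq_tagset_busy_iter(), in the same way
 * as the flush helpers. An illustrative call (not a definitive usage):
 *
 *	sdev_priv_data->pend_count = 0;
 *	blk_mq_tagset_busy_iter(&shost->tag_set,
 *	    mpi3mr_count_dev_pending, (void *)sdev);
 */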
/**
 * mpi3mr_alloc_tgtdev - target device allocator
 *
 * Allocate a target device instance and initialize the
 * reference count.
 *
 * Return: target device instance.
 */
static struct mpi3mr_tgt_dev *mpi3mr_alloc_tgtdev(void)
{
	struct mpi3mr_tgt_dev *tgtdev;

	tgtdev = kzalloc(sizeof(*tgtdev), GFP_ATOMIC);
	if (!tgtdev)
		return NULL;
	kref_init(&tgtdev->ref_count);
	return tgtdev;
}

/**
 * mpi3mr_tgtdev_add_to_list - Add tgtdev to the list
 * @mrioc: Adapter instance reference
 * @tgtdev: Target device
 *
 * Add the target device to the target device list.
 *
 * Return: Nothing.
 */
static void mpi3mr_tgtdev_add_to_list(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_tgt_dev *tgtdev)
{
	unsigned long flags;

	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
	mpi3mr_tgtdev_get(tgtdev);
	INIT_LIST_HEAD(&tgtdev->list);
	list_add_tail(&tgtdev->list, &mrioc->tgtdev_list);
	tgtdev->state = MPI3MR_DEV_CREATED;
	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
}

/**
 * mpi3mr_tgtdev_del_from_list - Delete tgtdev from the list
 * @mrioc: Adapter instance reference
 * @tgtdev: Target device
 * @must_delete: Must delete the target device from the list irrespective
 * of the device state.
 *
 * Remove the target device from the target device list.
 *
 * Return: Nothing.
 */
static void mpi3mr_tgtdev_del_from_list(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_tgt_dev *tgtdev, bool must_delete)
{
	unsigned long flags;

	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
	if ((tgtdev->state == MPI3MR_DEV_REMOVE_HS_STARTED) || (must_delete == true)) {
		if (!list_empty(&tgtdev->list)) {
			list_del_init(&tgtdev->list);
			tgtdev->state = MPI3MR_DEV_DELETED;
			mpi3mr_tgtdev_put(tgtdev);
		}
	}
	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
}

/**
 * __mpi3mr_get_tgtdev_by_handle - Get tgtdev from device handle
 * @mrioc: Adapter instance reference
 * @handle: Device handle
 *
 * Accessor to retrieve target device from the device handle.
 * Non-lock version.
 *
 * Return: Target device reference.
 */
static struct mpi3mr_tgt_dev *__mpi3mr_get_tgtdev_by_handle(
	struct mpi3mr_ioc *mrioc, u16 handle)
{
	struct mpi3mr_tgt_dev *tgtdev;

	assert_spin_locked(&mrioc->tgtdev_lock);
	list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list)
		if (tgtdev->dev_handle == handle)
			goto found_tgtdev;
	return NULL;

found_tgtdev:
	mpi3mr_tgtdev_get(tgtdev);
	return tgtdev;
}

/**
 * mpi3mr_get_tgtdev_by_handle - Get tgtdev from device handle
 * @mrioc: Adapter instance reference
 * @handle: Device handle
 *
 * Accessor to retrieve target device from the device handle.
 * Lock version.
 *
 * Return: Target device reference.
 */
struct mpi3mr_tgt_dev *mpi3mr_get_tgtdev_by_handle(
	struct mpi3mr_ioc *mrioc, u16 handle)
{
	struct mpi3mr_tgt_dev *tgtdev;
	unsigned long flags;

	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
	tgtdev = __mpi3mr_get_tgtdev_by_handle(mrioc, handle);
	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
	return tgtdev;
}
/**
 * __mpi3mr_get_tgtdev_by_perst_id - Get tgtdev from persistent ID
 * @mrioc: Adapter instance reference
 * @persist_id: Persistent ID
 *
 * Accessor to retrieve target device from the Persistent ID.
 * Non-lock version.
 *
 * Return: Target device reference.
 */
static struct mpi3mr_tgt_dev *__mpi3mr_get_tgtdev_by_perst_id(
	struct mpi3mr_ioc *mrioc, u16 persist_id)
{
	struct mpi3mr_tgt_dev *tgtdev;

	assert_spin_locked(&mrioc->tgtdev_lock);
	list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list)
		if (tgtdev->perst_id == persist_id)
			goto found_tgtdev;
	return NULL;

found_tgtdev:
	mpi3mr_tgtdev_get(tgtdev);
	return tgtdev;
}

/**
 * mpi3mr_get_tgtdev_by_perst_id - Get tgtdev from persistent ID
 * @mrioc: Adapter instance reference
 * @persist_id: Persistent ID
 *
 * Accessor to retrieve target device from the Persistent ID.
 * Lock version.
 *
 * Return: Target device reference.
 */
static struct mpi3mr_tgt_dev *mpi3mr_get_tgtdev_by_perst_id(
	struct mpi3mr_ioc *mrioc, u16 persist_id)
{
	struct mpi3mr_tgt_dev *tgtdev;
	unsigned long flags;

	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
	tgtdev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, persist_id);
	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
	return tgtdev;
}

/**
 * __mpi3mr_get_tgtdev_from_tgtpriv - Get tgtdev from tgt private
 * @mrioc: Adapter instance reference
 * @tgt_priv: Target private data
 *
 * Accessor to return target device from the target private
 * data. Non-lock version.
 *
 * Return: Target device reference.
 */
static struct mpi3mr_tgt_dev *__mpi3mr_get_tgtdev_from_tgtpriv(
	struct mpi3mr_ioc *mrioc, struct mpi3mr_stgt_priv_data *tgt_priv)
{
	struct mpi3mr_tgt_dev *tgtdev;

	assert_spin_locked(&mrioc->tgtdev_lock);
	tgtdev = tgt_priv->tgt_dev;
	if (tgtdev)
		mpi3mr_tgtdev_get(tgtdev);
	return tgtdev;
}

/**
 * mpi3mr_set_io_divert_for_all_vd_in_tg - set divert for TG VDs
 * @mrioc: Adapter instance reference
 * @tg: Throttle group information pointer
 * @divert_value: 1 or 0
 *
 * Accessor to set the io_divert flag for each device associated
 * with the given throttle group to the given value.
 *
 * Return: None.
 */
static void mpi3mr_set_io_divert_for_all_vd_in_tg(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_throttle_group_info *tg, u8 divert_value)
{
	unsigned long flags;
	struct mpi3mr_tgt_dev *tgtdev;
	struct mpi3mr_stgt_priv_data *tgt_priv;

	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
	list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
		if (tgtdev->starget && tgtdev->starget->hostdata) {
			tgt_priv = tgtdev->starget->hostdata;
			if (tgt_priv->throttle_group == tg)
				tgt_priv->io_divert = divert_value;
		}
	}
	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
}
"addition" : "removal")); 896 ioc_notice(mrioc, "completed after reset, verify whether the exposed devices\n"); 897 ioc_notice(mrioc, "are matched with attached devices for correctness\n"); 898 } 899 900 /** 901 * mpi3mr_remove_tgtdev_from_host - Remove dev from upper layers 902 * @mrioc: Adapter instance reference 903 * @tgtdev: Target device structure 904 * 905 * Checks whether the device is exposed to upper layers and if it 906 * is then remove the device from upper layers by calling 907 * scsi_remove_target(). 908 * 909 * Return: 0 on success, non zero on failure. 910 */ 911 void mpi3mr_remove_tgtdev_from_host(struct mpi3mr_ioc *mrioc, 912 struct mpi3mr_tgt_dev *tgtdev) 913 { 914 struct mpi3mr_stgt_priv_data *tgt_priv; 915 916 ioc_info(mrioc, "%s :Removing handle(0x%04x), wwid(0x%016llx)\n", 917 __func__, tgtdev->dev_handle, (unsigned long long)tgtdev->wwid); 918 if (tgtdev->starget && tgtdev->starget->hostdata) { 919 tgt_priv = tgtdev->starget->hostdata; 920 atomic_set(&tgt_priv->block_io, 0); 921 tgt_priv->dev_handle = MPI3MR_INVALID_DEV_HANDLE; 922 } 923 924 if (!mrioc->sas_transport_enabled || (tgtdev->dev_type != 925 MPI3_DEVICE_DEVFORM_SAS_SATA) || tgtdev->non_stl) { 926 if (tgtdev->starget) { 927 if (mrioc->current_event) 928 mrioc->current_event->pending_at_sml = 1; 929 scsi_remove_target(&tgtdev->starget->dev); 930 tgtdev->host_exposed = 0; 931 if (mrioc->current_event) { 932 mrioc->current_event->pending_at_sml = 0; 933 if (mrioc->current_event->discard) { 934 mpi3mr_print_device_event_notice(mrioc, 935 false); 936 return; 937 } 938 } 939 } 940 } else 941 mpi3mr_remove_tgtdev_from_sas_transport(mrioc, tgtdev); 942 mpi3mr_global_trigger(mrioc, 943 MPI3_DRIVER2_GLOBALTRIGGER_DEVICE_REMOVAL_ENABLED); 944 945 ioc_info(mrioc, "%s :Removed handle(0x%04x), wwid(0x%016llx)\n", 946 __func__, tgtdev->dev_handle, (unsigned long long)tgtdev->wwid); 947 } 948 949 /** 950 * mpi3mr_report_tgtdev_to_host - Expose device to upper layers 951 * @mrioc: Adapter instance reference 952 * @perst_id: Persistent ID of the device 953 * 954 * Checks whether the device can be exposed to upper layers and 955 * if it is not then expose the device to upper layers by 956 * calling scsi_scan_target(). 957 * 958 * Return: 0 on success, non zero on failure. 
/**
 * mpi3mr_report_tgtdev_to_host - Expose device to upper layers
 * @mrioc: Adapter instance reference
 * @perst_id: Persistent ID of the device
 *
 * Checks whether the device can be exposed to the upper layers
 * and, if it is not already exposed, exposes the device to the
 * upper layers by calling scsi_scan_target().
 *
 * Return: 0 on success, non zero on failure.
 */
static int mpi3mr_report_tgtdev_to_host(struct mpi3mr_ioc *mrioc,
	u16 perst_id)
{
	int retval = 0;
	struct mpi3mr_tgt_dev *tgtdev;

	if (mrioc->reset_in_progress || mrioc->pci_err_recovery)
		return -1;

	tgtdev = mpi3mr_get_tgtdev_by_perst_id(mrioc, perst_id);
	if (!tgtdev) {
		retval = -1;
		goto out;
	}
	if (tgtdev->is_hidden || tgtdev->host_exposed) {
		retval = -1;
		goto out;
	}
	if (!mrioc->sas_transport_enabled || (tgtdev->dev_type !=
	    MPI3_DEVICE_DEVFORM_SAS_SATA) || tgtdev->non_stl) {
		tgtdev->host_exposed = 1;
		if (mrioc->current_event)
			mrioc->current_event->pending_at_sml = 1;
		scsi_scan_target(&mrioc->shost->shost_gendev,
		    mrioc->scsi_device_channel, tgtdev->perst_id,
		    SCAN_WILD_CARD, SCSI_SCAN_INITIAL);
		if (!tgtdev->starget)
			tgtdev->host_exposed = 0;
		if (mrioc->current_event) {
			mrioc->current_event->pending_at_sml = 0;
			if (mrioc->current_event->discard) {
				mpi3mr_print_device_event_notice(mrioc, true);
				goto out;
			}
		}
		dprint_event_bh(mrioc,
		    "exposed target device with handle(0x%04x), perst_id(%d)\n",
		    tgtdev->dev_handle, perst_id);
		goto out;
	} else
		mpi3mr_report_tgtdev_to_sas_transport(mrioc, tgtdev);
out:
	if (tgtdev)
		mpi3mr_tgtdev_put(tgtdev);

	return retval;
}

/**
 * mpi3mr_change_queue_depth - Change QD callback handler
 * @sdev: SCSI device reference
 * @q_depth: Queue depth
 *
 * Validate and limit QD and call scsi_change_queue_depth.
 *
 * Return: return value of scsi_change_queue_depth
 */
static int mpi3mr_change_queue_depth(struct scsi_device *sdev,
	int q_depth)
{
	struct scsi_target *starget = scsi_target(sdev);
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	int retval = 0;

	if (!sdev->tagged_supported)
		q_depth = 1;
	if (q_depth > shost->can_queue)
		q_depth = shost->can_queue;
	else if (!q_depth)
		q_depth = MPI3MR_DEFAULT_SDEV_QD;
	retval = scsi_change_queue_depth(sdev, q_depth);
	sdev->max_queue_depth = sdev->queue_depth;

	return retval;
}

static void mpi3mr_configure_nvme_dev(struct mpi3mr_tgt_dev *tgt_dev,
	struct queue_limits *lim)
{
	u8 pgsz = tgt_dev->dev_spec.pcie_inf.pgsz ? : MPI3MR_DEFAULT_PGSZEXP;

	lim->max_hw_sectors = tgt_dev->dev_spec.pcie_inf.mdts / 512;
	lim->virt_boundary_mask = (1 << pgsz) - 1;
}

static void mpi3mr_configure_tgt_dev(struct mpi3mr_tgt_dev *tgt_dev,
	struct queue_limits *lim)
{
	if (tgt_dev->dev_type == MPI3_DEVICE_DEVFORM_PCIE &&
	    (tgt_dev->dev_spec.pcie_inf.dev_info &
	     MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) ==
	     MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE)
		mpi3mr_configure_nvme_dev(tgt_dev, lim);
}
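/*
 * Worked example for the NVMe limits above: an MDTS of 1 MiB (the cap
 * applied in mpi3mr_update_tgtdev()) yields max_hw_sectors =
 * (1024 * 1024) / 512 = 2048, and a page size exponent of 12 (4 KiB)
 * yields virt_boundary_mask = (1 << 12) - 1 = 0xfff, i.e. SG elements
 * must not cross a 4 KiB boundary.
 */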
/**
 * mpi3mr_update_sdev - Update SCSI device information
 * @sdev: SCSI device reference
 * @data: target device reference
 *
 * This is an iterator function called for each SCSI device in a
 * target to update the target specific information into each
 * SCSI device.
 *
 * Return: Nothing.
 */
static void
mpi3mr_update_sdev(struct scsi_device *sdev, void *data)
{
	struct mpi3mr_tgt_dev *tgtdev;
	struct queue_limits lim;

	tgtdev = (struct mpi3mr_tgt_dev *)data;
	if (!tgtdev)
		return;

	mpi3mr_change_queue_depth(sdev, tgtdev->q_depth);

	lim = queue_limits_start_update(sdev->request_queue);
	mpi3mr_configure_tgt_dev(tgtdev, &lim);
	WARN_ON_ONCE(queue_limits_commit_update(sdev->request_queue, &lim));
}

/**
 * mpi3mr_refresh_tgtdevs - Refresh target device exposure
 * @mrioc: Adapter instance reference
 *
 * This is executed post controller reset to identify any
 * missing devices during reset and remove from the upper layers
 * or expose any newly detected device to the upper layers.
 *
 * Return: Nothing.
 */
static void mpi3mr_refresh_tgtdevs(struct mpi3mr_ioc *mrioc)
{
	struct mpi3mr_tgt_dev *tgtdev, *tgtdev_next;
	struct mpi3mr_stgt_priv_data *tgt_priv;

	dprint_reset(mrioc, "refresh target devices: check for removals\n");
	list_for_each_entry_safe(tgtdev, tgtdev_next, &mrioc->tgtdev_list,
	    list) {
		if (((tgtdev->dev_handle == MPI3MR_INVALID_DEV_HANDLE) ||
		     tgtdev->is_hidden) &&
		     tgtdev->host_exposed && tgtdev->starget &&
		     tgtdev->starget->hostdata) {
			tgt_priv = tgtdev->starget->hostdata;
			tgt_priv->dev_removed = 1;
			atomic_set(&tgt_priv->block_io, 0);
		}
	}

	list_for_each_entry_safe(tgtdev, tgtdev_next, &mrioc->tgtdev_list,
	    list) {
		if (tgtdev->dev_handle == MPI3MR_INVALID_DEV_HANDLE) {
			dprint_reset(mrioc, "removing target device with perst_id(%d)\n",
			    tgtdev->perst_id);
			if (tgtdev->host_exposed)
				mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
			mpi3mr_tgtdev_del_from_list(mrioc, tgtdev, true);
			mpi3mr_tgtdev_put(tgtdev);
		} else if (tgtdev->is_hidden && tgtdev->host_exposed) {
			dprint_reset(mrioc, "hiding target device with perst_id(%d)\n",
			    tgtdev->perst_id);
			mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
		}
	}

	tgtdev = NULL;
	list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
		if ((tgtdev->dev_handle != MPI3MR_INVALID_DEV_HANDLE) &&
		    !tgtdev->is_hidden) {
			if (!tgtdev->host_exposed)
				mpi3mr_report_tgtdev_to_host(mrioc,
				    tgtdev->perst_id);
			else if (tgtdev->starget)
				starget_for_each_device(tgtdev->starget,
				    (void *)tgtdev, mpi3mr_update_sdev);
		}
	}
}
/**
 * mpi3mr_update_tgtdev - Update target device information
 * @mrioc: Adapter instance reference
 * @tgtdev: Target device internal structure
 * @dev_pg0: New device page0
 * @is_added: Flag to indicate the device is just added
 *
 * Update the information from the device page0 into the driver
 * cached target device structure.
 *
 * Return: Nothing.
 */
static void mpi3mr_update_tgtdev(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_tgt_dev *tgtdev, struct mpi3_device_page0 *dev_pg0,
	bool is_added)
{
	u16 flags = 0;
	struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL;
	struct mpi3mr_enclosure_node *enclosure_dev = NULL;
	u8 prot_mask = 0;

	tgtdev->perst_id = le16_to_cpu(dev_pg0->persistent_id);
	tgtdev->dev_handle = le16_to_cpu(dev_pg0->dev_handle);
	tgtdev->dev_type = dev_pg0->device_form;
	tgtdev->io_unit_port = dev_pg0->io_unit_port;
	tgtdev->encl_handle = le16_to_cpu(dev_pg0->enclosure_handle);
	tgtdev->parent_handle = le16_to_cpu(dev_pg0->parent_dev_handle);
	tgtdev->slot = le16_to_cpu(dev_pg0->slot);
	tgtdev->q_depth = le16_to_cpu(dev_pg0->queue_depth);
	tgtdev->wwid = le64_to_cpu(dev_pg0->wwid);
	tgtdev->devpg0_flag = le16_to_cpu(dev_pg0->flags);

	if (tgtdev->encl_handle)
		enclosure_dev = mpi3mr_enclosure_find_by_handle(mrioc,
		    tgtdev->encl_handle);
	if (enclosure_dev)
		tgtdev->enclosure_logical_id = le64_to_cpu(
		    enclosure_dev->pg0.enclosure_logical_id);

	flags = tgtdev->devpg0_flag;

	tgtdev->is_hidden = (flags & MPI3_DEVICE0_FLAGS_HIDDEN);

	if (is_added == true)
		tgtdev->io_throttle_enabled =
		    (flags & MPI3_DEVICE0_FLAGS_IO_THROTTLING_REQUIRED) ? 1 : 0;
	if (!mrioc->sas_transport_enabled)
		tgtdev->non_stl = 1;

	switch (flags & MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_MASK) {
	case MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_256_LB:
		tgtdev->wslen = MPI3MR_WRITE_SAME_MAX_LEN_256_BLKS;
		break;
	case MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_2048_LB:
		tgtdev->wslen = MPI3MR_WRITE_SAME_MAX_LEN_2048_BLKS;
		break;
	case MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_NO_LIMIT:
	default:
		tgtdev->wslen = 0;
		break;
	}

	if (tgtdev->starget && tgtdev->starget->hostdata) {
		scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *)
		    tgtdev->starget->hostdata;
		scsi_tgt_priv_data->perst_id = tgtdev->perst_id;
		scsi_tgt_priv_data->dev_handle = tgtdev->dev_handle;
		scsi_tgt_priv_data->dev_type = tgtdev->dev_type;
		scsi_tgt_priv_data->io_throttle_enabled =
		    tgtdev->io_throttle_enabled;
		if (is_added == true)
			atomic_set(&scsi_tgt_priv_data->block_io, 0);
		scsi_tgt_priv_data->wslen = tgtdev->wslen;
	}

	switch (dev_pg0->access_status) {
	case MPI3_DEVICE0_ASTATUS_NO_ERRORS:
	case MPI3_DEVICE0_ASTATUS_PREPARE:
	case MPI3_DEVICE0_ASTATUS_NEEDS_INITIALIZATION:
	case MPI3_DEVICE0_ASTATUS_DEVICE_MISSING_DELAY:
		break;
	default:
		tgtdev->is_hidden = 1;
		break;
	}

	switch (tgtdev->dev_type) {
	case MPI3_DEVICE_DEVFORM_SAS_SATA:
	{
		struct mpi3_device0_sas_sata_format *sasinf =
		    &dev_pg0->device_specific.sas_sata_format;
		u16 dev_info = le16_to_cpu(sasinf->device_info);

		tgtdev->dev_spec.sas_sata_inf.dev_info = dev_info;
		tgtdev->dev_spec.sas_sata_inf.sas_address =
		    le64_to_cpu(sasinf->sas_address);
		tgtdev->dev_spec.sas_sata_inf.phy_id = sasinf->phy_num;
		tgtdev->dev_spec.sas_sata_inf.attached_phy_id =
		    sasinf->attached_phy_identifier;
		if ((dev_info & MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_MASK) !=
		    MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_END_DEVICE)
			tgtdev->is_hidden = 1;
		else if (!(dev_info & (MPI3_SAS_DEVICE_INFO_STP_SATA_TARGET |
		    MPI3_SAS_DEVICE_INFO_SSP_TARGET)))
			tgtdev->is_hidden = 1;
		if (((tgtdev->devpg0_flag &
		    MPI3_DEVICE0_FLAGS_ATT_METHOD_DIR_ATTACHED)
		    && (tgtdev->devpg0_flag &
		    MPI3_DEVICE0_FLAGS_ATT_METHOD_VIRTUAL)) ||
		    (tgtdev->parent_handle == 0xFFFF))
			tgtdev->non_stl = 1;
		if (tgtdev->dev_spec.sas_sata_inf.hba_port)
			tgtdev->dev_spec.sas_sata_inf.hba_port->port_id =
			    dev_pg0->io_unit_port;
		break;
	}
	case MPI3_DEVICE_DEVFORM_PCIE:
	{
		struct mpi3_device0_pcie_format *pcieinf =
		    &dev_pg0->device_specific.pcie_format;
		u16 dev_info = le16_to_cpu(pcieinf->device_info);

		tgtdev->dev_spec.pcie_inf.dev_info = dev_info;
		tgtdev->dev_spec.pcie_inf.capb =
		    le32_to_cpu(pcieinf->capabilities);
		tgtdev->dev_spec.pcie_inf.mdts = MPI3MR_DEFAULT_MDTS;
		/* 2^12 = 4096 */
		tgtdev->dev_spec.pcie_inf.pgsz = 12;
		if (dev_pg0->access_status == MPI3_DEVICE0_ASTATUS_NO_ERRORS) {
			tgtdev->dev_spec.pcie_inf.mdts =
			    le32_to_cpu(pcieinf->maximum_data_transfer_size);
			tgtdev->dev_spec.pcie_inf.pgsz = pcieinf->page_size;
			tgtdev->dev_spec.pcie_inf.reset_to =
			    max_t(u8, pcieinf->controller_reset_to,
			    MPI3MR_INTADMCMD_TIMEOUT);
			tgtdev->dev_spec.pcie_inf.abort_to =
			    max_t(u8, pcieinf->nvme_abort_to,
			    MPI3MR_INTADMCMD_TIMEOUT);
		}
		if (tgtdev->dev_spec.pcie_inf.mdts > (1024 * 1024))
			tgtdev->dev_spec.pcie_inf.mdts = (1024 * 1024);
		if (((dev_info & MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) !=
		    MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) &&
		    ((dev_info & MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) !=
		    MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_SCSI_DEVICE))
			tgtdev->is_hidden = 1;
		tgtdev->non_stl = 1;
		if (!mrioc->shost)
			break;
		prot_mask = scsi_host_get_prot(mrioc->shost);
		if (prot_mask & SHOST_DIX_TYPE0_PROTECTION) {
			scsi_host_set_prot(mrioc->shost, prot_mask & 0x77);
			ioc_info(mrioc,
			    "%s : Disabling DIX0 prot capability\n", __func__);
			ioc_info(mrioc,
			    "because HBA does not support DIX0 operation on NVME drives\n");
		}
		break;
	}
	case MPI3_DEVICE_DEVFORM_VD:
	{
		struct mpi3_device0_vd_format *vdinf =
		    &dev_pg0->device_specific.vd_format;
		struct mpi3mr_throttle_group_info *tg = NULL;
		u16 vdinf_io_throttle_group =
		    le16_to_cpu(vdinf->io_throttle_group);

		tgtdev->dev_spec.vd_inf.state = vdinf->vd_state;
		if (vdinf->vd_state == MPI3_DEVICE0_VD_STATE_OFFLINE)
			tgtdev->is_hidden = 1;
		tgtdev->non_stl = 1;
		tgtdev->dev_spec.vd_inf.reset_to =
		    max_t(u8, vdinf->vd_reset_to,
		    MPI3MR_INTADMCMD_TIMEOUT);
		tgtdev->dev_spec.vd_inf.abort_to =
		    max_t(u8, vdinf->vd_abort_to,
		    MPI3MR_INTADMCMD_TIMEOUT);
		tgtdev->dev_spec.vd_inf.tg_id = vdinf_io_throttle_group;
		tgtdev->dev_spec.vd_inf.tg_high =
		    le16_to_cpu(vdinf->io_throttle_group_high) * 2048;
		tgtdev->dev_spec.vd_inf.tg_low =
		    le16_to_cpu(vdinf->io_throttle_group_low) * 2048;
		if (vdinf_io_throttle_group < mrioc->num_io_throttle_group) {
			tg = mrioc->throttle_groups + vdinf_io_throttle_group;
			tg->id = vdinf_io_throttle_group;
			tg->high = tgtdev->dev_spec.vd_inf.tg_high;
			tg->low = tgtdev->dev_spec.vd_inf.tg_low;
			tg->qd_reduction =
			    tgtdev->dev_spec.vd_inf.tg_qd_reduction;
			if (is_added == true)
				tg->fw_qd = tgtdev->q_depth;
			tg->modified_qd = tgtdev->q_depth;
		}
		tgtdev->dev_spec.vd_inf.tg = tg;
		if (scsi_tgt_priv_data)
			scsi_tgt_priv_data->throttle_group = tg;
		break;
	}
	default:
		break;
	}
}

/**
 * mpi3mr_devstatuschg_evt_bh - DevStatusChange evt bottomhalf
 * @mrioc: Adapter instance reference
 * @fwevt: Firmware event information.
 *
 * Process Device status Change event and based on device's new
 * information, either expose the device to the upper layers, or
 * remove the device from upper layers.
 *
 * Return: Nothing.
 */
static void mpi3mr_devstatuschg_evt_bh(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_fwevt *fwevt)
{
	u16 dev_handle = 0;
	u8 uhide = 0, delete = 0, cleanup = 0;
	struct mpi3mr_tgt_dev *tgtdev = NULL;
	struct mpi3_event_data_device_status_change *evtdata =
	    (struct mpi3_event_data_device_status_change *)fwevt->event_data;

	dev_handle = le16_to_cpu(evtdata->dev_handle);
	dprint_event_bh(mrioc,
	    "processing device status change event bottom half for handle(0x%04x), rc(0x%02x)\n",
	    dev_handle, evtdata->reason_code);
	switch (evtdata->reason_code) {
	case MPI3_EVENT_DEV_STAT_RC_HIDDEN:
		delete = 1;
		break;
	case MPI3_EVENT_DEV_STAT_RC_NOT_HIDDEN:
		uhide = 1;
		break;
	case MPI3_EVENT_DEV_STAT_RC_VD_NOT_RESPONDING:
		delete = 1;
		cleanup = 1;
		break;
	default:
		ioc_info(mrioc, "%s :Unhandled reason code(0x%x)\n", __func__,
		    evtdata->reason_code);
		break;
	}

	tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle);
	if (!tgtdev) {
		dprint_event_bh(mrioc,
		    "processing device status change event bottom half,\n"
		    "cannot identify target device for handle(0x%04x), rc(0x%02x)\n",
		    dev_handle, evtdata->reason_code);
		goto out;
	}
	if (uhide) {
		tgtdev->is_hidden = 0;
		if (!tgtdev->host_exposed)
			mpi3mr_report_tgtdev_to_host(mrioc, tgtdev->perst_id);
	}

	if (delete)
		mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);

	if (cleanup) {
		mpi3mr_tgtdev_del_from_list(mrioc, tgtdev, false);
		mpi3mr_tgtdev_put(tgtdev);
	}

out:
	if (tgtdev)
		mpi3mr_tgtdev_put(tgtdev);
}
/**
 * mpi3mr_devinfochg_evt_bh - DeviceInfoChange evt bottomhalf
 * @mrioc: Adapter instance reference
 * @dev_pg0: New device page0
 *
 * Process Device Info Change event and based on device's new
 * information, either expose the device to the upper layers, or
 * remove the device from upper layers or update the details of
 * the device.
 *
 * Return: Nothing.
 */
static void mpi3mr_devinfochg_evt_bh(struct mpi3mr_ioc *mrioc,
	struct mpi3_device_page0 *dev_pg0)
{
	struct mpi3mr_tgt_dev *tgtdev = NULL;
	u16 dev_handle = 0, perst_id = 0;

	perst_id = le16_to_cpu(dev_pg0->persistent_id);
	dev_handle = le16_to_cpu(dev_pg0->dev_handle);
	dprint_event_bh(mrioc,
	    "processing device info change event bottom half for handle(0x%04x), perst_id(%d)\n",
	    dev_handle, perst_id);
	tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle);
	if (!tgtdev) {
		dprint_event_bh(mrioc,
		    "cannot identify target device for device info\n"
		    "change event handle(0x%04x), perst_id(%d)\n",
		    dev_handle, perst_id);
		goto out;
	}
	mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0, false);
	if (!tgtdev->is_hidden && !tgtdev->host_exposed)
		mpi3mr_report_tgtdev_to_host(mrioc, perst_id);
	if (tgtdev->is_hidden && tgtdev->host_exposed)
		mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
	if (!tgtdev->is_hidden && tgtdev->host_exposed && tgtdev->starget)
		starget_for_each_device(tgtdev->starget, (void *)tgtdev,
		    mpi3mr_update_sdev);
out:
	if (tgtdev)
		mpi3mr_tgtdev_put(tgtdev);
}

/**
 * mpi3mr_free_enclosure_list - release enclosures
 * @mrioc: Adapter instance reference
 *
 * Free memory allocated during enclosure add.
 *
 * Return: Nothing.
 */
void mpi3mr_free_enclosure_list(struct mpi3mr_ioc *mrioc)
{
	struct mpi3mr_enclosure_node *enclosure_dev, *enclosure_dev_next;

	list_for_each_entry_safe(enclosure_dev,
	    enclosure_dev_next, &mrioc->enclosure_list, list) {
		list_del(&enclosure_dev->list);
		kfree(enclosure_dev);
	}
}
/**
 * mpi3mr_enclosure_find_by_handle - enclosure search by handle
 * @mrioc: Adapter instance reference
 * @handle: Firmware device handle of the enclosure
 *
 * This searches for the enclosure device based on the handle
 * and returns the enclosure object.
 *
 * Return: Enclosure object reference or NULL
 */
struct mpi3mr_enclosure_node *mpi3mr_enclosure_find_by_handle(
	struct mpi3mr_ioc *mrioc, u16 handle)
{
	struct mpi3mr_enclosure_node *enclosure_dev, *r = NULL;

	list_for_each_entry(enclosure_dev, &mrioc->enclosure_list, list) {
		if (le16_to_cpu(enclosure_dev->pg0.enclosure_handle) != handle)
			continue;
		r = enclosure_dev;
		goto out;
	}
out:
	return r;
}

/**
 * mpi3mr_process_trigger_data_event_bh - Process trigger event
 * data
 * @mrioc: Adapter instance reference
 * @event_data: Event data
 *
 * This function releases diag buffers or issues a diag fault
 * based on trigger conditions.
 *
 * Return: Nothing
 */
static void mpi3mr_process_trigger_data_event_bh(struct mpi3mr_ioc *mrioc,
	struct trigger_event_data *event_data)
{
	struct diag_buffer_desc *trace_hdb = event_data->trace_hdb;
	struct diag_buffer_desc *fw_hdb = event_data->fw_hdb;
	unsigned long flags;
	int retval = 0;
	u8 trigger_type = event_data->trigger_type;
	union mpi3mr_trigger_data *trigger_data =
	    &event_data->trigger_specific_data;

	if (event_data->snapdump) {
		if (trace_hdb)
			mpi3mr_set_trigger_data_in_hdb(trace_hdb, trigger_type,
			    trigger_data, 1);
		if (fw_hdb)
			mpi3mr_set_trigger_data_in_hdb(fw_hdb, trigger_type,
			    trigger_data, 1);
		mpi3mr_soft_reset_handler(mrioc,
		    MPI3MR_RESET_FROM_TRIGGER, 1);
		return;
	}

	if (trace_hdb) {
		retval = mpi3mr_issue_diag_buf_release(mrioc, trace_hdb);
		if (!retval) {
			mpi3mr_set_trigger_data_in_hdb(trace_hdb, trigger_type,
			    trigger_data, 1);
		}
		spin_lock_irqsave(&mrioc->trigger_lock, flags);
		mrioc->trace_release_trigger_active = false;
		spin_unlock_irqrestore(&mrioc->trigger_lock, flags);
	}
	if (fw_hdb) {
		retval = mpi3mr_issue_diag_buf_release(mrioc, fw_hdb);
		if (!retval) {
			mpi3mr_set_trigger_data_in_hdb(fw_hdb, trigger_type,
			    trigger_data, 1);
		}
		spin_lock_irqsave(&mrioc->trigger_lock, flags);
		mrioc->fw_release_trigger_active = false;
		spin_unlock_irqrestore(&mrioc->trigger_lock, flags);
	}
}
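/*
 * Trigger processing above takes one of two paths: if a snapdump is
 * requested, the trigger data is stamped into the trace and firmware
 * host diag buffers and a soft reset is issued; otherwise the buffers
 * are released to the firmware and the per-buffer release-trigger
 * active flags are cleared under trigger_lock.
 */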
/**
 * mpi3mr_encldev_add_chg_evt_debug - debug for enclosure event
 * @mrioc: Adapter instance reference
 * @encl_pg0: Enclosure page 0.
 * @is_added: Added event or not
 *
 * Return: Nothing.
 */
static void mpi3mr_encldev_add_chg_evt_debug(struct mpi3mr_ioc *mrioc,
	struct mpi3_enclosure_page0 *encl_pg0, u8 is_added)
{
	char *reason_str = NULL;

	if (!(mrioc->logging_level & MPI3_DEBUG_EVENT_WORK_TASK))
		return;

	if (is_added)
		reason_str = "enclosure added";
	else
		reason_str = "enclosure dev status changed";

	ioc_info(mrioc,
	    "%s: handle(0x%04x), enclosure logical id(0x%016llx)\n",
	    reason_str, le16_to_cpu(encl_pg0->enclosure_handle),
	    (unsigned long long)le64_to_cpu(encl_pg0->enclosure_logical_id));
	ioc_info(mrioc,
	    "number of slots(%d), port(%d), flags(0x%04x), present(%d)\n",
	    le16_to_cpu(encl_pg0->num_slots), encl_pg0->io_unit_port,
	    le16_to_cpu(encl_pg0->flags),
	    ((le16_to_cpu(encl_pg0->flags) &
	      MPI3_ENCLS0_FLAGS_ENCL_DEV_PRESENT_MASK) >> 4));
}

/**
 * mpi3mr_encldev_add_chg_evt_bh - Enclosure evt bottomhalf
 * @mrioc: Adapter instance reference
 * @fwevt: Firmware event reference
 *
 * Prints information about the Enclosure device status or
 * Enclosure add events if logging is enabled and adds or
 * removes the enclosure from the controller's internal list of
 * enclosures.
 *
 * Return: Nothing.
 */
static void mpi3mr_encldev_add_chg_evt_bh(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_fwevt *fwevt)
{
	struct mpi3mr_enclosure_node *enclosure_dev = NULL;
	struct mpi3_enclosure_page0 *encl_pg0;
	u16 encl_handle;
	u8 added, present;

	encl_pg0 = (struct mpi3_enclosure_page0 *) fwevt->event_data;
	added = (fwevt->event_id == MPI3_EVENT_ENCL_DEVICE_ADDED) ? 1 : 0;
	mpi3mr_encldev_add_chg_evt_debug(mrioc, encl_pg0, added);

	encl_handle = le16_to_cpu(encl_pg0->enclosure_handle);
	present = ((le16_to_cpu(encl_pg0->flags) &
	    MPI3_ENCLS0_FLAGS_ENCL_DEV_PRESENT_MASK) >> 4);

	if (encl_handle)
		enclosure_dev = mpi3mr_enclosure_find_by_handle(mrioc,
		    encl_handle);
	if (!enclosure_dev && present) {
		enclosure_dev =
		    kzalloc(sizeof(struct mpi3mr_enclosure_node),
		    GFP_KERNEL);
		if (!enclosure_dev)
			return;
		list_add_tail(&enclosure_dev->list,
		    &mrioc->enclosure_list);
	}
	if (enclosure_dev) {
		if (!present) {
			list_del(&enclosure_dev->list);
			kfree(enclosure_dev);
		} else
			memcpy(&enclosure_dev->pg0, encl_pg0,
			    sizeof(enclosure_dev->pg0));
	}
}
/**
 * mpi3mr_sastopochg_evt_debug - SASTopoChange details
 * @mrioc: Adapter instance reference
 * @event_data: SAS topology change list event data
 *
 * Prints information about the SAS topology change event.
 *
 * Return: Nothing.
 */
static void
mpi3mr_sastopochg_evt_debug(struct mpi3mr_ioc *mrioc,
	struct mpi3_event_data_sas_topology_change_list *event_data)
{
	int i;
	u16 handle;
	u8 reason_code, phy_number;
	char *status_str = NULL;
	u8 link_rate, prev_link_rate;

	switch (event_data->exp_status) {
	case MPI3_EVENT_SAS_TOPO_ES_NOT_RESPONDING:
		status_str = "remove";
		break;
	case MPI3_EVENT_SAS_TOPO_ES_RESPONDING:
		status_str = "responding";
		break;
	case MPI3_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING:
		status_str = "remove delay";
		break;
	case MPI3_EVENT_SAS_TOPO_ES_NO_EXPANDER:
		status_str = "direct attached";
		break;
	default:
		status_str = "unknown status";
		break;
	}
	ioc_info(mrioc, "%s :sas topology change: (%s)\n",
	    __func__, status_str);
	ioc_info(mrioc,
	    "%s :\texpander_handle(0x%04x), port(%d), enclosure_handle(0x%04x) start_phy(%02d), num_entries(%d)\n",
	    __func__, le16_to_cpu(event_data->expander_dev_handle),
	    event_data->io_unit_port,
	    le16_to_cpu(event_data->enclosure_handle),
	    event_data->start_phy_num, event_data->num_entries);
	for (i = 0; i < event_data->num_entries; i++) {
		handle = le16_to_cpu(event_data->phy_entry[i].attached_dev_handle);
		if (!handle)
			continue;
		phy_number = event_data->start_phy_num + i;
		reason_code = event_data->phy_entry[i].status &
		    MPI3_EVENT_SAS_TOPO_PHY_RC_MASK;
		switch (reason_code) {
		case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING:
			status_str = "target remove";
			break;
		case MPI3_EVENT_SAS_TOPO_PHY_RC_DELAY_NOT_RESPONDING:
			status_str = "delay target remove";
			break;
		case MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED:
			status_str = "link status change";
			break;
		case MPI3_EVENT_SAS_TOPO_PHY_RC_NO_CHANGE:
			status_str = "link status no change";
			break;
		case MPI3_EVENT_SAS_TOPO_PHY_RC_RESPONDING:
			status_str = "target responding";
			break;
		default:
			status_str = "unknown";
			break;
		}
		link_rate = event_data->phy_entry[i].link_rate >> 4;
		prev_link_rate = event_data->phy_entry[i].link_rate & 0xF;
		ioc_info(mrioc,
		    "%s :\tphy(%02d), attached_handle(0x%04x): %s: link rate: new(0x%02x), old(0x%02x)\n",
		    __func__, phy_number, handle, status_str, link_rate,
		    prev_link_rate);
	}
}
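/*
 * Per the decoding above, each phy entry packs both negotiated link
 * rates into a single byte: the new rate in the upper nibble and the
 * previous rate in the lower nibble.
 */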
/**
 * mpi3mr_sastopochg_evt_bh - SASTopologyChange evt bottomhalf
 * @mrioc: Adapter instance reference
 * @fwevt: Firmware event reference
 *
 * Prints information about the SAS topology change event and
 * for "not responding" event code, removes the device from the
 * upper layers.
 *
 * Return: Nothing.
 */
static void mpi3mr_sastopochg_evt_bh(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_fwevt *fwevt)
{
	struct mpi3_event_data_sas_topology_change_list *event_data =
	    (struct mpi3_event_data_sas_topology_change_list *)fwevt->event_data;
	int i;
	u16 handle;
	u8 reason_code;
	u64 exp_sas_address = 0, parent_sas_address = 0;
	struct mpi3mr_hba_port *hba_port = NULL;
	struct mpi3mr_tgt_dev *tgtdev = NULL;
	struct mpi3mr_sas_node *sas_expander = NULL;
	unsigned long flags;
	u8 link_rate, prev_link_rate, parent_phy_number;

	mpi3mr_sastopochg_evt_debug(mrioc, event_data);
	if (mrioc->sas_transport_enabled) {
		hba_port = mpi3mr_get_hba_port_by_id(mrioc,
		    event_data->io_unit_port);
		if (le16_to_cpu(event_data->expander_dev_handle)) {
			spin_lock_irqsave(&mrioc->sas_node_lock, flags);
			sas_expander = __mpi3mr_expander_find_by_handle(mrioc,
			    le16_to_cpu(event_data->expander_dev_handle));
			if (sas_expander) {
				exp_sas_address = sas_expander->sas_address;
				hba_port = sas_expander->hba_port;
			}
			spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
			parent_sas_address = exp_sas_address;
		} else
			parent_sas_address = mrioc->sas_hba.sas_address;
	}

	for (i = 0; i < event_data->num_entries; i++) {
		if (fwevt->discard)
			return;
		handle = le16_to_cpu(event_data->phy_entry[i].attached_dev_handle);
		if (!handle)
			continue;
		tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);
		if (!tgtdev)
			continue;

		reason_code = event_data->phy_entry[i].status &
		    MPI3_EVENT_SAS_TOPO_PHY_RC_MASK;

		switch (reason_code) {
		case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING:
			if (tgtdev->host_exposed)
				mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
			mpi3mr_tgtdev_del_from_list(mrioc, tgtdev, false);
			mpi3mr_tgtdev_put(tgtdev);
			break;
		case MPI3_EVENT_SAS_TOPO_PHY_RC_RESPONDING:
		case MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED:
		case MPI3_EVENT_SAS_TOPO_PHY_RC_NO_CHANGE:
		{
			if (!mrioc->sas_transport_enabled || tgtdev->non_stl
			    || tgtdev->is_hidden)
				break;
			link_rate = event_data->phy_entry[i].link_rate >> 4;
			prev_link_rate = event_data->phy_entry[i].link_rate & 0xF;
			if (link_rate == prev_link_rate)
				break;
			if (!parent_sas_address)
				break;
			parent_phy_number = event_data->start_phy_num + i;
			mpi3mr_update_links(mrioc, parent_sas_address, handle,
			    parent_phy_number, link_rate, hba_port);
			break;
		}
		default:
			break;
		}
		if (tgtdev)
			mpi3mr_tgtdev_put(tgtdev);
	}

	if (mrioc->sas_transport_enabled && (event_data->exp_status ==
	    MPI3_EVENT_SAS_TOPO_ES_NOT_RESPONDING)) {
		if (sas_expander)
			mpi3mr_expander_remove(mrioc, exp_sas_address,
			    hba_port);
	}
}
1827 */ 1828 static void 1829 mpi3mr_pcietopochg_evt_debug(struct mpi3mr_ioc *mrioc, 1830 struct mpi3_event_data_pcie_topology_change_list *event_data) 1831 { 1832 int i; 1833 u16 handle; 1834 u16 reason_code; 1835 u8 port_number; 1836 char *status_str = NULL; 1837 u8 link_rate, prev_link_rate; 1838 1839 switch (event_data->switch_status) { 1840 case MPI3_EVENT_PCIE_TOPO_SS_NOT_RESPONDING: 1841 status_str = "remove"; 1842 break; 1843 case MPI3_EVENT_PCIE_TOPO_SS_RESPONDING: 1844 status_str = "responding"; 1845 break; 1846 case MPI3_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING: 1847 status_str = "remove delay"; 1848 break; 1849 case MPI3_EVENT_PCIE_TOPO_SS_NO_PCIE_SWITCH: 1850 status_str = "direct attached"; 1851 break; 1852 default: 1853 status_str = "unknown status"; 1854 break; 1855 } 1856 ioc_info(mrioc, "%s :pcie topology change: (%s)\n", 1857 __func__, status_str); 1858 ioc_info(mrioc, 1859 "%s :\tswitch_handle(0x%04x), enclosure_handle(0x%04x) start_port(%02d), num_entries(%d)\n", 1860 __func__, le16_to_cpu(event_data->switch_dev_handle), 1861 le16_to_cpu(event_data->enclosure_handle), 1862 event_data->start_port_num, event_data->num_entries); 1863 for (i = 0; i < event_data->num_entries; i++) { 1864 handle = 1865 le16_to_cpu(event_data->port_entry[i].attached_dev_handle); 1866 if (!handle) 1867 continue; 1868 port_number = event_data->start_port_num + i; 1869 reason_code = event_data->port_entry[i].port_status; 1870 switch (reason_code) { 1871 case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING: 1872 status_str = "target remove"; 1873 break; 1874 case MPI3_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING: 1875 status_str = "delay target remove"; 1876 break; 1877 case MPI3_EVENT_PCIE_TOPO_PS_PORT_CHANGED: 1878 status_str = "link status change"; 1879 break; 1880 case MPI3_EVENT_PCIE_TOPO_PS_NO_CHANGE: 1881 status_str = "link status no change"; 1882 break; 1883 case MPI3_EVENT_PCIE_TOPO_PS_RESPONDING: 1884 status_str = "target responding"; 1885 break; 1886 default: 1887 status_str = "unknown"; 1888 break; 1889 } 1890 link_rate = event_data->port_entry[i].current_port_info & 1891 MPI3_EVENT_PCIE_TOPO_PI_RATE_MASK; 1892 prev_link_rate = event_data->port_entry[i].previous_port_info & 1893 MPI3_EVENT_PCIE_TOPO_PI_RATE_MASK; 1894 ioc_info(mrioc, 1895 "%s :\tport(%02d), attached_handle(0x%04x): %s: link rate: new(0x%02x), old(0x%02x)\n", 1896 __func__, port_number, handle, status_str, link_rate, 1897 prev_link_rate); 1898 } 1899 } 1900 1901 /** 1902 * mpi3mr_pcietopochg_evt_bh - PCIeTopologyChange evt bottomhalf 1903 * @mrioc: Adapter instance reference 1904 * @fwevt: Firmware event reference 1905 * 1906 * Prints information about the PCIe topology change event and 1907 * for "not responding" event code, removes the device from the 1908 * upper layers. 1909 * 1910 * Return: Nothing. 
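 *
 * For reference, the PCIe port entries encode the negotiated rates
 * behind a mask; a minimal sketch of the decode used when these
 * events are logged:
 *
 *	link_rate = event_data->port_entry[i].current_port_info &
 *	    MPI3_EVENT_PCIE_TOPO_PI_RATE_MASK;
 *	prev_link_rate = event_data->port_entry[i].previous_port_info &
 *	    MPI3_EVENT_PCIE_TOPO_PI_RATE_MASK;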
1911 */ 1912 static void mpi3mr_pcietopochg_evt_bh(struct mpi3mr_ioc *mrioc, 1913 struct mpi3mr_fwevt *fwevt) 1914 { 1915 struct mpi3_event_data_pcie_topology_change_list *event_data = 1916 (struct mpi3_event_data_pcie_topology_change_list *)fwevt->event_data; 1917 int i; 1918 u16 handle; 1919 u8 reason_code; 1920 struct mpi3mr_tgt_dev *tgtdev = NULL; 1921 1922 mpi3mr_pcietopochg_evt_debug(mrioc, event_data); 1923 1924 for (i = 0; i < event_data->num_entries; i++) { 1925 if (fwevt->discard) 1926 return; 1927 handle = 1928 le16_to_cpu(event_data->port_entry[i].attached_dev_handle); 1929 if (!handle) 1930 continue; 1931 tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle); 1932 if (!tgtdev) 1933 continue; 1934 1935 reason_code = event_data->port_entry[i].port_status; 1936 1937 switch (reason_code) { 1938 case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING: 1939 if (tgtdev->host_exposed) 1940 mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev); 1941 mpi3mr_tgtdev_del_from_list(mrioc, tgtdev, false); 1942 mpi3mr_tgtdev_put(tgtdev); 1943 break; 1944 default: 1945 break; 1946 } 1947 if (tgtdev) 1948 mpi3mr_tgtdev_put(tgtdev); 1949 } 1950 } 1951 1952 /** 1953 * mpi3mr_logdata_evt_bh - Log data event bottomhalf 1954 * @mrioc: Adapter instance reference 1955 * @fwevt: Firmware event reference 1956 * 1957 * Extracts the event data and calls the application interfacing 1958 * function to process the event further. 1959 * 1960 * Return: Nothing. 1961 */ 1962 static void mpi3mr_logdata_evt_bh(struct mpi3mr_ioc *mrioc, 1963 struct mpi3mr_fwevt *fwevt) 1964 { 1965 mpi3mr_app_save_logdata(mrioc, fwevt->event_data, 1966 fwevt->event_data_size); 1967 } 1968 1969 /** 1970 * mpi3mr_update_sdev_qd - Update SCSI device queue depth 1971 * @sdev: SCSI device reference 1972 * @data: Queue depth reference 1973 * 1974 * This is an iterator function called for each SCSI device in a 1975 * target to update the QD of each SCSI device. 1976 * 1977 * Return: Nothing. 1978 */ 1979 static void mpi3mr_update_sdev_qd(struct scsi_device *sdev, void *data) 1980 { 1981 u16 *q_depth = (u16 *)data; 1982 1983 scsi_change_queue_depth(sdev, (int)*q_depth); 1984 sdev->max_queue_depth = sdev->queue_depth; 1985 } 1986 1987 /** 1988 * mpi3mr_set_qd_for_all_vd_in_tg - set QD for TG VDs 1989 * @mrioc: Adapter instance reference 1990 * @tg: Throttle group information pointer 1991 * 1992 * Accessor to reduce QD for each device associated with the 1993 * given throttle group. 1994 * 1995 * Return: None.
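 *
 * Illustrative caller sketch, mirroring the
 * MPI3MR_DRIVER_EVENT_TG_QD_REDUCTION handling in mpi3mr_fwevt_bh()
 * below:
 *
 *	tg = *(struct mpi3mr_throttle_group_info **)fwevt->event_data;
 *	if (tg->need_qd_reduction) {
 *		mpi3mr_set_qd_for_all_vd_in_tg(mrioc, tg);
 *		tg->need_qd_reduction = 0;
 *	}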
1996 */ 1997 static void mpi3mr_set_qd_for_all_vd_in_tg(struct mpi3mr_ioc *mrioc, 1998 struct mpi3mr_throttle_group_info *tg) 1999 { 2000 unsigned long flags; 2001 struct mpi3mr_tgt_dev *tgtdev; 2002 struct mpi3mr_stgt_priv_data *tgt_priv; 2003 2004 2005 spin_lock_irqsave(&mrioc->tgtdev_lock, flags); 2006 list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) { 2007 if (tgtdev->starget && tgtdev->starget->hostdata) { 2008 tgt_priv = tgtdev->starget->hostdata; 2009 if (tgt_priv->throttle_group == tg) { 2010 dprint_event_bh(mrioc, 2011 "updating qd due to throttling for persist_id(%d) original_qd(%d), reduced_qd (%d)\n", 2012 tgt_priv->perst_id, tgtdev->q_depth, 2013 tg->modified_qd); 2014 starget_for_each_device(tgtdev->starget, 2015 (void *)&tg->modified_qd, 2016 mpi3mr_update_sdev_qd); 2017 } 2018 } 2019 } 2020 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 2021 } 2022 2023 /** 2024 * mpi3mr_fwevt_bh - Firmware event bottomhalf handler 2025 * @mrioc: Adapter instance reference 2026 * @fwevt: Firmware event reference 2027 * 2028 * Identifies the firmware event and calls the corresponding bottom 2029 * half handler and sends event acknowledgment if required. 2030 * 2031 * Return: Nothing. 2032 */ 2033 static void mpi3mr_fwevt_bh(struct mpi3mr_ioc *mrioc, 2034 struct mpi3mr_fwevt *fwevt) 2035 { 2036 struct mpi3_device_page0 *dev_pg0 = NULL; 2037 u16 perst_id, handle, dev_info; 2038 struct mpi3_device0_sas_sata_format *sasinf = NULL; 2039 unsigned int timeout; 2040 2041 mpi3mr_fwevt_del_from_list(mrioc, fwevt); 2042 mrioc->current_event = fwevt; 2043 2044 if (mrioc->stop_drv_processing) { 2045 dprint_event_bh(mrioc, "ignoring event(0x%02x) in the bottom half handler\n" 2046 "due to stop_drv_processing\n", fwevt->event_id); 2047 goto out; 2048 } 2049 2050 if (mrioc->unrecoverable) { 2051 dprint_event_bh(mrioc, 2052 "ignoring event(0x%02x) in bottom half handler due to unrecoverable controller\n", 2053 fwevt->event_id); 2054 goto out; 2055 } 2056 2057 if (!fwevt->process_evt) 2058 goto evt_ack; 2059 2060 dprint_event_bh(mrioc, "processing event(0x%02x) -(0x%08x) in the bottom half handler\n", 2061 fwevt->event_id, fwevt->evt_ctx); 2062 2063 switch (fwevt->event_id) { 2064 case MPI3_EVENT_DEVICE_ADDED: 2065 { 2066 dev_pg0 = (struct mpi3_device_page0 *)fwevt->event_data; 2067 perst_id = le16_to_cpu(dev_pg0->persistent_id); 2068 handle = le16_to_cpu(dev_pg0->dev_handle); 2069 if (perst_id != MPI3_DEVICE0_PERSISTENTID_INVALID) 2070 mpi3mr_report_tgtdev_to_host(mrioc, perst_id); 2071 else if (mrioc->sas_transport_enabled && 2072 (dev_pg0->device_form == MPI3_DEVICE_DEVFORM_SAS_SATA)) { 2073 sasinf = &dev_pg0->device_specific.sas_sata_format; 2074 dev_info = le16_to_cpu(sasinf->device_info); 2075 if (!mrioc->sas_hba.num_phys) 2076 mpi3mr_sas_host_add(mrioc); 2077 else 2078 mpi3mr_sas_host_refresh(mrioc); 2079 2080 if (mpi3mr_is_expander_device(dev_info)) 2081 mpi3mr_expander_add(mrioc, handle); 2082 } 2083 break; 2084 } 2085 case MPI3_EVENT_DEVICE_INFO_CHANGED: 2086 { 2087 dev_pg0 = (struct mpi3_device_page0 *)fwevt->event_data; 2088 perst_id = le16_to_cpu(dev_pg0->persistent_id); 2089 if (perst_id != MPI3_DEVICE0_PERSISTENTID_INVALID) 2090 mpi3mr_devinfochg_evt_bh(mrioc, dev_pg0); 2091 break; 2092 } 2093 case MPI3_EVENT_DEVICE_STATUS_CHANGE: 2094 { 2095 mpi3mr_devstatuschg_evt_bh(mrioc, fwevt); 2096 break; 2097 } 2098 case MPI3_EVENT_ENCL_DEVICE_ADDED: 2099 case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE: 2100 { 2101 mpi3mr_encldev_add_chg_evt_bh(mrioc, fwevt); 2102 break; 2103 } 2104 2105 case
MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST: 2106 { 2107 mpi3mr_sastopochg_evt_bh(mrioc, fwevt); 2108 break; 2109 } 2110 case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST: 2111 { 2112 mpi3mr_pcietopochg_evt_bh(mrioc, fwevt); 2113 break; 2114 } 2115 case MPI3_EVENT_LOG_DATA: 2116 { 2117 mpi3mr_logdata_evt_bh(mrioc, fwevt); 2118 break; 2119 } 2120 case MPI3MR_DRIVER_EVENT_TG_QD_REDUCTION: 2121 { 2122 struct mpi3mr_throttle_group_info *tg; 2123 2124 tg = *(struct mpi3mr_throttle_group_info **)fwevt->event_data; 2125 dprint_event_bh(mrioc, 2126 "qd reduction event processed for tg_id(%d) reduction_needed(%d)\n", 2127 tg->id, tg->need_qd_reduction); 2128 if (tg->need_qd_reduction) { 2129 mpi3mr_set_qd_for_all_vd_in_tg(mrioc, tg); 2130 tg->need_qd_reduction = 0; 2131 } 2132 break; 2133 } 2134 case MPI3_EVENT_WAIT_FOR_DEVICES_TO_REFRESH: 2135 { 2136 timeout = MPI3MR_RESET_TIMEOUT * 2; 2137 while ((mrioc->device_refresh_on || mrioc->block_on_pci_err) && 2138 !mrioc->unrecoverable && !mrioc->pci_err_recovery) { 2139 msleep(500); 2140 if (!timeout--) { 2141 mrioc->unrecoverable = 1; 2142 break; 2143 } 2144 } 2145 2146 if (mrioc->unrecoverable || mrioc->pci_err_recovery) 2147 break; 2148 2149 dprint_event_bh(mrioc, 2150 "scan for non responding and newly added devices after soft reset started\n"); 2151 if (mrioc->sas_transport_enabled) { 2152 mpi3mr_refresh_sas_ports(mrioc); 2153 mpi3mr_refresh_expanders(mrioc); 2154 } 2155 mpi3mr_refresh_tgtdevs(mrioc); 2156 ioc_info(mrioc, 2157 "scan for non responding and newly added devices after soft reset completed\n"); 2158 break; 2159 } 2160 case MPI3MR_DRIVER_EVENT_PROCESS_TRIGGER: 2161 { 2162 mpi3mr_process_trigger_data_event_bh(mrioc, 2163 (struct trigger_event_data *)fwevt->event_data); 2164 break; 2165 } 2166 default: 2167 break; 2168 } 2169 2170 evt_ack: 2171 if (fwevt->send_ack) 2172 mpi3mr_process_event_ack(mrioc, fwevt->event_id, 2173 fwevt->evt_ctx); 2174 out: 2175 /* Put fwevt reference count to neutralize kref_init increment */ 2176 mpi3mr_fwevt_put(fwevt); 2177 mrioc->current_event = NULL; 2178 } 2179 2180 /** 2181 * mpi3mr_fwevt_worker - Firmware event worker 2182 * @work: Work struct containing firmware event 2183 * 2184 * Extracts the firmware event and calls mpi3mr_fwevt_bh. 2185 * 2186 * Return: Nothing. 2187 */ 2188 static void mpi3mr_fwevt_worker(struct work_struct *work) 2189 { 2190 struct mpi3mr_fwevt *fwevt = container_of(work, struct mpi3mr_fwevt, 2191 work); 2192 mpi3mr_fwevt_bh(fwevt->mrioc, fwevt); 2193 /* 2194 * Put fwevt reference count after 2195 * dequeuing it from worker queue 2196 */ 2197 mpi3mr_fwevt_put(fwevt); 2198 } 2199 2200 /** 2201 * mpi3mr_create_tgtdev - Create and add a target device 2202 * @mrioc: Adapter instance reference 2203 * @dev_pg0: Device Page 0 data 2204 * 2205 * If the device specified by the device page 0 data is not 2206 * present in the driver's internal list, allocate the memory 2207 * for the device, populate the data and add to the list, else 2208 * update the device data. The key is persistent ID. 
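 *
 * Illustrative caller sketch, mirroring the MPI3_EVENT_DEVICE_ADDED
 * top-half handling in mpi3mr_os_handle_events():
 *
 *	struct mpi3_device_page0 *dev_pg0 =
 *	    (struct mpi3_device_page0 *)event_reply->event_data;
 *
 *	if (mpi3mr_create_tgtdev(mrioc, dev_pg0))
 *		... log the failure ...
 *	else
 *		process_evt_bh = 1;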
2209 * 2210 * Return: 0 on success, -ENOMEM on memory allocation failure 2211 */ 2212 static int mpi3mr_create_tgtdev(struct mpi3mr_ioc *mrioc, 2213 struct mpi3_device_page0 *dev_pg0) 2214 { 2215 int retval = 0; 2216 struct mpi3mr_tgt_dev *tgtdev = NULL; 2217 u16 perst_id = 0; 2218 unsigned long flags; 2219 2220 perst_id = le16_to_cpu(dev_pg0->persistent_id); 2221 if (perst_id == MPI3_DEVICE0_PERSISTENTID_INVALID) 2222 return retval; 2223 2224 spin_lock_irqsave(&mrioc->tgtdev_lock, flags); 2225 tgtdev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, perst_id); 2226 if (tgtdev) 2227 tgtdev->state = MPI3MR_DEV_CREATED; 2228 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 2229 2230 if (tgtdev) { 2231 mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0, true); 2232 mpi3mr_tgtdev_put(tgtdev); 2233 } else { 2234 tgtdev = mpi3mr_alloc_tgtdev(); 2235 if (!tgtdev) 2236 return -ENOMEM; 2237 mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0, true); 2238 mpi3mr_tgtdev_add_to_list(mrioc, tgtdev); 2239 } 2240 2241 return retval; 2242 } 2243 2244 /** 2245 * mpi3mr_flush_delayed_cmd_lists - Flush pending commands 2246 * @mrioc: Adapter instance reference 2247 * 2248 * Flushes pending commands in the delayed lists due to a 2249 * controller reset or driver removal as a cleanup. 2250 * 2251 * Return: Nothing 2252 */ 2253 void mpi3mr_flush_delayed_cmd_lists(struct mpi3mr_ioc *mrioc) 2254 { 2255 struct delayed_dev_rmhs_node *_rmhs_node; 2256 struct delayed_evt_ack_node *_evtack_node; 2257 2258 dprint_reset(mrioc, "flushing delayed dev_remove_hs commands\n"); 2259 while (!list_empty(&mrioc->delayed_rmhs_list)) { 2260 _rmhs_node = list_entry(mrioc->delayed_rmhs_list.next, 2261 struct delayed_dev_rmhs_node, list); 2262 list_del(&_rmhs_node->list); 2263 kfree(_rmhs_node); 2264 } 2265 dprint_reset(mrioc, "flushing delayed event ack commands\n"); 2266 while (!list_empty(&mrioc->delayed_evtack_cmds_list)) { 2267 _evtack_node = list_entry(mrioc->delayed_evtack_cmds_list.next, 2268 struct delayed_evt_ack_node, list); 2269 list_del(&_evtack_node->list); 2270 kfree(_evtack_node); 2271 } 2272 } 2273 2274 /** 2275 * mpi3mr_dev_rmhs_complete_iou - Device removal IOUC completion 2276 * @mrioc: Adapter instance reference 2277 * @drv_cmd: Internal command tracker 2278 * 2279 * Issues a target reset TM to the firmware from the device 2280 * removal TM pend list or retries the removal handshake sequence 2281 * based on the IOU control request IOC status.
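 *
 * Sketch of the retry path taken on a non-success IOC status,
 * mirroring the body below:
 *
 *	if (drv_cmd->retry_count < MPI3MR_DEV_RMHS_RETRY_COUNT) {
 *		drv_cmd->retry_count++;
 *		mpi3mr_dev_rmhs_send_tm(mrioc, drv_cmd->dev_handle,
 *		    drv_cmd, drv_cmd->iou_rc);
 *	}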
2282 * 2283 * Return: Nothing 2284 */ 2285 static void mpi3mr_dev_rmhs_complete_iou(struct mpi3mr_ioc *mrioc, 2286 struct mpi3mr_drv_cmd *drv_cmd) 2287 { 2288 u16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN; 2289 struct delayed_dev_rmhs_node *delayed_dev_rmhs = NULL; 2290 2291 if (drv_cmd->state & MPI3MR_CMD_RESET) 2292 goto clear_drv_cmd; 2293 2294 ioc_info(mrioc, 2295 "%s :dev_rmhs_iouctrl_complete:handle(0x%04x), ioc_status(0x%04x), loginfo(0x%08x)\n", 2296 __func__, drv_cmd->dev_handle, drv_cmd->ioc_status, 2297 drv_cmd->ioc_loginfo); 2298 if (drv_cmd->ioc_status != MPI3_IOCSTATUS_SUCCESS) { 2299 if (drv_cmd->retry_count < MPI3MR_DEV_RMHS_RETRY_COUNT) { 2300 drv_cmd->retry_count++; 2301 ioc_info(mrioc, 2302 "%s :dev_rmhs_iouctrl_complete: handle(0x%04x)retrying handshake retry=%d\n", 2303 __func__, drv_cmd->dev_handle, 2304 drv_cmd->retry_count); 2305 mpi3mr_dev_rmhs_send_tm(mrioc, drv_cmd->dev_handle, 2306 drv_cmd, drv_cmd->iou_rc); 2307 return; 2308 } 2309 ioc_err(mrioc, 2310 "%s :dev removal handshake failed after all retries: handle(0x%04x)\n", 2311 __func__, drv_cmd->dev_handle); 2312 } else { 2313 ioc_info(mrioc, 2314 "%s :dev removal handshake completed successfully: handle(0x%04x)\n", 2315 __func__, drv_cmd->dev_handle); 2316 clear_bit(drv_cmd->dev_handle, mrioc->removepend_bitmap); 2317 } 2318 2319 if (!list_empty(&mrioc->delayed_rmhs_list)) { 2320 delayed_dev_rmhs = list_entry(mrioc->delayed_rmhs_list.next, 2321 struct delayed_dev_rmhs_node, list); 2322 drv_cmd->dev_handle = delayed_dev_rmhs->handle; 2323 drv_cmd->retry_count = 0; 2324 drv_cmd->iou_rc = delayed_dev_rmhs->iou_rc; 2325 ioc_info(mrioc, 2326 "%s :dev_rmhs_iouctrl_complete: processing delayed TM: handle(0x%04x)\n", 2327 __func__, drv_cmd->dev_handle); 2328 mpi3mr_dev_rmhs_send_tm(mrioc, drv_cmd->dev_handle, drv_cmd, 2329 drv_cmd->iou_rc); 2330 list_del(&delayed_dev_rmhs->list); 2331 kfree(delayed_dev_rmhs); 2332 return; 2333 } 2334 2335 clear_drv_cmd: 2336 drv_cmd->state = MPI3MR_CMD_NOTUSED; 2337 drv_cmd->callback = NULL; 2338 drv_cmd->retry_count = 0; 2339 drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE; 2340 clear_bit(cmd_idx, mrioc->devrem_bitmap); 2341 } 2342 2343 /** 2344 * mpi3mr_dev_rmhs_complete_tm - Device removal TM completion 2345 * @mrioc: Adapter instance reference 2346 * @drv_cmd: Internal command tracker 2347 * 2348 * Issues a target reset TM to the firmware from the device 2349 * removal TM pend list or issues an IO unit control request as 2350 * part of device removal or hidden acknowledgment handshake.
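 *
 * Sketch of the IO unit control request built here, mirroring the
 * body below:
 *
 *	iou_ctrl.operation = drv_cmd->iou_rc;
 *	iou_ctrl.param16[0] = cpu_to_le16(drv_cmd->dev_handle);
 *	iou_ctrl.host_tag = cpu_to_le16(drv_cmd->host_tag);
 *	iou_ctrl.function = MPI3_FUNCTION_IO_UNIT_CONTROL;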
2351 * 2352 * Return: Nothing 2353 */ 2354 static void mpi3mr_dev_rmhs_complete_tm(struct mpi3mr_ioc *mrioc, 2355 struct mpi3mr_drv_cmd *drv_cmd) 2356 { 2357 struct mpi3_iounit_control_request iou_ctrl; 2358 u16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN; 2359 struct mpi3_scsi_task_mgmt_reply *tm_reply = NULL; 2360 int retval; 2361 2362 if (drv_cmd->state & MPI3MR_CMD_RESET) 2363 goto clear_drv_cmd; 2364 2365 if (drv_cmd->state & MPI3MR_CMD_REPLY_VALID) 2366 tm_reply = (struct mpi3_scsi_task_mgmt_reply *)drv_cmd->reply; 2367 2368 if (tm_reply) 2369 pr_info(IOCNAME 2370 "dev_rmhs_tr_complete:handle(0x%04x), ioc_status(0x%04x), loginfo(0x%08x), term_count(%d)\n", 2371 mrioc->name, drv_cmd->dev_handle, drv_cmd->ioc_status, 2372 drv_cmd->ioc_loginfo, 2373 le32_to_cpu(tm_reply->termination_count)); 2374 2375 pr_info(IOCNAME "Issuing IOU CTL: handle(0x%04x) dev_rmhs idx(%d)\n", 2376 mrioc->name, drv_cmd->dev_handle, cmd_idx); 2377 2378 memset(&iou_ctrl, 0, sizeof(iou_ctrl)); 2379 2380 drv_cmd->state = MPI3MR_CMD_PENDING; 2381 drv_cmd->is_waiting = 0; 2382 drv_cmd->callback = mpi3mr_dev_rmhs_complete_iou; 2383 iou_ctrl.operation = drv_cmd->iou_rc; 2384 iou_ctrl.param16[0] = cpu_to_le16(drv_cmd->dev_handle); 2385 iou_ctrl.host_tag = cpu_to_le16(drv_cmd->host_tag); 2386 iou_ctrl.function = MPI3_FUNCTION_IO_UNIT_CONTROL; 2387 2388 retval = mpi3mr_admin_request_post(mrioc, &iou_ctrl, sizeof(iou_ctrl), 2389 1); 2390 if (retval) { 2391 pr_err(IOCNAME "Issue DevRmHsTMIOUCTL: Admin post failed\n", 2392 mrioc->name); 2393 goto clear_drv_cmd; 2394 } 2395 2396 return; 2397 clear_drv_cmd: 2398 drv_cmd->state = MPI3MR_CMD_NOTUSED; 2399 drv_cmd->callback = NULL; 2400 drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE; 2401 drv_cmd->retry_count = 0; 2402 clear_bit(cmd_idx, mrioc->devrem_bitmap); 2403 } 2404 2405 /** 2406 * mpi3mr_dev_rmhs_send_tm - Issue TM for device removal 2407 * @mrioc: Adapter instance reference 2408 * @handle: Device handle 2409 * @cmdparam: Internal command tracker 2410 * @iou_rc: IO unit reason code 2411 * 2412 * Issues a target reset TM to the firmware or adds it to a pend 2413 * list as part of device removal or hidden acknowledgment 2414 * handshake.
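 *
 * Typical usage from the topology change top-half handlers in this
 * file:
 *
 *	mpi3mr_dev_rmhs_send_tm(mrioc, handle, NULL,
 *	    MPI3_CTRL_OP_REMOVE_DEVICE);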
2415 * 2416 * Return: Nothing 2417 */ 2418 static void mpi3mr_dev_rmhs_send_tm(struct mpi3mr_ioc *mrioc, u16 handle, 2419 struct mpi3mr_drv_cmd *cmdparam, u8 iou_rc) 2420 { 2421 struct mpi3_scsi_task_mgmt_request tm_req; 2422 int retval = 0; 2423 u16 cmd_idx = MPI3MR_NUM_DEVRMCMD; 2424 u8 retrycount = 5; 2425 struct mpi3mr_drv_cmd *drv_cmd = cmdparam; 2426 struct delayed_dev_rmhs_node *delayed_dev_rmhs = NULL; 2427 struct mpi3mr_tgt_dev *tgtdev = NULL; 2428 unsigned long flags; 2429 2430 spin_lock_irqsave(&mrioc->tgtdev_lock, flags); 2431 tgtdev = __mpi3mr_get_tgtdev_by_handle(mrioc, handle); 2432 if (tgtdev && (iou_rc == MPI3_CTRL_OP_REMOVE_DEVICE)) 2433 tgtdev->state = MPI3MR_DEV_REMOVE_HS_STARTED; 2434 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 2435 2436 if (drv_cmd) 2437 goto issue_cmd; 2438 do { 2439 cmd_idx = find_first_zero_bit(mrioc->devrem_bitmap, 2440 MPI3MR_NUM_DEVRMCMD); 2441 if (cmd_idx < MPI3MR_NUM_DEVRMCMD) { 2442 if (!test_and_set_bit(cmd_idx, mrioc->devrem_bitmap)) 2443 break; 2444 cmd_idx = MPI3MR_NUM_DEVRMCMD; 2445 } 2446 } while (retrycount--); 2447 2448 if (cmd_idx >= MPI3MR_NUM_DEVRMCMD) { 2449 delayed_dev_rmhs = kzalloc(sizeof(*delayed_dev_rmhs), 2450 GFP_ATOMIC); 2451 if (!delayed_dev_rmhs) 2452 return; 2453 INIT_LIST_HEAD(&delayed_dev_rmhs->list); 2454 delayed_dev_rmhs->handle = handle; 2455 delayed_dev_rmhs->iou_rc = iou_rc; 2456 list_add_tail(&delayed_dev_rmhs->list, 2457 &mrioc->delayed_rmhs_list); 2458 ioc_info(mrioc, "%s :DevRmHs: tr:handle(0x%04x) is postponed\n", 2459 __func__, handle); 2460 return; 2461 } 2462 drv_cmd = &mrioc->dev_rmhs_cmds[cmd_idx]; 2463 2464 issue_cmd: 2465 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN; 2466 ioc_info(mrioc, 2467 "%s :Issuing TR TM: for devhandle 0x%04x with dev_rmhs %d\n", 2468 __func__, handle, cmd_idx); 2469 2470 memset(&tm_req, 0, sizeof(tm_req)); 2471 if (drv_cmd->state & MPI3MR_CMD_PENDING) { 2472 ioc_err(mrioc, "%s :Issue TM: Command is in use\n", __func__); 2473 goto out; 2474 } 2475 drv_cmd->state = MPI3MR_CMD_PENDING; 2476 drv_cmd->is_waiting = 0; 2477 drv_cmd->callback = mpi3mr_dev_rmhs_complete_tm; 2478 drv_cmd->dev_handle = handle; 2479 drv_cmd->iou_rc = iou_rc; 2480 tm_req.dev_handle = cpu_to_le16(handle); 2481 tm_req.task_type = MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET; 2482 tm_req.host_tag = cpu_to_le16(drv_cmd->host_tag); 2483 tm_req.task_host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INVALID); 2484 tm_req.function = MPI3_FUNCTION_SCSI_TASK_MGMT; 2485 2486 set_bit(handle, mrioc->removepend_bitmap); 2487 retval = mpi3mr_admin_request_post(mrioc, &tm_req, sizeof(tm_req), 1); 2488 if (retval) { 2489 ioc_err(mrioc, "%s :Issue DevRmHsTM: Admin Post failed\n", 2490 __func__); 2491 goto out_failed; 2492 } 2493 out: 2494 return; 2495 out_failed: 2496 drv_cmd->state = MPI3MR_CMD_NOTUSED; 2497 drv_cmd->callback = NULL; 2498 drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE; 2499 drv_cmd->retry_count = 0; 2500 clear_bit(cmd_idx, mrioc->devrem_bitmap); 2501 } 2502 2503 /** 2504 * mpi3mr_complete_evt_ack - event ack request completion 2505 * @mrioc: Adapter instance reference 2506 * @drv_cmd: Internal command tracker 2507 * 2508 * This is the completion handler for non-blocking event 2509 * acknowledgment sent to the firmware and it will issue any 2510 * pending event acknowledgment request.
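 *
 * Sketch of how a postponed acknowledgment is drained from the
 * delayed list, mirroring the body below:
 *
 *	delayed_evtack = list_entry(mrioc->delayed_evtack_cmds_list.next,
 *	    struct delayed_evt_ack_node, list);
 *	mpi3mr_send_event_ack(mrioc, delayed_evtack->event, drv_cmd,
 *	    delayed_evtack->event_ctx);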
2511 * 2512 * Return: Nothing 2513 */ 2514 static void mpi3mr_complete_evt_ack(struct mpi3mr_ioc *mrioc, 2515 struct mpi3mr_drv_cmd *drv_cmd) 2516 { 2517 u16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN; 2518 struct delayed_evt_ack_node *delayed_evtack = NULL; 2519 2520 if (drv_cmd->state & MPI3MR_CMD_RESET) 2521 goto clear_drv_cmd; 2522 2523 if (drv_cmd->ioc_status != MPI3_IOCSTATUS_SUCCESS) { 2524 dprint_event_th(mrioc, 2525 "immediate event ack failed with ioc_status(0x%04x) log_info(0x%08x)\n", 2526 (drv_cmd->ioc_status & MPI3_IOCSTATUS_STATUS_MASK), 2527 drv_cmd->ioc_loginfo); 2528 } 2529 2530 if (!list_empty(&mrioc->delayed_evtack_cmds_list)) { 2531 delayed_evtack = 2532 list_entry(mrioc->delayed_evtack_cmds_list.next, 2533 struct delayed_evt_ack_node, list); 2534 mpi3mr_send_event_ack(mrioc, delayed_evtack->event, drv_cmd, 2535 delayed_evtack->event_ctx); 2536 list_del(&delayed_evtack->list); 2537 kfree(delayed_evtack); 2538 return; 2539 } 2540 clear_drv_cmd: 2541 drv_cmd->state = MPI3MR_CMD_NOTUSED; 2542 drv_cmd->callback = NULL; 2543 clear_bit(cmd_idx, mrioc->evtack_cmds_bitmap); 2544 } 2545 2546 /** 2547 * mpi3mr_send_event_ack - Issue event acknowledgment request 2548 * @mrioc: Adapter instance reference 2549 * @event: MPI3 event id 2550 * @cmdparam: Internal command tracker 2551 * @event_ctx: event context 2552 * 2553 * Issues an event acknowledgment request to the firmware if there 2554 * is a free command to send the event ack, else adds it to a pend 2555 * list so that it will be processed on completion of a prior 2556 * event acknowledgment. 2557 * 2558 * Return: Nothing 2559 */ 2560 static void mpi3mr_send_event_ack(struct mpi3mr_ioc *mrioc, u8 event, 2561 struct mpi3mr_drv_cmd *cmdparam, u32 event_ctx) 2562 { 2563 struct mpi3_event_ack_request evtack_req; 2564 int retval = 0; 2565 u8 retrycount = 5; 2566 u16 cmd_idx = MPI3MR_NUM_EVTACKCMD; 2567 struct mpi3mr_drv_cmd *drv_cmd = cmdparam; 2568 struct delayed_evt_ack_node *delayed_evtack = NULL; 2569 2570 if (drv_cmd) { 2571 dprint_event_th(mrioc, 2572 "sending delayed event ack in the top half for event(0x%02x), event_ctx(0x%08x)\n", 2573 event, event_ctx); 2574 goto issue_cmd; 2575 } 2576 dprint_event_th(mrioc, 2577 "sending event ack in the top half for event(0x%02x), event_ctx(0x%08x)\n", 2578 event, event_ctx); 2579 do { 2580 cmd_idx = find_first_zero_bit(mrioc->evtack_cmds_bitmap, 2581 MPI3MR_NUM_EVTACKCMD); 2582 if (cmd_idx < MPI3MR_NUM_EVTACKCMD) { 2583 if (!test_and_set_bit(cmd_idx, 2584 mrioc->evtack_cmds_bitmap)) 2585 break; 2586 cmd_idx = MPI3MR_NUM_EVTACKCMD; 2587 } 2588 } while (retrycount--); 2589 2590 if (cmd_idx >= MPI3MR_NUM_EVTACKCMD) { 2591 delayed_evtack = kzalloc(sizeof(*delayed_evtack), 2592 GFP_ATOMIC); 2593 if (!delayed_evtack) 2594 return; 2595 INIT_LIST_HEAD(&delayed_evtack->list); 2596 delayed_evtack->event = event; 2597 delayed_evtack->event_ctx = event_ctx; 2598 list_add_tail(&delayed_evtack->list, 2599 &mrioc->delayed_evtack_cmds_list); 2600 dprint_event_th(mrioc, 2601 "event ack in the top half for event(0x%02x), event_ctx(0x%08x) is postponed\n", 2602 event, event_ctx); 2603 return; 2604 } 2605 drv_cmd = &mrioc->evtack_cmds[cmd_idx]; 2606 2607 issue_cmd: 2608 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN; 2609 2610 memset(&evtack_req, 0, sizeof(evtack_req)); 2611 if (drv_cmd->state & MPI3MR_CMD_PENDING) { 2612 dprint_event_th(mrioc, 2613 "sending event ack failed due to command in use\n"); 2614 goto out; 2615 } 2616 drv_cmd->state = MPI3MR_CMD_PENDING; 2617
drv_cmd->is_waiting = 0; 2618 drv_cmd->callback = mpi3mr_complete_evt_ack; 2619 evtack_req.host_tag = cpu_to_le16(drv_cmd->host_tag); 2620 evtack_req.function = MPI3_FUNCTION_EVENT_ACK; 2621 evtack_req.event = event; 2622 evtack_req.event_context = cpu_to_le32(event_ctx); 2623 retval = mpi3mr_admin_request_post(mrioc, &evtack_req, 2624 sizeof(evtack_req), 1); 2625 if (retval) { 2626 dprint_event_th(mrioc, 2627 "posting event ack request failed\n"); 2628 goto out_failed; 2629 } 2630 2631 dprint_event_th(mrioc, 2632 "event ack in the top half for event(0x%02x), event_ctx(0x%08x) is posted\n", 2633 event, event_ctx); 2634 out: 2635 return; 2636 out_failed: 2637 drv_cmd->state = MPI3MR_CMD_NOTUSED; 2638 drv_cmd->callback = NULL; 2639 clear_bit(cmd_idx, mrioc->evtack_cmds_bitmap); 2640 } 2641 2642 /** 2643 * mpi3mr_pcietopochg_evt_th - PCIETopologyChange evt tophalf 2644 * @mrioc: Adapter instance reference 2645 * @event_reply: event data 2646 * 2647 * Checks for the reason code and based on that either blocks I/O 2648 * to the device, or unblocks I/O to the device, or starts the device 2649 * removal handshake with reason as remove with the firmware for 2650 * PCIe devices. 2651 * 2652 * Return: Nothing 2653 */ 2654 static void mpi3mr_pcietopochg_evt_th(struct mpi3mr_ioc *mrioc, 2655 struct mpi3_event_notification_reply *event_reply) 2656 { 2657 struct mpi3_event_data_pcie_topology_change_list *topo_evt = 2658 (struct mpi3_event_data_pcie_topology_change_list *)event_reply->event_data; 2659 int i; 2660 u16 handle; 2661 u8 reason_code; 2662 struct mpi3mr_tgt_dev *tgtdev = NULL; 2663 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL; 2664 2665 for (i = 0; i < topo_evt->num_entries; i++) { 2666 handle = le16_to_cpu(topo_evt->port_entry[i].attached_dev_handle); 2667 if (!handle) 2668 continue; 2669 reason_code = topo_evt->port_entry[i].port_status; 2670 scsi_tgt_priv_data = NULL; 2671 tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle); 2672 if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata) 2673 scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *) 2674 tgtdev->starget->hostdata; 2675 switch (reason_code) { 2676 case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING: 2677 if (scsi_tgt_priv_data) { 2678 scsi_tgt_priv_data->dev_removed = 1; 2679 scsi_tgt_priv_data->dev_removedelay = 0; 2680 atomic_set(&scsi_tgt_priv_data->block_io, 0); 2681 } 2682 mpi3mr_dev_rmhs_send_tm(mrioc, handle, NULL, 2683 MPI3_CTRL_OP_REMOVE_DEVICE); 2684 break; 2685 case MPI3_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING: 2686 if (scsi_tgt_priv_data) { 2687 scsi_tgt_priv_data->dev_removedelay = 1; 2688 atomic_inc(&scsi_tgt_priv_data->block_io); 2689 } 2690 break; 2691 case MPI3_EVENT_PCIE_TOPO_PS_RESPONDING: 2692 if (scsi_tgt_priv_data && 2693 scsi_tgt_priv_data->dev_removedelay) { 2694 scsi_tgt_priv_data->dev_removedelay = 0; 2695 atomic_dec_if_positive 2696 (&scsi_tgt_priv_data->block_io); 2697 } 2698 break; 2699 case MPI3_EVENT_PCIE_TOPO_PS_PORT_CHANGED: 2700 default: 2701 break; 2702 } 2703 if (tgtdev) 2704 mpi3mr_tgtdev_put(tgtdev); 2705 } 2706 } 2707 2708 /** 2709 * mpi3mr_sastopochg_evt_th - SASTopologyChange evt tophalf 2710 * @mrioc: Adapter instance reference 2711 * @event_reply: event data 2712 * 2713 * Checks for the reason code and based on that either blocks I/O 2714 * to the device, or unblocks I/O to the device, or starts the device 2715 * removal handshake with reason as remove with the firmware for 2716 * SAS/SATA devices.
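 *
 * Illustrative caller sketch, mirroring the dispatch in
 * mpi3mr_os_handle_events():
 *
 *	case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
 *		process_evt_bh = 1;
 *		mpi3mr_sastopochg_evt_th(mrioc, event_reply);
 *		break;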
2717 * 2718 * Return: Nothing 2719 */ 2720 static void mpi3mr_sastopochg_evt_th(struct mpi3mr_ioc *mrioc, 2721 struct mpi3_event_notification_reply *event_reply) 2722 { 2723 struct mpi3_event_data_sas_topology_change_list *topo_evt = 2724 (struct mpi3_event_data_sas_topology_change_list *)event_reply->event_data; 2725 int i; 2726 u16 handle; 2727 u8 reason_code; 2728 struct mpi3mr_tgt_dev *tgtdev = NULL; 2729 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL; 2730 2731 for (i = 0; i < topo_evt->num_entries; i++) { 2732 handle = le16_to_cpu(topo_evt->phy_entry[i].attached_dev_handle); 2733 if (!handle) 2734 continue; 2735 reason_code = topo_evt->phy_entry[i].status & 2736 MPI3_EVENT_SAS_TOPO_PHY_RC_MASK; 2737 scsi_tgt_priv_data = NULL; 2738 tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle); 2739 if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata) 2740 scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *) 2741 tgtdev->starget->hostdata; 2742 switch (reason_code) { 2743 case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING: 2744 if (scsi_tgt_priv_data) { 2745 scsi_tgt_priv_data->dev_removed = 1; 2746 scsi_tgt_priv_data->dev_removedelay = 0; 2747 atomic_set(&scsi_tgt_priv_data->block_io, 0); 2748 } 2749 mpi3mr_dev_rmhs_send_tm(mrioc, handle, NULL, 2750 MPI3_CTRL_OP_REMOVE_DEVICE); 2751 break; 2752 case MPI3_EVENT_SAS_TOPO_PHY_RC_DELAY_NOT_RESPONDING: 2753 if (scsi_tgt_priv_data) { 2754 scsi_tgt_priv_data->dev_removedelay = 1; 2755 atomic_inc(&scsi_tgt_priv_data->block_io); 2756 } 2757 break; 2758 case MPI3_EVENT_SAS_TOPO_PHY_RC_RESPONDING: 2759 if (scsi_tgt_priv_data && 2760 scsi_tgt_priv_data->dev_removedelay) { 2761 scsi_tgt_priv_data->dev_removedelay = 0; 2762 atomic_dec_if_positive 2763 (&scsi_tgt_priv_data->block_io); 2764 } 2765 break; 2766 case MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED: 2767 default: 2768 break; 2769 } 2770 if (tgtdev) 2771 mpi3mr_tgtdev_put(tgtdev); 2772 } 2773 } 2774 2775 /** 2776 * mpi3mr_devstatuschg_evt_th - DeviceStatusChange evt tophalf 2777 * @mrioc: Adapter instance reference 2778 * @event_reply: event data 2779 * 2780 * Checks for the reason code and based on that either blocks I/O 2781 * to the device, or unblocks I/O to the device, or starts the device 2782 * removal handshake with reason as remove/hide acknowledgment 2783 * with the firmware.
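 *
 * Sketch of the reason code to action mapping implemented below:
 *
 *	RC_INT_DEVICE_RESET_STRT / RC_INT_IT_NEXUS_RESET_STRT -> block = 1
 *	RC_HIDDEN                                             -> delete = 1, hide = 1
 *	RC_VD_NOT_RESPONDING                                  -> delete = 1, remove = 1
 *	RC_INT_DEVICE_RESET_CMP / RC_INT_IT_NEXUS_RESET_CMP   -> ublock = 1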
2784 * 2785 * Return: Nothing 2786 */ 2787 static void mpi3mr_devstatuschg_evt_th(struct mpi3mr_ioc *mrioc, 2788 struct mpi3_event_notification_reply *event_reply) 2789 { 2790 u16 dev_handle = 0; 2791 u8 ublock = 0, block = 0, hide = 0, delete = 0, remove = 0; 2792 struct mpi3mr_tgt_dev *tgtdev = NULL; 2793 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL; 2794 struct mpi3_event_data_device_status_change *evtdata = 2795 (struct mpi3_event_data_device_status_change *)event_reply->event_data; 2796 2797 if (mrioc->stop_drv_processing) 2798 goto out; 2799 2800 dev_handle = le16_to_cpu(evtdata->dev_handle); 2801 dprint_event_th(mrioc, 2802 "device status change event top half with rc(0x%02x) for handle(0x%04x)\n", 2803 evtdata->reason_code, dev_handle); 2804 2805 switch (evtdata->reason_code) { 2806 case MPI3_EVENT_DEV_STAT_RC_INT_DEVICE_RESET_STRT: 2807 case MPI3_EVENT_DEV_STAT_RC_INT_IT_NEXUS_RESET_STRT: 2808 block = 1; 2809 break; 2810 case MPI3_EVENT_DEV_STAT_RC_HIDDEN: 2811 delete = 1; 2812 hide = 1; 2813 break; 2814 case MPI3_EVENT_DEV_STAT_RC_VD_NOT_RESPONDING: 2815 delete = 1; 2816 remove = 1; 2817 break; 2818 case MPI3_EVENT_DEV_STAT_RC_INT_DEVICE_RESET_CMP: 2819 case MPI3_EVENT_DEV_STAT_RC_INT_IT_NEXUS_RESET_CMP: 2820 ublock = 1; 2821 break; 2822 default: 2823 break; 2824 } 2825 2826 tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle); 2827 if (!tgtdev) { 2828 dprint_event_th(mrioc, 2829 "processing device status change event could not identify device for handle(0x%04x)\n", 2830 dev_handle); 2831 goto out; 2832 } 2833 if (hide) 2834 tgtdev->is_hidden = hide; 2835 if (tgtdev->starget && tgtdev->starget->hostdata) { 2836 scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *) 2837 tgtdev->starget->hostdata; 2838 if (block) 2839 atomic_inc(&scsi_tgt_priv_data->block_io); 2840 if (delete) 2841 scsi_tgt_priv_data->dev_removed = 1; 2842 if (ublock) 2843 atomic_dec_if_positive(&scsi_tgt_priv_data->block_io); 2844 } 2845 if (remove) 2846 mpi3mr_dev_rmhs_send_tm(mrioc, dev_handle, NULL, 2847 MPI3_CTRL_OP_REMOVE_DEVICE); 2848 if (hide) 2849 mpi3mr_dev_rmhs_send_tm(mrioc, dev_handle, NULL, 2850 MPI3_CTRL_OP_HIDDEN_ACK); 2851 2852 out: 2853 if (tgtdev) 2854 mpi3mr_tgtdev_put(tgtdev); 2855 } 2856 2857 /** 2858 * mpi3mr_preparereset_evt_th - Prepare for reset event tophalf 2859 * @mrioc: Adapter instance reference 2860 * @event_reply: event data 2861 * 2862 * Blocks and unblocks host level I/O based on the reason code 2863 * 2864 * Return: Nothing 2865 */ 2866 static void mpi3mr_preparereset_evt_th(struct mpi3mr_ioc *mrioc, 2867 struct mpi3_event_notification_reply *event_reply) 2868 { 2869 struct mpi3_event_data_prepare_for_reset *evtdata = 2870 (struct mpi3_event_data_prepare_for_reset *)event_reply->event_data; 2871 2872 if (evtdata->reason_code == MPI3_EVENT_PREPARE_RESET_RC_START) { 2873 dprint_event_th(mrioc, 2874 "prepare for reset event top half with rc=start\n"); 2875 if (mrioc->prepare_for_reset) 2876 return; 2877 scsi_block_requests(mrioc->shost); 2878 mrioc->prepare_for_reset = 1; 2879 mrioc->prepare_for_reset_timeout_counter = 0; 2880 } else if (evtdata->reason_code == MPI3_EVENT_PREPARE_RESET_RC_ABORT) { 2881 dprint_event_th(mrioc, 2882 "prepare for reset top half with rc=abort\n"); 2883 mrioc->prepare_for_reset = 0; 2884 scsi_unblock_requests(mrioc->shost); 2885 mrioc->prepare_for_reset_timeout_counter = 0; 2886 } 2887 if ((event_reply->msg_flags & MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_MASK) 2888 == MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_REQUIRED) 2889 mpi3mr_send_event_ack(mrioc, 
event_reply->event, NULL, 2890 le32_to_cpu(event_reply->event_context)); 2891 } 2892 2893 /** 2894 * mpi3mr_energypackchg_evt_th - Energy pack change evt tophalf 2895 * @mrioc: Adapter instance reference 2896 * @event_reply: event data 2897 * 2898 * Identifies the new shutdown timeout value and updates it. 2899 * 2900 * Return: Nothing 2901 */ 2902 static void mpi3mr_energypackchg_evt_th(struct mpi3mr_ioc *mrioc, 2903 struct mpi3_event_notification_reply *event_reply) 2904 { 2905 struct mpi3_event_data_energy_pack_change *evtdata = 2906 (struct mpi3_event_data_energy_pack_change *)event_reply->event_data; 2907 u16 shutdown_timeout = le16_to_cpu(evtdata->shutdown_timeout); 2908 2909 if (shutdown_timeout <= 0) { 2910 dprint_event_th(mrioc, 2911 "%s :Invalid Shutdown Timeout received = %d\n", 2912 __func__, shutdown_timeout); 2913 return; 2914 } 2915 2916 dprint_event_th(mrioc, 2917 "%s :Previous Shutdown Timeout Value = %d New Shutdown Timeout Value = %d\n", 2918 __func__, mrioc->facts.shutdown_timeout, shutdown_timeout); 2919 mrioc->facts.shutdown_timeout = shutdown_timeout; 2920 } 2921 2922 /** 2923 * mpi3mr_cablemgmt_evt_th - Cable management event tophalf 2924 * @mrioc: Adapter instance reference 2925 * @event_reply: event data 2926 * 2927 * Displays cable management event details. 2928 * 2929 * Return: Nothing 2930 */ 2931 static void mpi3mr_cablemgmt_evt_th(struct mpi3mr_ioc *mrioc, 2932 struct mpi3_event_notification_reply *event_reply) 2933 { 2934 struct mpi3_event_data_cable_management *evtdata = 2935 (struct mpi3_event_data_cable_management *)event_reply->event_data; 2936 2937 switch (evtdata->status) { 2938 case MPI3_EVENT_CABLE_MGMT_STATUS_INSUFFICIENT_POWER: 2939 { 2940 ioc_info(mrioc, "An active cable with receptacle_id %d cannot be powered.\n" 2941 "Devices connected to this cable are not detected.\n" 2942 "This cable requires %d mW of power.\n", 2943 evtdata->receptacle_id, 2944 le32_to_cpu(evtdata->active_cable_power_requirement)); 2945 break; 2946 } 2947 case MPI3_EVENT_CABLE_MGMT_STATUS_DEGRADED: 2948 { 2949 ioc_info(mrioc, "A cable with receptacle_id %d is not running at optimal speed\n", 2950 evtdata->receptacle_id); 2951 break; 2952 } 2953 default: 2954 break; 2955 } 2956 } 2957 2958 /** 2959 * mpi3mr_add_event_wait_for_device_refresh - Add Wait for Device Refresh Event 2960 * @mrioc: Adapter instance reference 2961 * 2962 * Adds a driver-specific event to make sure that the driver won't process the 2963 * events until all the devices are refreshed during soft reset. 2964 * 2965 * Return: Nothing 2966 */ 2967 void mpi3mr_add_event_wait_for_device_refresh(struct mpi3mr_ioc *mrioc) 2968 { 2969 struct mpi3mr_fwevt *fwevt = NULL; 2970 2971 fwevt = mpi3mr_alloc_fwevt(0); 2972 if (!fwevt) { 2973 dprint_event_th(mrioc, 2974 "failed to schedule bottom half handler for event(0x%02x)\n", 2975 MPI3_EVENT_WAIT_FOR_DEVICES_TO_REFRESH); 2976 return; 2977 } 2978 fwevt->mrioc = mrioc; 2979 fwevt->event_id = MPI3_EVENT_WAIT_FOR_DEVICES_TO_REFRESH; 2980 fwevt->send_ack = 0; 2981 fwevt->process_evt = 1; 2982 fwevt->evt_ctx = 0; 2983 fwevt->event_data_size = 0; 2984 mpi3mr_fwevt_add_to_list(mrioc, fwevt); 2985 } 2986 2987 /** 2988 * mpi3mr_os_handle_events - Firmware event handler 2989 * @mrioc: Adapter instance reference 2990 * @event_reply: event data 2991 * 2992 * Identifies whether the event has to be handled and acknowledged, 2993 * and either processes the event in the top-half and/or schedules a 2994 * bottom-half through mpi3mr_fwevt_worker().
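 *
 * For reference, whether an acknowledgment is required is derived
 * from the event reply as below:
 *
 *	if ((event_reply->msg_flags & MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_MASK)
 *	    == MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_REQUIRED)
 *		ack_req = 1;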
2995 * 2996 * Return: Nothing 2997 */ 2998 void mpi3mr_os_handle_events(struct mpi3mr_ioc *mrioc, 2999 struct mpi3_event_notification_reply *event_reply) 3000 { 3001 u16 evt_type, sz; 3002 struct mpi3mr_fwevt *fwevt = NULL; 3003 bool ack_req = 0, process_evt_bh = 0; 3004 3005 if (mrioc->stop_drv_processing) 3006 return; 3007 3008 if ((event_reply->msg_flags & MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_MASK) 3009 == MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_REQUIRED) 3010 ack_req = 1; 3011 3012 evt_type = event_reply->event; 3013 mpi3mr_event_trigger(mrioc, event_reply->event); 3014 3015 switch (evt_type) { 3016 case MPI3_EVENT_DEVICE_ADDED: 3017 { 3018 struct mpi3_device_page0 *dev_pg0 = 3019 (struct mpi3_device_page0 *)event_reply->event_data; 3020 if (mpi3mr_create_tgtdev(mrioc, dev_pg0)) 3021 dprint_event_th(mrioc, 3022 "failed to process device added event for handle(0x%04x),\n" 3023 "perst_id(%d) in the event top half handler\n", 3024 le16_to_cpu(dev_pg0->dev_handle), 3025 le16_to_cpu(dev_pg0->persistent_id)); 3026 else 3027 process_evt_bh = 1; 3028 break; 3029 } 3030 case MPI3_EVENT_DEVICE_STATUS_CHANGE: 3031 { 3032 process_evt_bh = 1; 3033 mpi3mr_devstatuschg_evt_th(mrioc, event_reply); 3034 break; 3035 } 3036 case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST: 3037 { 3038 process_evt_bh = 1; 3039 mpi3mr_sastopochg_evt_th(mrioc, event_reply); 3040 break; 3041 } 3042 case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST: 3043 { 3044 process_evt_bh = 1; 3045 mpi3mr_pcietopochg_evt_th(mrioc, event_reply); 3046 break; 3047 } 3048 case MPI3_EVENT_PREPARE_FOR_RESET: 3049 { 3050 mpi3mr_preparereset_evt_th(mrioc, event_reply); 3051 ack_req = 0; 3052 break; 3053 } 3054 case MPI3_EVENT_DIAGNOSTIC_BUFFER_STATUS_CHANGE: 3055 { 3056 mpi3mr_hdbstatuschg_evt_th(mrioc, event_reply); 3057 break; 3058 } 3059 case MPI3_EVENT_DEVICE_INFO_CHANGED: 3060 case MPI3_EVENT_LOG_DATA: 3061 case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE: 3062 case MPI3_EVENT_ENCL_DEVICE_ADDED: 3063 { 3064 process_evt_bh = 1; 3065 break; 3066 } 3067 case MPI3_EVENT_ENERGY_PACK_CHANGE: 3068 { 3069 mpi3mr_energypackchg_evt_th(mrioc, event_reply); 3070 break; 3071 } 3072 case MPI3_EVENT_CABLE_MGMT: 3073 { 3074 mpi3mr_cablemgmt_evt_th(mrioc, event_reply); 3075 break; 3076 } 3077 case MPI3_EVENT_SAS_DISCOVERY: 3078 case MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR: 3079 case MPI3_EVENT_SAS_BROADCAST_PRIMITIVE: 3080 case MPI3_EVENT_PCIE_ENUMERATION: 3081 break; 3082 default: 3083 ioc_info(mrioc, "%s :event 0x%02x is not handled\n", 3084 __func__, evt_type); 3085 break; 3086 } 3087 if (process_evt_bh || ack_req) { 3088 dprint_event_th(mrioc, 3089 "scheduling bottom half handler for event(0x%02x) - (0x%08x), ack_required=%d\n", 3090 evt_type, le32_to_cpu(event_reply->event_context), ack_req); 3091 sz = event_reply->event_data_length * 4; 3092 fwevt = mpi3mr_alloc_fwevt(sz); 3093 if (!fwevt) { 3094 dprint_event_th(mrioc, 3095 "failed to schedule bottom half handler for\n" 3096 "event(0x%02x), ack_required=%d\n", evt_type, ack_req); 3097 return; 3098 } 3099 3100 memcpy(fwevt->event_data, event_reply->event_data, sz); 3101 fwevt->mrioc = mrioc; 3102 fwevt->event_id = evt_type; 3103 fwevt->send_ack = ack_req; 3104 fwevt->process_evt = process_evt_bh; 3105 fwevt->evt_ctx = le32_to_cpu(event_reply->event_context); 3106 mpi3mr_fwevt_add_to_list(mrioc, fwevt); 3107 } 3108 } 3109 3110 /** 3111 * mpi3mr_setup_eedp - Setup EEDP information in MPI3 SCSI IO 3112 * @mrioc: Adapter instance reference 3113 * @scmd: SCSI command reference 3114 * @scsiio_req: MPI3 SCSI IO request 3115 * 3116 * Identifies the 
protection information flags from the SCSI 3117 * command and sets appropriate flags in the MPI3 SCSI IO 3118 * request. 3119 * 3120 * Return: Nothing 3121 */ 3122 static void mpi3mr_setup_eedp(struct mpi3mr_ioc *mrioc, 3123 struct scsi_cmnd *scmd, struct mpi3_scsi_io_request *scsiio_req) 3124 { 3125 u16 eedp_flags = 0; 3126 unsigned char prot_op = scsi_get_prot_op(scmd); 3127 3128 switch (prot_op) { 3129 case SCSI_PROT_NORMAL: 3130 return; 3131 case SCSI_PROT_READ_STRIP: 3132 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK_REMOVE; 3133 break; 3134 case SCSI_PROT_WRITE_INSERT: 3135 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_INSERT; 3136 break; 3137 case SCSI_PROT_READ_INSERT: 3138 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_INSERT; 3139 scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID; 3140 break; 3141 case SCSI_PROT_WRITE_STRIP: 3142 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK_REMOVE; 3143 scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID; 3144 break; 3145 case SCSI_PROT_READ_PASS: 3146 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK; 3147 scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID; 3148 break; 3149 case SCSI_PROT_WRITE_PASS: 3150 if (scmd->prot_flags & SCSI_PROT_IP_CHECKSUM) { 3151 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK_REGEN; 3152 scsiio_req->sgl[0].eedp.application_tag_translation_mask = 3153 0xffff; 3154 } else 3155 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK; 3156 3157 scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID; 3158 break; 3159 default: 3160 return; 3161 } 3162 3163 if (scmd->prot_flags & SCSI_PROT_GUARD_CHECK) 3164 eedp_flags |= MPI3_EEDPFLAGS_CHK_GUARD; 3165 3166 if (scmd->prot_flags & SCSI_PROT_IP_CHECKSUM) 3167 eedp_flags |= MPI3_EEDPFLAGS_HOST_GUARD_IP_CHKSUM; 3168 3169 if (scmd->prot_flags & SCSI_PROT_REF_CHECK) { 3170 eedp_flags |= MPI3_EEDPFLAGS_CHK_REF_TAG | 3171 MPI3_EEDPFLAGS_INCR_PRI_REF_TAG; 3172 scsiio_req->cdb.eedp32.primary_reference_tag = 3173 cpu_to_be32(scsi_prot_ref_tag(scmd)); 3174 } 3175 3176 if (scmd->prot_flags & SCSI_PROT_REF_INCREMENT) 3177 eedp_flags |= MPI3_EEDPFLAGS_INCR_PRI_REF_TAG; 3178 3179 eedp_flags |= MPI3_EEDPFLAGS_ESC_MODE_APPTAG_DISABLE; 3180 3181 switch (scsi_prot_interval(scmd)) { 3182 case 512: 3183 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_512; 3184 break; 3185 case 520: 3186 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_520; 3187 break; 3188 case 4080: 3189 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4080; 3190 break; 3191 case 4088: 3192 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4088; 3193 break; 3194 case 4096: 3195 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4096; 3196 break; 3197 case 4104: 3198 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4104; 3199 break; 3200 case 4160: 3201 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4160; 3202 break; 3203 default: 3204 break; 3205 } 3206 3207 scsiio_req->sgl[0].eedp.eedp_flags = cpu_to_le16(eedp_flags); 3208 scsiio_req->sgl[0].eedp.flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_EXTENDED; 3209 } 3210 3211 /** 3212 * mpi3mr_build_sense_buffer - Map sense information 3213 * @desc: Sense type 3214 * @buf: Sense buffer to populate 3215 * @key: Sense key 3216 * @asc: Additional sense code 3217 * @ascq: Additional sense code qualifier 3218 * 3219 * Maps the given sense information into either descriptor or 3220 * fixed format sense data.
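 *
 * Typical usage, mirroring mpi3mr_map_eedp_error() below:
 *
 *	mpi3mr_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
 *	    0x10, ascq);
 *	scmd->result = (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;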
3221 * 3222 * Return: Nothing 3223 */ 3224 static inline void mpi3mr_build_sense_buffer(int desc, u8 *buf, u8 key, 3225 u8 asc, u8 ascq) 3226 { 3227 if (desc) { 3228 buf[0] = 0x72; /* descriptor, current */ 3229 buf[1] = key; 3230 buf[2] = asc; 3231 buf[3] = ascq; 3232 buf[7] = 0; 3233 } else { 3234 buf[0] = 0x70; /* fixed, current */ 3235 buf[2] = key; 3236 buf[7] = 0xa; 3237 buf[12] = asc; 3238 buf[13] = ascq; 3239 } 3240 } 3241 3242 /** 3243 * mpi3mr_map_eedp_error - Map EEDP errors from IOC status 3244 * @scmd: SCSI command reference 3245 * @ioc_status: status of MPI3 request 3246 * 3247 * Maps the EEDP error status of the SCSI IO request to sense 3248 * data. 3249 * 3250 * Return: Nothing 3251 */ 3252 static void mpi3mr_map_eedp_error(struct scsi_cmnd *scmd, 3253 u16 ioc_status) 3254 { 3255 u8 ascq = 0; 3256 3257 switch (ioc_status) { 3258 case MPI3_IOCSTATUS_EEDP_GUARD_ERROR: 3259 ascq = 0x01; 3260 break; 3261 case MPI3_IOCSTATUS_EEDP_APP_TAG_ERROR: 3262 ascq = 0x02; 3263 break; 3264 case MPI3_IOCSTATUS_EEDP_REF_TAG_ERROR: 3265 ascq = 0x03; 3266 break; 3267 default: 3268 ascq = 0x00; 3269 break; 3270 } 3271 3272 mpi3mr_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 3273 0x10, ascq); 3274 scmd->result = (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION; 3275 } 3276 3277 /** 3278 * mpi3mr_process_op_reply_desc - reply descriptor handler 3279 * @mrioc: Adapter instance reference 3280 * @reply_desc: Operational reply descriptor 3281 * @reply_dma: placeholder for reply DMA address 3282 * @qidx: Operational queue index 3283 * 3284 * Processes the operational reply descriptor and identifies the 3285 * descriptor type. Based on the descriptor, maps the MPI3 request 3286 * status to a SCSI command status and calls the scsi_done 3287 * callback. 3288 * 3289 * Return: Nothing 3290 */ 3291 void mpi3mr_process_op_reply_desc(struct mpi3mr_ioc *mrioc, 3292 struct mpi3_default_reply_descriptor *reply_desc, u64 *reply_dma, u16 qidx) 3293 { 3294 u16 reply_desc_type, host_tag = 0; 3295 u16 ioc_status = MPI3_IOCSTATUS_SUCCESS; 3296 u32 ioc_loginfo = 0; 3297 struct mpi3_status_reply_descriptor *status_desc = NULL; 3298 struct mpi3_address_reply_descriptor *addr_desc = NULL; 3299 struct mpi3_success_reply_descriptor *success_desc = NULL; 3300 struct mpi3_scsi_io_reply *scsi_reply = NULL; 3301 struct scsi_cmnd *scmd = NULL; 3302 struct scmd_priv *priv = NULL; 3303 u8 *sense_buf = NULL; 3304 u8 scsi_state = 0, scsi_status = 0, sense_state = 0; 3305 u32 xfer_count = 0, sense_count = 0, resp_data = 0; 3306 u16 dev_handle = 0xFFFF; 3307 struct scsi_sense_hdr sshdr; 3308 struct mpi3mr_stgt_priv_data *stgt_priv_data = NULL; 3309 struct mpi3mr_sdev_priv_data *sdev_priv_data = NULL; 3310 u32 ioc_pend_data_len = 0, tg_pend_data_len = 0, data_len_blks = 0; 3311 struct mpi3mr_throttle_group_info *tg = NULL; 3312 u8 throttle_enabled_dev = 0; 3313 3314 *reply_dma = 0; 3315 reply_desc_type = le16_to_cpu(reply_desc->reply_flags) & 3316 MPI3_REPLY_DESCRIPT_FLAGS_TYPE_MASK; 3317 switch (reply_desc_type) { 3318 case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_STATUS: 3319 status_desc = (struct mpi3_status_reply_descriptor *)reply_desc; 3320 host_tag = le16_to_cpu(status_desc->host_tag); 3321 ioc_status = le16_to_cpu(status_desc->ioc_status); 3322 if (ioc_status & 3323 MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL) 3324 ioc_loginfo = le32_to_cpu(status_desc->ioc_log_info); 3325 ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK; 3326 mpi3mr_reply_trigger(mrioc, ioc_status, ioc_loginfo); 3327 break; 3328 case
MPI3_REPLY_DESCRIPT_FLAGS_TYPE_ADDRESS_REPLY: 3329 addr_desc = (struct mpi3_address_reply_descriptor *)reply_desc; 3330 *reply_dma = le64_to_cpu(addr_desc->reply_frame_address); 3331 scsi_reply = mpi3mr_get_reply_virt_addr(mrioc, 3332 *reply_dma); 3333 if (!scsi_reply) { 3334 panic("%s: scsi_reply is NULL, this shouldn't happen\n", 3335 mrioc->name); 3336 goto out; 3337 } 3338 host_tag = le16_to_cpu(scsi_reply->host_tag); 3339 ioc_status = le16_to_cpu(scsi_reply->ioc_status); 3340 scsi_status = scsi_reply->scsi_status; 3341 scsi_state = scsi_reply->scsi_state; 3342 dev_handle = le16_to_cpu(scsi_reply->dev_handle); 3343 sense_state = (scsi_state & MPI3_SCSI_STATE_SENSE_MASK); 3344 xfer_count = le32_to_cpu(scsi_reply->transfer_count); 3345 sense_count = le32_to_cpu(scsi_reply->sense_count); 3346 resp_data = le32_to_cpu(scsi_reply->response_data); 3347 sense_buf = mpi3mr_get_sensebuf_virt_addr(mrioc, 3348 le64_to_cpu(scsi_reply->sense_data_buffer_address)); 3349 if (ioc_status & 3350 MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL) 3351 ioc_loginfo = le32_to_cpu(scsi_reply->ioc_log_info); 3352 ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK; 3353 if (sense_state == MPI3_SCSI_STATE_SENSE_BUFF_Q_EMPTY) 3354 panic("%s: Ran out of sense buffers\n", mrioc->name); 3355 if (sense_buf) { 3356 scsi_normalize_sense(sense_buf, sense_count, &sshdr); 3357 mpi3mr_scsisense_trigger(mrioc, sshdr.sense_key, 3358 sshdr.asc, sshdr.ascq); 3359 } 3360 mpi3mr_reply_trigger(mrioc, ioc_status, ioc_loginfo); 3361 break; 3362 case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SUCCESS: 3363 success_desc = (struct mpi3_success_reply_descriptor *)reply_desc; 3364 host_tag = le16_to_cpu(success_desc->host_tag); 3365 break; 3366 default: 3367 break; 3368 } 3369 scmd = mpi3mr_scmd_from_host_tag(mrioc, host_tag, qidx); 3370 if (!scmd) { 3371 panic("%s: Cannot Identify scmd for host_tag 0x%x\n", 3372 mrioc->name, host_tag); 3373 goto out; 3374 } 3375 priv = scsi_cmd_priv(scmd); 3376 3377 data_len_blks = scsi_bufflen(scmd) >> 9; 3378 sdev_priv_data = scmd->device->hostdata; 3379 if (sdev_priv_data) { 3380 stgt_priv_data = sdev_priv_data->tgt_priv_data; 3381 if (stgt_priv_data) { 3382 tg = stgt_priv_data->throttle_group; 3383 throttle_enabled_dev = 3384 stgt_priv_data->io_throttle_enabled; 3385 dev_handle = stgt_priv_data->dev_handle; 3386 } 3387 } 3388 if (unlikely((data_len_blks >= mrioc->io_throttle_data_length) && 3389 throttle_enabled_dev)) { 3390 ioc_pend_data_len = atomic_sub_return(data_len_blks, 3391 &mrioc->pend_large_data_sz); 3392 if (tg) { 3393 tg_pend_data_len = atomic_sub_return(data_len_blks, 3394 &tg->pend_large_data_sz); 3395 if (tg->io_divert && ((ioc_pend_data_len <= 3396 mrioc->io_throttle_low) && 3397 (tg_pend_data_len <= tg->low))) { 3398 tg->io_divert = 0; 3399 mpi3mr_set_io_divert_for_all_vd_in_tg( 3400 mrioc, tg, 0); 3401 } 3402 } else { 3403 if (ioc_pend_data_len <= mrioc->io_throttle_low) 3404 stgt_priv_data->io_divert = 0; 3405 } 3406 } else if (unlikely((stgt_priv_data && stgt_priv_data->io_divert))) { 3407 ioc_pend_data_len = atomic_read(&mrioc->pend_large_data_sz); 3408 if (!tg) { 3409 if (ioc_pend_data_len <= mrioc->io_throttle_low) 3410 stgt_priv_data->io_divert = 0; 3411 3412 } else if (ioc_pend_data_len <= mrioc->io_throttle_low) { 3413 tg_pend_data_len = atomic_read(&tg->pend_large_data_sz); 3414 if (tg->io_divert && (tg_pend_data_len <= tg->low)) { 3415 tg->io_divert = 0; 3416 mpi3mr_set_io_divert_for_all_vd_in_tg( 3417 mrioc, tg, 0); 3418 } 3419 } 3420 } 3421 3422 if (success_desc) { 
3423 scmd->result = DID_OK << 16; 3424 goto out_success; 3425 } 3426 3427 scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_count); 3428 if (ioc_status == MPI3_IOCSTATUS_SCSI_DATA_UNDERRUN && 3429 xfer_count == 0 && (scsi_status == MPI3_SCSI_STATUS_BUSY || 3430 scsi_status == MPI3_SCSI_STATUS_RESERVATION_CONFLICT || 3431 scsi_status == MPI3_SCSI_STATUS_TASK_SET_FULL)) 3432 ioc_status = MPI3_IOCSTATUS_SUCCESS; 3433 3434 if ((sense_state == MPI3_SCSI_STATE_SENSE_VALID) && sense_count && 3435 sense_buf) { 3436 u32 sz = min_t(u32, SCSI_SENSE_BUFFERSIZE, sense_count); 3437 3438 memcpy(scmd->sense_buffer, sense_buf, sz); 3439 } 3440 3441 switch (ioc_status) { 3442 case MPI3_IOCSTATUS_BUSY: 3443 case MPI3_IOCSTATUS_INSUFFICIENT_RESOURCES: 3444 scmd->result = SAM_STAT_BUSY; 3445 break; 3446 case MPI3_IOCSTATUS_SCSI_DEVICE_NOT_THERE: 3447 scmd->result = DID_NO_CONNECT << 16; 3448 break; 3449 case MPI3_IOCSTATUS_SCSI_IOC_TERMINATED: 3450 if (ioc_loginfo == IOC_LOGINFO_SATA_NCQ_FAIL_AFTER_ERR) { 3451 /* 3452 * This is an ATA NCQ command aborted due to another NCQ 3453 * command failure. We must retry this command 3454 * immediately but without incrementing its retry 3455 * counter. 3456 */ 3457 WARN_ON_ONCE(xfer_count != 0); 3458 scmd->result = DID_IMM_RETRY << 16; 3459 } else { 3460 scmd->result = DID_SOFT_ERROR << 16; 3461 } 3462 break; 3463 case MPI3_IOCSTATUS_SCSI_TASK_TERMINATED: 3464 case MPI3_IOCSTATUS_SCSI_EXT_TERMINATED: 3465 scmd->result = DID_RESET << 16; 3466 break; 3467 case MPI3_IOCSTATUS_SCSI_RESIDUAL_MISMATCH: 3468 if ((xfer_count == 0) || (scmd->underflow > xfer_count)) 3469 scmd->result = DID_SOFT_ERROR << 16; 3470 else 3471 scmd->result = (DID_OK << 16) | scsi_status; 3472 break; 3473 case MPI3_IOCSTATUS_SCSI_DATA_UNDERRUN: 3474 scmd->result = (DID_OK << 16) | scsi_status; 3475 if (sense_state == MPI3_SCSI_STATE_SENSE_VALID) 3476 break; 3477 if (xfer_count < scmd->underflow) { 3478 if (scsi_status == SAM_STAT_BUSY) 3479 scmd->result = SAM_STAT_BUSY; 3480 else 3481 scmd->result = DID_SOFT_ERROR << 16; 3482 } else if ((scsi_state & (MPI3_SCSI_STATE_NO_SCSI_STATUS)) || 3483 (sense_state != MPI3_SCSI_STATE_SENSE_NOT_AVAILABLE)) 3484 scmd->result = DID_SOFT_ERROR << 16; 3485 else if (scsi_state & MPI3_SCSI_STATE_TERMINATED) 3486 scmd->result = DID_RESET << 16; 3487 break; 3488 case MPI3_IOCSTATUS_SCSI_DATA_OVERRUN: 3489 scsi_set_resid(scmd, 0); 3490 fallthrough; 3491 case MPI3_IOCSTATUS_SCSI_RECOVERED_ERROR: 3492 case MPI3_IOCSTATUS_SUCCESS: 3493 scmd->result = (DID_OK << 16) | scsi_status; 3494 if ((scsi_state & (MPI3_SCSI_STATE_NO_SCSI_STATUS)) || 3495 (sense_state == MPI3_SCSI_STATE_SENSE_FAILED) || 3496 (sense_state == MPI3_SCSI_STATE_SENSE_BUFF_Q_EMPTY)) 3497 scmd->result = DID_SOFT_ERROR << 16; 3498 else if (scsi_state & MPI3_SCSI_STATE_TERMINATED) 3499 scmd->result = DID_RESET << 16; 3500 break; 3501 case MPI3_IOCSTATUS_EEDP_GUARD_ERROR: 3502 case MPI3_IOCSTATUS_EEDP_REF_TAG_ERROR: 3503 case MPI3_IOCSTATUS_EEDP_APP_TAG_ERROR: 3504 mpi3mr_map_eedp_error(scmd, ioc_status); 3505 break; 3506 case MPI3_IOCSTATUS_SCSI_PROTOCOL_ERROR: 3507 case MPI3_IOCSTATUS_INVALID_FUNCTION: 3508 case MPI3_IOCSTATUS_INVALID_SGL: 3509 case MPI3_IOCSTATUS_INTERNAL_ERROR: 3510 case MPI3_IOCSTATUS_INVALID_FIELD: 3511 case MPI3_IOCSTATUS_INVALID_STATE: 3512 case MPI3_IOCSTATUS_SCSI_IO_DATA_ERROR: 3513 case MPI3_IOCSTATUS_SCSI_TASK_MGMT_FAILED: 3514 case MPI3_IOCSTATUS_INSUFFICIENT_POWER: 3515 default: 3516 scmd->result = DID_SOFT_ERROR << 16; 3517 break; 3518 } 3519 3520 if (scmd->result != (DID_OK << 16) &&
(scmd->cmnd[0] != ATA_12) && 3521 (scmd->cmnd[0] != ATA_16) && 3522 mrioc->logging_level & MPI3_DEBUG_SCSI_ERROR) { 3523 ioc_info(mrioc, "%s :scmd->result 0x%x\n", __func__, 3524 scmd->result); 3525 scsi_print_command(scmd); 3526 ioc_info(mrioc, 3527 "%s :Command issued to handle 0x%02x returned with error 0x%04x loginfo 0x%08x, qid %d\n", 3528 __func__, dev_handle, ioc_status, ioc_loginfo, 3529 priv->req_q_idx + 1); 3530 ioc_info(mrioc, 3531 " host_tag %d scsi_state 0x%02x scsi_status 0x%02x, xfer_cnt %d resp_data 0x%x\n", 3532 host_tag, scsi_state, scsi_status, xfer_count, resp_data); 3533 if (sense_buf) { 3534 scsi_normalize_sense(sense_buf, sense_count, &sshdr); 3535 ioc_info(mrioc, 3536 "%s :sense_count 0x%x, sense_key 0x%x ASC 0x%x, ASCQ 0x%x\n", 3537 __func__, sense_count, sshdr.sense_key, 3538 sshdr.asc, sshdr.ascq); 3539 } 3540 } 3541 out_success: 3542 if (priv->meta_sg_valid) { 3543 dma_unmap_sg(&mrioc->pdev->dev, scsi_prot_sglist(scmd), 3544 scsi_prot_sg_count(scmd), scmd->sc_data_direction); 3545 } 3546 mpi3mr_clear_scmd_priv(mrioc, scmd); 3547 scsi_dma_unmap(scmd); 3548 scsi_done(scmd); 3549 out: 3550 if (sense_buf) 3551 mpi3mr_repost_sense_buf(mrioc, 3552 le64_to_cpu(scsi_reply->sense_data_buffer_address)); 3553 } 3554 3555 /** 3556 * mpi3mr_get_chain_idx - get free chain buffer index 3557 * @mrioc: Adapter instance reference 3558 * 3559 * Tries to get a free chain buffer index from the free pool. 3560 * 3561 * Return: -1 on failure or the free chain buffer index 3562 */ 3563 static int mpi3mr_get_chain_idx(struct mpi3mr_ioc *mrioc) 3564 { 3565 u8 retry_count = 5; 3566 int cmd_idx = -1; 3567 unsigned long flags; 3568 3569 spin_lock_irqsave(&mrioc->chain_buf_lock, flags); 3570 do { 3571 cmd_idx = find_first_zero_bit(mrioc->chain_bitmap, 3572 mrioc->chain_buf_count); 3573 if (cmd_idx < mrioc->chain_buf_count) { 3574 set_bit(cmd_idx, mrioc->chain_bitmap); 3575 break; 3576 } 3577 cmd_idx = -1; 3578 } while (retry_count--); 3579 spin_unlock_irqrestore(&mrioc->chain_buf_lock, flags); 3580 return cmd_idx; 3581 } 3582 3583 /** 3584 * mpi3mr_prepare_sg_scmd - build scatter gather list 3585 * @mrioc: Adapter instance reference 3586 * @scmd: SCSI command reference 3587 * @scsiio_req: MPI3 SCSI IO request 3588 * 3589 * This function maps the SCSI command's data and protection SGEs to 3590 * MPI request SGEs. If required, an additional 4K chain buffer is 3591 * used to send the SGEs.
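 *
 * For reference, the number of SGEs that fit in the main request
 * frame is computed as below before any chain buffer is needed:
 *
 *	sges_in_segment = (mrioc->facts.op_req_sz -
 *	    offsetof(struct mpi3_scsi_io_request, sgl)) /
 *	    sizeof(struct mpi3_sge_common);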
3592 *
3593 * Return: 0 on success, -ENOMEM on dma_map_sg failure or an unsupported SGE count, -1 if no free chain buffer is available
3594 */
3595 static int mpi3mr_prepare_sg_scmd(struct mpi3mr_ioc *mrioc,
3596 struct scsi_cmnd *scmd, struct mpi3_scsi_io_request *scsiio_req)
3597 {
3598 dma_addr_t chain_dma;
3599 struct scatterlist *sg_scmd;
3600 void *sg_local, *chain;
3601 u32 chain_length;
3602 int sges_left, chain_idx;
3603 u32 sges_in_segment;
3604 u8 simple_sgl_flags;
3605 u8 simple_sgl_flags_last;
3606 u8 last_chain_sgl_flags;
3607 struct chain_element *chain_req;
3608 struct scmd_priv *priv = NULL;
3609 u32 meta_sg = le32_to_cpu(scsiio_req->flags) &
3610 MPI3_SCSIIO_FLAGS_DMAOPERATION_HOST_PI;
3611
3612 priv = scsi_cmd_priv(scmd);
3613
3614 simple_sgl_flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_SIMPLE |
3615 MPI3_SGE_FLAGS_DLAS_SYSTEM;
3616 simple_sgl_flags_last = simple_sgl_flags |
3617 MPI3_SGE_FLAGS_END_OF_LIST;
3618 last_chain_sgl_flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_LAST_CHAIN |
3619 MPI3_SGE_FLAGS_DLAS_SYSTEM;
3620
3621 if (meta_sg)
3622 sg_local = &scsiio_req->sgl[MPI3_SCSIIO_METASGL_INDEX];
3623 else
3624 sg_local = &scsiio_req->sgl;
3625
3626 if (!scsiio_req->data_length && !meta_sg) {
3627 mpi3mr_build_zero_len_sge(sg_local);
3628 return 0;
3629 }
3630
3631 if (meta_sg) {
3632 sg_scmd = scsi_prot_sglist(scmd);
3633 sges_left = dma_map_sg(&mrioc->pdev->dev,
3634 scsi_prot_sglist(scmd),
3635 scsi_prot_sg_count(scmd),
3636 scmd->sc_data_direction);
3637 priv->meta_sg_valid = 1; /* To unmap meta sg DMA */
3638 } else {
3639 /*
3640 * Some firmware versions byte-swap the REPORT ZONES command
3641 * reply from ATA-ZAC devices by directly accessing the host
3642 * buffer. This does not respect the default command DMA
3643 * direction and causes IOMMU page faults on some architectures
3644 * with an IOMMU enforcing write mappings (e.g. AMD hosts).
3645 * Avoid such issues by making the REPORT ZONES buffer mapping
3646 * bi-directional.
3647 */ 3648 if (scmd->cmnd[0] == ZBC_IN && scmd->cmnd[1] == ZI_REPORT_ZONES) 3649 scmd->sc_data_direction = DMA_BIDIRECTIONAL; 3650 sg_scmd = scsi_sglist(scmd); 3651 sges_left = scsi_dma_map(scmd); 3652 } 3653 3654 if (sges_left < 0) { 3655 sdev_printk(KERN_ERR, scmd->device, 3656 "scsi_dma_map failed: request for %d bytes!\n", 3657 scsi_bufflen(scmd)); 3658 return -ENOMEM; 3659 } 3660 if (sges_left > mrioc->max_sgl_entries) { 3661 sdev_printk(KERN_ERR, scmd->device, 3662 "scsi_dma_map returned unsupported sge count %d!\n", 3663 sges_left); 3664 return -ENOMEM; 3665 } 3666 3667 sges_in_segment = (mrioc->facts.op_req_sz - 3668 offsetof(struct mpi3_scsi_io_request, sgl)) / sizeof(struct mpi3_sge_common); 3669 3670 if (scsiio_req->sgl[0].eedp.flags == 3671 MPI3_SGE_FLAGS_ELEMENT_TYPE_EXTENDED && !meta_sg) { 3672 sg_local += sizeof(struct mpi3_sge_common); 3673 sges_in_segment--; 3674 /* Reserve 1st segment (scsiio_req->sgl[0]) for eedp */ 3675 } 3676 3677 if (scsiio_req->msg_flags == 3678 MPI3_SCSIIO_MSGFLAGS_METASGL_VALID && !meta_sg) { 3679 sges_in_segment--; 3680 /* Reserve last segment (scsiio_req->sgl[3]) for meta sg */ 3681 } 3682 3683 if (meta_sg) 3684 sges_in_segment = 1; 3685 3686 if (sges_left <= sges_in_segment) 3687 goto fill_in_last_segment; 3688 3689 /* fill in main message segment when there is a chain following */ 3690 while (sges_in_segment > 1) { 3691 mpi3mr_add_sg_single(sg_local, simple_sgl_flags, 3692 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd)); 3693 sg_scmd = sg_next(sg_scmd); 3694 sg_local += sizeof(struct mpi3_sge_common); 3695 sges_left--; 3696 sges_in_segment--; 3697 } 3698 3699 chain_idx = mpi3mr_get_chain_idx(mrioc); 3700 if (chain_idx < 0) 3701 return -1; 3702 chain_req = &mrioc->chain_sgl_list[chain_idx]; 3703 if (meta_sg) 3704 priv->meta_chain_idx = chain_idx; 3705 else 3706 priv->chain_idx = chain_idx; 3707 3708 chain = chain_req->addr; 3709 chain_dma = chain_req->dma_addr; 3710 sges_in_segment = sges_left; 3711 chain_length = sges_in_segment * sizeof(struct mpi3_sge_common); 3712 3713 mpi3mr_add_sg_single(sg_local, last_chain_sgl_flags, 3714 chain_length, chain_dma); 3715 3716 sg_local = chain; 3717 3718 fill_in_last_segment: 3719 while (sges_left > 0) { 3720 if (sges_left == 1) 3721 mpi3mr_add_sg_single(sg_local, 3722 simple_sgl_flags_last, sg_dma_len(sg_scmd), 3723 sg_dma_address(sg_scmd)); 3724 else 3725 mpi3mr_add_sg_single(sg_local, simple_sgl_flags, 3726 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd)); 3727 sg_scmd = sg_next(sg_scmd); 3728 sg_local += sizeof(struct mpi3_sge_common); 3729 sges_left--; 3730 } 3731 3732 return 0; 3733 } 3734 3735 /** 3736 * mpi3mr_build_sg_scmd - build scatter gather list for SCSI IO 3737 * @mrioc: Adapter instance reference 3738 * @scmd: SCSI command reference 3739 * @scsiio_req: MPI3 SCSI IO request 3740 * 3741 * This function calls mpi3mr_prepare_sg_scmd for constructing 3742 * both data SGEs and protection information SGEs in the MPI 3743 * format from the SCSI Command as appropriate . 3744 * 3745 * Return: return value of mpi3mr_prepare_sg_scmd. 
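 *
 * Callers treat a non-zero return as a transient failure; for example,
 * the I/O path further below does (condensed from mpi3mr_qcmd()):
 *
 *   if (mpi3mr_build_sg_scmd(mrioc, scmd, scsiio_req)) {
 *       mpi3mr_clear_scmd_priv(mrioc, scmd);
 *       return SCSI_MLQUEUE_HOST_BUSY;
 *   }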
3746 */
3747 static int mpi3mr_build_sg_scmd(struct mpi3mr_ioc *mrioc,
3748 struct scsi_cmnd *scmd, struct mpi3_scsi_io_request *scsiio_req)
3749 {
3750 int ret;
3751
3752 ret = mpi3mr_prepare_sg_scmd(mrioc, scmd, scsiio_req);
3753 if (ret)
3754 return ret;
3755
3756 if (scsiio_req->msg_flags == MPI3_SCSIIO_MSGFLAGS_METASGL_VALID) {
3757 /* There is a valid meta sg */
3758 scsiio_req->flags |=
3759 cpu_to_le32(MPI3_SCSIIO_FLAGS_DMAOPERATION_HOST_PI);
3760 ret = mpi3mr_prepare_sg_scmd(mrioc, scmd, scsiio_req);
3761 }
3762
3763 return ret;
3764 }
3765
3766 /**
3767 * mpi3mr_tm_response_name - get TM response as a string
3768 * @resp_code: TM response code
3769 *
3770 * Convert a known task management response code to a readable
3771 * string.
3772 *
3773 * Return: response code string.
3774 */
3775 static const char *mpi3mr_tm_response_name(u8 resp_code)
3776 {
3777 char *desc;
3778
3779 switch (resp_code) {
3780 case MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE:
3781 desc = "task management request completed";
3782 break;
3783 case MPI3_SCSITASKMGMT_RSPCODE_INVALID_FRAME:
3784 desc = "invalid frame";
3785 break;
3786 case MPI3_SCSITASKMGMT_RSPCODE_TM_FUNCTION_NOT_SUPPORTED:
3787 desc = "task management request not supported";
3788 break;
3789 case MPI3_SCSITASKMGMT_RSPCODE_TM_FAILED:
3790 desc = "task management request failed";
3791 break;
3792 case MPI3_SCSITASKMGMT_RSPCODE_TM_SUCCEEDED:
3793 desc = "task management request succeeded";
3794 break;
3795 case MPI3_SCSITASKMGMT_RSPCODE_TM_INVALID_LUN:
3796 desc = "invalid LUN";
3797 break;
3798 case MPI3_SCSITASKMGMT_RSPCODE_TM_OVERLAPPED_TAG:
3799 desc = "overlapped tag attempted";
3800 break;
3801 case MPI3_SCSITASKMGMT_RSPCODE_IO_QUEUED_ON_IOC:
3802 desc = "task queued, however not sent to target";
3803 break;
3804 case MPI3_SCSITASKMGMT_RSPCODE_TM_NVME_DENIED:
3805 desc = "task management request denied by NVMe device";
3806 break;
3807 default:
3808 desc = "unknown";
3809 break;
3810 }
3811
3812 return desc;
3813 }
3814
/**
 * mpi3mr_poll_pend_io_completions - poll operational reply queues
 * @mrioc: Adapter instance reference
 *
 * Process each operational reply queue once so that pending I/O
 * completions are consumed; used with interrupts disabled after a
 * task management request completes.
 *
 * Return: Nothing.
 */
3815 inline void mpi3mr_poll_pend_io_completions(struct mpi3mr_ioc *mrioc)
3816 {
3817 int i;
3818 int num_of_reply_queues =
3819 mrioc->num_op_reply_q + mrioc->op_reply_q_offset;
3820
3821 for (i = mrioc->op_reply_q_offset; i < num_of_reply_queues; i++)
3822 mpi3mr_process_op_reply_q(mrioc,
3823 mrioc->intr_info[i].op_reply_q);
3824 }
3825
3826 /**
3827 * mpi3mr_issue_tm - Issue Task Management request
3828 * @mrioc: Adapter instance reference
3829 * @tm_type: Task Management type
3830 * @handle: Device handle
3831 * @lun: lun ID
3832 * @htag: Host tag of the TM request
3833 * @timeout: TM timeout value
3834 * @drv_cmd: Internal command tracker
3835 * @resp_code: Response code place holder
3836 * @scmd: SCSI command
3837 *
3838 * Issues a Task Management Request to the controller for a
3839 * specified target, lun and command, waits for its completion,
3840 * and checks the TM response. Recovers from a TM timeout by
3841 * issuing a controller reset.
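 *
 * For example, the LUN reset error handler further below issues
 * (condensed from mpi3mr_eh_dev_reset()):
 *
 *   u8 resp_code = 0;
 *
 *   ret = mpi3mr_issue_tm(mrioc,
 *       MPI3_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, dev_handle,
 *       sdev_priv_data->lun_id, MPI3MR_HOSTTAG_BLK_TMS,
 *       MPI3MR_RESETTM_TIMEOUT, &mrioc->host_tm_cmds, &resp_code, scmd);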
3842 * 3843 * Return: 0 on success, non-zero on errors 3844 */ 3845 int mpi3mr_issue_tm(struct mpi3mr_ioc *mrioc, u8 tm_type, 3846 u16 handle, uint lun, u16 htag, ulong timeout, 3847 struct mpi3mr_drv_cmd *drv_cmd, 3848 u8 *resp_code, struct scsi_cmnd *scmd) 3849 { 3850 struct mpi3_scsi_task_mgmt_request tm_req; 3851 struct mpi3_scsi_task_mgmt_reply *tm_reply = NULL; 3852 int retval = 0; 3853 struct mpi3mr_tgt_dev *tgtdev = NULL; 3854 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL; 3855 struct scmd_priv *cmd_priv = NULL; 3856 struct scsi_device *sdev = NULL; 3857 struct mpi3mr_sdev_priv_data *sdev_priv_data = NULL; 3858 3859 ioc_info(mrioc, "%s :Issue TM: TM type (0x%x) for devhandle 0x%04x\n", 3860 __func__, tm_type, handle); 3861 if (mrioc->unrecoverable) { 3862 retval = -1; 3863 ioc_err(mrioc, "%s :Issue TM: Unrecoverable controller\n", 3864 __func__); 3865 goto out; 3866 } 3867 3868 memset(&tm_req, 0, sizeof(tm_req)); 3869 mutex_lock(&drv_cmd->mutex); 3870 if (drv_cmd->state & MPI3MR_CMD_PENDING) { 3871 retval = -1; 3872 ioc_err(mrioc, "%s :Issue TM: Command is in use\n", __func__); 3873 mutex_unlock(&drv_cmd->mutex); 3874 goto out; 3875 } 3876 if (mrioc->reset_in_progress) { 3877 retval = -1; 3878 ioc_err(mrioc, "%s :Issue TM: Reset in progress\n", __func__); 3879 mutex_unlock(&drv_cmd->mutex); 3880 goto out; 3881 } 3882 if (mrioc->block_on_pci_err) { 3883 retval = -1; 3884 dprint_tm(mrioc, "sending task management failed due to\n" 3885 "pci error recovery in progress\n"); 3886 mutex_unlock(&drv_cmd->mutex); 3887 goto out; 3888 } 3889 3890 drv_cmd->state = MPI3MR_CMD_PENDING; 3891 drv_cmd->is_waiting = 1; 3892 drv_cmd->callback = NULL; 3893 tm_req.dev_handle = cpu_to_le16(handle); 3894 tm_req.task_type = tm_type; 3895 tm_req.host_tag = cpu_to_le16(htag); 3896 3897 int_to_scsilun(lun, (struct scsi_lun *)tm_req.lun); 3898 tm_req.function = MPI3_FUNCTION_SCSI_TASK_MGMT; 3899 3900 tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle); 3901 3902 if (scmd) { 3903 if (tm_type == MPI3_SCSITASKMGMT_TASKTYPE_ABORT_TASK) { 3904 cmd_priv = scsi_cmd_priv(scmd); 3905 if (!cmd_priv) 3906 goto out_unlock; 3907 3908 struct op_req_qinfo *op_req_q; 3909 3910 op_req_q = &mrioc->req_qinfo[cmd_priv->req_q_idx]; 3911 tm_req.task_host_tag = cpu_to_le16(cmd_priv->host_tag); 3912 tm_req.task_request_queue_id = 3913 cpu_to_le16(op_req_q->qid); 3914 } 3915 sdev = scmd->device; 3916 sdev_priv_data = sdev->hostdata; 3917 scsi_tgt_priv_data = ((sdev_priv_data) ? 3918 sdev_priv_data->tgt_priv_data : NULL); 3919 } else { 3920 if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata) 3921 scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *) 3922 tgtdev->starget->hostdata; 3923 } 3924 3925 if (scsi_tgt_priv_data) 3926 atomic_inc(&scsi_tgt_priv_data->block_io); 3927 3928 if (tgtdev) { 3929 if (tgtdev->dev_type == MPI3_DEVICE_DEVFORM_PCIE) 3930 timeout = cmd_priv ? tgtdev->dev_spec.pcie_inf.abort_to 3931 : tgtdev->dev_spec.pcie_inf.reset_to; 3932 else if (tgtdev->dev_type == MPI3_DEVICE_DEVFORM_VD) 3933 timeout = cmd_priv ? 
tgtdev->dev_spec.vd_inf.abort_to 3934 : tgtdev->dev_spec.vd_inf.reset_to; 3935 } 3936 3937 init_completion(&drv_cmd->done); 3938 retval = mpi3mr_admin_request_post(mrioc, &tm_req, sizeof(tm_req), 1); 3939 if (retval) { 3940 ioc_err(mrioc, "%s :Issue TM: Admin Post failed\n", __func__); 3941 goto out_unlock; 3942 } 3943 wait_for_completion_timeout(&drv_cmd->done, (timeout * HZ)); 3944 3945 if (!(drv_cmd->state & MPI3MR_CMD_COMPLETE)) { 3946 drv_cmd->is_waiting = 0; 3947 retval = -1; 3948 if (!(drv_cmd->state & MPI3MR_CMD_RESET)) { 3949 dprint_tm(mrioc, 3950 "task management request timed out after %ld seconds\n", 3951 timeout); 3952 if (mrioc->logging_level & MPI3_DEBUG_TM) 3953 dprint_dump_req(&tm_req, sizeof(tm_req)/4); 3954 mpi3mr_soft_reset_handler(mrioc, 3955 MPI3MR_RESET_FROM_TM_TIMEOUT, 1); 3956 } 3957 goto out_unlock; 3958 } 3959 3960 if (!(drv_cmd->state & MPI3MR_CMD_REPLY_VALID)) { 3961 dprint_tm(mrioc, "invalid task management reply message\n"); 3962 retval = -1; 3963 goto out_unlock; 3964 } 3965 3966 tm_reply = (struct mpi3_scsi_task_mgmt_reply *)drv_cmd->reply; 3967 3968 switch (drv_cmd->ioc_status) { 3969 case MPI3_IOCSTATUS_SUCCESS: 3970 *resp_code = le32_to_cpu(tm_reply->response_data) & 3971 MPI3MR_RI_MASK_RESPCODE; 3972 break; 3973 case MPI3_IOCSTATUS_SCSI_IOC_TERMINATED: 3974 *resp_code = MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE; 3975 break; 3976 default: 3977 dprint_tm(mrioc, 3978 "task management request to handle(0x%04x) is failed with ioc_status(0x%04x) log_info(0x%08x)\n", 3979 handle, drv_cmd->ioc_status, drv_cmd->ioc_loginfo); 3980 retval = -1; 3981 goto out_unlock; 3982 } 3983 3984 switch (*resp_code) { 3985 case MPI3_SCSITASKMGMT_RSPCODE_TM_SUCCEEDED: 3986 case MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE: 3987 break; 3988 case MPI3_SCSITASKMGMT_RSPCODE_IO_QUEUED_ON_IOC: 3989 if (tm_type != MPI3_SCSITASKMGMT_TASKTYPE_QUERY_TASK) 3990 retval = -1; 3991 break; 3992 default: 3993 retval = -1; 3994 break; 3995 } 3996 3997 dprint_tm(mrioc, 3998 "task management request type(%d) completed for handle(0x%04x) with ioc_status(0x%04x), log_info(0x%08x), termination_count(%d), response:%s(0x%x)\n", 3999 tm_type, handle, drv_cmd->ioc_status, drv_cmd->ioc_loginfo, 4000 le32_to_cpu(tm_reply->termination_count), 4001 mpi3mr_tm_response_name(*resp_code), *resp_code); 4002 4003 if (!retval) { 4004 mpi3mr_ioc_disable_intr(mrioc); 4005 mpi3mr_poll_pend_io_completions(mrioc); 4006 mpi3mr_ioc_enable_intr(mrioc); 4007 mpi3mr_poll_pend_io_completions(mrioc); 4008 mpi3mr_process_admin_reply_q(mrioc); 4009 } 4010 switch (tm_type) { 4011 case MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET: 4012 if (!scsi_tgt_priv_data) 4013 break; 4014 scsi_tgt_priv_data->pend_count = 0; 4015 blk_mq_tagset_busy_iter(&mrioc->shost->tag_set, 4016 mpi3mr_count_tgt_pending, 4017 (void *)scsi_tgt_priv_data->starget); 4018 break; 4019 case MPI3_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET: 4020 if (!sdev_priv_data) 4021 break; 4022 sdev_priv_data->pend_count = 0; 4023 blk_mq_tagset_busy_iter(&mrioc->shost->tag_set, 4024 mpi3mr_count_dev_pending, (void *)sdev); 4025 break; 4026 default: 4027 break; 4028 } 4029 mpi3mr_global_trigger(mrioc, 4030 MPI3_DRIVER2_GLOBALTRIGGER_TASK_MANAGEMENT_ENABLED); 4031 4032 out_unlock: 4033 drv_cmd->state = MPI3MR_CMD_NOTUSED; 4034 mutex_unlock(&drv_cmd->mutex); 4035 if (scsi_tgt_priv_data) 4036 atomic_dec_if_positive(&scsi_tgt_priv_data->block_io); 4037 if (tgtdev) 4038 mpi3mr_tgtdev_put(tgtdev); 4039 out: 4040 return retval; 4041 } 4042 4043 /** 4044 * mpi3mr_bios_param - BIOS param callback 4045 * 
@sdev: SCSI device reference
4046 * @unused: gendisk reference
4047 * @capacity: Capacity in logical sectors
4048 * @params: Parameter array
4049 *
4050 * Set the BIOS parameters with heads/sectors/cylinders.
4051 *
4052 * Return: 0 always
4053 */
4054 static int mpi3mr_bios_param(struct scsi_device *sdev,
4055 struct gendisk *unused, sector_t capacity, int params[])
4056 {
4057 int heads;
4058 int sectors;
4059 sector_t cylinders;
4060 ulong dummy;
4061
4062 heads = 64;
4063 sectors = 32;
4064
4065 dummy = heads * sectors;
4066 cylinders = capacity;
4067 sector_div(cylinders, dummy);
4068
4069 if ((ulong)capacity >= 0x200000) {
4070 heads = 255;
4071 sectors = 63;
4072 dummy = heads * sectors;
4073 cylinders = capacity;
4074 sector_div(cylinders, dummy);
4075 }
4076
4077 params[0] = heads;
4078 params[1] = sectors;
4079 params[2] = cylinders;
4080 return 0;
4081 }
4082
4083 /**
4084 * mpi3mr_map_queues - Map queues callback handler
4085 * @shost: SCSI host reference
4086 *
4087 * Maps default and poll queues.
4088 *
4089 * Return: Nothing.
4090 */
4091 static void mpi3mr_map_queues(struct Scsi_Host *shost)
4092 {
4093 struct mpi3mr_ioc *mrioc = shost_priv(shost);
4094 int i, qoff, offset;
4095 struct blk_mq_queue_map *map = NULL;
4096
4097 offset = mrioc->op_reply_q_offset;
4098
4099 for (i = 0, qoff = 0; i < HCTX_MAX_TYPES; i++) {
4100 map = &shost->tag_set.map[i];
4101
4102 map->nr_queues = 0;
4103
4104 if (i == HCTX_TYPE_DEFAULT)
4105 map->nr_queues = mrioc->default_qcount;
4106 else if (i == HCTX_TYPE_POLL)
4107 map->nr_queues = mrioc->active_poll_qcount;
4108
4109 if (!map->nr_queues) {
4110 BUG_ON(i == HCTX_TYPE_DEFAULT);
4111 continue;
4112 }
4113
4114 /*
4115 * The poll queue(s) don't have an IRQ (and hence IRQ
4116 * affinity), so use the regular blk-mq cpu mapping
4117 */
4118 map->queue_offset = qoff;
4119 if (i != HCTX_TYPE_POLL)
4120 blk_mq_map_hw_queues(map, &mrioc->pdev->dev, offset);
4121 else
4122 blk_mq_map_queues(map);
4123
4124 qoff += map->nr_queues;
4125 offset += map->nr_queues;
4126 }
4127 }
4128
4129 /**
4130 * mpi3mr_get_fw_pending_ios - Calculate pending I/O count
4131 * @mrioc: Adapter instance reference
4132 *
4133 * Calculate the pending I/Os for the controller and return.
4134 *
4135 * Return: Number of pending I/Os
4136 */
4137 static inline int mpi3mr_get_fw_pending_ios(struct mpi3mr_ioc *mrioc)
4138 {
4139 u16 i;
4140 uint pend_ios = 0;
4141
4142 for (i = 0; i < mrioc->num_op_reply_q; i++)
4143 pend_ios += atomic_read(&mrioc->op_reply_qinfo[i].pend_ios);
4144 return pend_ios;
4145 }
4146
4147 /**
4148 * mpi3mr_print_pending_host_io - print pending I/Os
4149 * @mrioc: Adapter instance reference
4150 *
4151 * Print the number of pending I/Os and the details of each I/O
4152 * prior to reset, for debug purposes.
4153 *
4154 * Return: Nothing
4155 */
4156 static void mpi3mr_print_pending_host_io(struct mpi3mr_ioc *mrioc)
4157 {
4158 struct Scsi_Host *shost = mrioc->shost;
4159
4160 ioc_info(mrioc, "%s :Pending commands prior to reset: %d\n",
4161 __func__, mpi3mr_get_fw_pending_ios(mrioc));
4162 blk_mq_tagset_busy_iter(&shost->tag_set,
4163 mpi3mr_print_scmd, (void *)mrioc);
4164 }
4165
4166 /**
4167 * mpi3mr_wait_for_host_io - block for I/Os to complete
4168 * @mrioc: Adapter instance reference
4169 * @timeout: time out in seconds
 *
4170 * Waits for pending I/Os for the given adapter to complete or
4171 * to hit the timeout.
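 *
 * For example, the bus reset handler further below drains RAID volume
 * I/O before deciding whether an adapter reset is needed (condensed
 * from mpi3mr_eh_bus_reset()):
 *
 *   mpi3mr_wait_for_host_io(mrioc, MPI3MR_RAID_ERRREC_RESET_TIMEOUT);
 *   if (!mpi3mr_get_fw_pending_ios(mrioc))
 *       retval = SUCCESS;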
4172 *
4173 * Return: Nothing
4174 */
4175 void mpi3mr_wait_for_host_io(struct mpi3mr_ioc *mrioc, u32 timeout)
4176 {
4177 enum mpi3mr_iocstate iocstate;
4178 int i = 0;
4179
4180 iocstate = mpi3mr_get_iocstate(mrioc);
4181 if (iocstate != MRIOC_STATE_READY)
4182 return;
4183
4184 if (!mpi3mr_get_fw_pending_ios(mrioc))
4185 return;
4186 ioc_info(mrioc,
4187 "%s :Waiting for %d seconds prior to reset for %d I/O\n",
4188 __func__, timeout, mpi3mr_get_fw_pending_ios(mrioc));
4189
4190 for (i = 0; i < timeout; i++) {
4191 if (!mpi3mr_get_fw_pending_ios(mrioc))
4192 break;
4193 iocstate = mpi3mr_get_iocstate(mrioc);
4194 if (iocstate != MRIOC_STATE_READY)
4195 break;
4196 msleep(1000);
4197 }
4198
4199 ioc_info(mrioc, "%s :Pending I/Os after wait is: %d\n", __func__,
4200 mpi3mr_get_fw_pending_ios(mrioc));
4201 }
4202
4203 /**
4204 * mpi3mr_setup_divert_ws - Setup Divert IO flag for write same
4205 * @mrioc: Adapter instance reference
4206 * @scmd: SCSI command reference
4207 * @scsiio_req: MPI3 SCSI IO request
4208 * @scsiio_flags: Pointer to MPI3 SCSI IO Flags
4209 * @wslen: write same max length
4210 *
4211 * Gets the unmap, ndob and number of blocks values from a write
4212 * same SCSI I/O and, based on these values, sets the divert I/O
4213 * flag and the reason for diverting the I/O to firmware.
4214 *
4215 * Return: Nothing
4216 */
4217 static inline void mpi3mr_setup_divert_ws(struct mpi3mr_ioc *mrioc,
4218 struct scsi_cmnd *scmd, struct mpi3_scsi_io_request *scsiio_req,
4219 u32 *scsiio_flags, u16 wslen)
4220 {
4221 u8 unmap = 0, ndob = 0;
4222 u8 opcode = scmd->cmnd[0];
4223 u32 num_blocks = 0;
4224 u16 sa = (scmd->cmnd[8] << 8) | (scmd->cmnd[9]);
4225
4226 if (opcode == WRITE_SAME_16) {
4227 unmap = scmd->cmnd[1] & 0x08;
4228 ndob = scmd->cmnd[1] & 0x01;
4229 num_blocks = get_unaligned_be32(scmd->cmnd + 10);
4230 } else if ((opcode == VARIABLE_LENGTH_CMD) && (sa == WRITE_SAME_32)) {
4231 unmap = scmd->cmnd[10] & 0x08;
4232 ndob = scmd->cmnd[10] & 0x01;
4233 num_blocks = get_unaligned_be32(scmd->cmnd + 28);
4234 } else
4235 return;
4236
4237 if ((unmap) && (ndob) && (num_blocks > wslen)) {
4238 scsiio_req->msg_flags |=
4239 MPI3_SCSIIO_MSGFLAGS_DIVERT_TO_FIRMWARE;
4240 *scsiio_flags |=
4241 MPI3_SCSIIO_FLAGS_DIVERT_REASON_WRITE_SAME_TOO_LARGE;
4242 }
4243 }
4244
4245 /**
4246 * mpi3mr_eh_host_reset - Host reset error handling callback
4247 * @scmd: SCSI command reference
4248 *
4249 * Issue a controller reset.
4250 *
4251 * Return: SUCCESS on successful reset, else FAILED
4252 */
4253 static int mpi3mr_eh_host_reset(struct scsi_cmnd *scmd)
4254 {
4255 struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host);
4256 int retval = FAILED, ret;
4257
4258 ret = mpi3mr_soft_reset_handler(mrioc,
4259 MPI3MR_RESET_FROM_EH_HOS, 1);
4260 if (ret)
4261 goto out;
4262
4263 retval = SUCCESS;
4264 out:
4265 sdev_printk(KERN_INFO, scmd->device,
4266 "Host reset is %s for scmd(%p)\n",
4267 ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
4268
4269 return retval;
4270 }
4271
4272 /**
4273 * mpi3mr_eh_bus_reset - Bus reset error handling callback
4274 * @scmd: SCSI command reference
4275 *
4276 * Checks whether pending I/Os are present for the RAID volume;
4277 * if not, there's no need to reset the adapter.
4278 *
4279 * Return: SUCCESS on successful reset, else FAILED
4280 */
4281 static int mpi3mr_eh_bus_reset(struct scsi_cmnd *scmd)
4282 {
4283 struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host);
4284 struct mpi3mr_stgt_priv_data *stgt_priv_data;
4285 struct mpi3mr_sdev_priv_data *sdev_priv_data;
4286 u8 dev_type = MPI3_DEVICE_DEVFORM_VD;
4287 int retval = FAILED;
4288 unsigned int timeout = MPI3MR_RESET_TIMEOUT;
4289
4290 sdev_priv_data = scmd->device->hostdata;
4291 if (sdev_priv_data && sdev_priv_data->tgt_priv_data) {
4292 stgt_priv_data = sdev_priv_data->tgt_priv_data;
4293 dev_type = stgt_priv_data->dev_type;
4294 }
4295
4296 if (dev_type == MPI3_DEVICE_DEVFORM_VD) {
4297 mpi3mr_wait_for_host_io(mrioc,
4298 MPI3MR_RAID_ERRREC_RESET_TIMEOUT);
4299 if (!mpi3mr_get_fw_pending_ios(mrioc)) {
4300 while (mrioc->reset_in_progress ||
4301 mrioc->prepare_for_reset ||
4302 mrioc->block_on_pci_err) {
4303 ssleep(1);
4304 if (!timeout--) {
4305 retval = FAILED;
4306 goto out;
4307 }
4308 }
4309 retval = SUCCESS;
4310 goto out;
4311 }
4312 }
4313 if (retval == FAILED)
4314 mpi3mr_print_pending_host_io(mrioc);
4315
4316 out:
4317 sdev_printk(KERN_INFO, scmd->device,
4318 "Bus reset is %s for scmd(%p)\n",
4319 ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
4320 return retval;
4321 }
4322
4323 /**
4324 * mpi3mr_eh_target_reset - Target reset error handling callback
4325 * @scmd: SCSI command reference
4326 *
4327 * Issues Target reset Task Management, verifies the scmd is
4328 * terminated successfully, and returns status accordingly.
4329 *
4330 * Return: SUCCESS on successful termination of the scmd, else
4331 * FAILED
4332 */
4333 static int mpi3mr_eh_target_reset(struct scsi_cmnd *scmd)
4334 {
4335 struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host);
4336 struct mpi3mr_stgt_priv_data *stgt_priv_data;
4337 struct mpi3mr_sdev_priv_data *sdev_priv_data;
4338 u16 dev_handle;
4339 u8 resp_code = 0;
4340 int retval = FAILED, ret = 0;
4341
4342 sdev_printk(KERN_INFO, scmd->device,
4343 "Attempting Target Reset!
scmd(%p)\n", scmd);
4344 scsi_print_command(scmd);
4345
4346 sdev_priv_data = scmd->device->hostdata;
4347 if (!sdev_priv_data || !sdev_priv_data->tgt_priv_data) {
4348 sdev_printk(KERN_INFO, scmd->device,
4349 "SCSI device is not available\n");
4350 retval = SUCCESS;
4351 goto out;
4352 }
4353
4354 stgt_priv_data = sdev_priv_data->tgt_priv_data;
4355 dev_handle = stgt_priv_data->dev_handle;
4356 if (stgt_priv_data->dev_removed) {
4357 struct scmd_priv *cmd_priv = scsi_cmd_priv(scmd);
4358 sdev_printk(KERN_INFO, scmd->device,
4359 "%s:target(handle = 0x%04x) is removed, target reset is not issued\n",
4360 mrioc->name, dev_handle);
4361 if (!cmd_priv->in_lld_scope || cmd_priv->host_tag == MPI3MR_HOSTTAG_INVALID)
4362 retval = SUCCESS;
4363 else
4364 retval = FAILED;
4365 goto out;
4366 }
4367 sdev_printk(KERN_INFO, scmd->device,
4368 "Target Reset is issued to handle(0x%04x)\n",
4369 dev_handle);
4370
4371 ret = mpi3mr_issue_tm(mrioc,
4372 MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET, dev_handle,
4373 sdev_priv_data->lun_id, MPI3MR_HOSTTAG_BLK_TMS,
4374 MPI3MR_RESETTM_TIMEOUT, &mrioc->host_tm_cmds, &resp_code, scmd);
4375
4376 if (ret)
4377 goto out;
4378
4379 if (stgt_priv_data->pend_count) {
4380 sdev_printk(KERN_INFO, scmd->device,
4381 "%s: target has %d pending commands, target reset is failed\n",
4382 mrioc->name, stgt_priv_data->pend_count);
4383 goto out;
4384 }
4385
4386 retval = SUCCESS;
4387 out:
4388 sdev_printk(KERN_INFO, scmd->device,
4389 "%s: target reset is %s for scmd(%p)\n", mrioc->name,
4390 ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
4391
4392 return retval;
4393 }
4394
4395 /**
4396 * mpi3mr_eh_dev_reset - Device reset error handling callback
4397 * @scmd: SCSI command reference
4398 *
4399 * Issues lun reset Task Management, verifies the scmd is
4400 * terminated successfully, and returns status accordingly.
4401 *
4402 * Return: SUCCESS on successful termination of the scmd, else
4403 * FAILED
4404 */
4405 static int mpi3mr_eh_dev_reset(struct scsi_cmnd *scmd)
4406 {
4407 struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host);
4408 struct mpi3mr_stgt_priv_data *stgt_priv_data;
4409 struct mpi3mr_sdev_priv_data *sdev_priv_data;
4410 u16 dev_handle;
4411 u8 resp_code = 0;
4412 int retval = FAILED, ret = 0;
4413
4414 sdev_printk(KERN_INFO, scmd->device,
4415 "Attempting Device(lun) Reset!
scmd(%p)\n", scmd);
4416 scsi_print_command(scmd);
4417
4418 sdev_priv_data = scmd->device->hostdata;
4419 if (!sdev_priv_data || !sdev_priv_data->tgt_priv_data) {
4420 sdev_printk(KERN_INFO, scmd->device,
4421 "SCSI device is not available\n");
4422 retval = SUCCESS;
4423 goto out;
4424 }
4425
4426 stgt_priv_data = sdev_priv_data->tgt_priv_data;
4427 dev_handle = stgt_priv_data->dev_handle;
4428 if (stgt_priv_data->dev_removed) {
4429 struct scmd_priv *cmd_priv = scsi_cmd_priv(scmd);
4430 sdev_printk(KERN_INFO, scmd->device,
4431 "%s: device(handle = 0x%04x) is removed, device(LUN) reset is not issued\n",
4432 mrioc->name, dev_handle);
4433 if (!cmd_priv->in_lld_scope || cmd_priv->host_tag == MPI3MR_HOSTTAG_INVALID)
4434 retval = SUCCESS;
4435 else
4436 retval = FAILED;
4437 goto out;
4438 }
4439 sdev_printk(KERN_INFO, scmd->device,
4440 "Device(lun) Reset is issued to handle(0x%04x)\n", dev_handle);
4441
4442 ret = mpi3mr_issue_tm(mrioc,
4443 MPI3_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, dev_handle,
4444 sdev_priv_data->lun_id, MPI3MR_HOSTTAG_BLK_TMS,
4445 MPI3MR_RESETTM_TIMEOUT, &mrioc->host_tm_cmds, &resp_code, scmd);
4446
4447 if (ret)
4448 goto out;
4449
4450 if (sdev_priv_data->pend_count) {
4451 sdev_printk(KERN_INFO, scmd->device,
4452 "%s: device has %d pending commands, device(LUN) reset is failed\n",
4453 mrioc->name, sdev_priv_data->pend_count);
4454 goto out;
4455 }
4456 retval = SUCCESS;
4457 out:
4458 sdev_printk(KERN_INFO, scmd->device,
4459 "%s: device(LUN) reset is %s for scmd(%p)\n", mrioc->name,
4460 ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
4461
4462 return retval;
4463 }
4464
4465 /**
4466 * mpi3mr_eh_abort - Callback function for abort error handling
4467 * @scmd: SCSI command reference
4468 *
4469 * Issues Abort Task Management if the command is in LLD scope
4470 * and verifies if it is aborted successfully, and returns status
4471 * accordingly.
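 *
 * Abort success is judged by the command having left LLD scope once
 * the TM completes (condensed from the handler body below):
 *
 *   mpi3mr_issue_tm(mrioc, MPI3_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
 *       dev_handle, sdev_priv_data->lun_id, MPI3MR_HOSTTAG_BLK_TMS,
 *       timeout, &mrioc->host_tm_cmds, &resp_code, scmd);
 *   if (cmd_priv->in_lld_scope)
 *       retval = FAILED; // command was not terminated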
4472 *
4473 * Return: SUCCESS if the abort was successful, otherwise FAILED
4474 */
4475 static int mpi3mr_eh_abort(struct scsi_cmnd *scmd)
4476 {
4477 struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host);
4478 struct mpi3mr_stgt_priv_data *stgt_priv_data;
4479 struct mpi3mr_sdev_priv_data *sdev_priv_data;
4480 struct scmd_priv *cmd_priv;
4481 u16 dev_handle, timeout = MPI3MR_ABORTTM_TIMEOUT;
4482 u8 resp_code = 0;
4483 int retval = FAILED, ret = 0;
4484 struct request *rq = scsi_cmd_to_rq(scmd);
4485 unsigned long scmd_age_ms = jiffies_to_msecs(jiffies - scmd->jiffies_at_alloc);
/* jiffies_to_msecs() returns milliseconds, so convert with 1000, not HZ */
4486 unsigned long scmd_age_sec = scmd_age_ms / 1000;
4487
4488 sdev_printk(KERN_INFO, scmd->device,
4489 "%s: attempting abort task for scmd(%p)\n", mrioc->name, scmd);
4490
4491 sdev_printk(KERN_INFO, scmd->device,
4492 "%s: scmd(0x%p) is outstanding for %lus %lums, timeout %us, retries %d, allowed %d\n",
4493 mrioc->name, scmd, scmd_age_sec, scmd_age_ms % 1000, rq->timeout / HZ,
4494 scmd->retries, scmd->allowed);
4495
4496 scsi_print_command(scmd);
4497
4498 sdev_priv_data = scmd->device->hostdata;
4499 if (!sdev_priv_data || !sdev_priv_data->tgt_priv_data) {
4500 sdev_printk(KERN_INFO, scmd->device,
4501 "%s: Device not available, Skip issuing abort task\n",
4502 mrioc->name);
4503 retval = SUCCESS;
4504 goto out;
4505 }
4506
4507 stgt_priv_data = sdev_priv_data->tgt_priv_data;
4508 dev_handle = stgt_priv_data->dev_handle;
4509
4510 cmd_priv = scsi_cmd_priv(scmd);
4511 if (!cmd_priv->in_lld_scope ||
4512 cmd_priv->host_tag == MPI3MR_HOSTTAG_INVALID) {
4513 sdev_printk(KERN_INFO, scmd->device,
4514 "%s: scmd (0x%p) not in LLD scope, Skip issuing Abort Task\n",
4515 mrioc->name, scmd);
4516 retval = SUCCESS;
4517 goto out;
4518 }
4519
4520 if (stgt_priv_data->dev_removed) {
4521 sdev_printk(KERN_INFO, scmd->device,
4522 "%s: Device (handle = 0x%04x) removed, Skip issuing Abort Task\n",
4523 mrioc->name, dev_handle);
4524 retval = FAILED;
4525 goto out;
4526 }
4527
4528 ret = mpi3mr_issue_tm(mrioc, MPI3_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
4529 dev_handle, sdev_priv_data->lun_id, MPI3MR_HOSTTAG_BLK_TMS,
4530 timeout, &mrioc->host_tm_cmds, &resp_code, scmd);
4531
4532 if (ret)
4533 goto out;
4534
4535 if (cmd_priv->in_lld_scope) {
4536 sdev_printk(KERN_INFO, scmd->device,
4537 "%s: Abort task failed. scmd (0x%p) was not terminated\n",
4538 mrioc->name, scmd);
4539 goto out;
4540 }
4541
4542 retval = SUCCESS;
4543 out:
4544 sdev_printk(KERN_INFO, scmd->device,
4545 "%s: Abort Task %s for scmd (0x%p)\n", mrioc->name,
4546 ((retval == SUCCESS) ? "SUCCEEDED" : "FAILED"), scmd);
4547
4548 return retval;
4549 }
4550
4551 /**
4552 * mpi3mr_scan_start - Scan start callback handler
4553 * @shost: SCSI host reference
4554 *
4555 * Issue port enable request asynchronously.
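 *
 * Paired with mpi3mr_scan_finished(); the SCSI midlayer is expected
 * to poll that callback until the asynchronous port enable completes,
 * roughly:
 *
 *   mpi3mr_scan_start(shost);               // scan_started = 1
 *   while (!mpi3mr_scan_finished(shost, t))
 *       ;                                   // midlayer waits and retries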
4556 *
4557 * Return: Nothing
4558 */
4559 static void mpi3mr_scan_start(struct Scsi_Host *shost)
4560 {
4561 struct mpi3mr_ioc *mrioc = shost_priv(shost);
4562
4563 mrioc->scan_started = 1;
4564 ioc_info(mrioc, "%s :Issuing Port Enable\n", __func__);
4565 if (mpi3mr_issue_port_enable(mrioc, 1)) {
4566 ioc_err(mrioc, "%s :Issuing port enable failed\n", __func__);
4567 mrioc->scan_started = 0;
4568 mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR;
4569 }
4570 }
4571
4572 /**
4573 * mpi3mr_scan_finished - Scan finished callback handler
4574 * @shost: SCSI host reference
4575 * @time: Jiffies from the scan start
4576 *
4577 * Checks whether the port enable completed, timed out or
4578 * failed, and sets the scan status accordingly after taking any
4579 * required recovery.
4580 *
4581 * Return: 1 on scan finished or timed out, 0 for in progress
4582 */
4583 static int mpi3mr_scan_finished(struct Scsi_Host *shost,
4584 unsigned long time)
4585 {
4586 struct mpi3mr_ioc *mrioc = shost_priv(shost);
4587 u32 pe_timeout = MPI3MR_PORTENABLE_TIMEOUT;
4588 u32 ioc_status = readl(&mrioc->sysif_regs->ioc_status);
4589
4590 if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) ||
4591 (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) {
4592 ioc_err(mrioc, "port enable failed due to fault or reset\n");
4593 mpi3mr_print_fault_info(mrioc);
4594 mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR;
4595 mrioc->scan_started = 0;
4596 mrioc->init_cmds.is_waiting = 0;
4597 mrioc->init_cmds.callback = NULL;
4598 mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
4599 }
4600
4601 if (time >= (pe_timeout * HZ)) {
4602 ioc_err(mrioc, "port enable failed due to time out\n");
4603 mpi3mr_check_rh_fault_ioc(mrioc,
4604 MPI3MR_RESET_FROM_PE_TIMEOUT);
4605 mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR;
4606 mrioc->scan_started = 0;
4607 mrioc->init_cmds.is_waiting = 0;
4608 mrioc->init_cmds.callback = NULL;
4609 mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
4610 }
4611
4612 if (mrioc->scan_started)
4613 return 0;
4614
4615 if (mrioc->scan_failed) {
4616 ioc_err(mrioc,
4617 "port enable failed with status=0x%04x\n",
4618 mrioc->scan_failed);
4619 } else
4620 ioc_info(mrioc, "port enable is successfully completed\n");
4621
4622 mpi3mr_start_watchdog(mrioc);
4623 mrioc->is_driver_loading = 0;
4624 mrioc->stop_bsgs = 0;
4625 return 1;
4626 }
4627
4628 /**
4629 * mpi3mr_sdev_destroy - Slave destroy callback handler
4630 * @sdev: SCSI device reference
4631 *
4632 * Cleanup and free per device(lun) private data.
4633 *
4634 * Return: Nothing.
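 *
 * This undoes mpi3mr_sdev_init() further below; the pairing is
 * roughly:
 *
 *   mpi3mr_sdev_init(sdev);     // kzalloc() hostdata, num_luns++
 *   ...
 *   mpi3mr_sdev_destroy(sdev);  // num_luns--, kfree() hostdata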
4635 */
4636 static void mpi3mr_sdev_destroy(struct scsi_device *sdev)
4637 {
4638 struct Scsi_Host *shost;
4639 struct mpi3mr_ioc *mrioc;
4640 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data;
4641 struct mpi3mr_tgt_dev *tgt_dev = NULL;
4642 unsigned long flags;
4643 struct scsi_target *starget;
4644 struct sas_rphy *rphy = NULL;
4645
4646 if (!sdev->hostdata)
4647 return;
4648
4649 starget = scsi_target(sdev);
4650 shost = dev_to_shost(&starget->dev);
4651 mrioc = shost_priv(shost);
4652 scsi_tgt_priv_data = starget->hostdata;
4653
4654 scsi_tgt_priv_data->num_luns--;
4655
4656 spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
4657 if (starget->channel == mrioc->scsi_device_channel)
4658 tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id);
4659 else if (mrioc->sas_transport_enabled && !starget->channel) {
4660 rphy = dev_to_rphy(starget->dev.parent);
4661 tgt_dev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc,
4662 rphy->identify.sas_address, rphy);
4663 }
4664
4665 if (tgt_dev && (!scsi_tgt_priv_data->num_luns))
4666 tgt_dev->starget = NULL;
4667 if (tgt_dev)
4668 mpi3mr_tgtdev_put(tgt_dev);
4669 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
4670
4671 kfree(sdev->hostdata);
4672 sdev->hostdata = NULL;
4673 }
4674
4675 /**
4676 * mpi3mr_target_destroy - Target destroy callback handler
4677 * @starget: SCSI target reference
4678 *
4679 * Cleanup and free per target private data.
4680 *
4681 * Return: Nothing.
4682 */
4683 static void mpi3mr_target_destroy(struct scsi_target *starget)
4684 {
4685 struct Scsi_Host *shost;
4686 struct mpi3mr_ioc *mrioc;
4687 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data;
4688 struct mpi3mr_tgt_dev *tgt_dev;
4689 unsigned long flags;
4690
4691 if (!starget->hostdata)
4692 return;
4693
4694 shost = dev_to_shost(&starget->dev);
4695 mrioc = shost_priv(shost);
4696 scsi_tgt_priv_data = starget->hostdata;
4697
4698 spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
4699 tgt_dev = __mpi3mr_get_tgtdev_from_tgtpriv(mrioc, scsi_tgt_priv_data);
4700 if (tgt_dev && (tgt_dev->starget == starget) &&
4701 (tgt_dev->perst_id == starget->id))
4702 tgt_dev->starget = NULL;
4703 if (tgt_dev) {
4704 scsi_tgt_priv_data->tgt_dev = NULL;
4705 scsi_tgt_priv_data->perst_id = 0;
/* drop both the lookup reference taken above and the reference that was held through scsi_tgt_priv_data->tgt_dev */
4706 mpi3mr_tgtdev_put(tgt_dev);
4707 mpi3mr_tgtdev_put(tgt_dev);
4708 }
4709 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
4710
4711 kfree(starget->hostdata);
4712 starget->hostdata = NULL;
4713 }
4714
4715 /**
4716 * mpi3mr_sdev_configure - Slave configure callback handler
4717 * @sdev: SCSI device reference
4718 * @lim: queue limits
4719 *
4720 * Configure queue depth, max hardware sectors and virt boundary
4721 * as required.
4722 *
4723 * Return: 0 on success, -ENXIO if the target device is not found.
4724 */
4725 static int mpi3mr_sdev_configure(struct scsi_device *sdev,
4726 struct queue_limits *lim)
4727 {
4728 struct scsi_target *starget;
4729 struct Scsi_Host *shost;
4730 struct mpi3mr_ioc *mrioc;
4731 struct mpi3mr_tgt_dev *tgt_dev = NULL;
4732 unsigned long flags;
4733 int retval = 0;
4734 struct sas_rphy *rphy = NULL;
4735
4736 starget = scsi_target(sdev);
4737 shost = dev_to_shost(&starget->dev);
4738 mrioc = shost_priv(shost);
4739
4740 spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
4741 if (starget->channel == mrioc->scsi_device_channel)
4742 tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id);
4743 else if (mrioc->sas_transport_enabled && !starget->channel) {
4744 rphy = dev_to_rphy(starget->dev.parent);
4745 tgt_dev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc,
4746 rphy->identify.sas_address, rphy);
4747 }
4748 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
4749 if (!tgt_dev)
4750 return -ENXIO;
4751
4752 mpi3mr_change_queue_depth(sdev, tgt_dev->q_depth);
4753
4754 sdev->eh_timeout = MPI3MR_EH_SCMD_TIMEOUT;
4755 blk_queue_rq_timeout(sdev->request_queue, MPI3MR_SCMD_TIMEOUT);
4756
4757 mpi3mr_configure_tgt_dev(tgt_dev, lim);
4758 mpi3mr_tgtdev_put(tgt_dev);
4759 return retval;
4760 }
4761
4762 /**
4763 * mpi3mr_sdev_init - Slave alloc callback handler
4764 * @sdev: SCSI device reference
4765 *
4766 * Allocate per device(lun) private data and initialize it.
4767 *
4768 * Return: 0 on success, -ENOMEM on memory allocation failure, -ENXIO if the target device is not found.
4769 */
4770 static int mpi3mr_sdev_init(struct scsi_device *sdev)
4771 {
4772 struct Scsi_Host *shost;
4773 struct mpi3mr_ioc *mrioc;
4774 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data;
4775 struct mpi3mr_tgt_dev *tgt_dev = NULL;
4776 struct mpi3mr_sdev_priv_data *scsi_dev_priv_data;
4777 unsigned long flags;
4778 struct scsi_target *starget;
4779 int retval = 0;
4780 struct sas_rphy *rphy = NULL;
4781
4782 starget = scsi_target(sdev);
4783 shost = dev_to_shost(&starget->dev);
4784 mrioc = shost_priv(shost);
4785 scsi_tgt_priv_data = starget->hostdata;
4786
4787 spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
4788
4789 if (starget->channel == mrioc->scsi_device_channel)
4790 tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id);
4791 else if (mrioc->sas_transport_enabled && !starget->channel) {
4792 rphy = dev_to_rphy(starget->dev.parent);
4793 tgt_dev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc,
4794 rphy->identify.sas_address, rphy);
4795 }
4796
4797 if (tgt_dev) {
4798 if (tgt_dev->starget == NULL)
4799 tgt_dev->starget = starget;
4800 mpi3mr_tgtdev_put(tgt_dev);
4801 retval = 0;
4802 } else {
4803 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
4804 return -ENXIO;
4805 }
4806
4807 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
4808
4809 scsi_dev_priv_data = kzalloc(sizeof(*scsi_dev_priv_data), GFP_KERNEL);
4810 if (!scsi_dev_priv_data)
4811 return -ENOMEM;
4812
4813 scsi_dev_priv_data->lun_id = sdev->lun;
4814 scsi_dev_priv_data->tgt_priv_data = scsi_tgt_priv_data;
4815 sdev->hostdata = scsi_dev_priv_data;
4816
4817 scsi_tgt_priv_data->num_luns++;
4818
4819 return retval;
4820 }
4821
4822 /**
4823 * mpi3mr_target_alloc - Target alloc callback handler
4824 * @starget: SCSI target reference
4825 *
4826 * Allocate per target private data and initialize it.
4827 *
4828 * Return: 0 on success, -ENOMEM on memory allocation failure, -ENXIO if no matching target device is found.
4829 */
4830 static int mpi3mr_target_alloc(struct scsi_target *starget)
4831 {
4832 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4833 struct mpi3mr_ioc *mrioc = shost_priv(shost);
4834 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data;
4835 struct mpi3mr_tgt_dev *tgt_dev;
4836 unsigned long flags;
4837 int retval = 0;
4838 struct sas_rphy *rphy = NULL;
4839
4840 scsi_tgt_priv_data = kzalloc(sizeof(*scsi_tgt_priv_data), GFP_KERNEL);
4841 if (!scsi_tgt_priv_data)
4842 return -ENOMEM;
4843
4844 starget->hostdata = scsi_tgt_priv_data;
4845
4846 spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
4847 if (starget->channel == mrioc->scsi_device_channel) {
4848 tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id);
4849 if (tgt_dev && !tgt_dev->is_hidden && tgt_dev->non_stl) {
4850 scsi_tgt_priv_data->starget = starget;
4851 scsi_tgt_priv_data->dev_handle = tgt_dev->dev_handle;
4852 scsi_tgt_priv_data->perst_id = tgt_dev->perst_id;
4853 scsi_tgt_priv_data->dev_type = tgt_dev->dev_type;
4854 scsi_tgt_priv_data->tgt_dev = tgt_dev;
4855 tgt_dev->starget = starget;
4856 atomic_set(&scsi_tgt_priv_data->block_io, 0);
4857 retval = 0;
4858 if ((tgt_dev->dev_type == MPI3_DEVICE_DEVFORM_PCIE) &&
4859 ((tgt_dev->dev_spec.pcie_inf.dev_info &
4860 MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) ==
4861 MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) &&
4862 ((tgt_dev->dev_spec.pcie_inf.dev_info &
4863 MPI3_DEVICE0_PCIE_DEVICE_INFO_PITYPE_MASK) !=
4864 MPI3_DEVICE0_PCIE_DEVICE_INFO_PITYPE_0))
4865 scsi_tgt_priv_data->dev_nvme_dif = 1;
4866 scsi_tgt_priv_data->io_throttle_enabled = tgt_dev->io_throttle_enabled;
4867 scsi_tgt_priv_data->wslen = tgt_dev->wslen;
4868 if (tgt_dev->dev_type == MPI3_DEVICE_DEVFORM_VD)
4869 scsi_tgt_priv_data->throttle_group = tgt_dev->dev_spec.vd_inf.tg;
4870 } else
4871 retval = -ENXIO;
4872 } else if (mrioc->sas_transport_enabled && !starget->channel) {
4873 rphy = dev_to_rphy(starget->dev.parent);
4874 tgt_dev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc,
4875 rphy->identify.sas_address, rphy);
4876 if (tgt_dev && !tgt_dev->is_hidden && !tgt_dev->non_stl &&
4877 (tgt_dev->dev_type == MPI3_DEVICE_DEVFORM_SAS_SATA)) {
4878 scsi_tgt_priv_data->starget = starget;
4879 scsi_tgt_priv_data->dev_handle = tgt_dev->dev_handle;
4880 scsi_tgt_priv_data->perst_id = tgt_dev->perst_id;
4881 scsi_tgt_priv_data->dev_type = tgt_dev->dev_type;
4882 scsi_tgt_priv_data->tgt_dev = tgt_dev;
4883 scsi_tgt_priv_data->io_throttle_enabled = tgt_dev->io_throttle_enabled;
4884 scsi_tgt_priv_data->wslen = tgt_dev->wslen;
4885 tgt_dev->starget = starget;
4886 atomic_set(&scsi_tgt_priv_data->block_io, 0);
4887 retval = 0;
4888 } else
4889 retval = -ENXIO;
4890 }
4891 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
4892
4893 return retval;
4894 }
4895
4896 /**
4897 * mpi3mr_check_return_unmap - Whether an unmap is allowed
4898 * @mrioc: Adapter instance reference
4899 * @scmd: SCSI Command reference
4900 *
4901 * The controller hardware cannot handle certain unmap commands
4902 * for NVMe drives; this routine checks for those, completes such
4903 * commands with the proper status and sense data, and returns
4904 * true.
4905 *
4906 * Return: TRUE if the unmap is not allowed, FALSE otherwise.
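 *
 * The gating in the I/O path is roughly (condensed from mpi3mr_qcmd()
 * further below):
 *
 *   if ((scmd->cmnd[0] == UNMAP) && is_pcie_dev &&
 *       (mrioc->pdev->device == MPI3_MFGPAGE_DEVID_SAS4116) &&
 *       mpi3mr_check_return_unmap(mrioc, scmd))
 *       return 0; // scmd was already completed by the check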
4907 */ 4908 static bool mpi3mr_check_return_unmap(struct mpi3mr_ioc *mrioc, 4909 struct scsi_cmnd *scmd) 4910 { 4911 unsigned char *buf; 4912 u16 param_len, desc_len, trunc_param_len; 4913 4914 trunc_param_len = param_len = get_unaligned_be16(scmd->cmnd + 7); 4915 4916 if (mrioc->pdev->revision) { 4917 if ((param_len > 24) && ((param_len - 8) & 0xF)) { 4918 trunc_param_len -= (param_len - 8) & 0xF; 4919 dprint_scsi_command(mrioc, scmd, MPI3_DEBUG_SCSI_ERROR); 4920 dprint_scsi_err(mrioc, 4921 "truncating param_len from (%d) to (%d)\n", 4922 param_len, trunc_param_len); 4923 put_unaligned_be16(trunc_param_len, scmd->cmnd + 7); 4924 dprint_scsi_command(mrioc, scmd, MPI3_DEBUG_SCSI_ERROR); 4925 } 4926 return false; 4927 } 4928 4929 if (!param_len) { 4930 ioc_warn(mrioc, 4931 "%s: cdb received with zero parameter length\n", 4932 __func__); 4933 scsi_print_command(scmd); 4934 scmd->result = DID_OK << 16; 4935 scsi_done(scmd); 4936 return true; 4937 } 4938 4939 if (param_len < 24) { 4940 ioc_warn(mrioc, 4941 "%s: cdb received with invalid param_len: %d\n", 4942 __func__, param_len); 4943 scsi_print_command(scmd); 4944 scmd->result = SAM_STAT_CHECK_CONDITION; 4945 scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 4946 0x1A, 0); 4947 scsi_done(scmd); 4948 return true; 4949 } 4950 if (param_len != scsi_bufflen(scmd)) { 4951 ioc_warn(mrioc, 4952 "%s: cdb received with param_len: %d bufflen: %d\n", 4953 __func__, param_len, scsi_bufflen(scmd)); 4954 scsi_print_command(scmd); 4955 scmd->result = SAM_STAT_CHECK_CONDITION; 4956 scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 4957 0x1A, 0); 4958 scsi_done(scmd); 4959 return true; 4960 } 4961 buf = kzalloc(scsi_bufflen(scmd), GFP_ATOMIC); 4962 if (!buf) { 4963 scsi_print_command(scmd); 4964 scmd->result = SAM_STAT_CHECK_CONDITION; 4965 scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 4966 0x55, 0x03); 4967 scsi_done(scmd); 4968 return true; 4969 } 4970 scsi_sg_copy_to_buffer(scmd, buf, scsi_bufflen(scmd)); 4971 desc_len = get_unaligned_be16(&buf[2]); 4972 4973 if (desc_len < 16) { 4974 ioc_warn(mrioc, 4975 "%s: Invalid descriptor length in param list: %d\n", 4976 __func__, desc_len); 4977 scsi_print_command(scmd); 4978 scmd->result = SAM_STAT_CHECK_CONDITION; 4979 scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 4980 0x26, 0); 4981 scsi_done(scmd); 4982 kfree(buf); 4983 return true; 4984 } 4985 4986 if (param_len > (desc_len + 8)) { 4987 trunc_param_len = desc_len + 8; 4988 scsi_print_command(scmd); 4989 dprint_scsi_err(mrioc, 4990 "truncating param_len(%d) to desc_len+8(%d)\n", 4991 param_len, trunc_param_len); 4992 put_unaligned_be16(trunc_param_len, scmd->cmnd + 7); 4993 scsi_print_command(scmd); 4994 } 4995 4996 kfree(buf); 4997 return false; 4998 } 4999 5000 /** 5001 * mpi3mr_allow_scmd_to_fw - Command is allowed during shutdown 5002 * @scmd: SCSI Command reference 5003 * 5004 * Checks whether a cdb is allowed during shutdown or not. 5005 * 5006 * Return: TRUE for allowed commands, FALSE otherwise. 5007 */ 5008 5009 inline bool mpi3mr_allow_scmd_to_fw(struct scsi_cmnd *scmd) 5010 { 5011 switch (scmd->cmnd[0]) { 5012 case SYNCHRONIZE_CACHE: 5013 case START_STOP: 5014 return true; 5015 default: 5016 return false; 5017 } 5018 } 5019 5020 /** 5021 * mpi3mr_qcmd - I/O request despatcher 5022 * @shost: SCSI Host reference 5023 * @scmd: SCSI Command reference 5024 * 5025 * Issues the SCSI Command as an MPI3 request. 
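 *
 * At a high level the fast path below performs (an overview sketch;
 * error, divert and throttling branches omitted):
 *
 *   host_tag = mpi3mr_host_tag_for_scmd(mrioc, scmd);
 *   mpi3mr_setup_eedp(mrioc, scmd, scsiio_req);
 *   mpi3mr_build_sg_scmd(mrioc, scmd, scsiio_req);
 *   mpi3mr_op_request_post(mrioc, op_req_q,
 *       scmd_priv_data->mpi3mr_scsiio_req);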
5026 * 5027 * Return: 0 on successful queueing of the request or if the 5028 * request is completed with failure. 5029 * SCSI_MLQUEUE_DEVICE_BUSY when the device is busy. 5030 * SCSI_MLQUEUE_HOST_BUSY when the host queue is full. 5031 */ 5032 static int mpi3mr_qcmd(struct Scsi_Host *shost, 5033 struct scsi_cmnd *scmd) 5034 { 5035 struct mpi3mr_ioc *mrioc = shost_priv(shost); 5036 struct mpi3mr_stgt_priv_data *stgt_priv_data; 5037 struct mpi3mr_sdev_priv_data *sdev_priv_data; 5038 struct scmd_priv *scmd_priv_data = NULL; 5039 struct mpi3_scsi_io_request *scsiio_req = NULL; 5040 struct op_req_qinfo *op_req_q = NULL; 5041 int retval = 0; 5042 u16 dev_handle; 5043 u16 host_tag; 5044 u32 scsiio_flags = 0, data_len_blks = 0; 5045 struct request *rq = scsi_cmd_to_rq(scmd); 5046 int iprio_class; 5047 u8 is_pcie_dev = 0; 5048 u32 tracked_io_sz = 0; 5049 u32 ioc_pend_data_len = 0, tg_pend_data_len = 0; 5050 struct mpi3mr_throttle_group_info *tg = NULL; 5051 5052 if (mrioc->unrecoverable) { 5053 scmd->result = DID_ERROR << 16; 5054 scsi_done(scmd); 5055 goto out; 5056 } 5057 5058 sdev_priv_data = scmd->device->hostdata; 5059 if (!sdev_priv_data || !sdev_priv_data->tgt_priv_data) { 5060 scmd->result = DID_NO_CONNECT << 16; 5061 scsi_done(scmd); 5062 goto out; 5063 } 5064 5065 if (mrioc->stop_drv_processing && 5066 !(mpi3mr_allow_scmd_to_fw(scmd))) { 5067 scmd->result = DID_NO_CONNECT << 16; 5068 scsi_done(scmd); 5069 goto out; 5070 } 5071 5072 stgt_priv_data = sdev_priv_data->tgt_priv_data; 5073 dev_handle = stgt_priv_data->dev_handle; 5074 5075 /* Avoid error handling escalation when device is removed or blocked */ 5076 5077 if (scmd->device->host->shost_state == SHOST_RECOVERY && 5078 scmd->cmnd[0] == TEST_UNIT_READY && 5079 (stgt_priv_data->dev_removed || (dev_handle == MPI3MR_INVALID_DEV_HANDLE))) { 5080 scsi_build_sense(scmd, 0, UNIT_ATTENTION, 0x29, 0x07); 5081 scsi_done(scmd); 5082 goto out; 5083 } 5084 5085 if (mrioc->reset_in_progress || mrioc->prepare_for_reset 5086 || mrioc->block_on_pci_err) { 5087 retval = SCSI_MLQUEUE_HOST_BUSY; 5088 goto out; 5089 } 5090 5091 if (atomic_read(&stgt_priv_data->block_io)) { 5092 if (mrioc->stop_drv_processing) { 5093 scmd->result = DID_NO_CONNECT << 16; 5094 scsi_done(scmd); 5095 goto out; 5096 } 5097 retval = SCSI_MLQUEUE_DEVICE_BUSY; 5098 goto out; 5099 } 5100 5101 if (dev_handle == MPI3MR_INVALID_DEV_HANDLE) { 5102 scmd->result = DID_NO_CONNECT << 16; 5103 scsi_done(scmd); 5104 goto out; 5105 } 5106 if (stgt_priv_data->dev_removed) { 5107 scmd->result = DID_NO_CONNECT << 16; 5108 scsi_done(scmd); 5109 goto out; 5110 } 5111 5112 if (stgt_priv_data->dev_type == MPI3_DEVICE_DEVFORM_PCIE) 5113 is_pcie_dev = 1; 5114 if ((scmd->cmnd[0] == UNMAP) && is_pcie_dev && 5115 (mrioc->pdev->device == MPI3_MFGPAGE_DEVID_SAS4116) && 5116 mpi3mr_check_return_unmap(mrioc, scmd)) 5117 goto out; 5118 5119 host_tag = mpi3mr_host_tag_for_scmd(mrioc, scmd); 5120 if (host_tag == MPI3MR_HOSTTAG_INVALID) { 5121 scmd->result = DID_ERROR << 16; 5122 scsi_done(scmd); 5123 goto out; 5124 } 5125 5126 if (scmd->sc_data_direction == DMA_FROM_DEVICE) 5127 scsiio_flags = MPI3_SCSIIO_FLAGS_DATADIRECTION_READ; 5128 else if (scmd->sc_data_direction == DMA_TO_DEVICE) 5129 scsiio_flags = MPI3_SCSIIO_FLAGS_DATADIRECTION_WRITE; 5130 else 5131 scsiio_flags = MPI3_SCSIIO_FLAGS_DATADIRECTION_NO_DATA_TRANSFER; 5132 5133 scsiio_flags |= MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_SIMPLEQ; 5134 5135 if (sdev_priv_data->ncq_prio_enable) { 5136 iprio_class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq)); 5137 if 
(iprio_class == IOPRIO_CLASS_RT) 5138 scsiio_flags |= 1 << MPI3_SCSIIO_FLAGS_CMDPRI_SHIFT; 5139 } 5140 5141 if (scmd->cmd_len > 16) 5142 scsiio_flags |= MPI3_SCSIIO_FLAGS_CDB_GREATER_THAN_16; 5143 5144 scmd_priv_data = scsi_cmd_priv(scmd); 5145 memset(scmd_priv_data->mpi3mr_scsiio_req, 0, MPI3MR_ADMIN_REQ_FRAME_SZ); 5146 scsiio_req = (struct mpi3_scsi_io_request *)scmd_priv_data->mpi3mr_scsiio_req; 5147 scsiio_req->function = MPI3_FUNCTION_SCSI_IO; 5148 scsiio_req->host_tag = cpu_to_le16(host_tag); 5149 5150 mpi3mr_setup_eedp(mrioc, scmd, scsiio_req); 5151 5152 if (stgt_priv_data->wslen) 5153 mpi3mr_setup_divert_ws(mrioc, scmd, scsiio_req, &scsiio_flags, 5154 stgt_priv_data->wslen); 5155 5156 memcpy(scsiio_req->cdb.cdb32, scmd->cmnd, scmd->cmd_len); 5157 scsiio_req->data_length = cpu_to_le32(scsi_bufflen(scmd)); 5158 scsiio_req->dev_handle = cpu_to_le16(dev_handle); 5159 scsiio_req->flags = cpu_to_le32(scsiio_flags); 5160 int_to_scsilun(sdev_priv_data->lun_id, 5161 (struct scsi_lun *)scsiio_req->lun); 5162 5163 if (mpi3mr_build_sg_scmd(mrioc, scmd, scsiio_req)) { 5164 mpi3mr_clear_scmd_priv(mrioc, scmd); 5165 retval = SCSI_MLQUEUE_HOST_BUSY; 5166 goto out; 5167 } 5168 op_req_q = &mrioc->req_qinfo[scmd_priv_data->req_q_idx]; 5169 data_len_blks = scsi_bufflen(scmd) >> 9; 5170 if ((data_len_blks >= mrioc->io_throttle_data_length) && 5171 stgt_priv_data->io_throttle_enabled) { 5172 tracked_io_sz = data_len_blks; 5173 tg = stgt_priv_data->throttle_group; 5174 if (tg) { 5175 ioc_pend_data_len = atomic_add_return(data_len_blks, 5176 &mrioc->pend_large_data_sz); 5177 tg_pend_data_len = atomic_add_return(data_len_blks, 5178 &tg->pend_large_data_sz); 5179 if (!tg->io_divert && ((ioc_pend_data_len >= 5180 mrioc->io_throttle_high) || 5181 (tg_pend_data_len >= tg->high))) { 5182 tg->io_divert = 1; 5183 tg->need_qd_reduction = 1; 5184 mpi3mr_set_io_divert_for_all_vd_in_tg(mrioc, 5185 tg, 1); 5186 mpi3mr_queue_qd_reduction_event(mrioc, tg); 5187 } 5188 } else { 5189 ioc_pend_data_len = atomic_add_return(data_len_blks, 5190 &mrioc->pend_large_data_sz); 5191 if (ioc_pend_data_len >= mrioc->io_throttle_high) 5192 stgt_priv_data->io_divert = 1; 5193 } 5194 } 5195 5196 if (stgt_priv_data->io_divert) { 5197 scsiio_req->msg_flags |= 5198 MPI3_SCSIIO_MSGFLAGS_DIVERT_TO_FIRMWARE; 5199 scsiio_flags |= MPI3_SCSIIO_FLAGS_DIVERT_REASON_IO_THROTTLING; 5200 } 5201 scsiio_req->flags |= cpu_to_le32(scsiio_flags); 5202 5203 if (mpi3mr_op_request_post(mrioc, op_req_q, 5204 scmd_priv_data->mpi3mr_scsiio_req)) { 5205 mpi3mr_clear_scmd_priv(mrioc, scmd); 5206 retval = SCSI_MLQUEUE_HOST_BUSY; 5207 if (tracked_io_sz) { 5208 atomic_sub(tracked_io_sz, &mrioc->pend_large_data_sz); 5209 if (tg) 5210 atomic_sub(tracked_io_sz, 5211 &tg->pend_large_data_sz); 5212 } 5213 goto out; 5214 } 5215 5216 out: 5217 return retval; 5218 } 5219 5220 static const struct scsi_host_template mpi3mr_driver_template = { 5221 .module = THIS_MODULE, 5222 .name = "MPI3 Storage Controller", 5223 .proc_name = MPI3MR_DRIVER_NAME, 5224 .queuecommand = mpi3mr_qcmd, 5225 .target_alloc = mpi3mr_target_alloc, 5226 .sdev_init = mpi3mr_sdev_init, 5227 .sdev_configure = mpi3mr_sdev_configure, 5228 .target_destroy = mpi3mr_target_destroy, 5229 .sdev_destroy = mpi3mr_sdev_destroy, 5230 .scan_finished = mpi3mr_scan_finished, 5231 .scan_start = mpi3mr_scan_start, 5232 .change_queue_depth = mpi3mr_change_queue_depth, 5233 .eh_abort_handler = mpi3mr_eh_abort, 5234 .eh_device_reset_handler = mpi3mr_eh_dev_reset, 5235 .eh_target_reset_handler = mpi3mr_eh_target_reset, 5236 
.eh_bus_reset_handler = mpi3mr_eh_bus_reset, 5237 .eh_host_reset_handler = mpi3mr_eh_host_reset, 5238 .bios_param = mpi3mr_bios_param, 5239 .map_queues = mpi3mr_map_queues, 5240 .mq_poll = mpi3mr_blk_mq_poll, 5241 .no_write_same = 1, 5242 .can_queue = 1, 5243 .this_id = -1, 5244 .sg_tablesize = MPI3MR_DEFAULT_SGL_ENTRIES, 5245 /* max xfer supported is 1M (2K in 512 byte sized sectors) 5246 */ 5247 .max_sectors = (MPI3MR_DEFAULT_MAX_IO_SIZE / 512), 5248 .cmd_per_lun = MPI3MR_MAX_CMDS_LUN, 5249 .max_segment_size = 0xffffffff, 5250 .track_queue_depth = 1, 5251 .cmd_size = sizeof(struct scmd_priv), 5252 .shost_groups = mpi3mr_host_groups, 5253 .sdev_groups = mpi3mr_dev_groups, 5254 }; 5255 5256 /** 5257 * mpi3mr_init_drv_cmd - Initialize internal command tracker 5258 * @cmdptr: Internal command tracker 5259 * @host_tag: Host tag used for the specific command 5260 * 5261 * Initialize the internal command tracker structure with 5262 * specified host tag. 5263 * 5264 * Return: Nothing. 5265 */ 5266 static inline void mpi3mr_init_drv_cmd(struct mpi3mr_drv_cmd *cmdptr, 5267 u16 host_tag) 5268 { 5269 mutex_init(&cmdptr->mutex); 5270 cmdptr->reply = NULL; 5271 cmdptr->state = MPI3MR_CMD_NOTUSED; 5272 cmdptr->dev_handle = MPI3MR_INVALID_DEV_HANDLE; 5273 cmdptr->host_tag = host_tag; 5274 } 5275 5276 /** 5277 * osintfc_mrioc_security_status -Check controller secure status 5278 * @pdev: PCI device instance 5279 * 5280 * Read the Device Serial Number capability from PCI config 5281 * space and decide whether the controller is secure or not. 5282 * 5283 * Return: 0 on success, non-zero on failure. 5284 */ 5285 static int 5286 osintfc_mrioc_security_status(struct pci_dev *pdev) 5287 { 5288 u32 cap_data; 5289 int base; 5290 u32 ctlr_status; 5291 u32 debug_status; 5292 int retval = 0; 5293 5294 base = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN); 5295 if (!base) { 5296 dev_err(&pdev->dev, 5297 "%s: PCI_EXT_CAP_ID_DSN is not supported\n", __func__); 5298 return -1; 5299 } 5300 5301 pci_read_config_dword(pdev, base + 4, &cap_data); 5302 5303 debug_status = cap_data & MPI3MR_CTLR_SECURE_DBG_STATUS_MASK; 5304 ctlr_status = cap_data & MPI3MR_CTLR_SECURITY_STATUS_MASK; 5305 5306 switch (ctlr_status) { 5307 case MPI3MR_INVALID_DEVICE: 5308 dev_err(&pdev->dev, 5309 "%s: Non secure ctlr (Invalid) is detected: DID: 0x%x: SVID: 0x%x: SDID: 0x%x\n", 5310 __func__, pdev->device, pdev->subsystem_vendor, 5311 pdev->subsystem_device); 5312 retval = -1; 5313 break; 5314 case MPI3MR_CONFIG_SECURE_DEVICE: 5315 if (!debug_status) 5316 dev_info(&pdev->dev, 5317 "%s: Config secure ctlr is detected\n", 5318 __func__); 5319 break; 5320 case MPI3MR_HARD_SECURE_DEVICE: 5321 break; 5322 case MPI3MR_TAMPERED_DEVICE: 5323 dev_err(&pdev->dev, 5324 "%s: Non secure ctlr (Tampered) is detected: DID: 0x%x: SVID: 0x%x: SDID: 0x%x\n", 5325 __func__, pdev->device, pdev->subsystem_vendor, 5326 pdev->subsystem_device); 5327 retval = -1; 5328 break; 5329 default: 5330 retval = -1; 5331 break; 5332 } 5333 5334 if (!retval && debug_status) { 5335 dev_err(&pdev->dev, 5336 "%s: Non secure ctlr (Secure Dbg) is detected: DID: 0x%x: SVID: 0x%x: SDID: 0x%x\n", 5337 __func__, pdev->device, pdev->subsystem_vendor, 5338 pdev->subsystem_device); 5339 retval = -1; 5340 } 5341 5342 return retval; 5343 } 5344 5345 /** 5346 * mpi3mr_probe - PCI probe callback 5347 * @pdev: PCI device instance 5348 * @id: PCI device ID details 5349 * 5350 * controller initialization routine. 
Checks the security status
5351 * of the controller; if it is invalid or tampered, returns from
5352 * the probe without initializing the controller. Otherwise,
5353 * allocates the per adapter instance through shost_priv and
5354 * initializes controller specific data structures, initializes
5355 * the controller hardware, and adds the shost to the SCSI subsystem.
5356 *
5357 * Return: 0 on success, non-zero on failure.
5358 */
5359
5360 static int
5361 mpi3mr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
5362 {
5363 struct mpi3mr_ioc *mrioc = NULL;
5364 struct Scsi_Host *shost = NULL;
5365 int retval = 0, i;
5366
5367 if (osintfc_mrioc_security_status(pdev)) {
5368 warn_non_secure_ctlr = 1;
5369 return 1; /* For Invalid and Tampered device */
5370 }
5371
5372 shost = scsi_host_alloc(&mpi3mr_driver_template,
5373 sizeof(struct mpi3mr_ioc));
5374 if (!shost) {
5375 retval = -ENODEV;
5376 goto shost_failed;
5377 }
5378
5379 mrioc = shost_priv(shost);
5380 retval = ida_alloc_range(&mrioc_ida, 0, U8_MAX, GFP_KERNEL);
5381 if (retval < 0)
5382 goto id_alloc_failed;
5383 mrioc->id = (u8)retval;
5384 sprintf(mrioc->driver_name, "%s", MPI3MR_DRIVER_NAME);
5385 sprintf(mrioc->name, "%s%d", mrioc->driver_name, mrioc->id);
5386 INIT_LIST_HEAD(&mrioc->list);
5387 spin_lock(&mrioc_list_lock);
5388 list_add_tail(&mrioc->list, &mrioc_list);
5389 spin_unlock(&mrioc_list_lock);
5390
5391 spin_lock_init(&mrioc->admin_req_lock);
5392 spin_lock_init(&mrioc->reply_free_queue_lock);
5393 spin_lock_init(&mrioc->sbq_lock);
5394 spin_lock_init(&mrioc->fwevt_lock);
5395 spin_lock_init(&mrioc->tgtdev_lock);
5396 spin_lock_init(&mrioc->watchdog_lock);
5397 spin_lock_init(&mrioc->chain_buf_lock);
5398 spin_lock_init(&mrioc->adm_req_q_bar_writeq_lock);
5399 spin_lock_init(&mrioc->adm_reply_q_bar_writeq_lock);
5400 spin_lock_init(&mrioc->sas_node_lock);
5401 spin_lock_init(&mrioc->trigger_lock);
5402
5403 INIT_LIST_HEAD(&mrioc->fwevt_list);
5404 INIT_LIST_HEAD(&mrioc->tgtdev_list);
5405 INIT_LIST_HEAD(&mrioc->delayed_rmhs_list);
5406 INIT_LIST_HEAD(&mrioc->delayed_evtack_cmds_list);
5407 INIT_LIST_HEAD(&mrioc->sas_expander_list);
5408 INIT_LIST_HEAD(&mrioc->hba_port_table_list);
5409 INIT_LIST_HEAD(&mrioc->enclosure_list);
5410
5411 mutex_init(&mrioc->reset_mutex);
5412 mpi3mr_init_drv_cmd(&mrioc->init_cmds, MPI3MR_HOSTTAG_INITCMDS);
5413 mpi3mr_init_drv_cmd(&mrioc->host_tm_cmds, MPI3MR_HOSTTAG_BLK_TMS);
5414 mpi3mr_init_drv_cmd(&mrioc->bsg_cmds, MPI3MR_HOSTTAG_BSG_CMDS);
5415 mpi3mr_init_drv_cmd(&mrioc->cfg_cmds, MPI3MR_HOSTTAG_CFG_CMDS);
5416 mpi3mr_init_drv_cmd(&mrioc->transport_cmds,
5417 MPI3MR_HOSTTAG_TRANSPORT_CMDS);
5418
5419 for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++)
5420 mpi3mr_init_drv_cmd(&mrioc->dev_rmhs_cmds[i],
5421 MPI3MR_HOSTTAG_DEVRMCMD_MIN + i);
5422
5423 for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++)
5424 mpi3mr_init_drv_cmd(&mrioc->evtack_cmds[i],
5425 MPI3MR_HOSTTAG_EVTACKCMD_MIN + i);
5426
5427 if ((pdev->device == MPI3_MFGPAGE_DEVID_SAS4116) &&
5428 !pdev->revision)
5429 mrioc->enable_segqueue = false;
5430 else
5431 mrioc->enable_segqueue = true;
5432
5433 init_waitqueue_head(&mrioc->reset_waitq);
5434 mrioc->logging_level = logging_level;
5435 mrioc->shost = shost;
5436 mrioc->pdev = pdev;
5437 mrioc->stop_bsgs = 1;
5438
5439 mrioc->max_sgl_entries = max_sgl_entries;
5440 if (max_sgl_entries > MPI3MR_MAX_SGL_ENTRIES)
5441 mrioc->max_sgl_entries = MPI3MR_MAX_SGL_ENTRIES;
5442 else if (max_sgl_entries < MPI3MR_DEFAULT_SGL_ENTRIES)
5443 mrioc->max_sgl_entries = MPI3MR_DEFAULT_SGL_ENTRIES;
5444 else {
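/*
 * Round the user-supplied max_sgl_entries down to a multiple of
 * MPI3MR_DEFAULT_SGL_ENTRIES; e.g. a module parameter of 1000
 * yields 768 with the default of 256.
 */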
		mrioc->max_sgl_entries /= MPI3MR_DEFAULT_SGL_ENTRIES;
		mrioc->max_sgl_entries *= MPI3MR_DEFAULT_SGL_ENTRIES;
	}

	/* init shost parameters */
	shost->max_cmd_len = MPI3MR_MAX_CDB_LENGTH;
	shost->max_lun = -1;
	shost->unique_id = mrioc->id;

	shost->max_channel = 0;
	shost->max_id = 0xFFFFFFFF;

	shost->host_tagset = 1;

	if (prot_mask >= 0)
		scsi_host_set_prot(shost, prot_mask);
	else {
		prot_mask = SHOST_DIF_TYPE1_PROTECTION
		    | SHOST_DIF_TYPE2_PROTECTION
		    | SHOST_DIF_TYPE3_PROTECTION;
		scsi_host_set_prot(shost, prot_mask);
	}

	ioc_info(mrioc,
	    "%s :host protection capabilities enabled %s%s%s%s%s%s%s\n",
	    __func__,
	    (prot_mask & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
	    (prot_mask & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
	    (prot_mask & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
	    (prot_mask & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
	    (prot_mask & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
	    (prot_mask & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
	    (prot_mask & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");

	if (prot_guard_mask)
		scsi_host_set_guard(shost, (prot_guard_mask & 3));
	else
		scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);

	mrioc->fwevt_worker_thread = alloc_ordered_workqueue(
	    "%s%d_fwevt_wrkr", 0, mrioc->driver_name, mrioc->id);
	if (!mrioc->fwevt_worker_thread) {
		ioc_err(mrioc, "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		retval = -ENODEV;
		goto fwevtthread_failed;
	}

	mrioc->is_driver_loading = 1;
	mrioc->cpu_count = num_online_cpus();
	if (mpi3mr_setup_resources(mrioc)) {
		ioc_err(mrioc, "setup resources failed\n");
		retval = -ENODEV;
		goto resource_alloc_failed;
	}
	if (mpi3mr_init_ioc(mrioc)) {
		ioc_err(mrioc, "initializing IOC failed\n");
		retval = -ENODEV;
		goto init_ioc_failed;
	}

	shost->nr_hw_queues = mrioc->num_op_reply_q;
	/* Advertise the poll queue map when poll queues are configured */
	if (mrioc->active_poll_qcount)
		shost->nr_maps = 3;

	shost->can_queue = mrioc->max_host_ios;
	shost->sg_tablesize = mrioc->max_sgl_entries;
	shost->max_id = mrioc->facts.max_perids + 1;

	retval = scsi_add_host(shost, &pdev->dev);
	if (retval) {
		ioc_err(mrioc, "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		goto addhost_failed;
	}

	scsi_scan_host(shost);
	mpi3mr_bsg_init(mrioc);
	return retval;

addhost_failed:
	mpi3mr_stop_watchdog(mrioc);
	mpi3mr_cleanup_ioc(mrioc);
init_ioc_failed:
	mpi3mr_free_mem(mrioc);
	mpi3mr_cleanup_resources(mrioc);
resource_alloc_failed:
	destroy_workqueue(mrioc->fwevt_worker_thread);
fwevtthread_failed:
	ida_free(&mrioc_ida, mrioc->id);
	spin_lock(&mrioc_list_lock);
	list_del(&mrioc->list);
	spin_unlock(&mrioc_list_lock);
id_alloc_failed:
	scsi_host_put(shost);
shost_failed:
	return retval;
}

/**
 * mpi3mr_remove - PCI remove callback
 * @pdev: PCI device instance
 *
 * Cleanup the IOC by issuing MUR and shutdown notification.
 * Free up all memory and resources associated with the
 * controller and target devices, and unregister the shost.
 *
 * Return: Nothing.
 */
static void mpi3mr_remove(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct mpi3mr_ioc *mrioc;
	struct workqueue_struct *wq;
	unsigned long flags;
	struct mpi3mr_tgt_dev *tgtdev, *tgtdev_next;
	struct mpi3mr_hba_port *port, *hba_port_next;
	struct mpi3mr_sas_node *sas_expander, *sas_expander_next;

	if (!shost)
		return;

	mrioc = shost_priv(shost);
	/* Wait for any in-flight reset or initial driver load to finish */
	while (mrioc->reset_in_progress || mrioc->is_driver_loading)
		ssleep(1);

	/*
	 * If remove is invoked while blocked on a PCI error, unblock
	 * I/O and treat the controller as unrecoverable.
	 */
	if (mrioc->block_on_pci_err) {
		mrioc->block_on_pci_err = false;
		scsi_unblock_requests(shost);
		mrioc->unrecoverable = 1;
	}

	if (!pci_device_is_present(mrioc->pdev) ||
	    mrioc->pci_err_recovery) {
		mrioc->unrecoverable = 1;
		mpi3mr_flush_cmds_for_unrecovered_controller(mrioc);
	}

	mpi3mr_bsg_exit(mrioc);
	mrioc->stop_drv_processing = 1;
	mpi3mr_cleanup_fwevt_list(mrioc);
	spin_lock_irqsave(&mrioc->fwevt_lock, flags);
	wq = mrioc->fwevt_worker_thread;
	mrioc->fwevt_worker_thread = NULL;
	spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);
	if (wq)
		destroy_workqueue(wq);

	if (mrioc->sas_transport_enabled)
		sas_remove_host(shost);
	else
		scsi_remove_host(shost);

	list_for_each_entry_safe(tgtdev, tgtdev_next, &mrioc->tgtdev_list,
	    list) {
		mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
		mpi3mr_tgtdev_del_from_list(mrioc, tgtdev, true);
		mpi3mr_tgtdev_put(tgtdev);
	}
	mpi3mr_stop_watchdog(mrioc);
	mpi3mr_cleanup_ioc(mrioc);
	mpi3mr_free_mem(mrioc);
	mpi3mr_cleanup_resources(mrioc);

	spin_lock_irqsave(&mrioc->sas_node_lock, flags);
	/* Drop the lock across expander removal, which can sleep */
	list_for_each_entry_safe_reverse(sas_expander, sas_expander_next,
	    &mrioc->sas_expander_list, list) {
		spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
		mpi3mr_expander_node_remove(mrioc, sas_expander);
		spin_lock_irqsave(&mrioc->sas_node_lock, flags);
	}
	list_for_each_entry_safe(port, hba_port_next, &mrioc->hba_port_table_list, list) {
		ioc_info(mrioc,
		    "removing hba_port entry: %p port: %d from hba_port list\n",
		    port, port->port_id);
		list_del(&port->list);
		kfree(port);
	}
	spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);

	if (mrioc->sas_hba.num_phys) {
		kfree(mrioc->sas_hba.phy);
		mrioc->sas_hba.phy = NULL;
		mrioc->sas_hba.num_phys = 0;
	}

	ida_free(&mrioc_ida, mrioc->id);
	spin_lock(&mrioc_list_lock);
	list_del(&mrioc->list);
	spin_unlock(&mrioc_list_lock);

	scsi_host_put(shost);
}

/**
 * mpi3mr_shutdown - PCI shutdown callback
 * @pdev: PCI device instance
 *
 * Free up all memory and resources associated with the
 * controller.
 *
 * Return: Nothing.
 */
static void mpi3mr_shutdown(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct mpi3mr_ioc *mrioc;
	struct workqueue_struct *wq;
	unsigned long flags;

	if (!shost)
		return;

	mrioc = shost_priv(shost);
	while (mrioc->reset_in_progress || mrioc->is_driver_loading)
		ssleep(1);

	mrioc->stop_drv_processing = 1;
	mpi3mr_cleanup_fwevt_list(mrioc);
	spin_lock_irqsave(&mrioc->fwevt_lock, flags);
	wq = mrioc->fwevt_worker_thread;
	mrioc->fwevt_worker_thread = NULL;
	spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);
	if (wq)
		destroy_workqueue(wq);

	mpi3mr_stop_watchdog(mrioc);
	mpi3mr_cleanup_ioc(mrioc);
	mpi3mr_cleanup_resources(mrioc);
}

/**
 * mpi3mr_suspend - PCI power management suspend callback
 * @dev: Device struct
 *
 * Change the power state to the given value and cleanup the IOC
 * by issuing MUR and shutdown notification.
 *
 * Return: 0 always.
 */
static int __maybe_unused
mpi3mr_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct mpi3mr_ioc *mrioc;

	if (!shost)
		return 0;

	mrioc = shost_priv(shost);
	while (mrioc->reset_in_progress || mrioc->is_driver_loading)
		ssleep(1);
	mrioc->stop_drv_processing = 1;
	mpi3mr_cleanup_fwevt_list(mrioc);
	scsi_block_requests(shost);
	mpi3mr_stop_watchdog(mrioc);
	mpi3mr_cleanup_ioc(mrioc);

	ioc_info(mrioc, "pdev=0x%p, slot=%s, entering suspend state\n",
	    pdev, pci_name(pdev));
	mpi3mr_cleanup_resources(mrioc);

	return 0;
}

/**
 * mpi3mr_resume - PCI power management resume callback
 * @dev: Device struct
 *
 * Restore the power state to D0, reinitialize the controller
 * and resume I/O operations to the target devices.
 *
 * Return: 0 on success, non-zero on failure.
 */
static int __maybe_unused
mpi3mr_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct mpi3mr_ioc *mrioc;
	pci_power_t device_state = pdev->current_state;
	int r;

	if (!shost)
		return 0;

	mrioc = shost_priv(shost);

	ioc_info(mrioc, "pdev=0x%p, slot=%s, previous operating state [D%d]\n",
	    pdev, pci_name(pdev), device_state);
	mrioc->pdev = pdev;
	mrioc->cpu_count = num_online_cpus();
	r = mpi3mr_setup_resources(mrioc);
	if (r) {
		ioc_info(mrioc, "%s: Setup resources failed[%d]\n",
		    __func__, r);
		return r;
	}

	mrioc->stop_drv_processing = 0;
	mpi3mr_invalidate_devhandles(mrioc);
	mpi3mr_free_enclosure_list(mrioc);
	mpi3mr_memset_buffers(mrioc);
	r = mpi3mr_reinit_ioc(mrioc, 1);
	if (r) {
		ioc_err(mrioc, "resuming controller failed[%d]\n", r);
		return r;
	}
	/* Allow the topology to settle before resuming I/O */
	ssleep(MPI3MR_RESET_TOPOLOGY_SETTLE_TIME);
	scsi_unblock_requests(shost);
	mrioc->device_refresh_on = 0;
	mpi3mr_start_watchdog(mrioc);

	return 0;
}

/**
 * mpi3mr_pcierr_error_detected - PCI error detected callback
 * @pdev: PCI device instance
 * @state: channel state
 *
 * This function is called by the PCI error recovery driver and,
 * based on the state passed, decides what action to recommend
 * back to the PCI core.
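 *
 * The channel states reported by the PCI core are
 * pci_channel_io_normal, pci_channel_io_frozen and
 * pci_channel_io_perm_failure, handled as described below.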
 *
 * For all of the states, if there is no valid mrioc or scsi host
 * reference in the PCI device, then this function will return
 * the result as disconnect.
 *
 * For the normal state, this function will return the result as
 * can recover.
 *
 * For the frozen state, this function will block for any pending
 * controller initialization or re-initialization to complete,
 * stop any new interactions with the controller and return
 * status as reset required.
 *
 * For the permanent failure state, this function will mark the
 * controller as unrecoverable and return status as disconnect.
 *
 * Returns: PCI_ERS_RESULT_NEED_RESET or CAN_RECOVER or
 * DISCONNECT based on the controller state.
 */
static pci_ers_result_t
mpi3mr_pcierr_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct Scsi_Host *shost;
	struct mpi3mr_ioc *mrioc;
	unsigned int timeout = MPI3MR_RESET_TIMEOUT;

	dev_info(&pdev->dev, "%s: callback invoked state(%d)\n", __func__,
	    state);

	shost = pci_get_drvdata(pdev);
	mrioc = shost_priv(shost);

	switch (state) {
	case pci_channel_io_normal:
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		mrioc->pci_err_recovery = true;
		mrioc->block_on_pci_err = true;

		/* Wait for any pending reset or driver load to finish */
		do {
			if (mrioc->reset_in_progress || mrioc->is_driver_loading)
				ssleep(1);
			else
				break;
		} while (--timeout);

		if (!timeout) {
			mrioc->unrecoverable = 1;
			mpi3mr_stop_watchdog(mrioc);
			mpi3mr_flush_cmds_for_unrecovered_controller(mrioc);
			return PCI_ERS_RESULT_DISCONNECT;
		}

		scsi_block_requests(mrioc->shost);
		mpi3mr_stop_watchdog(mrioc);
		mpi3mr_cleanup_resources(mrioc);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		mrioc->pci_err_recovery = true;
		mrioc->block_on_pci_err = true;
		mrioc->unrecoverable = 1;
		mpi3mr_stop_watchdog(mrioc);
		mpi3mr_flush_cmds_for_unrecovered_controller(mrioc);
		return PCI_ERS_RESULT_DISCONNECT;
	default:
		return PCI_ERS_RESULT_DISCONNECT;
	}
}

/**
 * mpi3mr_pcierr_slot_reset - Post slot reset callback
 * @pdev: PCI device instance
 *
 * This function is called by the PCI error recovery driver
 * after a slot or link reset issued by it for the recovery; the
 * driver is expected to bring back the controller and
 * initialize it.
 *
 * This function restores the PCI state and reinitializes the
 * controller resources and the controller itself, blocking for
 * any pending reset to complete.
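 *
 * On failure, the controller is marked unrecoverable and any
 * blocked requests are unblocked so that they are failed rather
 * than left hanging, and the watchdog is restarted.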
 *
 * Returns: PCI_ERS_RESULT_DISCONNECT on failure or
 * PCI_ERS_RESULT_RECOVERED on success.
 */
static pci_ers_result_t mpi3mr_pcierr_slot_reset(struct pci_dev *pdev)
{
	struct Scsi_Host *shost;
	struct mpi3mr_ioc *mrioc;
	unsigned int timeout = MPI3MR_RESET_TIMEOUT;

	dev_info(&pdev->dev, "%s: callback invoked\n", __func__);

	shost = pci_get_drvdata(pdev);
	mrioc = shost_priv(shost);

	/* Wait for any pending reset to finish */
	do {
		if (mrioc->reset_in_progress)
			ssleep(1);
		else
			break;
	} while (--timeout);

	if (!timeout)
		goto out_failed;

	pci_restore_state(pdev);

	if (mpi3mr_setup_resources(mrioc)) {
		ioc_err(mrioc, "setup resources failed\n");
		goto out_failed;
	}
	mrioc->unrecoverable = 0;
	mrioc->pci_err_recovery = false;

	if (mpi3mr_soft_reset_handler(mrioc, MPI3MR_RESET_FROM_FIRMWARE, 0))
		goto out_failed;

	return PCI_ERS_RESULT_RECOVERED;

out_failed:
	mrioc->unrecoverable = 1;
	mrioc->block_on_pci_err = false;
	scsi_unblock_requests(shost);
	mpi3mr_start_watchdog(mrioc);
	return PCI_ERS_RESULT_DISCONNECT;
}

/**
 * mpi3mr_pcierr_resume - PCI error recovery resume callback
 * @pdev: PCI device instance
 *
 * This function re-enables all I/O and IOCTLs after the reset
 * issued as part of the PCI error recovery has completed.
 *
 * Return: Nothing.
 */
static void mpi3mr_pcierr_resume(struct pci_dev *pdev)
{
	struct Scsi_Host *shost;
	struct mpi3mr_ioc *mrioc;

	dev_info(&pdev->dev, "%s: callback invoked\n", __func__);

	shost = pci_get_drvdata(pdev);
	mrioc = shost_priv(shost);

	if (mrioc->block_on_pci_err) {
		mrioc->block_on_pci_err = false;
		scsi_unblock_requests(shost);
		mpi3mr_start_watchdog(mrioc);
	}
}

/**
 * mpi3mr_pcierr_mmio_enabled - PCI error recovery callback
 * @pdev: PCI device instance
 *
 * This is called only if mpi3mr_pcierr_error_detected returns
 * PCI_ERS_RESULT_CAN_RECOVER.
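 * MMIO access to the controller is available again at this
 * point, so the handler only needs to check whether the
 * controller was already marked unrecoverable.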
 *
 * Return: PCI_ERS_RESULT_DISCONNECT when the controller is
 * unrecoverable or when the shost/mrioc reference cannot be
 * found, else return PCI_ERS_RESULT_RECOVERED.
 */
static pci_ers_result_t mpi3mr_pcierr_mmio_enabled(struct pci_dev *pdev)
{
	struct Scsi_Host *shost;
	struct mpi3mr_ioc *mrioc;

	dev_info(&pdev->dev, "%s: callback invoked\n", __func__);

	shost = pci_get_drvdata(pdev);
	mrioc = shost_priv(shost);

	if (mrioc->unrecoverable)
		return PCI_ERS_RESULT_DISCONNECT;

	return PCI_ERS_RESULT_RECOVERED;
}

static const struct pci_device_id mpi3mr_pci_id_table[] = {
	{
		PCI_DEVICE_SUB(MPI3_MFGPAGE_VENDORID_BROADCOM,
		    MPI3_MFGPAGE_DEVID_SAS4116, PCI_ANY_ID, PCI_ANY_ID)
	},
	{
		PCI_DEVICE_SUB(MPI3_MFGPAGE_VENDORID_BROADCOM,
		    MPI3_MFGPAGE_DEVID_SAS5116_MPI, PCI_ANY_ID, PCI_ANY_ID)
	},
	{
		PCI_DEVICE_SUB(MPI3_MFGPAGE_VENDORID_BROADCOM,
		    MPI3_MFGPAGE_DEVID_SAS5116_MPI_MGMT, PCI_ANY_ID, PCI_ANY_ID)
	},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, mpi3mr_pci_id_table);

static const struct pci_error_handlers mpi3mr_err_handler = {
	.error_detected = mpi3mr_pcierr_error_detected,
	.mmio_enabled = mpi3mr_pcierr_mmio_enabled,
	.slot_reset = mpi3mr_pcierr_slot_reset,
	.resume = mpi3mr_pcierr_resume,
};

static SIMPLE_DEV_PM_OPS(mpi3mr_pm_ops, mpi3mr_suspend, mpi3mr_resume);

static struct pci_driver mpi3mr_pci_driver = {
	.name = MPI3MR_DRIVER_NAME,
	.id_table = mpi3mr_pci_id_table,
	.probe = mpi3mr_probe,
	.remove = mpi3mr_remove,
	.shutdown = mpi3mr_shutdown,
	.err_handler = &mpi3mr_err_handler,
	.driver.pm = &mpi3mr_pm_ops,
};

static ssize_t event_counter_show(struct device_driver *dd, char *buf)
{
	return sprintf(buf, "%llu\n", atomic64_read(&event_counter));
}
static DRIVER_ATTR_RO(event_counter);

static int __init mpi3mr_init(void)
{
	int ret_val;

	pr_info("Loading %s version %s\n", MPI3MR_DRIVER_NAME,
	    MPI3MR_DRIVER_VERSION);

	mpi3mr_transport_template =
	    sas_attach_transport(&mpi3mr_transport_functions);
	if (!mpi3mr_transport_template) {
		pr_err("%s failed to load due to sas transport attach failure\n",
		    MPI3MR_DRIVER_NAME);
		return -ENODEV;
	}

	ret_val = pci_register_driver(&mpi3mr_pci_driver);
	if (ret_val) {
		pr_err("%s failed to load due to pci register driver failure\n",
		    MPI3MR_DRIVER_NAME);
		goto err_pci_reg_fail;
	}

	ret_val = driver_create_file(&mpi3mr_pci_driver.driver,
	    &driver_attr_event_counter);
	if (ret_val)
		goto err_event_counter;

	return ret_val;

err_event_counter:
	pci_unregister_driver(&mpi3mr_pci_driver);

err_pci_reg_fail:
	sas_release_transport(mpi3mr_transport_template);
	return ret_val;
}

static void __exit mpi3mr_exit(void)
{
	if (warn_non_secure_ctlr)
		pr_warn(
		    "Unloading %s version %s while managing a non secure controller\n",
		    MPI3MR_DRIVER_NAME, MPI3MR_DRIVER_VERSION);
	else
		pr_info("Unloading %s version %s\n", MPI3MR_DRIVER_NAME,
		    MPI3MR_DRIVER_VERSION);

	driver_remove_file(&mpi3mr_pci_driver.driver,
	    &driver_attr_event_counter);
	pci_unregister_driver(&mpi3mr_pci_driver);
	sas_release_transport(mpi3mr_transport_template);
	ida_destroy(&mrioc_ida);
}

module_init(mpi3mr_init);
module_exit(mpi3mr_exit);