// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Driver for Broadcom MPI3 Storage Controllers
 *
 * Copyright (C) 2017-2023 Broadcom Inc.
 *  (mailto: mpi3mr-linuxdrv.pdl@broadcom.com)
 *
 */

#include "mpi3mr.h"
#include <linux/idr.h>

/* global driver scope variables */
LIST_HEAD(mrioc_list);
DEFINE_SPINLOCK(mrioc_list_lock);
static DEFINE_IDA(mrioc_ida);
static int warn_non_secure_ctlr;
atomic64_t event_counter;

MODULE_AUTHOR(MPI3MR_DRIVER_AUTHOR);
MODULE_DESCRIPTION(MPI3MR_DRIVER_DESC);
MODULE_LICENSE(MPI3MR_DRIVER_LICENSE);
MODULE_VERSION(MPI3MR_DRIVER_VERSION);

/* Module parameters */
int prot_mask = -1;
module_param(prot_mask, int, 0);
MODULE_PARM_DESC(prot_mask, "Host protection capabilities mask, def=0x07");

static int prot_guard_mask = 3;
module_param(prot_guard_mask, int, 0);
MODULE_PARM_DESC(prot_guard_mask, " Host protection guard mask, def=3");
static int logging_level;
module_param(logging_level, int, 0);
MODULE_PARM_DESC(logging_level,
	" bits for enabling additional logging info (default=0)");
static int max_sgl_entries = MPI3MR_DEFAULT_SGL_ENTRIES;
module_param(max_sgl_entries, int, 0444);
MODULE_PARM_DESC(max_sgl_entries,
	"Preferred max number of SG entries to be used for a single I/O\n"
	"The actual value will be determined by the driver\n"
	"(Minimum=256, Maximum=2048, default=256)");

/* Forward declarations */
static void mpi3mr_send_event_ack(struct mpi3mr_ioc *mrioc, u8 event,
	struct mpi3mr_drv_cmd *cmdparam, u32 event_ctx);

#define MPI3MR_DRIVER_EVENT_TG_QD_REDUCTION	(0xFFFF)

#define MPI3_EVENT_WAIT_FOR_DEVICES_TO_REFRESH	(0xFFFE)

/*
 * SAS Log info code for a NCQ collateral abort after an NCQ error:
 * IOC_LOGINFO_PREFIX_PL | PL_LOGINFO_CODE_SATA_NCQ_FAIL_ALL_CMDS_AFTR_ERR
 * See: drivers/message/fusion/lsi/mpi_log_sas.h
 */
#define IOC_LOGINFO_SATA_NCQ_FAIL_AFTER_ERR	0x31080000

/**
 * mpi3mr_host_tag_for_scmd - Get host tag for a scmd
 * @mrioc: Adapter instance reference
 * @scmd: SCSI command reference
 *
 * Calculate the host tag based on block tag for a given scmd.
 *
 * Return: Valid host tag or MPI3MR_HOSTTAG_INVALID.
 */
static u16 mpi3mr_host_tag_for_scmd(struct mpi3mr_ioc *mrioc,
	struct scsi_cmnd *scmd)
{
	struct scmd_priv *priv = NULL;
	u32 unique_tag;
	u16 host_tag, hw_queue;

	unique_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));

	hw_queue = blk_mq_unique_tag_to_hwq(unique_tag);
	if (hw_queue >= mrioc->num_op_reply_q)
		return MPI3MR_HOSTTAG_INVALID;
	host_tag = blk_mq_unique_tag_to_tag(unique_tag);

	if (WARN_ON(host_tag >= mrioc->max_host_ios))
		return MPI3MR_HOSTTAG_INVALID;

	priv = scsi_cmd_priv(scmd);
	/* host_tag 0 is invalid, hence incrementing by 1 */
	priv->host_tag = host_tag + 1;
	priv->scmd = scmd;
	priv->in_lld_scope = 1;
	priv->req_q_idx = hw_queue;
	priv->meta_chain_idx = -1;
	priv->chain_idx = -1;
	priv->meta_sg_valid = 0;
	return priv->host_tag;
}

/**
 * mpi3mr_scmd_from_host_tag - Get SCSI command from host tag
 * @mrioc: Adapter instance reference
 * @host_tag: Host tag
 * @qidx: Operational queue index
 *
 * Identify the block tag from the host tag and queue index and
 * retrieve associated scsi command using scsi_host_find_tag().
 *
 * Return: SCSI command reference or NULL.
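 *
 * For example (illustrative values, assuming BLK_MQ_UNIQUE_TAG_BITS
 * is 16): a command with block tag 5 on hardware queue 2 yields
 * unique_tag 0x00020005; mpi3mr_host_tag_for_scmd() stores host_tag 6
 * (block tag + 1) and req_q_idx 2, and this helper reverses that
 * mapping.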
 */
static struct scsi_cmnd *mpi3mr_scmd_from_host_tag(
	struct mpi3mr_ioc *mrioc, u16 host_tag, u16 qidx)
{
	struct scsi_cmnd *scmd = NULL;
	struct scmd_priv *priv = NULL;
	u32 unique_tag = host_tag - 1;

	if (WARN_ON(host_tag > mrioc->max_host_ios))
		goto out;

	unique_tag |= (qidx << BLK_MQ_UNIQUE_TAG_BITS);

	scmd = scsi_host_find_tag(mrioc->shost, unique_tag);
	if (scmd) {
		priv = scsi_cmd_priv(scmd);
		if (!priv->in_lld_scope)
			scmd = NULL;
	}
out:
	return scmd;
}

/**
 * mpi3mr_clear_scmd_priv - Cleanup SCSI command private data
 * @mrioc: Adapter instance reference
 * @scmd: SCSI command reference
 *
 * Invalidate the SCSI command private data to mark the command
 * as no longer being in LLD scope.
 *
 * Return: Nothing.
 */
static void mpi3mr_clear_scmd_priv(struct mpi3mr_ioc *mrioc,
	struct scsi_cmnd *scmd)
{
	struct scmd_priv *priv = NULL;

	priv = scsi_cmd_priv(scmd);

	if (WARN_ON(priv->in_lld_scope == 0))
		return;
	priv->host_tag = MPI3MR_HOSTTAG_INVALID;
	priv->req_q_idx = 0xFFFF;
	priv->scmd = NULL;
	priv->in_lld_scope = 0;
	priv->meta_sg_valid = 0;
	if (priv->chain_idx >= 0) {
		clear_bit(priv->chain_idx, mrioc->chain_bitmap);
		priv->chain_idx = -1;
	}
	if (priv->meta_chain_idx >= 0) {
		clear_bit(priv->meta_chain_idx, mrioc->chain_bitmap);
		priv->meta_chain_idx = -1;
	}
}

static void mpi3mr_dev_rmhs_send_tm(struct mpi3mr_ioc *mrioc, u16 handle,
	struct mpi3mr_drv_cmd *cmdparam, u8 iou_rc);
static void mpi3mr_fwevt_worker(struct work_struct *work);

/**
 * mpi3mr_fwevt_free - firmware event memory deallocator
 * @r: k reference pointer of the firmware event
 *
 * Free firmware event memory when no reference.
 */
static void mpi3mr_fwevt_free(struct kref *r)
{
	kfree(container_of(r, struct mpi3mr_fwevt, ref_count));
}

/**
 * mpi3mr_fwevt_get - k reference incrementor
 * @fwevt: Firmware event reference
 *
 * Increment firmware event reference count.
 */
static void mpi3mr_fwevt_get(struct mpi3mr_fwevt *fwevt)
{
	kref_get(&fwevt->ref_count);
}

/**
 * mpi3mr_fwevt_put - k reference decrementor
 * @fwevt: Firmware event reference
 *
 * Decrement firmware event reference count.
 */
static void mpi3mr_fwevt_put(struct mpi3mr_fwevt *fwevt)
{
	kref_put(&fwevt->ref_count, mpi3mr_fwevt_free);
}

/**
 * mpi3mr_alloc_fwevt - Allocate firmware event
 * @len: length of firmware event data to allocate
 *
 * Allocate firmware event with required length and initialize
 * the reference counter.
 *
 * Return: firmware event reference.
 */
static struct mpi3mr_fwevt *mpi3mr_alloc_fwevt(int len)
{
	struct mpi3mr_fwevt *fwevt;

	fwevt = kzalloc(sizeof(*fwevt) + len, GFP_ATOMIC);
	if (!fwevt)
		return NULL;

	kref_init(&fwevt->ref_count);
	return fwevt;
}

/**
 * mpi3mr_fwevt_add_to_list - Add firmware event to the list
 * @mrioc: Adapter instance reference
 * @fwevt: Firmware event reference
 *
 * Add the given firmware event to the firmware event list.
 *
 * Return: Nothing.
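 *
 * Once queued, the event holds three references: the kref_init
 * reference plus one each for its place on fwevt_list and in the
 * worker queue; these are dropped by the dequeue, cancel and
 * process paths respectively.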
 */
static void mpi3mr_fwevt_add_to_list(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_fwevt *fwevt)
{
	unsigned long flags;

	if (!mrioc->fwevt_worker_thread)
		return;

	spin_lock_irqsave(&mrioc->fwevt_lock, flags);
	/* get fwevt reference count while adding it to fwevt_list */
	mpi3mr_fwevt_get(fwevt);
	INIT_LIST_HEAD(&fwevt->list);
	list_add_tail(&fwevt->list, &mrioc->fwevt_list);
	INIT_WORK(&fwevt->work, mpi3mr_fwevt_worker);
	/* get fwevt reference count while enqueueing it to worker queue */
	mpi3mr_fwevt_get(fwevt);
	queue_work(mrioc->fwevt_worker_thread, &fwevt->work);
	spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);
}

/**
 * mpi3mr_hdb_trigger_data_event - Add hdb trigger data event to
 * the list
 * @mrioc: Adapter instance reference
 * @event_data: Event data
 *
 * Add the given hdb trigger data event to the firmware event
 * list.
 *
 * Return: Nothing.
 */
void mpi3mr_hdb_trigger_data_event(struct mpi3mr_ioc *mrioc,
	struct trigger_event_data *event_data)
{
	struct mpi3mr_fwevt *fwevt;
	u16 sz = sizeof(*event_data);

	fwevt = mpi3mr_alloc_fwevt(sz);
	if (!fwevt) {
		ioc_warn(mrioc, "failed to queue hdb trigger data event\n");
		return;
	}

	fwevt->mrioc = mrioc;
	fwevt->event_id = MPI3MR_DRIVER_EVENT_PROCESS_TRIGGER;
	fwevt->send_ack = 0;
	fwevt->process_evt = 1;
	fwevt->evt_ctx = 0;
	fwevt->event_data_size = sz;
	memcpy(fwevt->event_data, event_data, sz);

	mpi3mr_fwevt_add_to_list(mrioc, fwevt);
}

/**
 * mpi3mr_fwevt_del_from_list - Delete firmware event from list
 * @mrioc: Adapter instance reference
 * @fwevt: Firmware event reference
 *
 * Delete the given firmware event from the firmware event list.
 *
 * Return: Nothing.
 */
static void mpi3mr_fwevt_del_from_list(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_fwevt *fwevt)
{
	unsigned long flags;

	spin_lock_irqsave(&mrioc->fwevt_lock, flags);
	if (!list_empty(&fwevt->list)) {
		list_del_init(&fwevt->list);
		/*
		 * Put fwevt reference count after
		 * removing it from fwevt_list
		 */
		mpi3mr_fwevt_put(fwevt);
	}
	spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);
}

/**
 * mpi3mr_dequeue_fwevt - Dequeue firmware event from the list
 * @mrioc: Adapter instance reference
 *
 * Dequeue a firmware event from the firmware event list.
 *
 * Return: firmware event.
 */
static struct mpi3mr_fwevt *mpi3mr_dequeue_fwevt(
	struct mpi3mr_ioc *mrioc)
{
	unsigned long flags;
	struct mpi3mr_fwevt *fwevt = NULL;

	spin_lock_irqsave(&mrioc->fwevt_lock, flags);
	if (!list_empty(&mrioc->fwevt_list)) {
		fwevt = list_first_entry(&mrioc->fwevt_list,
		    struct mpi3mr_fwevt, list);
		list_del_init(&fwevt->list);
		/*
		 * Put fwevt reference count after
		 * removing it from fwevt_list
		 */
		mpi3mr_fwevt_put(fwevt);
	}
	spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);

	return fwevt;
}

/**
 * mpi3mr_cancel_work - cancel firmware event
 * @fwevt: fwevt object which needs to be canceled
 *
 * Return: Nothing.
 */
static void mpi3mr_cancel_work(struct mpi3mr_fwevt *fwevt)
{
	/*
	 * Wait on the fwevt to complete. If this returns 1, then
	 * the event was never executed.
	 *
	 * If it did execute, we wait for it to finish, and the put will
	 * happen from mpi3mr_process_fwevt()
	 */
	if (cancel_work_sync(&fwevt->work)) {
		/*
		 * Put fwevt reference count after
		 * dequeuing it from worker queue
		 */
		mpi3mr_fwevt_put(fwevt);
		/*
		 * Put fwevt reference count to neutralize
		 * kref_init increment
		 */
		mpi3mr_fwevt_put(fwevt);
	}
}

/**
 * mpi3mr_cleanup_fwevt_list - Cleanup firmware event list
 * @mrioc: Adapter instance reference
 *
 * Flush all pending firmware events from the firmware event
 * list.
 *
 * Return: Nothing.
 */
void mpi3mr_cleanup_fwevt_list(struct mpi3mr_ioc *mrioc)
{
	struct mpi3mr_fwevt *fwevt = NULL;

	if ((list_empty(&mrioc->fwevt_list) && !mrioc->current_event) ||
	    !mrioc->fwevt_worker_thread)
		return;

	while ((fwevt = mpi3mr_dequeue_fwevt(mrioc)))
		mpi3mr_cancel_work(fwevt);

	if (mrioc->current_event) {
		fwevt = mrioc->current_event;
		/*
		 * Don't call cancel_work_sync() for the fwevt work if
		 * the controller reset is invoked as part of processing
		 * the same fwevt work, or when the worker thread is
		 * waiting for device add/remove APIs to complete.
		 * Otherwise we will see a deadlock.
		 */
		if (current_work() == &fwevt->work || fwevt->pending_at_sml) {
			fwevt->discard = 1;
			return;
		}

		mpi3mr_cancel_work(fwevt);
	}
}

/**
 * mpi3mr_queue_qd_reduction_event - Queue TG QD reduction event
 * @mrioc: Adapter instance reference
 * @tg: Throttle group information pointer
 *
 * Accessor to queue a synthetically generated driver event to
 * the event worker thread; the driver event will be used to
 * reduce the QD of all VDs in the TG from the worker thread.
 *
 * Return: None.
 */
static void mpi3mr_queue_qd_reduction_event(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_throttle_group_info *tg)
{
	struct mpi3mr_fwevt *fwevt;
	u16 sz = sizeof(struct mpi3mr_throttle_group_info *);

	/*
	 * If the QD reduction event is already queued due to throttle and if
	 * the QD is not restored through device info change event
	 * then don't queue further reduction events
	 */
	if (tg->fw_qd != tg->modified_qd)
		return;

	fwevt = mpi3mr_alloc_fwevt(sz);
	if (!fwevt) {
		ioc_warn(mrioc, "failed to queue TG QD reduction event\n");
		return;
	}
	*(struct mpi3mr_throttle_group_info **)fwevt->event_data = tg;
	fwevt->mrioc = mrioc;
	fwevt->event_id = MPI3MR_DRIVER_EVENT_TG_QD_REDUCTION;
	fwevt->send_ack = 0;
	fwevt->process_evt = 1;
	fwevt->evt_ctx = 0;
	fwevt->event_data_size = sz;
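	/*
	 * The new QD is tg->qd_reduction tenths of the firmware QD, with
	 * a floor of 8. For example (illustrative values): fw_qd = 128
	 * and qd_reduction = 3 give modified_qd = max((128 * 3) / 10, 8),
	 * i.e. 38.
	 */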
	tg->modified_qd = max_t(u16, (tg->fw_qd * tg->qd_reduction) / 10, 8);

	dprint_event_bh(mrioc, "qd reduction event queued for tg_id(%d)\n",
	    tg->id);
	mpi3mr_fwevt_add_to_list(mrioc, fwevt);
}

/**
 * mpi3mr_invalidate_devhandles - Invalidate device handles
 * @mrioc: Adapter instance reference
 *
 * Invalidate the device handles in the target device structures.
 * Called post reset, prior to reinitializing the controller.
 *
 * Return: Nothing.
 */
void mpi3mr_invalidate_devhandles(struct mpi3mr_ioc *mrioc)
{
	struct mpi3mr_tgt_dev *tgtdev;
	struct mpi3mr_stgt_priv_data *tgt_priv;

	list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
		tgtdev->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
		if (tgtdev->starget && tgtdev->starget->hostdata) {
			tgt_priv = tgtdev->starget->hostdata;
			tgt_priv->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
			tgt_priv->io_throttle_enabled = 0;
			tgt_priv->io_divert = 0;
			tgt_priv->throttle_group = NULL;
			tgt_priv->wslen = 0;
			if (tgtdev->host_exposed)
				atomic_set(&tgt_priv->block_io, 1);
		}
	}
}

/**
 * mpi3mr_print_scmd - print individual SCSI command
 * @rq: Block request
 * @data: Adapter instance reference
 *
 * Print the SCSI command details if it is in LLD scope.
 *
 * Return: true always.
 */
static bool mpi3mr_print_scmd(struct request *rq, void *data)
{
	struct mpi3mr_ioc *mrioc = (struct mpi3mr_ioc *)data;
	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
	struct scmd_priv *priv = NULL;

	if (scmd) {
		priv = scsi_cmd_priv(scmd);
		if (!priv->in_lld_scope)
			goto out;

		ioc_info(mrioc, "%s :Host Tag = %d, qid = %d\n",
		    __func__, priv->host_tag, priv->req_q_idx + 1);
		scsi_print_command(scmd);
	}

out:
	return true;
}

/**
 * mpi3mr_flush_scmd - Flush individual SCSI command
 * @rq: Block request
 * @data: Adapter instance reference
 *
 * Return the SCSI command to the upper layers if it is in LLD
 * scope.
 *
 * Return: true always.
 */
static bool mpi3mr_flush_scmd(struct request *rq, void *data)
{
	struct mpi3mr_ioc *mrioc = (struct mpi3mr_ioc *)data;
	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
	struct scmd_priv *priv = NULL;

	if (scmd) {
		priv = scsi_cmd_priv(scmd);
		if (!priv->in_lld_scope)
			goto out;

		if (priv->meta_sg_valid)
			dma_unmap_sg(&mrioc->pdev->dev, scsi_prot_sglist(scmd),
			    scsi_prot_sg_count(scmd), scmd->sc_data_direction);
		mpi3mr_clear_scmd_priv(mrioc, scmd);
		scsi_dma_unmap(scmd);
		scmd->result = DID_RESET << 16;
		scsi_print_command(scmd);
		scsi_done(scmd);
		mrioc->flush_io_count++;
	}

out:
	return true;
}

/**
 * mpi3mr_count_dev_pending - Count commands pending for a lun
 * @rq: Block request
 * @data: SCSI device reference
 *
 * This is an iterator function called for each SCSI command in
 * a host and if the command is pending in the LLD for the
 * specific device (lun) then the device specific pending I/O
 * counter is updated in the device structure.
 *
 * Return: true always.
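 *
 * Callers are expected to zero pend_count before walking the tag set
 * with blk_mq_tagset_busy_iter().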
 */
static bool mpi3mr_count_dev_pending(struct request *rq, void *data)
{
	struct scsi_device *sdev = (struct scsi_device *)data;
	struct mpi3mr_sdev_priv_data *sdev_priv_data = sdev->hostdata;
	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
	struct scmd_priv *priv;

	if (scmd) {
		priv = scsi_cmd_priv(scmd);
		if (!priv->in_lld_scope)
			goto out;
		if (scmd->device == sdev)
			sdev_priv_data->pend_count++;
	}

out:
	return true;
}

/**
 * mpi3mr_count_tgt_pending - Count commands pending for target
 * @rq: Block request
 * @data: SCSI target reference
 *
 * This is an iterator function called for each SCSI command in
 * a host and if the command is pending in the LLD for the
 * specific target then the target specific pending I/O counter
 * is updated in the target structure.
 *
 * Return: true always.
 */
static bool mpi3mr_count_tgt_pending(struct request *rq, void *data)
{
	struct scsi_target *starget = (struct scsi_target *)data;
	struct mpi3mr_stgt_priv_data *stgt_priv_data = starget->hostdata;
	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
	struct scmd_priv *priv;

	if (scmd) {
		priv = scsi_cmd_priv(scmd);
		if (!priv->in_lld_scope)
			goto out;
		if (scmd->device && (scsi_target(scmd->device) == starget))
			stgt_priv_data->pend_count++;
	}

out:
	return true;
}

/**
 * mpi3mr_flush_host_io - Flush host I/Os
 * @mrioc: Adapter instance reference
 *
 * Flush all of the pending I/Os by calling
 * blk_mq_tagset_busy_iter() for each possible tag. This is
 * executed post controller reset.
 *
 * Return: Nothing.
 */
void mpi3mr_flush_host_io(struct mpi3mr_ioc *mrioc)
{
	struct Scsi_Host *shost = mrioc->shost;

	mrioc->flush_io_count = 0;
	ioc_info(mrioc, "%s :Flushing Host I/O cmds post reset\n", __func__);
	blk_mq_tagset_busy_iter(&shost->tag_set,
	    mpi3mr_flush_scmd, (void *)mrioc);
	ioc_info(mrioc, "%s :Flushed %d Host I/O cmds\n", __func__,
	    mrioc->flush_io_count);
}

/**
 * mpi3mr_flush_cmds_for_unrecovered_controller - Flush all pending cmds
 * @mrioc: Adapter instance reference
 *
 * This function waits for currently running IO poll threads to
 * exit and then flushes all host I/Os and any internal pending
 * cmds. This is executed after the controller is marked as
 * unrecoverable.
 *
 * Return: Nothing.
 */
void mpi3mr_flush_cmds_for_unrecovered_controller(struct mpi3mr_ioc *mrioc)
{
	struct Scsi_Host *shost = mrioc->shost;
	int i;

	if (!mrioc->unrecoverable)
		return;

	if (mrioc->op_reply_qinfo) {
		for (i = 0; i < mrioc->num_queues; i++) {
			while (atomic_read(&mrioc->op_reply_qinfo[i].in_use))
				udelay(500);
			atomic_set(&mrioc->op_reply_qinfo[i].pend_ios, 0);
		}
	}
	mrioc->flush_io_count = 0;
	blk_mq_tagset_busy_iter(&shost->tag_set,
	    mpi3mr_flush_scmd, (void *)mrioc);
	mpi3mr_flush_delayed_cmd_lists(mrioc);
	mpi3mr_flush_drv_cmds(mrioc);
}

/**
 * mpi3mr_alloc_tgtdev - target device allocator
 *
 * Allocate a target device instance and initialize the reference
 * count.
 *
 * Return: target device instance.
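 *
 * The returned instance carries an initial reference that is
 * released through mpi3mr_tgtdev_put().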
 */
static struct mpi3mr_tgt_dev *mpi3mr_alloc_tgtdev(void)
{
	struct mpi3mr_tgt_dev *tgtdev;

	tgtdev = kzalloc(sizeof(*tgtdev), GFP_ATOMIC);
	if (!tgtdev)
		return NULL;
	kref_init(&tgtdev->ref_count);
	return tgtdev;
}

/**
 * mpi3mr_tgtdev_add_to_list - Add tgtdevice to the list
 * @mrioc: Adapter instance reference
 * @tgtdev: Target device
 *
 * Add the target device to the target device list.
 *
 * Return: Nothing.
 */
static void mpi3mr_tgtdev_add_to_list(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_tgt_dev *tgtdev)
{
	unsigned long flags;

	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
	mpi3mr_tgtdev_get(tgtdev);
	INIT_LIST_HEAD(&tgtdev->list);
	list_add_tail(&tgtdev->list, &mrioc->tgtdev_list);
	tgtdev->state = MPI3MR_DEV_CREATED;
	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
}

/**
 * mpi3mr_tgtdev_del_from_list - Delete tgtdevice from the list
 * @mrioc: Adapter instance reference
 * @tgtdev: Target device
 * @must_delete: Must delete the target device from the list irrespective
 * of the device state.
 *
 * Remove the target device from the target device list.
 *
 * Return: Nothing.
 */
static void mpi3mr_tgtdev_del_from_list(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_tgt_dev *tgtdev, bool must_delete)
{
	unsigned long flags;

	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
	if ((tgtdev->state == MPI3MR_DEV_REMOVE_HS_STARTED) || must_delete) {
		if (!list_empty(&tgtdev->list)) {
			list_del_init(&tgtdev->list);
			tgtdev->state = MPI3MR_DEV_DELETED;
			mpi3mr_tgtdev_put(tgtdev);
		}
	}
	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
}

/**
 * __mpi3mr_get_tgtdev_by_handle - Get tgtdev from device handle
 * @mrioc: Adapter instance reference
 * @handle: Device handle
 *
 * Accessor to retrieve target device from the device handle.
 * Non lock version.
 *
 * Return: Target device reference.
 */
static struct mpi3mr_tgt_dev *__mpi3mr_get_tgtdev_by_handle(
	struct mpi3mr_ioc *mrioc, u16 handle)
{
	struct mpi3mr_tgt_dev *tgtdev;

	assert_spin_locked(&mrioc->tgtdev_lock);
	list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list)
		if (tgtdev->dev_handle == handle)
			goto found_tgtdev;
	return NULL;

found_tgtdev:
	mpi3mr_tgtdev_get(tgtdev);
	return tgtdev;
}

/**
 * mpi3mr_get_tgtdev_by_handle - Get tgtdev from device handle
 * @mrioc: Adapter instance reference
 * @handle: Device handle
 *
 * Accessor to retrieve target device from the device handle.
 * Lock version.
 *
 * Return: Target device reference.
 */
struct mpi3mr_tgt_dev *mpi3mr_get_tgtdev_by_handle(
	struct mpi3mr_ioc *mrioc, u16 handle)
{
	struct mpi3mr_tgt_dev *tgtdev;
	unsigned long flags;

	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
	tgtdev = __mpi3mr_get_tgtdev_by_handle(mrioc, handle);
	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
	return tgtdev;
}

/**
 * __mpi3mr_get_tgtdev_by_perst_id - Get tgtdev from persist ID
 * @mrioc: Adapter instance reference
 * @persist_id: Persistent ID
 *
 * Accessor to retrieve target device from the Persistent ID.
 * Non lock version.
 *
 * Return: Target device reference.
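 *
 * The caller must hold tgtdev_lock and is responsible for dropping
 * the returned reference with mpi3mr_tgtdev_put().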
 */
static struct mpi3mr_tgt_dev *__mpi3mr_get_tgtdev_by_perst_id(
	struct mpi3mr_ioc *mrioc, u16 persist_id)
{
	struct mpi3mr_tgt_dev *tgtdev;

	assert_spin_locked(&mrioc->tgtdev_lock);
	list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list)
		if (tgtdev->perst_id == persist_id)
			goto found_tgtdev;
	return NULL;

found_tgtdev:
	mpi3mr_tgtdev_get(tgtdev);
	return tgtdev;
}

/**
 * mpi3mr_get_tgtdev_by_perst_id - Get tgtdev from persistent ID
 * @mrioc: Adapter instance reference
 * @persist_id: Persistent ID
 *
 * Accessor to retrieve target device from the Persistent ID.
 * Lock version.
 *
 * Return: Target device reference.
 */
static struct mpi3mr_tgt_dev *mpi3mr_get_tgtdev_by_perst_id(
	struct mpi3mr_ioc *mrioc, u16 persist_id)
{
	struct mpi3mr_tgt_dev *tgtdev;
	unsigned long flags;

	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
	tgtdev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, persist_id);
	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
	return tgtdev;
}

/**
 * __mpi3mr_get_tgtdev_from_tgtpriv - Get tgtdev from tgt private
 * @mrioc: Adapter instance reference
 * @tgt_priv: Target private data
 *
 * Accessor to return target device from the target private
 * data. Non lock version.
 *
 * Return: Target device reference.
 */
static struct mpi3mr_tgt_dev *__mpi3mr_get_tgtdev_from_tgtpriv(
	struct mpi3mr_ioc *mrioc, struct mpi3mr_stgt_priv_data *tgt_priv)
{
	struct mpi3mr_tgt_dev *tgtdev;

	assert_spin_locked(&mrioc->tgtdev_lock);
	tgtdev = tgt_priv->tgt_dev;
	if (tgtdev)
		mpi3mr_tgtdev_get(tgtdev);
	return tgtdev;
}

/**
 * mpi3mr_set_io_divert_for_all_vd_in_tg - set divert for TG VDs
 * @mrioc: Adapter instance reference
 * @tg: Throttle group information pointer
 * @divert_value: 1 or 0
 *
 * Accessor to set the io_divert flag of each device associated
 * with the given throttle group to the given value.
 *
 * Return: None.
 */
static void mpi3mr_set_io_divert_for_all_vd_in_tg(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_throttle_group_info *tg, u8 divert_value)
{
	unsigned long flags;
	struct mpi3mr_tgt_dev *tgtdev;
	struct mpi3mr_stgt_priv_data *tgt_priv;

	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
	list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
		if (tgtdev->starget && tgtdev->starget->hostdata) {
			tgt_priv = tgtdev->starget->hostdata;
			if (tgt_priv->throttle_group == tg)
				tgt_priv->io_divert = divert_value;
		}
	}
	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
}

/**
 * mpi3mr_print_device_event_notice - print notice related to post processing of
 * device event after controller reset.
 *
 * @mrioc: Adapter instance reference
 * @device_add: true for device add event and false for device removal event
 *
 * Return: None.
 */
void mpi3mr_print_device_event_notice(struct mpi3mr_ioc *mrioc,
	bool device_add)
{
	ioc_notice(mrioc, "Device %s was in progress before the reset and\n",
"addition" : "removal")); 896 ioc_notice(mrioc, "completed after reset, verify whether the exposed devices\n"); 897 ioc_notice(mrioc, "are matched with attached devices for correctness\n"); 898 } 899 900 /** 901 * mpi3mr_remove_tgtdev_from_host - Remove dev from upper layers 902 * @mrioc: Adapter instance reference 903 * @tgtdev: Target device structure 904 * 905 * Checks whether the device is exposed to upper layers and if it 906 * is then remove the device from upper layers by calling 907 * scsi_remove_target(). 908 * 909 * Return: 0 on success, non zero on failure. 910 */ 911 void mpi3mr_remove_tgtdev_from_host(struct mpi3mr_ioc *mrioc, 912 struct mpi3mr_tgt_dev *tgtdev) 913 { 914 struct mpi3mr_stgt_priv_data *tgt_priv; 915 916 ioc_info(mrioc, "%s :Removing handle(0x%04x), wwid(0x%016llx)\n", 917 __func__, tgtdev->dev_handle, (unsigned long long)tgtdev->wwid); 918 if (tgtdev->starget && tgtdev->starget->hostdata) { 919 tgt_priv = tgtdev->starget->hostdata; 920 atomic_set(&tgt_priv->block_io, 0); 921 tgt_priv->dev_handle = MPI3MR_INVALID_DEV_HANDLE; 922 } 923 924 if (!mrioc->sas_transport_enabled || (tgtdev->dev_type != 925 MPI3_DEVICE_DEVFORM_SAS_SATA) || tgtdev->non_stl) { 926 if (tgtdev->starget) { 927 if (mrioc->current_event) 928 mrioc->current_event->pending_at_sml = 1; 929 scsi_remove_target(&tgtdev->starget->dev); 930 tgtdev->host_exposed = 0; 931 if (mrioc->current_event) { 932 mrioc->current_event->pending_at_sml = 0; 933 if (mrioc->current_event->discard) { 934 mpi3mr_print_device_event_notice(mrioc, 935 false); 936 return; 937 } 938 } 939 } 940 } else 941 mpi3mr_remove_tgtdev_from_sas_transport(mrioc, tgtdev); 942 mpi3mr_global_trigger(mrioc, 943 MPI3_DRIVER2_GLOBALTRIGGER_DEVICE_REMOVAL_ENABLED); 944 945 ioc_info(mrioc, "%s :Removed handle(0x%04x), wwid(0x%016llx)\n", 946 __func__, tgtdev->dev_handle, (unsigned long long)tgtdev->wwid); 947 } 948 949 /** 950 * mpi3mr_report_tgtdev_to_host - Expose device to upper layers 951 * @mrioc: Adapter instance reference 952 * @perst_id: Persistent ID of the device 953 * 954 * Checks whether the device can be exposed to upper layers and 955 * if it is not then expose the device to upper layers by 956 * calling scsi_scan_target(). 957 * 958 * Return: 0 on success, non zero on failure. 
 */
static int mpi3mr_report_tgtdev_to_host(struct mpi3mr_ioc *mrioc,
	u16 perst_id)
{
	int retval = 0;
	struct mpi3mr_tgt_dev *tgtdev;

	if (mrioc->reset_in_progress || mrioc->pci_err_recovery)
		return -1;

	tgtdev = mpi3mr_get_tgtdev_by_perst_id(mrioc, perst_id);
	if (!tgtdev) {
		retval = -1;
		goto out;
	}
	if (tgtdev->is_hidden || tgtdev->host_exposed) {
		retval = -1;
		goto out;
	}
	if (!mrioc->sas_transport_enabled || (tgtdev->dev_type !=
	    MPI3_DEVICE_DEVFORM_SAS_SATA) || tgtdev->non_stl) {
		tgtdev->host_exposed = 1;
		if (mrioc->current_event)
			mrioc->current_event->pending_at_sml = 1;
		scsi_scan_target(&mrioc->shost->shost_gendev,
		    mrioc->scsi_device_channel, tgtdev->perst_id,
		    SCAN_WILD_CARD, SCSI_SCAN_INITIAL);
		if (!tgtdev->starget)
			tgtdev->host_exposed = 0;
		if (mrioc->current_event) {
			mrioc->current_event->pending_at_sml = 0;
			if (mrioc->current_event->discard) {
				mpi3mr_print_device_event_notice(mrioc, true);
				goto out;
			}
		}
		dprint_event_bh(mrioc,
		    "exposed target device with handle(0x%04x), perst_id(%d)\n",
		    tgtdev->dev_handle, perst_id);
		goto out;
	} else
		mpi3mr_report_tgtdev_to_sas_transport(mrioc, tgtdev);
out:
	if (tgtdev)
		mpi3mr_tgtdev_put(tgtdev);

	return retval;
}

/**
 * mpi3mr_change_queue_depth - Change QD callback handler
 * @sdev: SCSI device reference
 * @q_depth: Queue depth
 *
 * Validate and limit QD and call scsi_change_queue_depth.
 *
 * Return: return value of scsi_change_queue_depth
 */
static int mpi3mr_change_queue_depth(struct scsi_device *sdev,
	int q_depth)
{
	struct scsi_target *starget = scsi_target(sdev);
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	int retval = 0;

	if (!sdev->tagged_supported)
		q_depth = 1;
	if (q_depth > shost->can_queue)
		q_depth = shost->can_queue;
	else if (!q_depth)
		q_depth = MPI3MR_DEFAULT_SDEV_QD;
	retval = scsi_change_queue_depth(sdev, q_depth);
	sdev->max_queue_depth = sdev->queue_depth;

	return retval;
}

static void mpi3mr_configure_nvme_dev(struct mpi3mr_tgt_dev *tgt_dev,
	struct queue_limits *lim)
{
	u8 pgsz = tgt_dev->dev_spec.pcie_inf.pgsz ? : MPI3MR_DEFAULT_PGSZEXP;

	lim->max_hw_sectors = tgt_dev->dev_spec.pcie_inf.mdts / 512;
	lim->virt_boundary_mask = (1 << pgsz) - 1;
}

static void mpi3mr_configure_tgt_dev(struct mpi3mr_tgt_dev *tgt_dev,
	struct queue_limits *lim)
{
	if (tgt_dev->dev_type == MPI3_DEVICE_DEVFORM_PCIE &&
	    (tgt_dev->dev_spec.pcie_inf.dev_info &
	     MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) ==
	     MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE)
		mpi3mr_configure_nvme_dev(tgt_dev, lim);
}

/**
 * mpi3mr_update_sdev - Update SCSI device information
 * @sdev: SCSI device reference
 * @data: target device reference
 *
 * This is an iterator function called for each SCSI device in a
 * target to update the target specific information into each
 * SCSI device.
 *
 * Return: Nothing.
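 *
 * The queue depth is refreshed and, for NVMe devices, max_hw_sectors
 * and virt_boundary_mask are reapplied from the cached device page0
 * data.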
 */
static void
mpi3mr_update_sdev(struct scsi_device *sdev, void *data)
{
	struct mpi3mr_tgt_dev *tgtdev;
	struct queue_limits lim;

	tgtdev = (struct mpi3mr_tgt_dev *)data;
	if (!tgtdev)
		return;

	mpi3mr_change_queue_depth(sdev, tgtdev->q_depth);

	lim = queue_limits_start_update(sdev->request_queue);
	mpi3mr_configure_tgt_dev(tgtdev, &lim);
	WARN_ON_ONCE(queue_limits_commit_update(sdev->request_queue, &lim));
}

/**
 * mpi3mr_refresh_tgtdevs - Refresh target device exposure
 * @mrioc: Adapter instance reference
 *
 * This is executed post controller reset to identify any devices
 * that went missing during the reset and remove them from the
 * upper layers, or to expose any newly detected device to the
 * upper layers.
 *
 * Return: Nothing.
 */
static void mpi3mr_refresh_tgtdevs(struct mpi3mr_ioc *mrioc)
{
	struct mpi3mr_tgt_dev *tgtdev, *tgtdev_next;
	struct mpi3mr_stgt_priv_data *tgt_priv;

	dprint_reset(mrioc, "refresh target devices: check for removals\n");
	list_for_each_entry_safe(tgtdev, tgtdev_next, &mrioc->tgtdev_list,
	    list) {
		if (((tgtdev->dev_handle == MPI3MR_INVALID_DEV_HANDLE) ||
		     tgtdev->is_hidden) &&
		     tgtdev->host_exposed && tgtdev->starget &&
		     tgtdev->starget->hostdata) {
			tgt_priv = tgtdev->starget->hostdata;
			tgt_priv->dev_removed = 1;
			atomic_set(&tgt_priv->block_io, 0);
		}
	}

	list_for_each_entry_safe(tgtdev, tgtdev_next, &mrioc->tgtdev_list,
	    list) {
		if (tgtdev->dev_handle == MPI3MR_INVALID_DEV_HANDLE) {
			dprint_reset(mrioc, "removing target device with perst_id(%d)\n",
			    tgtdev->perst_id);
			if (tgtdev->host_exposed)
				mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
			mpi3mr_tgtdev_del_from_list(mrioc, tgtdev, true);
			mpi3mr_tgtdev_put(tgtdev);
		} else if (tgtdev->is_hidden && tgtdev->host_exposed) {
			dprint_reset(mrioc, "hiding target device with perst_id(%d)\n",
			    tgtdev->perst_id);
			mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
		}
	}

	tgtdev = NULL;
	list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
		if ((tgtdev->dev_handle != MPI3MR_INVALID_DEV_HANDLE) &&
		    !tgtdev->is_hidden) {
			if (!tgtdev->host_exposed)
				mpi3mr_report_tgtdev_to_host(mrioc,
				    tgtdev->perst_id);
			else if (tgtdev->starget)
				starget_for_each_device(tgtdev->starget,
				    (void *)tgtdev, mpi3mr_update_sdev);
		}
	}
}

/**
 * mpi3mr_update_tgtdev - Update cached target device information
 * @mrioc: Adapter instance reference
 * @tgtdev: Target device internal structure
 * @dev_pg0: New device page0
 * @is_added: Flag to indicate the device is just added
 *
 * Update the information from the device page0 into the driver
 * cached target device structure.
 *
 * Return: Nothing.
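 *
 * Note: for PCIe devices the cached MDTS is capped at 1 MiB, and the
 * page size exponent defaults to 12 (4 KiB) unless device page0
 * reports no access errors.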
 */
static void mpi3mr_update_tgtdev(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_tgt_dev *tgtdev, struct mpi3_device_page0 *dev_pg0,
	bool is_added)
{
	u16 flags = 0;
	struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL;
	struct mpi3mr_enclosure_node *enclosure_dev = NULL;
	u8 prot_mask = 0;

	tgtdev->perst_id = le16_to_cpu(dev_pg0->persistent_id);
	tgtdev->dev_handle = le16_to_cpu(dev_pg0->dev_handle);
	tgtdev->dev_type = dev_pg0->device_form;
	tgtdev->io_unit_port = dev_pg0->io_unit_port;
	tgtdev->encl_handle = le16_to_cpu(dev_pg0->enclosure_handle);
	tgtdev->parent_handle = le16_to_cpu(dev_pg0->parent_dev_handle);
	tgtdev->slot = le16_to_cpu(dev_pg0->slot);
	tgtdev->q_depth = le16_to_cpu(dev_pg0->queue_depth);
	tgtdev->wwid = le64_to_cpu(dev_pg0->wwid);
	tgtdev->devpg0_flag = le16_to_cpu(dev_pg0->flags);

	if (tgtdev->encl_handle)
		enclosure_dev = mpi3mr_enclosure_find_by_handle(mrioc,
		    tgtdev->encl_handle);
	if (enclosure_dev)
		tgtdev->enclosure_logical_id = le64_to_cpu(
		    enclosure_dev->pg0.enclosure_logical_id);

	flags = tgtdev->devpg0_flag;

	tgtdev->is_hidden = (flags & MPI3_DEVICE0_FLAGS_HIDDEN);

	if (is_added == true)
		tgtdev->io_throttle_enabled =
		    (flags & MPI3_DEVICE0_FLAGS_IO_THROTTLING_REQUIRED) ? 1 : 0;

	switch (flags & MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_MASK) {
	case MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_256_LB:
		tgtdev->wslen = MPI3MR_WRITE_SAME_MAX_LEN_256_BLKS;
		break;
	case MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_2048_LB:
		tgtdev->wslen = MPI3MR_WRITE_SAME_MAX_LEN_2048_BLKS;
		break;
	case MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_NO_LIMIT:
	default:
		tgtdev->wslen = 0;
		break;
	}

	if (tgtdev->starget && tgtdev->starget->hostdata) {
		scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *)
		    tgtdev->starget->hostdata;
		scsi_tgt_priv_data->perst_id = tgtdev->perst_id;
		scsi_tgt_priv_data->dev_handle = tgtdev->dev_handle;
		scsi_tgt_priv_data->dev_type = tgtdev->dev_type;
		scsi_tgt_priv_data->io_throttle_enabled =
		    tgtdev->io_throttle_enabled;
		if (is_added == true)
			atomic_set(&scsi_tgt_priv_data->block_io, 0);
		scsi_tgt_priv_data->wslen = tgtdev->wslen;
	}

	switch (dev_pg0->access_status) {
	case MPI3_DEVICE0_ASTATUS_NO_ERRORS:
	case MPI3_DEVICE0_ASTATUS_PREPARE:
	case MPI3_DEVICE0_ASTATUS_NEEDS_INITIALIZATION:
	case MPI3_DEVICE0_ASTATUS_DEVICE_MISSING_DELAY:
		break;
	default:
		tgtdev->is_hidden = 1;
		break;
	}

	switch (tgtdev->dev_type) {
	case MPI3_DEVICE_DEVFORM_SAS_SATA:
	{
		struct mpi3_device0_sas_sata_format *sasinf =
		    &dev_pg0->device_specific.sas_sata_format;
		u16 dev_info = le16_to_cpu(sasinf->device_info);

		tgtdev->dev_spec.sas_sata_inf.dev_info = dev_info;
		tgtdev->dev_spec.sas_sata_inf.sas_address =
		    le64_to_cpu(sasinf->sas_address);
		tgtdev->dev_spec.sas_sata_inf.phy_id = sasinf->phy_num;
		tgtdev->dev_spec.sas_sata_inf.attached_phy_id =
		    sasinf->attached_phy_identifier;
		if ((dev_info & MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_MASK) !=
		    MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_END_DEVICE)
			tgtdev->is_hidden = 1;
		else if (!(dev_info & (MPI3_SAS_DEVICE_INFO_STP_SATA_TARGET |
		    MPI3_SAS_DEVICE_INFO_SSP_TARGET)))
			tgtdev->is_hidden = 1;

		if (((tgtdev->devpg0_flag &
		    MPI3_DEVICE0_FLAGS_ATT_METHOD_DIR_ATTACHED) &&
		    (tgtdev->devpg0_flag &
		    MPI3_DEVICE0_FLAGS_ATT_METHOD_VIRTUAL)) ||
		    (tgtdev->parent_handle == 0xFFFF))
			tgtdev->non_stl = 1;
		if (tgtdev->dev_spec.sas_sata_inf.hba_port)
			tgtdev->dev_spec.sas_sata_inf.hba_port->port_id =
			    dev_pg0->io_unit_port;
		break;
	}
	case MPI3_DEVICE_DEVFORM_PCIE:
	{
		struct mpi3_device0_pcie_format *pcieinf =
		    &dev_pg0->device_specific.pcie_format;
		u16 dev_info = le16_to_cpu(pcieinf->device_info);

		tgtdev->dev_spec.pcie_inf.dev_info = dev_info;
		tgtdev->dev_spec.pcie_inf.capb =
		    le32_to_cpu(pcieinf->capabilities);
		tgtdev->dev_spec.pcie_inf.mdts = MPI3MR_DEFAULT_MDTS;
		/* 2^12 = 4096 */
		tgtdev->dev_spec.pcie_inf.pgsz = 12;
		if (dev_pg0->access_status == MPI3_DEVICE0_ASTATUS_NO_ERRORS) {
			tgtdev->dev_spec.pcie_inf.mdts =
			    le32_to_cpu(pcieinf->maximum_data_transfer_size);
			tgtdev->dev_spec.pcie_inf.pgsz = pcieinf->page_size;
			tgtdev->dev_spec.pcie_inf.reset_to =
			    max_t(u8, pcieinf->controller_reset_to,
			    MPI3MR_INTADMCMD_TIMEOUT);
			tgtdev->dev_spec.pcie_inf.abort_to =
			    max_t(u8, pcieinf->nvme_abort_to,
			    MPI3MR_INTADMCMD_TIMEOUT);
		}
		if (tgtdev->dev_spec.pcie_inf.mdts > (1024 * 1024))
			tgtdev->dev_spec.pcie_inf.mdts = (1024 * 1024);
		if (((dev_info & MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) !=
		    MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) &&
		    ((dev_info & MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) !=
		    MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_SCSI_DEVICE))
			tgtdev->is_hidden = 1;
		tgtdev->non_stl = 1;
		if (!mrioc->shost)
			break;
		prot_mask = scsi_host_get_prot(mrioc->shost);
		if (prot_mask & SHOST_DIX_TYPE0_PROTECTION) {
			scsi_host_set_prot(mrioc->shost, prot_mask & 0x77);
			ioc_info(mrioc,
			    "%s : Disabling DIX0 prot capability\n", __func__);
			ioc_info(mrioc,
			    "because HBA does not support DIX0 operation on NVME drives\n");
		}
		break;
	}
	case MPI3_DEVICE_DEVFORM_VD:
	{
		struct mpi3_device0_vd_format *vdinf =
		    &dev_pg0->device_specific.vd_format;
		struct mpi3mr_throttle_group_info *tg = NULL;
		u16 vdinf_io_throttle_group =
		    le16_to_cpu(vdinf->io_throttle_group);

		tgtdev->dev_spec.vd_inf.state = vdinf->vd_state;
		if (vdinf->vd_state == MPI3_DEVICE0_VD_STATE_OFFLINE)
			tgtdev->is_hidden = 1;
		tgtdev->non_stl = 1;
		tgtdev->dev_spec.vd_inf.reset_to =
		    max_t(u8, vdinf->vd_reset_to,
		    MPI3MR_INTADMCMD_TIMEOUT);
		tgtdev->dev_spec.vd_inf.abort_to =
		    max_t(u8, vdinf->vd_abort_to,
		    MPI3MR_INTADMCMD_TIMEOUT);
		tgtdev->dev_spec.vd_inf.tg_id = vdinf_io_throttle_group;
		tgtdev->dev_spec.vd_inf.tg_high =
		    le16_to_cpu(vdinf->io_throttle_group_high) * 2048;
		tgtdev->dev_spec.vd_inf.tg_low =
		    le16_to_cpu(vdinf->io_throttle_group_low) * 2048;
		if (vdinf_io_throttle_group < mrioc->num_io_throttle_group) {
			tg = mrioc->throttle_groups + vdinf_io_throttle_group;
			tg->id = vdinf_io_throttle_group;
			tg->high = tgtdev->dev_spec.vd_inf.tg_high;
			tg->low = tgtdev->dev_spec.vd_inf.tg_low;
			tg->qd_reduction =
			    tgtdev->dev_spec.vd_inf.tg_qd_reduction;
			if (is_added == true)
				tg->fw_qd = tgtdev->q_depth;
			tg->modified_qd = tgtdev->q_depth;
		}
		tgtdev->dev_spec.vd_inf.tg = tg;
		if (scsi_tgt_priv_data)
			scsi_tgt_priv_data->throttle_group = tg;
		break;
	}
	default:
		break;
	}
}

/**
 * mpi3mr_devstatuschg_evt_bh - DevStatusChange evt bottomhalf
 * @mrioc: Adapter instance reference
 * @fwevt: Firmware event information.
 *
 * Process Device Status Change event and based on the device's
 * new information, either expose the device to the upper layers,
 * or remove the device from the upper layers.
 *
 * Return: Nothing.
 */
static void mpi3mr_devstatuschg_evt_bh(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_fwevt *fwevt)
{
	u16 dev_handle = 0;
	u8 uhide = 0, delete = 0, cleanup = 0;
	struct mpi3mr_tgt_dev *tgtdev = NULL;
	struct mpi3_event_data_device_status_change *evtdata =
	    (struct mpi3_event_data_device_status_change *)fwevt->event_data;

	dev_handle = le16_to_cpu(evtdata->dev_handle);
	dprint_event_bh(mrioc,
	    "processing device status change event bottom half for handle(0x%04x), rc(0x%02x)\n",
	    dev_handle, evtdata->reason_code);
	switch (evtdata->reason_code) {
	case MPI3_EVENT_DEV_STAT_RC_HIDDEN:
		delete = 1;
		break;
	case MPI3_EVENT_DEV_STAT_RC_NOT_HIDDEN:
		uhide = 1;
		break;
	case MPI3_EVENT_DEV_STAT_RC_VD_NOT_RESPONDING:
		delete = 1;
		cleanup = 1;
		break;
	default:
		ioc_info(mrioc, "%s :Unhandled reason code(0x%x)\n", __func__,
		    evtdata->reason_code);
		break;
	}

	tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle);
	if (!tgtdev) {
		dprint_event_bh(mrioc,
		    "processing device status change event bottom half,\n"
		    "cannot identify target device for handle(0x%04x), rc(0x%02x)\n",
		    dev_handle, evtdata->reason_code);
		goto out;
	}
	if (uhide) {
		tgtdev->is_hidden = 0;
		if (!tgtdev->host_exposed)
			mpi3mr_report_tgtdev_to_host(mrioc, tgtdev->perst_id);
	}

	if (delete)
		mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);

	if (cleanup) {
		mpi3mr_tgtdev_del_from_list(mrioc, tgtdev, false);
		mpi3mr_tgtdev_put(tgtdev);
	}

out:
	if (tgtdev)
		mpi3mr_tgtdev_put(tgtdev);
}

/**
 * mpi3mr_devinfochg_evt_bh - DeviceInfoChange evt bottomhalf
 * @mrioc: Adapter instance reference
 * @dev_pg0: New device page0
 *
 * Process Device Info Change event and based on the device's new
 * information, either expose the device to the upper layers,
 * remove the device from the upper layers, or update the details
 * of the device.
 *
 * Return: Nothing.
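 *
 * Note: a newly unhidden device is exposed, a newly hidden one is
 * removed, and a device that stays exposed has the queue settings of
 * its SCSI devices refreshed.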
 */
static void mpi3mr_devinfochg_evt_bh(struct mpi3mr_ioc *mrioc,
	struct mpi3_device_page0 *dev_pg0)
{
	struct mpi3mr_tgt_dev *tgtdev = NULL;
	u16 dev_handle = 0, perst_id = 0;

	perst_id = le16_to_cpu(dev_pg0->persistent_id);
	dev_handle = le16_to_cpu(dev_pg0->dev_handle);
	dprint_event_bh(mrioc,
	    "processing device info change event bottom half for handle(0x%04x), perst_id(%d)\n",
	    dev_handle, perst_id);
	tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle);
	if (!tgtdev) {
		dprint_event_bh(mrioc,
		    "cannot identify target device for device info\n"
		    "change event handle(0x%04x), perst_id(%d)\n",
		    dev_handle, perst_id);
		goto out;
	}
	mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0, false);
	if (!tgtdev->is_hidden && !tgtdev->host_exposed)
		mpi3mr_report_tgtdev_to_host(mrioc, perst_id);
	if (tgtdev->is_hidden && tgtdev->host_exposed)
		mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
	if (!tgtdev->is_hidden && tgtdev->host_exposed && tgtdev->starget)
		starget_for_each_device(tgtdev->starget, (void *)tgtdev,
		    mpi3mr_update_sdev);
out:
	if (tgtdev)
		mpi3mr_tgtdev_put(tgtdev);
}

/**
 * mpi3mr_free_enclosure_list - release enclosures
 * @mrioc: Adapter instance reference
 *
 * Free memory allocated during enclosure add.
 *
 * Return: Nothing.
 */
void mpi3mr_free_enclosure_list(struct mpi3mr_ioc *mrioc)
{
	struct mpi3mr_enclosure_node *enclosure_dev, *enclosure_dev_next;

	list_for_each_entry_safe(enclosure_dev,
	    enclosure_dev_next, &mrioc->enclosure_list, list) {
		list_del(&enclosure_dev->list);
		kfree(enclosure_dev);
	}
}

/**
 * mpi3mr_enclosure_find_by_handle - enclosure search by handle
 * @mrioc: Adapter instance reference
 * @handle: Firmware device handle of the enclosure
 *
 * This searches for the enclosure device based on the handle, then
 * returns the enclosure object.
 *
 * Return: Enclosure object reference or NULL
 */
struct mpi3mr_enclosure_node *mpi3mr_enclosure_find_by_handle(
	struct mpi3mr_ioc *mrioc, u16 handle)
{
	struct mpi3mr_enclosure_node *enclosure_dev, *r = NULL;

	list_for_each_entry(enclosure_dev, &mrioc->enclosure_list, list) {
		if (le16_to_cpu(enclosure_dev->pg0.enclosure_handle) != handle)
			continue;
		r = enclosure_dev;
		goto out;
	}
out:
	return r;
}

/**
 * mpi3mr_process_trigger_data_event_bh - Process trigger event
 * data
 * @mrioc: Adapter instance reference
 * @event_data: Event data
 *
 * This function releases diag buffers or issues a diag fault
 * based on trigger conditions.
 *
 * Return: Nothing
 */
static void mpi3mr_process_trigger_data_event_bh(struct mpi3mr_ioc *mrioc,
	struct trigger_event_data *event_data)
{
	struct diag_buffer_desc *trace_hdb = event_data->trace_hdb;
	struct diag_buffer_desc *fw_hdb = event_data->fw_hdb;
	unsigned long flags;
	int retval = 0;
	u8 trigger_type = event_data->trigger_type;
	union mpi3mr_trigger_data *trigger_data =
	    &event_data->trigger_specific_data;

	if (event_data->snapdump) {
		if (trace_hdb)
			mpi3mr_set_trigger_data_in_hdb(trace_hdb, trigger_type,
			    trigger_data, 1);
		if (fw_hdb)
			mpi3mr_set_trigger_data_in_hdb(fw_hdb, trigger_type,
			    trigger_data, 1);
		mpi3mr_soft_reset_handler(mrioc,
		    MPI3MR_RESET_FROM_TRIGGER, 1);
		return;
	}

	if (trace_hdb) {
		retval = mpi3mr_issue_diag_buf_release(mrioc, trace_hdb);
		if (!retval) {
			mpi3mr_set_trigger_data_in_hdb(trace_hdb, trigger_type,
			    trigger_data, 1);
		}
		spin_lock_irqsave(&mrioc->trigger_lock, flags);
		mrioc->trace_release_trigger_active = false;
		spin_unlock_irqrestore(&mrioc->trigger_lock, flags);
	}
	if (fw_hdb) {
		retval = mpi3mr_issue_diag_buf_release(mrioc, fw_hdb);
		if (!retval) {
			mpi3mr_set_trigger_data_in_hdb(fw_hdb, trigger_type,
			    trigger_data, 1);
		}
		spin_lock_irqsave(&mrioc->trigger_lock, flags);
		mrioc->fw_release_trigger_active = false;
		spin_unlock_irqrestore(&mrioc->trigger_lock, flags);
	}
}

/**
 * mpi3mr_encldev_add_chg_evt_debug - debug for enclosure event
 * @mrioc: Adapter instance reference
 * @encl_pg0: Enclosure page 0.
 * @is_added: Added event or not
 *
 * Return: Nothing.
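 *
 * The logged present(%d) value is taken from bit 4 of the enclosure
 * flags (MPI3_ENCLS0_FLAGS_ENCL_DEV_PRESENT_MASK); e.g., flags of
 * 0x0010 decode as present(1), assuming the mask is the single bit 4.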
 */
static void mpi3mr_encldev_add_chg_evt_debug(struct mpi3mr_ioc *mrioc,
	struct mpi3_enclosure_page0 *encl_pg0, u8 is_added)
{
	char *reason_str = NULL;

	if (!(mrioc->logging_level & MPI3_DEBUG_EVENT_WORK_TASK))
		return;

	if (is_added)
		reason_str = "enclosure added";
	else
		reason_str = "enclosure dev status changed";

	ioc_info(mrioc,
	    "%s: handle(0x%04x), enclosure logical id(0x%016llx)\n",
	    reason_str, le16_to_cpu(encl_pg0->enclosure_handle),
	    (unsigned long long)le64_to_cpu(encl_pg0->enclosure_logical_id));
	ioc_info(mrioc,
	    "number of slots(%d), port(%d), flags(0x%04x), present(%d)\n",
	    le16_to_cpu(encl_pg0->num_slots), encl_pg0->io_unit_port,
	    le16_to_cpu(encl_pg0->flags),
	    ((le16_to_cpu(encl_pg0->flags) &
	      MPI3_ENCLS0_FLAGS_ENCL_DEV_PRESENT_MASK) >> 4));
}

/**
 * mpi3mr_encldev_add_chg_evt_bh - Enclosure evt bottomhalf
 * @mrioc: Adapter instance reference
 * @fwevt: Firmware event reference
 *
 * Prints information about the Enclosure device status or
 * Enclosure add events if logging is enabled and adds or removes
 * the enclosure from the controller's internal list of
 * enclosures.
 *
 * Return: Nothing.
 */
static void mpi3mr_encldev_add_chg_evt_bh(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_fwevt *fwevt)
{
	struct mpi3mr_enclosure_node *enclosure_dev = NULL;
	struct mpi3_enclosure_page0 *encl_pg0;
	u16 encl_handle;
	u8 added, present;

	encl_pg0 = (struct mpi3_enclosure_page0 *) fwevt->event_data;
	added = (fwevt->event_id == MPI3_EVENT_ENCL_DEVICE_ADDED) ? 1 : 0;
	mpi3mr_encldev_add_chg_evt_debug(mrioc, encl_pg0, added);

	encl_handle = le16_to_cpu(encl_pg0->enclosure_handle);
	present = ((le16_to_cpu(encl_pg0->flags) &
	    MPI3_ENCLS0_FLAGS_ENCL_DEV_PRESENT_MASK) >> 4);

	if (encl_handle)
		enclosure_dev = mpi3mr_enclosure_find_by_handle(mrioc,
		    encl_handle);
	if (!enclosure_dev && present) {
		enclosure_dev =
		    kzalloc(sizeof(struct mpi3mr_enclosure_node),
		    GFP_KERNEL);
		if (!enclosure_dev)
			return;
		list_add_tail(&enclosure_dev->list,
		    &mrioc->enclosure_list);
	}
	if (enclosure_dev) {
		if (!present) {
			list_del(&enclosure_dev->list);
			kfree(enclosure_dev);
		} else
			memcpy(&enclosure_dev->pg0, encl_pg0,
			    sizeof(enclosure_dev->pg0));
	}
}

/**
 * mpi3mr_sastopochg_evt_debug - SASTopoChange details
 * @mrioc: Adapter instance reference
 * @event_data: SAS topology change list event data
 *
 * Prints information about the SAS topology change event.
 *
 * Return: Nothing.
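 *
 * Note: each phy entry's link_rate field packs the new rate in the
 * upper nibble and the previous rate in the lower nibble.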
 */
static void
mpi3mr_sastopochg_evt_debug(struct mpi3mr_ioc *mrioc,
	struct mpi3_event_data_sas_topology_change_list *event_data)
{
	int i;
	u16 handle;
	u8 reason_code, phy_number;
	char *status_str = NULL;
	u8 link_rate, prev_link_rate;

	switch (event_data->exp_status) {
	case MPI3_EVENT_SAS_TOPO_ES_NOT_RESPONDING:
		status_str = "remove";
		break;
	case MPI3_EVENT_SAS_TOPO_ES_RESPONDING:
		status_str = "responding";
		break;
	case MPI3_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING:
		status_str = "remove delay";
		break;
	case MPI3_EVENT_SAS_TOPO_ES_NO_EXPANDER:
		status_str = "direct attached";
		break;
	default:
		status_str = "unknown status";
		break;
	}
	ioc_info(mrioc, "%s :sas topology change: (%s)\n",
	    __func__, status_str);
	ioc_info(mrioc,
	    "%s :\texpander_handle(0x%04x), port(%d), enclosure_handle(0x%04x) start_phy(%02d), num_entries(%d)\n",
	    __func__, le16_to_cpu(event_data->expander_dev_handle),
	    event_data->io_unit_port,
	    le16_to_cpu(event_data->enclosure_handle),
	    event_data->start_phy_num, event_data->num_entries);
	for (i = 0; i < event_data->num_entries; i++) {
		handle = le16_to_cpu(event_data->phy_entry[i].attached_dev_handle);
		if (!handle)
			continue;
		phy_number = event_data->start_phy_num + i;
		reason_code = event_data->phy_entry[i].status &
		    MPI3_EVENT_SAS_TOPO_PHY_RC_MASK;
		switch (reason_code) {
		case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING:
			status_str = "target remove";
			break;
		case MPI3_EVENT_SAS_TOPO_PHY_RC_DELAY_NOT_RESPONDING:
			status_str = "delay target remove";
			break;
		case MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED:
			status_str = "link status change";
			break;
		case MPI3_EVENT_SAS_TOPO_PHY_RC_NO_CHANGE:
			status_str = "link status no change";
			break;
		case MPI3_EVENT_SAS_TOPO_PHY_RC_RESPONDING:
			status_str = "target responding";
			break;
		default:
			status_str = "unknown";
			break;
		}
		link_rate = event_data->phy_entry[i].link_rate >> 4;
		prev_link_rate = event_data->phy_entry[i].link_rate & 0xF;
		ioc_info(mrioc,
		    "%s :\tphy(%02d), attached_handle(0x%04x): %s: link rate: new(0x%02x), old(0x%02x)\n",
		    __func__, phy_number, handle, status_str, link_rate,
		    prev_link_rate);
	}
}

/**
 * mpi3mr_sastopochg_evt_bh - SASTopologyChange evt bottomhalf
 * @mrioc: Adapter instance reference
 * @fwevt: Firmware event reference
 *
 * Prints information about the SAS topology change event and
 * for "not responding" event code, removes the device from the
 * upper layers.
 *
 * Return: Nothing.
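 *
 * Processing stops part way through the entry list if the event is
 * discarded by an intervening controller reset.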
 */
static void mpi3mr_sastopochg_evt_bh(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_fwevt *fwevt)
{
	struct mpi3_event_data_sas_topology_change_list *event_data =
	    (struct mpi3_event_data_sas_topology_change_list *)fwevt->event_data;
	int i;
	u16 handle;
	u8 reason_code;
	u64 exp_sas_address = 0, parent_sas_address = 0;
	struct mpi3mr_hba_port *hba_port = NULL;
	struct mpi3mr_tgt_dev *tgtdev = NULL;
	struct mpi3mr_sas_node *sas_expander = NULL;
	unsigned long flags;
	u8 link_rate, prev_link_rate, parent_phy_number;

	mpi3mr_sastopochg_evt_debug(mrioc, event_data);
	if (mrioc->sas_transport_enabled) {
		hba_port = mpi3mr_get_hba_port_by_id(mrioc,
		    event_data->io_unit_port);
		if (le16_to_cpu(event_data->expander_dev_handle)) {
			spin_lock_irqsave(&mrioc->sas_node_lock, flags);
			sas_expander = __mpi3mr_expander_find_by_handle(mrioc,
			    le16_to_cpu(event_data->expander_dev_handle));
			if (sas_expander) {
				exp_sas_address = sas_expander->sas_address;
				hba_port = sas_expander->hba_port;
			}
			spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
			parent_sas_address = exp_sas_address;
		} else
			parent_sas_address = mrioc->sas_hba.sas_address;
	}

	for (i = 0; i < event_data->num_entries; i++) {
		if (fwevt->discard)
			return;
		handle = le16_to_cpu(event_data->phy_entry[i].attached_dev_handle);
		if (!handle)
			continue;
		tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);
		if (!tgtdev)
			continue;

		reason_code = event_data->phy_entry[i].status &
		    MPI3_EVENT_SAS_TOPO_PHY_RC_MASK;

		switch (reason_code) {
		case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING:
			if (tgtdev->host_exposed)
				mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
			mpi3mr_tgtdev_del_from_list(mrioc, tgtdev, false);
			mpi3mr_tgtdev_put(tgtdev);
			break;
		case MPI3_EVENT_SAS_TOPO_PHY_RC_RESPONDING:
		case MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED:
		case MPI3_EVENT_SAS_TOPO_PHY_RC_NO_CHANGE:
		{
			if (!mrioc->sas_transport_enabled || tgtdev->non_stl
			    || tgtdev->is_hidden)
				break;
			link_rate = event_data->phy_entry[i].link_rate >> 4;
			prev_link_rate = event_data->phy_entry[i].link_rate & 0xF;
			if (link_rate == prev_link_rate)
				break;
			if (!parent_sas_address)
				break;
			parent_phy_number = event_data->start_phy_num + i;
			mpi3mr_update_links(mrioc, parent_sas_address, handle,
			    parent_phy_number, link_rate, hba_port);
			break;
		}
		default:
			break;
		}
		if (tgtdev)
			mpi3mr_tgtdev_put(tgtdev);
	}

	if (mrioc->sas_transport_enabled && (event_data->exp_status ==
	    MPI3_EVENT_SAS_TOPO_ES_NOT_RESPONDING)) {
		if (sas_expander)
			mpi3mr_expander_remove(mrioc, exp_sas_address,
			    hba_port);
	}
}

/**
 * mpi3mr_pcietopochg_evt_debug - PCIeTopoChange details
 * @mrioc: Adapter instance reference
 * @event_data: PCIe topology change list event data
 *
 * Prints information about the PCIe topology change event.
 *
 * Return: Nothing.
1825 */ 1826 static void 1827 mpi3mr_pcietopochg_evt_debug(struct mpi3mr_ioc *mrioc, 1828 struct mpi3_event_data_pcie_topology_change_list *event_data) 1829 { 1830 int i; 1831 u16 handle; 1832 u16 reason_code; 1833 u8 port_number; 1834 char *status_str = NULL; 1835 u8 link_rate, prev_link_rate; 1836 1837 switch (event_data->switch_status) { 1838 case MPI3_EVENT_PCIE_TOPO_SS_NOT_RESPONDING: 1839 status_str = "remove"; 1840 break; 1841 case MPI3_EVENT_PCIE_TOPO_SS_RESPONDING: 1842 status_str = "responding"; 1843 break; 1844 case MPI3_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING: 1845 status_str = "remove delay"; 1846 break; 1847 case MPI3_EVENT_PCIE_TOPO_SS_NO_PCIE_SWITCH: 1848 status_str = "direct attached"; 1849 break; 1850 default: 1851 status_str = "unknown status"; 1852 break; 1853 } 1854 ioc_info(mrioc, "%s :pcie topology change: (%s)\n", 1855 __func__, status_str); 1856 ioc_info(mrioc, 1857 "%s :\tswitch_handle(0x%04x), enclosure_handle(0x%04x) start_port(%02d), num_entries(%d)\n", 1858 __func__, le16_to_cpu(event_data->switch_dev_handle), 1859 le16_to_cpu(event_data->enclosure_handle), 1860 event_data->start_port_num, event_data->num_entries); 1861 for (i = 0; i < event_data->num_entries; i++) { 1862 handle = 1863 le16_to_cpu(event_data->port_entry[i].attached_dev_handle); 1864 if (!handle) 1865 continue; 1866 port_number = event_data->start_port_num + i; 1867 reason_code = event_data->port_entry[i].port_status; 1868 switch (reason_code) { 1869 case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING: 1870 status_str = "target remove"; 1871 break; 1872 case MPI3_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING: 1873 status_str = "delay target remove"; 1874 break; 1875 case MPI3_EVENT_PCIE_TOPO_PS_PORT_CHANGED: 1876 status_str = "link status change"; 1877 break; 1878 case MPI3_EVENT_PCIE_TOPO_PS_NO_CHANGE: 1879 status_str = "link status no change"; 1880 break; 1881 case MPI3_EVENT_PCIE_TOPO_PS_RESPONDING: 1882 status_str = "target responding"; 1883 break; 1884 default: 1885 status_str = "unknown"; 1886 break; 1887 } 1888 link_rate = event_data->port_entry[i].current_port_info & 1889 MPI3_EVENT_PCIE_TOPO_PI_RATE_MASK; 1890 prev_link_rate = event_data->port_entry[i].previous_port_info & 1891 MPI3_EVENT_PCIE_TOPO_PI_RATE_MASK; 1892 ioc_info(mrioc, 1893 "%s :\tport(%02d), attached_handle(0x%04x): %s: link rate: new(0x%02x), old(0x%02x)\n", 1894 __func__, port_number, handle, status_str, link_rate, 1895 prev_link_rate); 1896 } 1897 } 1898 1899 /** 1900 * mpi3mr_pcietopochg_evt_bh - PCIeTopologyChange evt bottomhalf 1901 * @mrioc: Adapter instance reference 1902 * @fwevt: Firmware event reference 1903 * 1904 * Prints information about the PCIe topology change event and 1905 * for "not responding" event code, removes the device from the 1906 * upper layers. 1907 * 1908 * Return: Nothing. 
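 *
 * Walking the entry list stops early when the event is marked
 * discarded (fwevt->discard).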
1909 */
1910 static void mpi3mr_pcietopochg_evt_bh(struct mpi3mr_ioc *mrioc,
1911 struct mpi3mr_fwevt *fwevt)
1912 {
1913 struct mpi3_event_data_pcie_topology_change_list *event_data =
1914 (struct mpi3_event_data_pcie_topology_change_list *)fwevt->event_data;
1915 int i;
1916 u16 handle;
1917 u8 reason_code;
1918 struct mpi3mr_tgt_dev *tgtdev = NULL;
1919
1920 mpi3mr_pcietopochg_evt_debug(mrioc, event_data);
1921
1922 for (i = 0; i < event_data->num_entries; i++) {
1923 if (fwevt->discard)
1924 return;
1925 handle =
1926 le16_to_cpu(event_data->port_entry[i].attached_dev_handle);
1927 if (!handle)
1928 continue;
1929 tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);
1930 if (!tgtdev)
1931 continue;
1932
1933 reason_code = event_data->port_entry[i].port_status;
1934
1935 switch (reason_code) {
1936 case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
1937 if (tgtdev->host_exposed)
1938 mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
1939 mpi3mr_tgtdev_del_from_list(mrioc, tgtdev, false);
1940 mpi3mr_tgtdev_put(tgtdev);
1941 break;
1942 default:
1943 break;
1944 }
1945 if (tgtdev)
1946 mpi3mr_tgtdev_put(tgtdev);
1947 }
1948 }
1949
1950 /**
1951 * mpi3mr_logdata_evt_bh - Log data event bottomhalf
1952 * @mrioc: Adapter instance reference
1953 * @fwevt: Firmware event reference
1954 *
1955 * Extracts the event data and calls application interfacing
1956 * function to process the event further.
1957 *
1958 * Return: Nothing.
1959 */
1960 static void mpi3mr_logdata_evt_bh(struct mpi3mr_ioc *mrioc,
1961 struct mpi3mr_fwevt *fwevt)
1962 {
1963 mpi3mr_app_save_logdata(mrioc, fwevt->event_data,
1964 fwevt->event_data_size);
1965 }
1966
1967 /**
1968 * mpi3mr_update_sdev_qd - Update SCSI device queue depth
1969 * @sdev: SCSI device reference
1970 * @data: Queue depth reference
1971 *
1972 * This is an iterator function called for each SCSI device in a
1973 * target to update its queue depth.
1974 *
1975 * Return: Nothing.
1976 */
1977 static void mpi3mr_update_sdev_qd(struct scsi_device *sdev, void *data)
1978 {
1979 u16 *q_depth = (u16 *)data;
1980
1981 scsi_change_queue_depth(sdev, (int)*q_depth);
1982 sdev->max_queue_depth = sdev->queue_depth;
1983 }
1984
1985 /**
1986 * mpi3mr_set_qd_for_all_vd_in_tg - set QD for TG VDs
1987 * @mrioc: Adapter instance reference
1988 * @tg: Throttle group information pointer
1989 *
1990 * Accessor to reduce QD for each device associated with the
1991 * given throttle group.
1992 *
1993 * Return: None.
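 *
 * Walks the target device list under tgtdev_lock and applies the
 * group's modified_qd to every SCSI device of each matching target
 * through starget_for_each_device().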
1994 */
1995 static void mpi3mr_set_qd_for_all_vd_in_tg(struct mpi3mr_ioc *mrioc,
1996 struct mpi3mr_throttle_group_info *tg)
1997 {
1998 unsigned long flags;
1999 struct mpi3mr_tgt_dev *tgtdev;
2000 struct mpi3mr_stgt_priv_data *tgt_priv;
2001
2002
2003 spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
2004 list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
2005 if (tgtdev->starget && tgtdev->starget->hostdata) {
2006 tgt_priv = tgtdev->starget->hostdata;
2007 if (tgt_priv->throttle_group == tg) {
2008 dprint_event_bh(mrioc,
2009 "updating qd due to throttling for persist_id(%d) original_qd(%d), reduced_qd (%d)\n",
2010 tgt_priv->perst_id, tgtdev->q_depth,
2011 tg->modified_qd);
2012 starget_for_each_device(tgtdev->starget,
2013 (void *)&tg->modified_qd,
2014 mpi3mr_update_sdev_qd);
2015 }
2016 }
2017 }
2018 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
2019 }
2020
2021 /**
2022 * mpi3mr_fwevt_bh - Firmware event bottomhalf handler
2023 * @mrioc: Adapter instance reference
2024 * @fwevt: Firmware event reference
2025 *
2026 * Identifies the firmware event and calls the corresponding bottom
2027 * half handler and sends event acknowledgment if required.
2028 *
2029 * Return: Nothing.
2030 */
2031 static void mpi3mr_fwevt_bh(struct mpi3mr_ioc *mrioc,
2032 struct mpi3mr_fwevt *fwevt)
2033 {
2034 struct mpi3_device_page0 *dev_pg0 = NULL;
2035 u16 perst_id, handle, dev_info;
2036 struct mpi3_device0_sas_sata_format *sasinf = NULL;
2037 unsigned int timeout;
2038
2039 mpi3mr_fwevt_del_from_list(mrioc, fwevt);
2040 mrioc->current_event = fwevt;
2041
2042 if (mrioc->stop_drv_processing) {
2043 dprint_event_bh(mrioc, "ignoring event(0x%02x) in the bottom half handler\n"
2044 "due to stop_drv_processing\n", fwevt->event_id);
2045 goto out;
2046 }
2047
2048 if (mrioc->unrecoverable) {
2049 dprint_event_bh(mrioc,
2050 "ignoring event(0x%02x) in bottom half handler due to unrecoverable controller\n",
2051 fwevt->event_id);
2052 goto out;
2053 }
2054
2055 if (!fwevt->process_evt)
2056 goto evt_ack;
2057
2058 dprint_event_bh(mrioc, "processing event(0x%02x) -(0x%08x) in the bottom half handler\n",
2059 fwevt->event_id, fwevt->evt_ctx);
2060
2061 switch (fwevt->event_id) {
2062 case MPI3_EVENT_DEVICE_ADDED:
2063 {
2064 dev_pg0 = (struct mpi3_device_page0 *)fwevt->event_data;
2065 perst_id = le16_to_cpu(dev_pg0->persistent_id);
2066 handle = le16_to_cpu(dev_pg0->dev_handle);
2067 if (perst_id != MPI3_DEVICE0_PERSISTENTID_INVALID)
2068 mpi3mr_report_tgtdev_to_host(mrioc, perst_id);
2069 else if (mrioc->sas_transport_enabled &&
2070 (dev_pg0->device_form == MPI3_DEVICE_DEVFORM_SAS_SATA)) {
2071 sasinf = &dev_pg0->device_specific.sas_sata_format;
2072 dev_info = le16_to_cpu(sasinf->device_info);
2073 if (!mrioc->sas_hba.num_phys)
2074 mpi3mr_sas_host_add(mrioc);
2075 else
2076 mpi3mr_sas_host_refresh(mrioc);
2077
2078 if (mpi3mr_is_expander_device(dev_info))
2079 mpi3mr_expander_add(mrioc, handle);
2080 }
2081 break;
2082 }
2083 case MPI3_EVENT_DEVICE_INFO_CHANGED:
2084 {
2085 dev_pg0 = (struct mpi3_device_page0 *)fwevt->event_data;
2086 perst_id = le16_to_cpu(dev_pg0->persistent_id);
2087 if (perst_id != MPI3_DEVICE0_PERSISTENTID_INVALID)
2088 mpi3mr_devinfochg_evt_bh(mrioc, dev_pg0);
2089 break;
2090 }
2091 case MPI3_EVENT_DEVICE_STATUS_CHANGE:
2092 {
2093 mpi3mr_devstatuschg_evt_bh(mrioc, fwevt);
2094 break;
2095 }
2096 case MPI3_EVENT_ENCL_DEVICE_ADDED:
2097 case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE:
2098 {
2099 mpi3mr_encldev_add_chg_evt_bh(mrioc, fwevt);
2100 break;
2101 }
2102
2103 case
MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST: 2104 { 2105 mpi3mr_sastopochg_evt_bh(mrioc, fwevt); 2106 break; 2107 } 2108 case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST: 2109 { 2110 mpi3mr_pcietopochg_evt_bh(mrioc, fwevt); 2111 break; 2112 } 2113 case MPI3_EVENT_LOG_DATA: 2114 { 2115 mpi3mr_logdata_evt_bh(mrioc, fwevt); 2116 break; 2117 } 2118 case MPI3MR_DRIVER_EVENT_TG_QD_REDUCTION: 2119 { 2120 struct mpi3mr_throttle_group_info *tg; 2121 2122 tg = *(struct mpi3mr_throttle_group_info **)fwevt->event_data; 2123 dprint_event_bh(mrioc, 2124 "qd reduction event processed for tg_id(%d) reduction_needed(%d)\n", 2125 tg->id, tg->need_qd_reduction); 2126 if (tg->need_qd_reduction) { 2127 mpi3mr_set_qd_for_all_vd_in_tg(mrioc, tg); 2128 tg->need_qd_reduction = 0; 2129 } 2130 break; 2131 } 2132 case MPI3_EVENT_WAIT_FOR_DEVICES_TO_REFRESH: 2133 { 2134 timeout = MPI3MR_RESET_TIMEOUT * 2; 2135 while ((mrioc->device_refresh_on || mrioc->block_on_pci_err) && 2136 !mrioc->unrecoverable && !mrioc->pci_err_recovery) { 2137 msleep(500); 2138 if (!timeout--) { 2139 mrioc->unrecoverable = 1; 2140 break; 2141 } 2142 } 2143 2144 if (mrioc->unrecoverable || mrioc->pci_err_recovery) 2145 break; 2146 2147 dprint_event_bh(mrioc, 2148 "scan for non responding and newly added devices after soft reset started\n"); 2149 if (mrioc->sas_transport_enabled) { 2150 mpi3mr_refresh_sas_ports(mrioc); 2151 mpi3mr_refresh_expanders(mrioc); 2152 } 2153 mpi3mr_refresh_tgtdevs(mrioc); 2154 ioc_info(mrioc, 2155 "scan for non responding and newly added devices after soft reset completed\n"); 2156 break; 2157 } 2158 case MPI3MR_DRIVER_EVENT_PROCESS_TRIGGER: 2159 { 2160 mpi3mr_process_trigger_data_event_bh(mrioc, 2161 (struct trigger_event_data *)fwevt->event_data); 2162 break; 2163 } 2164 default: 2165 break; 2166 } 2167 2168 evt_ack: 2169 if (fwevt->send_ack) 2170 mpi3mr_process_event_ack(mrioc, fwevt->event_id, 2171 fwevt->evt_ctx); 2172 out: 2173 /* Put fwevt reference count to neutralize kref_init increment */ 2174 mpi3mr_fwevt_put(fwevt); 2175 mrioc->current_event = NULL; 2176 } 2177 2178 /** 2179 * mpi3mr_fwevt_worker - Firmware event worker 2180 * @work: Work struct containing firmware event 2181 * 2182 * Extracts the firmware event and calls mpi3mr_fwevt_bh. 2183 * 2184 * Return: Nothing. 2185 */ 2186 static void mpi3mr_fwevt_worker(struct work_struct *work) 2187 { 2188 struct mpi3mr_fwevt *fwevt = container_of(work, struct mpi3mr_fwevt, 2189 work); 2190 mpi3mr_fwevt_bh(fwevt->mrioc, fwevt); 2191 /* 2192 * Put fwevt reference count after 2193 * dequeuing it from worker queue 2194 */ 2195 mpi3mr_fwevt_put(fwevt); 2196 } 2197 2198 /** 2199 * mpi3mr_create_tgtdev - Create and add a target device 2200 * @mrioc: Adapter instance reference 2201 * @dev_pg0: Device Page 0 data 2202 * 2203 * If the device specified by the device page 0 data is not 2204 * present in the driver's internal list, allocate the memory 2205 * for the device, populate the data and add to the list, else 2206 * update the device data. The key is persistent ID. 
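 *
 * A device page 0 carrying MPI3_DEVICE0_PERSISTENTID_INVALID is
 * ignored and treated as success.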
2207 *
2208 * Return: 0 on success, -ENOMEM on memory allocation failure
2209 */
2210 static int mpi3mr_create_tgtdev(struct mpi3mr_ioc *mrioc,
2211 struct mpi3_device_page0 *dev_pg0)
2212 {
2213 int retval = 0;
2214 struct mpi3mr_tgt_dev *tgtdev = NULL;
2215 u16 perst_id = 0;
2216 unsigned long flags;
2217
2218 perst_id = le16_to_cpu(dev_pg0->persistent_id);
2219 if (perst_id == MPI3_DEVICE0_PERSISTENTID_INVALID)
2220 return retval;
2221
2222 spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
2223 tgtdev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, perst_id);
2224 if (tgtdev)
2225 tgtdev->state = MPI3MR_DEV_CREATED;
2226 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
2227
2228 if (tgtdev) {
2229 mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0, true);
2230 mpi3mr_tgtdev_put(tgtdev);
2231 } else {
2232 tgtdev = mpi3mr_alloc_tgtdev();
2233 if (!tgtdev)
2234 return -ENOMEM;
2235 mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0, true);
2236 mpi3mr_tgtdev_add_to_list(mrioc, tgtdev);
2237 }
2238
2239 return retval;
2240 }
2241
2242 /**
2243 * mpi3mr_flush_delayed_cmd_lists - Flush pending commands
2244 * @mrioc: Adapter instance reference
2245 *
2246 * Flush pending commands in the delayed lists due to a
2247 * controller reset or driver removal as a cleanup.
2248 *
2249 * Return: Nothing
2250 */
2251 void mpi3mr_flush_delayed_cmd_lists(struct mpi3mr_ioc *mrioc)
2252 {
2253 struct delayed_dev_rmhs_node *_rmhs_node;
2254 struct delayed_evt_ack_node *_evtack_node;
2255
2256 dprint_reset(mrioc, "flushing delayed dev_remove_hs commands\n");
2257 while (!list_empty(&mrioc->delayed_rmhs_list)) {
2258 _rmhs_node = list_entry(mrioc->delayed_rmhs_list.next,
2259 struct delayed_dev_rmhs_node, list);
2260 list_del(&_rmhs_node->list);
2261 kfree(_rmhs_node);
2262 }
2263 dprint_reset(mrioc, "flushing delayed event ack commands\n");
2264 while (!list_empty(&mrioc->delayed_evtack_cmds_list)) {
2265 _evtack_node = list_entry(mrioc->delayed_evtack_cmds_list.next,
2266 struct delayed_evt_ack_node, list);
2267 list_del(&_evtack_node->list);
2268 kfree(_evtack_node);
2269 }
2270 }
2271
2272 /**
2273 * mpi3mr_dev_rmhs_complete_iou - Device removal IOUC completion
2274 * @mrioc: Adapter instance reference
2275 * @drv_cmd: Internal command tracker
2276 *
2277 * Issues a target reset TM to the firmware from the device
2278 * removal TM pend list or retries the removal handshake sequence
2279 * based on the IOU control request IOC status.
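 *
 * Retries are limited to MPI3MR_DEV_RMHS_RETRY_COUNT; the handle's
 * bit in removepend_bitmap is cleared only on success.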
2280 *
2281 * Return: Nothing
2282 */
2283 static void mpi3mr_dev_rmhs_complete_iou(struct mpi3mr_ioc *mrioc,
2284 struct mpi3mr_drv_cmd *drv_cmd)
2285 {
2286 u16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
2287 struct delayed_dev_rmhs_node *delayed_dev_rmhs = NULL;
2288
2289 if (drv_cmd->state & MPI3MR_CMD_RESET)
2290 goto clear_drv_cmd;
2291
2292 ioc_info(mrioc,
2293 "%s :dev_rmhs_iouctrl_complete:handle(0x%04x), ioc_status(0x%04x), loginfo(0x%08x)\n",
2294 __func__, drv_cmd->dev_handle, drv_cmd->ioc_status,
2295 drv_cmd->ioc_loginfo);
2296 if (drv_cmd->ioc_status != MPI3_IOCSTATUS_SUCCESS) {
2297 if (drv_cmd->retry_count < MPI3MR_DEV_RMHS_RETRY_COUNT) {
2298 drv_cmd->retry_count++;
2299 ioc_info(mrioc,
2300 "%s :dev_rmhs_iouctrl_complete: handle(0x%04x)retrying handshake retry=%d\n",
2301 __func__, drv_cmd->dev_handle,
2302 drv_cmd->retry_count);
2303 mpi3mr_dev_rmhs_send_tm(mrioc, drv_cmd->dev_handle,
2304 drv_cmd, drv_cmd->iou_rc);
2305 return;
2306 }
2307 ioc_err(mrioc,
2308 "%s :dev removal handshake failed after all retries: handle(0x%04x)\n",
2309 __func__, drv_cmd->dev_handle);
2310 } else {
2311 ioc_info(mrioc,
2312 "%s :dev removal handshake completed successfully: handle(0x%04x)\n",
2313 __func__, drv_cmd->dev_handle);
2314 clear_bit(drv_cmd->dev_handle, mrioc->removepend_bitmap);
2315 }
2316
2317 if (!list_empty(&mrioc->delayed_rmhs_list)) {
2318 delayed_dev_rmhs = list_entry(mrioc->delayed_rmhs_list.next,
2319 struct delayed_dev_rmhs_node, list);
2320 drv_cmd->dev_handle = delayed_dev_rmhs->handle;
2321 drv_cmd->retry_count = 0;
2322 drv_cmd->iou_rc = delayed_dev_rmhs->iou_rc;
2323 ioc_info(mrioc,
2324 "%s :dev_rmhs_iouctrl_complete: processing delayed TM: handle(0x%04x)\n",
2325 __func__, drv_cmd->dev_handle);
2326 mpi3mr_dev_rmhs_send_tm(mrioc, drv_cmd->dev_handle, drv_cmd,
2327 drv_cmd->iou_rc);
2328 list_del(&delayed_dev_rmhs->list);
2329 kfree(delayed_dev_rmhs);
2330 return;
2331 }
2332
2333 clear_drv_cmd:
2334 drv_cmd->state = MPI3MR_CMD_NOTUSED;
2335 drv_cmd->callback = NULL;
2336 drv_cmd->retry_count = 0;
2337 drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
2338 clear_bit(cmd_idx, mrioc->devrem_bitmap);
2339 }
2340
2341 /**
2342 * mpi3mr_dev_rmhs_complete_tm - Device removal TM completion
2343 * @mrioc: Adapter instance reference
2344 * @drv_cmd: Internal command tracker
2345 *
2346 * Issues a target reset TM to the firmware from the device
2347 * removal TM pend list or issues an IO unit control request as
2348 * part of device removal or hidden acknowledgment handshake.
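 *
 * When a valid TM reply is available, its termination_count is
 * logged before the IO unit control request is posted.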
2349 *
2350 * Return: Nothing
2351 */
2352 static void mpi3mr_dev_rmhs_complete_tm(struct mpi3mr_ioc *mrioc,
2353 struct mpi3mr_drv_cmd *drv_cmd)
2354 {
2355 struct mpi3_iounit_control_request iou_ctrl;
2356 u16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
2357 struct mpi3_scsi_task_mgmt_reply *tm_reply = NULL;
2358 int retval;
2359
2360 if (drv_cmd->state & MPI3MR_CMD_RESET)
2361 goto clear_drv_cmd;
2362
2363 if (drv_cmd->state & MPI3MR_CMD_REPLY_VALID)
2364 tm_reply = (struct mpi3_scsi_task_mgmt_reply *)drv_cmd->reply;
2365
2366 if (tm_reply)
2367 pr_info(IOCNAME
2368 "dev_rmhs_tr_complete:handle(0x%04x), ioc_status(0x%04x), loginfo(0x%08x), term_count(%d)\n",
2369 mrioc->name, drv_cmd->dev_handle, drv_cmd->ioc_status,
2370 drv_cmd->ioc_loginfo,
2371 le32_to_cpu(tm_reply->termination_count));
2372
2373 pr_info(IOCNAME "Issuing IOU CTL: handle(0x%04x) dev_rmhs idx(%d)\n",
2374 mrioc->name, drv_cmd->dev_handle, cmd_idx);
2375
2376 memset(&iou_ctrl, 0, sizeof(iou_ctrl));
2377
2378 drv_cmd->state = MPI3MR_CMD_PENDING;
2379 drv_cmd->is_waiting = 0;
2380 drv_cmd->callback = mpi3mr_dev_rmhs_complete_iou;
2381 iou_ctrl.operation = drv_cmd->iou_rc;
2382 iou_ctrl.param16[0] = cpu_to_le16(drv_cmd->dev_handle);
2383 iou_ctrl.host_tag = cpu_to_le16(drv_cmd->host_tag);
2384 iou_ctrl.function = MPI3_FUNCTION_IO_UNIT_CONTROL;
2385
2386 retval = mpi3mr_admin_request_post(mrioc, &iou_ctrl, sizeof(iou_ctrl),
2387 1);
2388 if (retval) {
2389 pr_err(IOCNAME "Issue DevRmHsTMIOUCTL: Admin post failed\n",
2390 mrioc->name);
2391 goto clear_drv_cmd;
2392 }
2393
2394 return;
2395 clear_drv_cmd:
2396 drv_cmd->state = MPI3MR_CMD_NOTUSED;
2397 drv_cmd->callback = NULL;
2398 drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
2399 drv_cmd->retry_count = 0;
2400 clear_bit(cmd_idx, mrioc->devrem_bitmap);
2401 }
2402
2403 /**
2404 * mpi3mr_dev_rmhs_send_tm - Issue TM for device removal
2405 * @mrioc: Adapter instance reference
2406 * @handle: Device handle
2407 * @cmdparam: Internal command tracker
2408 * @iou_rc: IO unit reason code
2409 *
2410 * Issues a target reset TM to the firmware or adds it to a pend
2411 * list as part of device removal or hidden acknowledgment
2412 * handshake.
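 *
 * A free command slot is claimed from devrem_bitmap with
 * find_first_zero_bit()/test_and_set_bit(); when no slot is
 * available the request is queued on delayed_rmhs_list.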
2413 *
2414 * Return: Nothing
2415 */
2416 static void mpi3mr_dev_rmhs_send_tm(struct mpi3mr_ioc *mrioc, u16 handle,
2417 struct mpi3mr_drv_cmd *cmdparam, u8 iou_rc)
2418 {
2419 struct mpi3_scsi_task_mgmt_request tm_req;
2420 int retval = 0;
2421 u16 cmd_idx = MPI3MR_NUM_DEVRMCMD;
2422 u8 retrycount = 5;
2423 struct mpi3mr_drv_cmd *drv_cmd = cmdparam;
2424 struct delayed_dev_rmhs_node *delayed_dev_rmhs = NULL;
2425 struct mpi3mr_tgt_dev *tgtdev = NULL;
2426 unsigned long flags;
2427
2428 spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
2429 tgtdev = __mpi3mr_get_tgtdev_by_handle(mrioc, handle);
2430 if (tgtdev && (iou_rc == MPI3_CTRL_OP_REMOVE_DEVICE))
2431 tgtdev->state = MPI3MR_DEV_REMOVE_HS_STARTED;
2432 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
2433
2434 if (drv_cmd)
2435 goto issue_cmd;
2436 do {
2437 cmd_idx = find_first_zero_bit(mrioc->devrem_bitmap,
2438 MPI3MR_NUM_DEVRMCMD);
2439 if (cmd_idx < MPI3MR_NUM_DEVRMCMD) {
2440 if (!test_and_set_bit(cmd_idx, mrioc->devrem_bitmap))
2441 break;
2442 cmd_idx = MPI3MR_NUM_DEVRMCMD;
2443 }
2444 } while (retrycount--);
2445
2446 if (cmd_idx >= MPI3MR_NUM_DEVRMCMD) {
2447 delayed_dev_rmhs = kzalloc(sizeof(*delayed_dev_rmhs),
2448 GFP_ATOMIC);
2449 if (!delayed_dev_rmhs)
2450 return;
2451 INIT_LIST_HEAD(&delayed_dev_rmhs->list);
2452 delayed_dev_rmhs->handle = handle;
2453 delayed_dev_rmhs->iou_rc = iou_rc;
2454 list_add_tail(&delayed_dev_rmhs->list,
2455 &mrioc->delayed_rmhs_list);
2456 ioc_info(mrioc, "%s :DevRmHs: tr:handle(0x%04x) is postponed\n",
2457 __func__, handle);
2458 return;
2459 }
2460 drv_cmd = &mrioc->dev_rmhs_cmds[cmd_idx];
2461
2462 issue_cmd:
2463 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
2464 ioc_info(mrioc,
2465 "%s :Issuing TR TM: for devhandle 0x%04x with dev_rmhs %d\n",
2466 __func__, handle, cmd_idx);
2467
2468 memset(&tm_req, 0, sizeof(tm_req));
2469 if (drv_cmd->state & MPI3MR_CMD_PENDING) {
2470 ioc_err(mrioc, "%s :Issue TM: Command is in use\n", __func__);
2471 goto out;
2472 }
2473 drv_cmd->state = MPI3MR_CMD_PENDING;
2474 drv_cmd->is_waiting = 0;
2475 drv_cmd->callback = mpi3mr_dev_rmhs_complete_tm;
2476 drv_cmd->dev_handle = handle;
2477 drv_cmd->iou_rc = iou_rc;
2478 tm_req.dev_handle = cpu_to_le16(handle);
2479 tm_req.task_type = MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
2480 tm_req.host_tag = cpu_to_le16(drv_cmd->host_tag);
2481 tm_req.task_host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INVALID);
2482 tm_req.function = MPI3_FUNCTION_SCSI_TASK_MGMT;
2483
2484 set_bit(handle, mrioc->removepend_bitmap);
2485 retval = mpi3mr_admin_request_post(mrioc, &tm_req, sizeof(tm_req), 1);
2486 if (retval) {
2487 ioc_err(mrioc, "%s :Issue DevRmHsTM: Admin Post failed\n",
2488 __func__);
2489 goto out_failed;
2490 }
2491 out:
2492 return;
2493 out_failed:
2494 drv_cmd->state = MPI3MR_CMD_NOTUSED;
2495 drv_cmd->callback = NULL;
2496 drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
2497 drv_cmd->retry_count = 0;
2498 clear_bit(cmd_idx, mrioc->devrem_bitmap);
2499 }
2500
2501 /**
2502 * mpi3mr_complete_evt_ack - event ack request completion
2503 * @mrioc: Adapter instance reference
2504 * @drv_cmd: Internal command tracker
2505 *
2506 * This is the completion handler for the non-blocking event
2507 * acknowledgment sent to the firmware; it issues any pending
2508 * event acknowledgment request.
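 *
 * The freed command tracker is reused directly for the next entry
 * on delayed_evtack_cmds_list, if any.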
2509 *
2510 * Return: Nothing
2511 */
2512 static void mpi3mr_complete_evt_ack(struct mpi3mr_ioc *mrioc,
2513 struct mpi3mr_drv_cmd *drv_cmd)
2514 {
2515 u16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN;
2516 struct delayed_evt_ack_node *delayed_evtack = NULL;
2517
2518 if (drv_cmd->state & MPI3MR_CMD_RESET)
2519 goto clear_drv_cmd;
2520
2521 if (drv_cmd->ioc_status != MPI3_IOCSTATUS_SUCCESS) {
2522 dprint_event_th(mrioc,
2523 "immediate event ack failed with ioc_status(0x%04x) log_info(0x%08x)\n",
2524 (drv_cmd->ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
2525 drv_cmd->ioc_loginfo);
2526 }
2527
2528 if (!list_empty(&mrioc->delayed_evtack_cmds_list)) {
2529 delayed_evtack =
2530 list_entry(mrioc->delayed_evtack_cmds_list.next,
2531 struct delayed_evt_ack_node, list);
2532 mpi3mr_send_event_ack(mrioc, delayed_evtack->event, drv_cmd,
2533 delayed_evtack->event_ctx);
2534 list_del(&delayed_evtack->list);
2535 kfree(delayed_evtack);
2536 return;
2537 }
2538 clear_drv_cmd:
2539 drv_cmd->state = MPI3MR_CMD_NOTUSED;
2540 drv_cmd->callback = NULL;
2541 clear_bit(cmd_idx, mrioc->evtack_cmds_bitmap);
2542 }
2543
2544 /**
2545 * mpi3mr_send_event_ack - Issue event acknowledgment request
2546 * @mrioc: Adapter instance reference
2547 * @event: MPI3 event id
2548 * @cmdparam: Internal command tracker
2549 * @event_ctx: event context
2550 *
2551 * Issues an event acknowledgment request to the firmware if
2552 * there is a free command to send the event ack; otherwise adds
2553 * it to a pend list so that it is processed on completion of a
2554 * prior event acknowledgment.
2555 *
2556 * Return: Nothing
2557 */
2558 static void mpi3mr_send_event_ack(struct mpi3mr_ioc *mrioc, u8 event,
2559 struct mpi3mr_drv_cmd *cmdparam, u32 event_ctx)
2560 {
2561 struct mpi3_event_ack_request evtack_req;
2562 int retval = 0;
2563 u8 retrycount = 5;
2564 u16 cmd_idx = MPI3MR_NUM_EVTACKCMD;
2565 struct mpi3mr_drv_cmd *drv_cmd = cmdparam;
2566 struct delayed_evt_ack_node *delayed_evtack = NULL;
2567
2568 if (drv_cmd) {
2569 dprint_event_th(mrioc,
2570 "sending delayed event ack in the top half for event(0x%02x), event_ctx(0x%08x)\n",
2571 event, event_ctx);
2572 goto issue_cmd;
2573 }
2574 dprint_event_th(mrioc,
2575 "sending event ack in the top half for event(0x%02x), event_ctx(0x%08x)\n",
2576 event, event_ctx);
2577 do {
2578 cmd_idx = find_first_zero_bit(mrioc->evtack_cmds_bitmap,
2579 MPI3MR_NUM_EVTACKCMD);
2580 if (cmd_idx < MPI3MR_NUM_EVTACKCMD) {
2581 if (!test_and_set_bit(cmd_idx,
2582 mrioc->evtack_cmds_bitmap))
2583 break;
2584 cmd_idx = MPI3MR_NUM_EVTACKCMD;
2585 }
2586 } while (retrycount--);
2587
2588 if (cmd_idx >= MPI3MR_NUM_EVTACKCMD) {
2589 delayed_evtack = kzalloc(sizeof(*delayed_evtack),
2590 GFP_ATOMIC);
2591 if (!delayed_evtack)
2592 return;
2593 INIT_LIST_HEAD(&delayed_evtack->list);
2594 delayed_evtack->event = event;
2595 delayed_evtack->event_ctx = event_ctx;
2596 list_add_tail(&delayed_evtack->list,
2597 &mrioc->delayed_evtack_cmds_list);
2598 dprint_event_th(mrioc,
2599 "event ack in the top half for event(0x%02x), event_ctx(0x%08x) is postponed\n",
2600 event, event_ctx);
2601 return;
2602 }
2603 drv_cmd = &mrioc->evtack_cmds[cmd_idx];
2604
2605 issue_cmd:
2606 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN;
2607
2608 memset(&evtack_req, 0, sizeof(evtack_req));
2609 if (drv_cmd->state & MPI3MR_CMD_PENDING) {
2610 dprint_event_th(mrioc,
2611 "sending event ack failed due to command in use\n");
2612 goto out;
2613 }
2614 drv_cmd->state = MPI3MR_CMD_PENDING;
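	/* non-blocking ack: completion runs in mpi3mr_complete_evt_ack() */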
2615 drv_cmd->is_waiting = 0;
2616 drv_cmd->callback = mpi3mr_complete_evt_ack;
2617 evtack_req.host_tag = cpu_to_le16(drv_cmd->host_tag);
2618 evtack_req.function = MPI3_FUNCTION_EVENT_ACK;
2619 evtack_req.event = event;
2620 evtack_req.event_context = cpu_to_le32(event_ctx);
2621 retval = mpi3mr_admin_request_post(mrioc, &evtack_req,
2622 sizeof(evtack_req), 1);
2623 if (retval) {
2624 dprint_event_th(mrioc,
2625 "posting event ack request failed\n");
2626 goto out_failed;
2627 }
2628
2629 dprint_event_th(mrioc,
2630 "event ack in the top half for event(0x%02x), event_ctx(0x%08x) is posted\n",
2631 event, event_ctx);
2632 out:
2633 return;
2634 out_failed:
2635 drv_cmd->state = MPI3MR_CMD_NOTUSED;
2636 drv_cmd->callback = NULL;
2637 clear_bit(cmd_idx, mrioc->evtack_cmds_bitmap);
2638 }
2639
2640 /**
2641 * mpi3mr_pcietopochg_evt_th - PCIETopologyChange evt tophalf
2642 * @mrioc: Adapter instance reference
2643 * @event_reply: event data
2644 *
2645 * Checks the reason code and, based on that, either blocks I/O
2646 * to the device, unblocks I/O to the device, or starts the
2647 * device removal handshake with the firmware with reason as
2648 * remove, for PCIe devices.
2649 *
2650 * Return: Nothing
2651 */
2652 static void mpi3mr_pcietopochg_evt_th(struct mpi3mr_ioc *mrioc,
2653 struct mpi3_event_notification_reply *event_reply)
2654 {
2655 struct mpi3_event_data_pcie_topology_change_list *topo_evt =
2656 (struct mpi3_event_data_pcie_topology_change_list *)event_reply->event_data;
2657 int i;
2658 u16 handle;
2659 u8 reason_code;
2660 struct mpi3mr_tgt_dev *tgtdev = NULL;
2661 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL;
2662
2663 for (i = 0; i < topo_evt->num_entries; i++) {
2664 handle = le16_to_cpu(topo_evt->port_entry[i].attached_dev_handle);
2665 if (!handle)
2666 continue;
2667 reason_code = topo_evt->port_entry[i].port_status;
2668 scsi_tgt_priv_data = NULL;
2669 tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);
2670 if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata)
2671 scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *)
2672 tgtdev->starget->hostdata;
2673 switch (reason_code) {
2674 case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
2675 if (scsi_tgt_priv_data) {
2676 scsi_tgt_priv_data->dev_removed = 1;
2677 scsi_tgt_priv_data->dev_removedelay = 0;
2678 atomic_set(&scsi_tgt_priv_data->block_io, 0);
2679 }
2680 mpi3mr_dev_rmhs_send_tm(mrioc, handle, NULL,
2681 MPI3_CTRL_OP_REMOVE_DEVICE);
2682 break;
2683 case MPI3_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING:
2684 if (scsi_tgt_priv_data) {
2685 scsi_tgt_priv_data->dev_removedelay = 1;
2686 atomic_inc(&scsi_tgt_priv_data->block_io);
2687 }
2688 break;
2689 case MPI3_EVENT_PCIE_TOPO_PS_RESPONDING:
2690 if (scsi_tgt_priv_data &&
2691 scsi_tgt_priv_data->dev_removedelay) {
2692 scsi_tgt_priv_data->dev_removedelay = 0;
2693 atomic_dec_if_positive
2694 (&scsi_tgt_priv_data->block_io);
2695 }
2696 break;
2697 case MPI3_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
2698 default:
2699 break;
2700 }
2701 if (tgtdev)
2702 mpi3mr_tgtdev_put(tgtdev);
2703 }
2704 }
2705
2706 /**
2707 * mpi3mr_sastopochg_evt_th - SASTopologyChange evt tophalf
2708 * @mrioc: Adapter instance reference
2709 * @event_reply: event data
2710 *
2711 * Checks the reason code and, based on that, either blocks I/O
2712 * to the device, unblocks I/O to the device, or starts the
2713 * device removal handshake with the firmware with reason as
2714 * remove, for SAS/SATA devices.
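 *
 * Delay-not-responding and responding transitions are balanced
 * through the target's block_io counter.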
2715 *
2716 * Return: Nothing
2717 */
2718 static void mpi3mr_sastopochg_evt_th(struct mpi3mr_ioc *mrioc,
2719 struct mpi3_event_notification_reply *event_reply)
2720 {
2721 struct mpi3_event_data_sas_topology_change_list *topo_evt =
2722 (struct mpi3_event_data_sas_topology_change_list *)event_reply->event_data;
2723 int i;
2724 u16 handle;
2725 u8 reason_code;
2726 struct mpi3mr_tgt_dev *tgtdev = NULL;
2727 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL;
2728
2729 for (i = 0; i < topo_evt->num_entries; i++) {
2730 handle = le16_to_cpu(topo_evt->phy_entry[i].attached_dev_handle);
2731 if (!handle)
2732 continue;
2733 reason_code = topo_evt->phy_entry[i].status &
2734 MPI3_EVENT_SAS_TOPO_PHY_RC_MASK;
2735 scsi_tgt_priv_data = NULL;
2736 tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);
2737 if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata)
2738 scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *)
2739 tgtdev->starget->hostdata;
2740 switch (reason_code) {
2741 case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING:
2742 if (scsi_tgt_priv_data) {
2743 scsi_tgt_priv_data->dev_removed = 1;
2744 scsi_tgt_priv_data->dev_removedelay = 0;
2745 atomic_set(&scsi_tgt_priv_data->block_io, 0);
2746 }
2747 mpi3mr_dev_rmhs_send_tm(mrioc, handle, NULL,
2748 MPI3_CTRL_OP_REMOVE_DEVICE);
2749 break;
2750 case MPI3_EVENT_SAS_TOPO_PHY_RC_DELAY_NOT_RESPONDING:
2751 if (scsi_tgt_priv_data) {
2752 scsi_tgt_priv_data->dev_removedelay = 1;
2753 atomic_inc(&scsi_tgt_priv_data->block_io);
2754 }
2755 break;
2756 case MPI3_EVENT_SAS_TOPO_PHY_RC_RESPONDING:
2757 if (scsi_tgt_priv_data &&
2758 scsi_tgt_priv_data->dev_removedelay) {
2759 scsi_tgt_priv_data->dev_removedelay = 0;
2760 atomic_dec_if_positive
2761 (&scsi_tgt_priv_data->block_io);
2762 }
2763 break;
2764 case MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED:
2765 default:
2766 break;
2767 }
2768 if (tgtdev)
2769 mpi3mr_tgtdev_put(tgtdev);
2770 }
2771 }
2772
2773 /**
2774 * mpi3mr_devstatuschg_evt_th - DeviceStatusChange evt tophalf
2775 * @mrioc: Adapter instance reference
2776 * @event_reply: event data
2777 *
2778 * Checks the reason code and, based on that, either blocks I/O
2779 * to the device, unblocks I/O to the device, or starts the
2780 * device removal handshake with the firmware with reason as
2781 * remove/hide acknowledgment.
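 *
 * Hidden devices are acknowledged with MPI3_CTRL_OP_HIDDEN_ACK and
 * non-responding VDs removed with MPI3_CTRL_OP_REMOVE_DEVICE.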
2782 * 2783 * Return: Nothing 2784 */ 2785 static void mpi3mr_devstatuschg_evt_th(struct mpi3mr_ioc *mrioc, 2786 struct mpi3_event_notification_reply *event_reply) 2787 { 2788 u16 dev_handle = 0; 2789 u8 ublock = 0, block = 0, hide = 0, delete = 0, remove = 0; 2790 struct mpi3mr_tgt_dev *tgtdev = NULL; 2791 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL; 2792 struct mpi3_event_data_device_status_change *evtdata = 2793 (struct mpi3_event_data_device_status_change *)event_reply->event_data; 2794 2795 if (mrioc->stop_drv_processing) 2796 goto out; 2797 2798 dev_handle = le16_to_cpu(evtdata->dev_handle); 2799 dprint_event_th(mrioc, 2800 "device status change event top half with rc(0x%02x) for handle(0x%04x)\n", 2801 evtdata->reason_code, dev_handle); 2802 2803 switch (evtdata->reason_code) { 2804 case MPI3_EVENT_DEV_STAT_RC_INT_DEVICE_RESET_STRT: 2805 case MPI3_EVENT_DEV_STAT_RC_INT_IT_NEXUS_RESET_STRT: 2806 block = 1; 2807 break; 2808 case MPI3_EVENT_DEV_STAT_RC_HIDDEN: 2809 delete = 1; 2810 hide = 1; 2811 break; 2812 case MPI3_EVENT_DEV_STAT_RC_VD_NOT_RESPONDING: 2813 delete = 1; 2814 remove = 1; 2815 break; 2816 case MPI3_EVENT_DEV_STAT_RC_INT_DEVICE_RESET_CMP: 2817 case MPI3_EVENT_DEV_STAT_RC_INT_IT_NEXUS_RESET_CMP: 2818 ublock = 1; 2819 break; 2820 default: 2821 break; 2822 } 2823 2824 tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle); 2825 if (!tgtdev) { 2826 dprint_event_th(mrioc, 2827 "processing device status change event could not identify device for handle(0x%04x)\n", 2828 dev_handle); 2829 goto out; 2830 } 2831 if (hide) 2832 tgtdev->is_hidden = hide; 2833 if (tgtdev->starget && tgtdev->starget->hostdata) { 2834 scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *) 2835 tgtdev->starget->hostdata; 2836 if (block) 2837 atomic_inc(&scsi_tgt_priv_data->block_io); 2838 if (delete) 2839 scsi_tgt_priv_data->dev_removed = 1; 2840 if (ublock) 2841 atomic_dec_if_positive(&scsi_tgt_priv_data->block_io); 2842 } 2843 if (remove) 2844 mpi3mr_dev_rmhs_send_tm(mrioc, dev_handle, NULL, 2845 MPI3_CTRL_OP_REMOVE_DEVICE); 2846 if (hide) 2847 mpi3mr_dev_rmhs_send_tm(mrioc, dev_handle, NULL, 2848 MPI3_CTRL_OP_HIDDEN_ACK); 2849 2850 out: 2851 if (tgtdev) 2852 mpi3mr_tgtdev_put(tgtdev); 2853 } 2854 2855 /** 2856 * mpi3mr_preparereset_evt_th - Prepare for reset event tophalf 2857 * @mrioc: Adapter instance reference 2858 * @event_reply: event data 2859 * 2860 * Blocks and unblocks host level I/O based on the reason code 2861 * 2862 * Return: Nothing 2863 */ 2864 static void mpi3mr_preparereset_evt_th(struct mpi3mr_ioc *mrioc, 2865 struct mpi3_event_notification_reply *event_reply) 2866 { 2867 struct mpi3_event_data_prepare_for_reset *evtdata = 2868 (struct mpi3_event_data_prepare_for_reset *)event_reply->event_data; 2869 2870 if (evtdata->reason_code == MPI3_EVENT_PREPARE_RESET_RC_START) { 2871 dprint_event_th(mrioc, 2872 "prepare for reset event top half with rc=start\n"); 2873 if (mrioc->prepare_for_reset) 2874 return; 2875 scsi_block_requests(mrioc->shost); 2876 mrioc->prepare_for_reset = 1; 2877 mrioc->prepare_for_reset_timeout_counter = 0; 2878 } else if (evtdata->reason_code == MPI3_EVENT_PREPARE_RESET_RC_ABORT) { 2879 dprint_event_th(mrioc, 2880 "prepare for reset top half with rc=abort\n"); 2881 mrioc->prepare_for_reset = 0; 2882 scsi_unblock_requests(mrioc->shost); 2883 mrioc->prepare_for_reset_timeout_counter = 0; 2884 } 2885 if ((event_reply->msg_flags & MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_MASK) 2886 == MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_REQUIRED) 2887 mpi3mr_send_event_ack(mrioc, 
event_reply->event, NULL,
2888 le32_to_cpu(event_reply->event_context));
2889 }
2890
2891 /**
2892 * mpi3mr_energypackchg_evt_th - Energy pack change evt tophalf
2893 * @mrioc: Adapter instance reference
2894 * @event_reply: event data
2895 *
2896 * Identifies the new shutdown timeout value and updates it.
2897 *
2898 * Return: Nothing
2899 */
2900 static void mpi3mr_energypackchg_evt_th(struct mpi3mr_ioc *mrioc,
2901 struct mpi3_event_notification_reply *event_reply)
2902 {
2903 struct mpi3_event_data_energy_pack_change *evtdata =
2904 (struct mpi3_event_data_energy_pack_change *)event_reply->event_data;
2905 u16 shutdown_timeout = le16_to_cpu(evtdata->shutdown_timeout);
2906
2907 if (shutdown_timeout <= 0) {
2908 dprint_event_th(mrioc,
2909 "%s :Invalid Shutdown Timeout received = %d\n",
2910 __func__, shutdown_timeout);
2911 return;
2912 }
2913
2914 dprint_event_th(mrioc,
2915 "%s :Previous Shutdown Timeout Value = %d New Shutdown Timeout Value = %d\n",
2916 __func__, mrioc->facts.shutdown_timeout, shutdown_timeout);
2917 mrioc->facts.shutdown_timeout = shutdown_timeout;
2918 }
2919
2920 /**
2921 * mpi3mr_cablemgmt_evt_th - Cable management event tophalf
2922 * @mrioc: Adapter instance reference
2923 * @event_reply: event data
2924 *
2925 * Displays cable management event details.
2926 *
2927 * Return: Nothing
2928 */
2929 static void mpi3mr_cablemgmt_evt_th(struct mpi3mr_ioc *mrioc,
2930 struct mpi3_event_notification_reply *event_reply)
2931 {
2932 struct mpi3_event_data_cable_management *evtdata =
2933 (struct mpi3_event_data_cable_management *)event_reply->event_data;
2934
2935 switch (evtdata->status) {
2936 case MPI3_EVENT_CABLE_MGMT_STATUS_INSUFFICIENT_POWER:
2937 {
2938 ioc_info(mrioc, "An active cable with receptacle_id %d cannot be powered.\n"
2939 "Devices connected to this cable are not detected.\n"
2940 "This cable requires %d mW of power.\n",
2941 evtdata->receptacle_id,
2942 le32_to_cpu(evtdata->active_cable_power_requirement));
2943 break;
2944 }
2945 case MPI3_EVENT_CABLE_MGMT_STATUS_DEGRADED:
2946 {
2947 ioc_info(mrioc, "A cable with receptacle_id %d is not running at optimal speed\n",
2948 evtdata->receptacle_id);
2949 break;
2950 }
2951 default:
2952 break;
2953 }
2954 }
2955
2956 /**
2957 * mpi3mr_add_event_wait_for_device_refresh - Add Wait for Device Refresh Event
2958 * @mrioc: Adapter instance reference
2959 *
2960 * Add driver specific event to make sure that the driver won't process the
2961 * events until all the devices are refreshed during soft reset.
2962 *
2963 * Return: Nothing
2964 */
2965 void mpi3mr_add_event_wait_for_device_refresh(struct mpi3mr_ioc *mrioc)
2966 {
2967 struct mpi3mr_fwevt *fwevt = NULL;
2968
2969 fwevt = mpi3mr_alloc_fwevt(0);
2970 if (!fwevt) {
2971 dprint_event_th(mrioc,
2972 "failed to schedule bottom half handler for event(0x%02x)\n",
2973 MPI3_EVENT_WAIT_FOR_DEVICES_TO_REFRESH);
2974 return;
2975 }
2976 fwevt->mrioc = mrioc;
2977 fwevt->event_id = MPI3_EVENT_WAIT_FOR_DEVICES_TO_REFRESH;
2978 fwevt->send_ack = 0;
2979 fwevt->process_evt = 1;
2980 fwevt->evt_ctx = 0;
2981 fwevt->event_data_size = 0;
2982 mpi3mr_fwevt_add_to_list(mrioc, fwevt);
2983 }
2984
2985 /**
2986 * mpi3mr_os_handle_events - Firmware event handler
2987 * @mrioc: Adapter instance reference
2988 * @event_reply: event data
2989 *
2990 * Identifies whether the event has to be handled and acknowledged,
2991 * and either processes the event in the top-half and/or schedules a
2992 * bottom-half through mpi3mr_fwevt_worker().
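 *
 * The event_data_length field is in dwords; it is converted to
 * bytes before the firmware event buffer is allocated and copied.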
2993 * 2994 * Return: Nothing 2995 */ 2996 void mpi3mr_os_handle_events(struct mpi3mr_ioc *mrioc, 2997 struct mpi3_event_notification_reply *event_reply) 2998 { 2999 u16 evt_type, sz; 3000 struct mpi3mr_fwevt *fwevt = NULL; 3001 bool ack_req = 0, process_evt_bh = 0; 3002 3003 if (mrioc->stop_drv_processing) 3004 return; 3005 3006 if ((event_reply->msg_flags & MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_MASK) 3007 == MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_REQUIRED) 3008 ack_req = 1; 3009 3010 evt_type = event_reply->event; 3011 mpi3mr_event_trigger(mrioc, event_reply->event); 3012 3013 switch (evt_type) { 3014 case MPI3_EVENT_DEVICE_ADDED: 3015 { 3016 struct mpi3_device_page0 *dev_pg0 = 3017 (struct mpi3_device_page0 *)event_reply->event_data; 3018 if (mpi3mr_create_tgtdev(mrioc, dev_pg0)) 3019 dprint_event_th(mrioc, 3020 "failed to process device added event for handle(0x%04x),\n" 3021 "perst_id(%d) in the event top half handler\n", 3022 le16_to_cpu(dev_pg0->dev_handle), 3023 le16_to_cpu(dev_pg0->persistent_id)); 3024 else 3025 process_evt_bh = 1; 3026 break; 3027 } 3028 case MPI3_EVENT_DEVICE_STATUS_CHANGE: 3029 { 3030 process_evt_bh = 1; 3031 mpi3mr_devstatuschg_evt_th(mrioc, event_reply); 3032 break; 3033 } 3034 case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST: 3035 { 3036 process_evt_bh = 1; 3037 mpi3mr_sastopochg_evt_th(mrioc, event_reply); 3038 break; 3039 } 3040 case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST: 3041 { 3042 process_evt_bh = 1; 3043 mpi3mr_pcietopochg_evt_th(mrioc, event_reply); 3044 break; 3045 } 3046 case MPI3_EVENT_PREPARE_FOR_RESET: 3047 { 3048 mpi3mr_preparereset_evt_th(mrioc, event_reply); 3049 ack_req = 0; 3050 break; 3051 } 3052 case MPI3_EVENT_DIAGNOSTIC_BUFFER_STATUS_CHANGE: 3053 { 3054 mpi3mr_hdbstatuschg_evt_th(mrioc, event_reply); 3055 break; 3056 } 3057 case MPI3_EVENT_DEVICE_INFO_CHANGED: 3058 case MPI3_EVENT_LOG_DATA: 3059 case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE: 3060 case MPI3_EVENT_ENCL_DEVICE_ADDED: 3061 { 3062 process_evt_bh = 1; 3063 break; 3064 } 3065 case MPI3_EVENT_ENERGY_PACK_CHANGE: 3066 { 3067 mpi3mr_energypackchg_evt_th(mrioc, event_reply); 3068 break; 3069 } 3070 case MPI3_EVENT_CABLE_MGMT: 3071 { 3072 mpi3mr_cablemgmt_evt_th(mrioc, event_reply); 3073 break; 3074 } 3075 case MPI3_EVENT_SAS_DISCOVERY: 3076 case MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR: 3077 case MPI3_EVENT_SAS_BROADCAST_PRIMITIVE: 3078 case MPI3_EVENT_PCIE_ENUMERATION: 3079 break; 3080 default: 3081 ioc_info(mrioc, "%s :event 0x%02x is not handled\n", 3082 __func__, evt_type); 3083 break; 3084 } 3085 if (process_evt_bh || ack_req) { 3086 dprint_event_th(mrioc, 3087 "scheduling bottom half handler for event(0x%02x) - (0x%08x), ack_required=%d\n", 3088 evt_type, le32_to_cpu(event_reply->event_context), ack_req); 3089 sz = event_reply->event_data_length * 4; 3090 fwevt = mpi3mr_alloc_fwevt(sz); 3091 if (!fwevt) { 3092 dprint_event_th(mrioc, 3093 "failed to schedule bottom half handler for\n" 3094 "event(0x%02x), ack_required=%d\n", evt_type, ack_req); 3095 return; 3096 } 3097 3098 memcpy(fwevt->event_data, event_reply->event_data, sz); 3099 fwevt->mrioc = mrioc; 3100 fwevt->event_id = evt_type; 3101 fwevt->send_ack = ack_req; 3102 fwevt->process_evt = process_evt_bh; 3103 fwevt->evt_ctx = le32_to_cpu(event_reply->event_context); 3104 mpi3mr_fwevt_add_to_list(mrioc, fwevt); 3105 } 3106 } 3107 3108 /** 3109 * mpi3mr_setup_eedp - Setup EEDP information in MPI3 SCSI IO 3110 * @mrioc: Adapter instance reference 3111 * @scmd: SCSI command reference 3112 * @scsiio_req: MPI3 SCSI IO request 3113 * 3114 * Identifies the 
protection information flags from the SCSI
3115 * command and sets appropriate flags in the MPI3 SCSI IO
3116 * request.
3117 *
3118 * Return: Nothing
3119 */
3120 static void mpi3mr_setup_eedp(struct mpi3mr_ioc *mrioc,
3121 struct scsi_cmnd *scmd, struct mpi3_scsi_io_request *scsiio_req)
3122 {
3123 u16 eedp_flags = 0;
3124 unsigned char prot_op = scsi_get_prot_op(scmd);
3125
3126 switch (prot_op) {
3127 case SCSI_PROT_NORMAL:
3128 return;
3129 case SCSI_PROT_READ_STRIP:
3130 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK_REMOVE;
3131 break;
3132 case SCSI_PROT_WRITE_INSERT:
3133 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_INSERT;
3134 break;
3135 case SCSI_PROT_READ_INSERT:
3136 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_INSERT;
3137 scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID;
3138 break;
3139 case SCSI_PROT_WRITE_STRIP:
3140 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK_REMOVE;
3141 scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID;
3142 break;
3143 case SCSI_PROT_READ_PASS:
3144 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK;
3145 scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID;
3146 break;
3147 case SCSI_PROT_WRITE_PASS:
3148 if (scmd->prot_flags & SCSI_PROT_IP_CHECKSUM) {
3149 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK_REGEN;
3150 scsiio_req->sgl[0].eedp.application_tag_translation_mask =
3151 0xffff;
3152 } else
3153 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK;
3154
3155 scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID;
3156 break;
3157 default:
3158 return;
3159 }
3160
3161 if (scmd->prot_flags & SCSI_PROT_GUARD_CHECK)
3162 eedp_flags |= MPI3_EEDPFLAGS_CHK_GUARD;
3163
3164 if (scmd->prot_flags & SCSI_PROT_IP_CHECKSUM)
3165 eedp_flags |= MPI3_EEDPFLAGS_HOST_GUARD_IP_CHKSUM;
3166
3167 if (scmd->prot_flags & SCSI_PROT_REF_CHECK) {
3168 eedp_flags |= MPI3_EEDPFLAGS_CHK_REF_TAG |
3169 MPI3_EEDPFLAGS_INCR_PRI_REF_TAG;
3170 scsiio_req->cdb.eedp32.primary_reference_tag =
3171 cpu_to_be32(scsi_prot_ref_tag(scmd));
3172 }
3173
3174 if (scmd->prot_flags & SCSI_PROT_REF_INCREMENT)
3175 eedp_flags |= MPI3_EEDPFLAGS_INCR_PRI_REF_TAG;
3176
3177 eedp_flags |= MPI3_EEDPFLAGS_ESC_MODE_APPTAG_DISABLE;
3178
3179 switch (scsi_prot_interval(scmd)) {
3180 case 512:
3181 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_512;
3182 break;
3183 case 520:
3184 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_520;
3185 break;
3186 case 4080:
3187 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4080;
3188 break;
3189 case 4088:
3190 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4088;
3191 break;
3192 case 4096:
3193 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4096;
3194 break;
3195 case 4104:
3196 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4104;
3197 break;
3198 case 4160:
3199 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4160;
3200 break;
3201 default:
3202 break;
3203 }
3204
3205 scsiio_req->sgl[0].eedp.eedp_flags = cpu_to_le16(eedp_flags);
3206 scsiio_req->sgl[0].eedp.flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_EXTENDED;
3207 }
3208
3209 /**
3210 * mpi3mr_build_sense_buffer - Map sense information
3211 * @desc: Sense type
3212 * @buf: Sense buffer to populate
3213 * @key: Sense key
3214 * @asc: Additional sense code
3215 * @ascq: Additional sense code qualifier
3216 *
3217 * Maps the given sense information into either descriptor or
3218 * fixed format sense data.
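 *
 * Descriptor format sense data uses response code 0x72, fixed
 * format uses 0x70; both report current (not deferred) errors.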
3219 *
3220 * Return: Nothing
3221 */
3222 static inline void mpi3mr_build_sense_buffer(int desc, u8 *buf, u8 key,
3223 u8 asc, u8 ascq)
3224 {
3225 if (desc) {
3226 buf[0] = 0x72; /* descriptor, current */
3227 buf[1] = key;
3228 buf[2] = asc;
3229 buf[3] = ascq;
3230 buf[7] = 0;
3231 } else {
3232 buf[0] = 0x70; /* fixed, current */
3233 buf[2] = key;
3234 buf[7] = 0xa;
3235 buf[12] = asc;
3236 buf[13] = ascq;
3237 }
3238 }
3239
3240 /**
3241 * mpi3mr_map_eedp_error - Map EEDP errors from IOC status
3242 * @scmd: SCSI command reference
3243 * @ioc_status: status of MPI3 request
3244 *
3245 * Maps the EEDP error status of the SCSI IO request to sense
3246 * data.
3247 *
3248 * Return: Nothing
3249 */
3250 static void mpi3mr_map_eedp_error(struct scsi_cmnd *scmd,
3251 u16 ioc_status)
3252 {
3253 u8 ascq = 0;
3254
3255 switch (ioc_status) {
3256 case MPI3_IOCSTATUS_EEDP_GUARD_ERROR:
3257 ascq = 0x01;
3258 break;
3259 case MPI3_IOCSTATUS_EEDP_APP_TAG_ERROR:
3260 ascq = 0x02;
3261 break;
3262 case MPI3_IOCSTATUS_EEDP_REF_TAG_ERROR:
3263 ascq = 0x03;
3264 break;
3265 default:
3266 ascq = 0x00;
3267 break;
3268 }
3269
3270 mpi3mr_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
3271 0x10, ascq);
3272 scmd->result = (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
3273 }
3274
3275 /**
3276 * mpi3mr_process_op_reply_desc - reply descriptor handler
3277 * @mrioc: Adapter instance reference
3278 * @reply_desc: Operational reply descriptor
3279 * @reply_dma: placeholder for reply DMA address
3280 * @qidx: Operational queue index
3281 *
3282 * Processes the operational reply descriptor and identifies the
3283 * descriptor type. Based on the descriptor, maps the MPI3
3284 * request status to a SCSI command status and calls the
3285 * scsi_done callback.
3286 *
3287 * Return: Nothing
3288 */
3289 void mpi3mr_process_op_reply_desc(struct mpi3mr_ioc *mrioc,
3290 struct mpi3_default_reply_descriptor *reply_desc, u64 *reply_dma, u16 qidx)
3291 {
3292 u16 reply_desc_type, host_tag = 0;
3293 u16 ioc_status = MPI3_IOCSTATUS_SUCCESS;
3294 u32 ioc_loginfo = 0;
3295 struct mpi3_status_reply_descriptor *status_desc = NULL;
3296 struct mpi3_address_reply_descriptor *addr_desc = NULL;
3297 struct mpi3_success_reply_descriptor *success_desc = NULL;
3298 struct mpi3_scsi_io_reply *scsi_reply = NULL;
3299 struct scsi_cmnd *scmd = NULL;
3300 struct scmd_priv *priv = NULL;
3301 u8 *sense_buf = NULL;
3302 u8 scsi_state = 0, scsi_status = 0, sense_state = 0;
3303 u32 xfer_count = 0, sense_count = 0, resp_data = 0;
3304 u16 dev_handle = 0xFFFF;
3305 struct scsi_sense_hdr sshdr;
3306 struct mpi3mr_stgt_priv_data *stgt_priv_data = NULL;
3307 struct mpi3mr_sdev_priv_data *sdev_priv_data = NULL;
3308 u32 ioc_pend_data_len = 0, tg_pend_data_len = 0, data_len_blks = 0;
3309 struct mpi3mr_throttle_group_info *tg = NULL;
3310 u8 throttle_enabled_dev = 0;
3311
3312 *reply_dma = 0;
3313 reply_desc_type = le16_to_cpu(reply_desc->reply_flags) &
3314 MPI3_REPLY_DESCRIPT_FLAGS_TYPE_MASK;
3315 switch (reply_desc_type) {
3316 case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_STATUS:
3317 status_desc = (struct mpi3_status_reply_descriptor *)reply_desc;
3318 host_tag = le16_to_cpu(status_desc->host_tag);
3319 ioc_status = le16_to_cpu(status_desc->ioc_status);
3320 if (ioc_status &
3321 MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
3322 ioc_loginfo = le32_to_cpu(status_desc->ioc_log_info);
3323 ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
3324 mpi3mr_reply_trigger(mrioc, ioc_status, ioc_loginfo);
3325 break;
3326 case
MPI3_REPLY_DESCRIPT_FLAGS_TYPE_ADDRESS_REPLY: 3327 addr_desc = (struct mpi3_address_reply_descriptor *)reply_desc; 3328 *reply_dma = le64_to_cpu(addr_desc->reply_frame_address); 3329 scsi_reply = mpi3mr_get_reply_virt_addr(mrioc, 3330 *reply_dma); 3331 if (!scsi_reply) { 3332 panic("%s: scsi_reply is NULL, this shouldn't happen\n", 3333 mrioc->name); 3334 goto out; 3335 } 3336 host_tag = le16_to_cpu(scsi_reply->host_tag); 3337 ioc_status = le16_to_cpu(scsi_reply->ioc_status); 3338 scsi_status = scsi_reply->scsi_status; 3339 scsi_state = scsi_reply->scsi_state; 3340 dev_handle = le16_to_cpu(scsi_reply->dev_handle); 3341 sense_state = (scsi_state & MPI3_SCSI_STATE_SENSE_MASK); 3342 xfer_count = le32_to_cpu(scsi_reply->transfer_count); 3343 sense_count = le32_to_cpu(scsi_reply->sense_count); 3344 resp_data = le32_to_cpu(scsi_reply->response_data); 3345 sense_buf = mpi3mr_get_sensebuf_virt_addr(mrioc, 3346 le64_to_cpu(scsi_reply->sense_data_buffer_address)); 3347 if (ioc_status & 3348 MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL) 3349 ioc_loginfo = le32_to_cpu(scsi_reply->ioc_log_info); 3350 ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK; 3351 if (sense_state == MPI3_SCSI_STATE_SENSE_BUFF_Q_EMPTY) 3352 panic("%s: Ran out of sense buffers\n", mrioc->name); 3353 if (sense_buf) { 3354 scsi_normalize_sense(sense_buf, sense_count, &sshdr); 3355 mpi3mr_scsisense_trigger(mrioc, sshdr.sense_key, 3356 sshdr.asc, sshdr.ascq); 3357 } 3358 mpi3mr_reply_trigger(mrioc, ioc_status, ioc_loginfo); 3359 break; 3360 case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SUCCESS: 3361 success_desc = (struct mpi3_success_reply_descriptor *)reply_desc; 3362 host_tag = le16_to_cpu(success_desc->host_tag); 3363 break; 3364 default: 3365 break; 3366 } 3367 scmd = mpi3mr_scmd_from_host_tag(mrioc, host_tag, qidx); 3368 if (!scmd) { 3369 panic("%s: Cannot Identify scmd for host_tag 0x%x\n", 3370 mrioc->name, host_tag); 3371 goto out; 3372 } 3373 priv = scsi_cmd_priv(scmd); 3374 3375 data_len_blks = scsi_bufflen(scmd) >> 9; 3376 sdev_priv_data = scmd->device->hostdata; 3377 if (sdev_priv_data) { 3378 stgt_priv_data = sdev_priv_data->tgt_priv_data; 3379 if (stgt_priv_data) { 3380 tg = stgt_priv_data->throttle_group; 3381 throttle_enabled_dev = 3382 stgt_priv_data->io_throttle_enabled; 3383 dev_handle = stgt_priv_data->dev_handle; 3384 } 3385 } 3386 if (unlikely((data_len_blks >= mrioc->io_throttle_data_length) && 3387 throttle_enabled_dev)) { 3388 ioc_pend_data_len = atomic_sub_return(data_len_blks, 3389 &mrioc->pend_large_data_sz); 3390 if (tg) { 3391 tg_pend_data_len = atomic_sub_return(data_len_blks, 3392 &tg->pend_large_data_sz); 3393 if (tg->io_divert && ((ioc_pend_data_len <= 3394 mrioc->io_throttle_low) && 3395 (tg_pend_data_len <= tg->low))) { 3396 tg->io_divert = 0; 3397 mpi3mr_set_io_divert_for_all_vd_in_tg( 3398 mrioc, tg, 0); 3399 } 3400 } else { 3401 if (ioc_pend_data_len <= mrioc->io_throttle_low) 3402 stgt_priv_data->io_divert = 0; 3403 } 3404 } else if (unlikely((stgt_priv_data && stgt_priv_data->io_divert))) { 3405 ioc_pend_data_len = atomic_read(&mrioc->pend_large_data_sz); 3406 if (!tg) { 3407 if (ioc_pend_data_len <= mrioc->io_throttle_low) 3408 stgt_priv_data->io_divert = 0; 3409 3410 } else if (ioc_pend_data_len <= mrioc->io_throttle_low) { 3411 tg_pend_data_len = atomic_read(&tg->pend_large_data_sz); 3412 if (tg->io_divert && (tg_pend_data_len <= tg->low)) { 3413 tg->io_divert = 0; 3414 mpi3mr_set_io_divert_for_all_vd_in_tg( 3415 mrioc, tg, 0); 3416 } 3417 } 3418 } 3419 3420 if (success_desc) { 
3421 scmd->result = DID_OK << 16;
3422 goto out_success;
3423 }
3424
3425 scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_count);
3426 if (ioc_status == MPI3_IOCSTATUS_SCSI_DATA_UNDERRUN &&
3427 xfer_count == 0 && (scsi_status == MPI3_SCSI_STATUS_BUSY ||
3428 scsi_status == MPI3_SCSI_STATUS_RESERVATION_CONFLICT ||
3429 scsi_status == MPI3_SCSI_STATUS_TASK_SET_FULL))
3430 ioc_status = MPI3_IOCSTATUS_SUCCESS;
3431
3432 if ((sense_state == MPI3_SCSI_STATE_SENSE_VALID) && sense_count &&
3433 sense_buf) {
3434 u32 sz = min_t(u32, SCSI_SENSE_BUFFERSIZE, sense_count);
3435
3436 memcpy(scmd->sense_buffer, sense_buf, sz);
3437 }
3438
3439 switch (ioc_status) {
3440 case MPI3_IOCSTATUS_BUSY:
3441 case MPI3_IOCSTATUS_INSUFFICIENT_RESOURCES:
3442 scmd->result = SAM_STAT_BUSY;
3443 break;
3444 case MPI3_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
3445 scmd->result = DID_NO_CONNECT << 16;
3446 break;
3447 case MPI3_IOCSTATUS_SCSI_IOC_TERMINATED:
3448 if (ioc_loginfo == IOC_LOGINFO_SATA_NCQ_FAIL_AFTER_ERR) {
3449 /*
3450 * This is an ATA NCQ command aborted due to another NCQ
3451 * command failure. We must retry this command
3452 * immediately but without incrementing its retry
3453 * counter.
3454 */
3455 WARN_ON_ONCE(xfer_count != 0);
3456 scmd->result = DID_IMM_RETRY << 16;
3457 } else {
3458 scmd->result = DID_SOFT_ERROR << 16;
3459 }
3460 break;
3461 case MPI3_IOCSTATUS_SCSI_TASK_TERMINATED:
3462 case MPI3_IOCSTATUS_SCSI_EXT_TERMINATED:
3463 scmd->result = DID_RESET << 16;
3464 break;
3465 case MPI3_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
3466 if ((xfer_count == 0) || (scmd->underflow > xfer_count))
3467 scmd->result = DID_SOFT_ERROR << 16;
3468 else
3469 scmd->result = (DID_OK << 16) | scsi_status;
3470 break;
3471 case MPI3_IOCSTATUS_SCSI_DATA_UNDERRUN:
3472 scmd->result = (DID_OK << 16) | scsi_status;
3473 if (sense_state == MPI3_SCSI_STATE_SENSE_VALID)
3474 break;
3475 if (xfer_count < scmd->underflow) {
3476 if (scsi_status == SAM_STAT_BUSY)
3477 scmd->result = SAM_STAT_BUSY;
3478 else
3479 scmd->result = DID_SOFT_ERROR << 16;
3480 } else if ((scsi_state & (MPI3_SCSI_STATE_NO_SCSI_STATUS)) ||
3481 (sense_state != MPI3_SCSI_STATE_SENSE_NOT_AVAILABLE))
3482 scmd->result = DID_SOFT_ERROR << 16;
3483 else if (scsi_state & MPI3_SCSI_STATE_TERMINATED)
3484 scmd->result = DID_RESET << 16;
3485 break;
3486 case MPI3_IOCSTATUS_SCSI_DATA_OVERRUN:
3487 scsi_set_resid(scmd, 0);
3488 fallthrough;
3489 case MPI3_IOCSTATUS_SCSI_RECOVERED_ERROR:
3490 case MPI3_IOCSTATUS_SUCCESS:
3491 scmd->result = (DID_OK << 16) | scsi_status;
3492 if ((scsi_state & (MPI3_SCSI_STATE_NO_SCSI_STATUS)) ||
3493 (sense_state == MPI3_SCSI_STATE_SENSE_FAILED) ||
3494 (sense_state == MPI3_SCSI_STATE_SENSE_BUFF_Q_EMPTY))
3495 scmd->result = DID_SOFT_ERROR << 16;
3496 else if (scsi_state & MPI3_SCSI_STATE_TERMINATED)
3497 scmd->result = DID_RESET << 16;
3498 break;
3499 case MPI3_IOCSTATUS_EEDP_GUARD_ERROR:
3500 case MPI3_IOCSTATUS_EEDP_REF_TAG_ERROR:
3501 case MPI3_IOCSTATUS_EEDP_APP_TAG_ERROR:
3502 mpi3mr_map_eedp_error(scmd, ioc_status);
3503 break;
3504 case MPI3_IOCSTATUS_SCSI_PROTOCOL_ERROR:
3505 case MPI3_IOCSTATUS_INVALID_FUNCTION:
3506 case MPI3_IOCSTATUS_INVALID_SGL:
3507 case MPI3_IOCSTATUS_INTERNAL_ERROR:
3508 case MPI3_IOCSTATUS_INVALID_FIELD:
3509 case MPI3_IOCSTATUS_INVALID_STATE:
3510 case MPI3_IOCSTATUS_SCSI_IO_DATA_ERROR:
3511 case MPI3_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
3512 case MPI3_IOCSTATUS_INSUFFICIENT_POWER:
3513 default:
3514 scmd->result = DID_SOFT_ERROR << 16;
3515 break;
3516 }
3517
3518 if (scmd->result != (DID_OK << 16) &&
(scmd->cmnd[0] != ATA_12) &&
3519 (scmd->cmnd[0] != ATA_16) &&
3520 mrioc->logging_level & MPI3_DEBUG_SCSI_ERROR) {
3521 ioc_info(mrioc, "%s :scmd->result 0x%x\n", __func__,
3522 scmd->result);
3523 scsi_print_command(scmd);
3524 ioc_info(mrioc,
3525 "%s :Command issued to handle 0x%02x returned with error 0x%04x loginfo 0x%08x, qid %d\n",
3526 __func__, dev_handle, ioc_status, ioc_loginfo,
3527 priv->req_q_idx + 1);
3528 ioc_info(mrioc,
3529 " host_tag %d scsi_state 0x%02x scsi_status 0x%02x, xfer_cnt %d resp_data 0x%x\n",
3530 host_tag, scsi_state, scsi_status, xfer_count, resp_data);
3531 if (sense_buf) {
3532 scsi_normalize_sense(sense_buf, sense_count, &sshdr);
3533 ioc_info(mrioc,
3534 "%s :sense_count 0x%x, sense_key 0x%x ASC 0x%x, ASCQ 0x%x\n",
3535 __func__, sense_count, sshdr.sense_key,
3536 sshdr.asc, sshdr.ascq);
3537 }
3538 }
3539 out_success:
3540 if (priv->meta_sg_valid) {
3541 dma_unmap_sg(&mrioc->pdev->dev, scsi_prot_sglist(scmd),
3542 scsi_prot_sg_count(scmd), scmd->sc_data_direction);
3543 }
3544 mpi3mr_clear_scmd_priv(mrioc, scmd);
3545 scsi_dma_unmap(scmd);
3546 scsi_done(scmd);
3547 out:
3548 if (sense_buf)
3549 mpi3mr_repost_sense_buf(mrioc,
3550 le64_to_cpu(scsi_reply->sense_data_buffer_address));
3551 }
3552
3553 /**
3554 * mpi3mr_get_chain_idx - get free chain buffer index
3555 * @mrioc: Adapter instance reference
3556 *
3557 * Try to get a free chain buffer index from the free pool.
3558 *
3559 * Return: -1 on failure or the free chain buffer index
3560 */
3561 static int mpi3mr_get_chain_idx(struct mpi3mr_ioc *mrioc)
3562 {
3563 u8 retry_count = 5;
3564 int cmd_idx = -1;
3565 unsigned long flags;
3566
3567 spin_lock_irqsave(&mrioc->chain_buf_lock, flags);
3568 do {
3569 cmd_idx = find_first_zero_bit(mrioc->chain_bitmap,
3570 mrioc->chain_buf_count);
3571 if (cmd_idx < mrioc->chain_buf_count) {
3572 set_bit(cmd_idx, mrioc->chain_bitmap);
3573 break;
3574 }
3575 cmd_idx = -1;
3576 } while (retry_count--);
3577 spin_unlock_irqrestore(&mrioc->chain_buf_lock, flags);
3578 return cmd_idx;
3579 }
3580
3581 /**
3582 * mpi3mr_prepare_sg_scmd - build scatter gather list
3583 * @mrioc: Adapter instance reference
3584 * @scmd: SCSI command reference
3585 * @scsiio_req: MPI3 SCSI IO request
3586 *
3587 * This function maps SCSI command's data and protection SGEs to
3588 * MPI request SGEs. If required, an additional 4K chain buffer
3589 * is used to send the SGEs.
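 *
 * The number of SGEs that fit in the main request frame is derived
 * from facts.op_req_sz; one slot is reserved for an EEDP extended
 * SGE and one for the meta SGL when those are present.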
3590 * 3591 * Return: 0 on success, -ENOMEM on dma_map_sg failure 3592 */ 3593 static int mpi3mr_prepare_sg_scmd(struct mpi3mr_ioc *mrioc, 3594 struct scsi_cmnd *scmd, struct mpi3_scsi_io_request *scsiio_req) 3595 { 3596 dma_addr_t chain_dma; 3597 struct scatterlist *sg_scmd; 3598 void *sg_local, *chain; 3599 u32 chain_length; 3600 int sges_left, chain_idx; 3601 u32 sges_in_segment; 3602 u8 simple_sgl_flags; 3603 u8 simple_sgl_flags_last; 3604 u8 last_chain_sgl_flags; 3605 struct chain_element *chain_req; 3606 struct scmd_priv *priv = NULL; 3607 u32 meta_sg = le32_to_cpu(scsiio_req->flags) & 3608 MPI3_SCSIIO_FLAGS_DMAOPERATION_HOST_PI; 3609 3610 priv = scsi_cmd_priv(scmd); 3611 3612 simple_sgl_flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_SIMPLE | 3613 MPI3_SGE_FLAGS_DLAS_SYSTEM; 3614 simple_sgl_flags_last = simple_sgl_flags | 3615 MPI3_SGE_FLAGS_END_OF_LIST; 3616 last_chain_sgl_flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_LAST_CHAIN | 3617 MPI3_SGE_FLAGS_DLAS_SYSTEM; 3618 3619 if (meta_sg) 3620 sg_local = &scsiio_req->sgl[MPI3_SCSIIO_METASGL_INDEX]; 3621 else 3622 sg_local = &scsiio_req->sgl; 3623 3624 if (!scsiio_req->data_length && !meta_sg) { 3625 mpi3mr_build_zero_len_sge(sg_local); 3626 return 0; 3627 } 3628 3629 if (meta_sg) { 3630 sg_scmd = scsi_prot_sglist(scmd); 3631 sges_left = dma_map_sg(&mrioc->pdev->dev, 3632 scsi_prot_sglist(scmd), 3633 scsi_prot_sg_count(scmd), 3634 scmd->sc_data_direction); 3635 priv->meta_sg_valid = 1; /* To unmap meta sg DMA */ 3636 } else { 3637 /* 3638 * Some firmware versions byte-swap the REPORT ZONES command 3639 * reply from ATA-ZAC devices by directly accessing in the host 3640 * buffer. This does not respect the default command DMA 3641 * direction and causes IOMMU page faults on some architectures 3642 * with an IOMMU enforcing write mappings (e.g. AMD hosts). 3643 * Avoid such issue by making the REPORT ZONES buffer mapping 3644 * bi-directional. 
3645 */
3646 if (scmd->cmnd[0] == ZBC_IN && scmd->cmnd[1] == ZI_REPORT_ZONES)
3647 scmd->sc_data_direction = DMA_BIDIRECTIONAL;
3648 sg_scmd = scsi_sglist(scmd);
3649 sges_left = scsi_dma_map(scmd);
3650 }
3651
3652 if (sges_left < 0) {
3653 sdev_printk(KERN_ERR, scmd->device,
3654 "scsi_dma_map failed: request for %d bytes!\n",
3655 scsi_bufflen(scmd));
3656 return -ENOMEM;
3657 }
3658 if (sges_left > mrioc->max_sgl_entries) {
3659 sdev_printk(KERN_ERR, scmd->device,
3660 "scsi_dma_map returned unsupported sge count %d!\n",
3661 sges_left);
3662 return -ENOMEM;
3663 }
3664
3665 sges_in_segment = (mrioc->facts.op_req_sz -
3666 offsetof(struct mpi3_scsi_io_request, sgl)) / sizeof(struct mpi3_sge_common);
3667
3668 if (scsiio_req->sgl[0].eedp.flags ==
3669 MPI3_SGE_FLAGS_ELEMENT_TYPE_EXTENDED && !meta_sg) {
3670 sg_local += sizeof(struct mpi3_sge_common);
3671 sges_in_segment--;
3672 /* Reserve 1st segment (scsiio_req->sgl[0]) for eedp */
3673 }
3674
3675 if (scsiio_req->msg_flags ==
3676 MPI3_SCSIIO_MSGFLAGS_METASGL_VALID && !meta_sg) {
3677 sges_in_segment--;
3678 /* Reserve last segment (scsiio_req->sgl[3]) for meta sg */
3679 }
3680
3681 if (meta_sg)
3682 sges_in_segment = 1;
3683
3684 if (sges_left <= sges_in_segment)
3685 goto fill_in_last_segment;
3686
3687 /* fill in main message segment when there is a chain following */
3688 while (sges_in_segment > 1) {
3689 mpi3mr_add_sg_single(sg_local, simple_sgl_flags,
3690 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
3691 sg_scmd = sg_next(sg_scmd);
3692 sg_local += sizeof(struct mpi3_sge_common);
3693 sges_left--;
3694 sges_in_segment--;
3695 }
3696
3697 chain_idx = mpi3mr_get_chain_idx(mrioc);
3698 if (chain_idx < 0)
3699 return -1;
3700 chain_req = &mrioc->chain_sgl_list[chain_idx];
3701 if (meta_sg)
3702 priv->meta_chain_idx = chain_idx;
3703 else
3704 priv->chain_idx = chain_idx;
3705
3706 chain = chain_req->addr;
3707 chain_dma = chain_req->dma_addr;
3708 sges_in_segment = sges_left;
3709 chain_length = sges_in_segment * sizeof(struct mpi3_sge_common);
3710
3711 mpi3mr_add_sg_single(sg_local, last_chain_sgl_flags,
3712 chain_length, chain_dma);
3713
3714 sg_local = chain;
3715
3716 fill_in_last_segment:
3717 while (sges_left > 0) {
3718 if (sges_left == 1)
3719 mpi3mr_add_sg_single(sg_local,
3720 simple_sgl_flags_last, sg_dma_len(sg_scmd),
3721 sg_dma_address(sg_scmd));
3722 else
3723 mpi3mr_add_sg_single(sg_local, simple_sgl_flags,
3724 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
3725 sg_scmd = sg_next(sg_scmd);
3726 sg_local += sizeof(struct mpi3_sge_common);
3727 sges_left--;
3728 }
3729
3730 return 0;
3731 }
3732
3733 /**
3734 * mpi3mr_build_sg_scmd - build scatter gather list for SCSI IO
3735 * @mrioc: Adapter instance reference
3736 * @scmd: SCSI command reference
3737 * @scsiio_req: MPI3 SCSI IO request
3738 *
3739 * This function calls mpi3mr_prepare_sg_scmd for constructing
3740 * both data SGEs and protection information SGEs in the MPI
3741 * format from the SCSI command as appropriate.
3742 *
3743 * Return: the return value of mpi3mr_prepare_sg_scmd().
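 *
 * Note: when scsiio_req->msg_flags has MPI3_SCSIIO_MSGFLAGS_METASGL_VALID
 * set, mpi3mr_prepare_sg_scmd() runs twice: first for the data SGL and
 * then, with MPI3_SCSIIO_FLAGS_DMAOPERATION_HOST_PI set in the request
 * flags, for the protection information SGL.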
3744 */
3745 static int mpi3mr_build_sg_scmd(struct mpi3mr_ioc *mrioc,
3746 struct scsi_cmnd *scmd, struct mpi3_scsi_io_request *scsiio_req)
3747 {
3748 int ret;
3749
3750 ret = mpi3mr_prepare_sg_scmd(mrioc, scmd, scsiio_req);
3751 if (ret)
3752 return ret;
3753
3754 if (scsiio_req->msg_flags == MPI3_SCSIIO_MSGFLAGS_METASGL_VALID) {
3755 /* There is a valid meta sg */
3756 scsiio_req->flags |=
3757 cpu_to_le32(MPI3_SCSIIO_FLAGS_DMAOPERATION_HOST_PI);
3758 ret = mpi3mr_prepare_sg_scmd(mrioc, scmd, scsiio_req);
3759 }
3760
3761 return ret;
3762 }
3763
3764 /**
3765 * mpi3mr_tm_response_name - get TM response as a string
3766 * @resp_code: TM response code
3767 *
3768 * Convert a known task management response code to a
3769 * readable string.
3770 *
3771 * Return: response code string.
3772 */
3773 static const char *mpi3mr_tm_response_name(u8 resp_code)
3774 {
3775 char *desc;
3776
3777 switch (resp_code) {
3778 case MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE:
3779 desc = "task management request completed";
3780 break;
3781 case MPI3_SCSITASKMGMT_RSPCODE_INVALID_FRAME:
3782 desc = "invalid frame";
3783 break;
3784 case MPI3_SCSITASKMGMT_RSPCODE_TM_FUNCTION_NOT_SUPPORTED:
3785 desc = "task management request not supported";
3786 break;
3787 case MPI3_SCSITASKMGMT_RSPCODE_TM_FAILED:
3788 desc = "task management request failed";
3789 break;
3790 case MPI3_SCSITASKMGMT_RSPCODE_TM_SUCCEEDED:
3791 desc = "task management request succeeded";
3792 break;
3793 case MPI3_SCSITASKMGMT_RSPCODE_TM_INVALID_LUN:
3794 desc = "invalid LUN";
3795 break;
3796 case MPI3_SCSITASKMGMT_RSPCODE_TM_OVERLAPPED_TAG:
3797 desc = "overlapped tag attempted";
3798 break;
3799 case MPI3_SCSITASKMGMT_RSPCODE_IO_QUEUED_ON_IOC:
3800 desc = "task queued, however not sent to target";
3801 break;
3802 case MPI3_SCSITASKMGMT_RSPCODE_TM_NVME_DENIED:
3803 desc = "task management request denied by NVMe device";
3804 break;
3805 default:
3806 desc = "unknown";
3807 break;
3808 }
3809
3810 return desc;
3811 }
3812
/**
 * mpi3mr_poll_pend_io_completions - process pending I/O completions
 * @mrioc: Adapter instance reference
 *
 * Walk each operational reply queue once so that any pending I/O
 * completions are processed.
 *
 * Return: Nothing.
 */
3813 inline void mpi3mr_poll_pend_io_completions(struct mpi3mr_ioc *mrioc)
3814 {
3815 int i;
3816 int num_of_reply_queues =
3817 mrioc->num_op_reply_q + mrioc->op_reply_q_offset;
3818
3819 for (i = mrioc->op_reply_q_offset; i < num_of_reply_queues; i++)
3820 mpi3mr_process_op_reply_q(mrioc,
3821 mrioc->intr_info[i].op_reply_q);
3822 }
3823
3824 /**
3825 * mpi3mr_issue_tm - Issue Task Management request
3826 * @mrioc: Adapter instance reference
3827 * @tm_type: Task Management type
3828 * @handle: Device handle
3829 * @lun: lun ID
3830 * @htag: Host tag of the TM request
3831 * @timeout: TM timeout value
3832 * @drv_cmd: Internal command tracker
3833 * @resp_code: Response code place holder
3834 * @scmd: SCSI command
3835 *
3836 * Issues a Task Management Request to the controller for the
3837 * specified target, lun and command, waits for its completion,
3838 * and checks the TM response. If the TM times out, recovers by
3839 * issuing a controller reset.
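 *
 * A typical invocation, mirroring the error handling callbacks below
 * (the locals shown are theirs)::
 *
 *	ret = mpi3mr_issue_tm(mrioc,
 *	    MPI3_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, dev_handle,
 *	    sdev_priv_data->lun_id, MPI3MR_HOSTTAG_BLK_TMS,
 *	    MPI3MR_RESETTM_TIMEOUT, &mrioc->host_tm_cmds, &resp_code, scmd);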
3840 * 3841 * Return: 0 on success, non-zero on errors 3842 */ 3843 int mpi3mr_issue_tm(struct mpi3mr_ioc *mrioc, u8 tm_type, 3844 u16 handle, uint lun, u16 htag, ulong timeout, 3845 struct mpi3mr_drv_cmd *drv_cmd, 3846 u8 *resp_code, struct scsi_cmnd *scmd) 3847 { 3848 struct mpi3_scsi_task_mgmt_request tm_req; 3849 struct mpi3_scsi_task_mgmt_reply *tm_reply = NULL; 3850 int retval = 0; 3851 struct mpi3mr_tgt_dev *tgtdev = NULL; 3852 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL; 3853 struct scmd_priv *cmd_priv = NULL; 3854 struct scsi_device *sdev = NULL; 3855 struct mpi3mr_sdev_priv_data *sdev_priv_data = NULL; 3856 3857 ioc_info(mrioc, "%s :Issue TM: TM type (0x%x) for devhandle 0x%04x\n", 3858 __func__, tm_type, handle); 3859 if (mrioc->unrecoverable) { 3860 retval = -1; 3861 ioc_err(mrioc, "%s :Issue TM: Unrecoverable controller\n", 3862 __func__); 3863 goto out; 3864 } 3865 3866 memset(&tm_req, 0, sizeof(tm_req)); 3867 mutex_lock(&drv_cmd->mutex); 3868 if (drv_cmd->state & MPI3MR_CMD_PENDING) { 3869 retval = -1; 3870 ioc_err(mrioc, "%s :Issue TM: Command is in use\n", __func__); 3871 mutex_unlock(&drv_cmd->mutex); 3872 goto out; 3873 } 3874 if (mrioc->reset_in_progress) { 3875 retval = -1; 3876 ioc_err(mrioc, "%s :Issue TM: Reset in progress\n", __func__); 3877 mutex_unlock(&drv_cmd->mutex); 3878 goto out; 3879 } 3880 if (mrioc->block_on_pci_err) { 3881 retval = -1; 3882 dprint_tm(mrioc, "sending task management failed due to\n" 3883 "pci error recovery in progress\n"); 3884 mutex_unlock(&drv_cmd->mutex); 3885 goto out; 3886 } 3887 3888 drv_cmd->state = MPI3MR_CMD_PENDING; 3889 drv_cmd->is_waiting = 1; 3890 drv_cmd->callback = NULL; 3891 tm_req.dev_handle = cpu_to_le16(handle); 3892 tm_req.task_type = tm_type; 3893 tm_req.host_tag = cpu_to_le16(htag); 3894 3895 int_to_scsilun(lun, (struct scsi_lun *)tm_req.lun); 3896 tm_req.function = MPI3_FUNCTION_SCSI_TASK_MGMT; 3897 3898 tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle); 3899 3900 if (scmd) { 3901 if (tm_type == MPI3_SCSITASKMGMT_TASKTYPE_ABORT_TASK) { 3902 cmd_priv = scsi_cmd_priv(scmd); 3903 if (!cmd_priv) 3904 goto out_unlock; 3905 3906 struct op_req_qinfo *op_req_q; 3907 3908 op_req_q = &mrioc->req_qinfo[cmd_priv->req_q_idx]; 3909 tm_req.task_host_tag = cpu_to_le16(cmd_priv->host_tag); 3910 tm_req.task_request_queue_id = 3911 cpu_to_le16(op_req_q->qid); 3912 } 3913 sdev = scmd->device; 3914 sdev_priv_data = sdev->hostdata; 3915 scsi_tgt_priv_data = ((sdev_priv_data) ? 3916 sdev_priv_data->tgt_priv_data : NULL); 3917 } else { 3918 if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata) 3919 scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *) 3920 tgtdev->starget->hostdata; 3921 } 3922 3923 if (scsi_tgt_priv_data) 3924 atomic_inc(&scsi_tgt_priv_data->block_io); 3925 3926 if (tgtdev) { 3927 if (tgtdev->dev_type == MPI3_DEVICE_DEVFORM_PCIE) 3928 timeout = cmd_priv ? tgtdev->dev_spec.pcie_inf.abort_to 3929 : tgtdev->dev_spec.pcie_inf.reset_to; 3930 else if (tgtdev->dev_type == MPI3_DEVICE_DEVFORM_VD) 3931 timeout = cmd_priv ? 
tgtdev->dev_spec.vd_inf.abort_to 3932 : tgtdev->dev_spec.vd_inf.reset_to; 3933 } 3934 3935 init_completion(&drv_cmd->done); 3936 retval = mpi3mr_admin_request_post(mrioc, &tm_req, sizeof(tm_req), 1); 3937 if (retval) { 3938 ioc_err(mrioc, "%s :Issue TM: Admin Post failed\n", __func__); 3939 goto out_unlock; 3940 } 3941 wait_for_completion_timeout(&drv_cmd->done, (timeout * HZ)); 3942 3943 if (!(drv_cmd->state & MPI3MR_CMD_COMPLETE)) { 3944 drv_cmd->is_waiting = 0; 3945 retval = -1; 3946 if (!(drv_cmd->state & MPI3MR_CMD_RESET)) { 3947 dprint_tm(mrioc, 3948 "task management request timed out after %ld seconds\n", 3949 timeout); 3950 if (mrioc->logging_level & MPI3_DEBUG_TM) 3951 dprint_dump_req(&tm_req, sizeof(tm_req)/4); 3952 mpi3mr_soft_reset_handler(mrioc, 3953 MPI3MR_RESET_FROM_TM_TIMEOUT, 1); 3954 } 3955 goto out_unlock; 3956 } 3957 3958 if (!(drv_cmd->state & MPI3MR_CMD_REPLY_VALID)) { 3959 dprint_tm(mrioc, "invalid task management reply message\n"); 3960 retval = -1; 3961 goto out_unlock; 3962 } 3963 3964 tm_reply = (struct mpi3_scsi_task_mgmt_reply *)drv_cmd->reply; 3965 3966 switch (drv_cmd->ioc_status) { 3967 case MPI3_IOCSTATUS_SUCCESS: 3968 *resp_code = le32_to_cpu(tm_reply->response_data) & 3969 MPI3MR_RI_MASK_RESPCODE; 3970 break; 3971 case MPI3_IOCSTATUS_SCSI_IOC_TERMINATED: 3972 *resp_code = MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE; 3973 break; 3974 default: 3975 dprint_tm(mrioc, 3976 "task management request to handle(0x%04x) is failed with ioc_status(0x%04x) log_info(0x%08x)\n", 3977 handle, drv_cmd->ioc_status, drv_cmd->ioc_loginfo); 3978 retval = -1; 3979 goto out_unlock; 3980 } 3981 3982 switch (*resp_code) { 3983 case MPI3_SCSITASKMGMT_RSPCODE_TM_SUCCEEDED: 3984 case MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE: 3985 break; 3986 case MPI3_SCSITASKMGMT_RSPCODE_IO_QUEUED_ON_IOC: 3987 if (tm_type != MPI3_SCSITASKMGMT_TASKTYPE_QUERY_TASK) 3988 retval = -1; 3989 break; 3990 default: 3991 retval = -1; 3992 break; 3993 } 3994 3995 dprint_tm(mrioc, 3996 "task management request type(%d) completed for handle(0x%04x) with ioc_status(0x%04x), log_info(0x%08x), termination_count(%d), response:%s(0x%x)\n", 3997 tm_type, handle, drv_cmd->ioc_status, drv_cmd->ioc_loginfo, 3998 le32_to_cpu(tm_reply->termination_count), 3999 mpi3mr_tm_response_name(*resp_code), *resp_code); 4000 4001 if (!retval) { 4002 mpi3mr_ioc_disable_intr(mrioc); 4003 mpi3mr_poll_pend_io_completions(mrioc); 4004 mpi3mr_ioc_enable_intr(mrioc); 4005 mpi3mr_poll_pend_io_completions(mrioc); 4006 mpi3mr_process_admin_reply_q(mrioc); 4007 } 4008 switch (tm_type) { 4009 case MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET: 4010 if (!scsi_tgt_priv_data) 4011 break; 4012 scsi_tgt_priv_data->pend_count = 0; 4013 blk_mq_tagset_busy_iter(&mrioc->shost->tag_set, 4014 mpi3mr_count_tgt_pending, 4015 (void *)scsi_tgt_priv_data->starget); 4016 break; 4017 case MPI3_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET: 4018 if (!sdev_priv_data) 4019 break; 4020 sdev_priv_data->pend_count = 0; 4021 blk_mq_tagset_busy_iter(&mrioc->shost->tag_set, 4022 mpi3mr_count_dev_pending, (void *)sdev); 4023 break; 4024 default: 4025 break; 4026 } 4027 mpi3mr_global_trigger(mrioc, 4028 MPI3_DRIVER2_GLOBALTRIGGER_TASK_MANAGEMENT_ENABLED); 4029 4030 out_unlock: 4031 drv_cmd->state = MPI3MR_CMD_NOTUSED; 4032 mutex_unlock(&drv_cmd->mutex); 4033 if (scsi_tgt_priv_data) 4034 atomic_dec_if_positive(&scsi_tgt_priv_data->block_io); 4035 if (tgtdev) 4036 mpi3mr_tgtdev_put(tgtdev); 4037 out: 4038 return retval; 4039 } 4040 4041 /** 4042 * mpi3mr_bios_param - BIOS param callback 4043 * 
@sdev: SCSI device reference
4044 * @unused: gendisk reference
4045 * @capacity: Capacity in logical sectors
4046 * @params: Parameter array
4047 *
4048 * Just the parameters with heads/sectors/cylinders.
4049 *
4050 * Return: 0 always
4051 */
4052 static int mpi3mr_bios_param(struct scsi_device *sdev,
4053 struct gendisk *unused, sector_t capacity, int params[])
4054 {
4055 int heads;
4056 int sectors;
4057 sector_t cylinders;
4058 ulong dummy;
4059
4060 heads = 64;
4061 sectors = 32;
4062
4063 dummy = heads * sectors;
4064 cylinders = capacity;
4065 sector_div(cylinders, dummy);
4066
4067 if ((ulong)capacity >= 0x200000) {
4068 heads = 255;
4069 sectors = 63;
4070 dummy = heads * sectors;
4071 cylinders = capacity;
4072 sector_div(cylinders, dummy);
4073 }
4074
4075 params[0] = heads;
4076 params[1] = sectors;
4077 params[2] = cylinders;
4078 return 0;
4079 }
4080
4081 /**
4082 * mpi3mr_map_queues - Map queues callback handler
4083 * @shost: SCSI host reference
4084 *
4085 * Maps default and poll queues.
4086 *
4087 * Return: Nothing.
4088 */
4089 static void mpi3mr_map_queues(struct Scsi_Host *shost)
4090 {
4091 struct mpi3mr_ioc *mrioc = shost_priv(shost);
4092 int i, qoff, offset;
4093 struct blk_mq_queue_map *map = NULL;
4094
4095 offset = mrioc->op_reply_q_offset;
4096
4097 for (i = 0, qoff = 0; i < HCTX_MAX_TYPES; i++) {
4098 map = &shost->tag_set.map[i];
4099
4100 map->nr_queues = 0;
4101
4102 if (i == HCTX_TYPE_DEFAULT)
4103 map->nr_queues = mrioc->default_qcount;
4104 else if (i == HCTX_TYPE_POLL)
4105 map->nr_queues = mrioc->active_poll_qcount;
4106
4107 if (!map->nr_queues) {
4108 BUG_ON(i == HCTX_TYPE_DEFAULT);
4109 continue;
4110 }
4111
4112 /*
4113 * The poll queue(s) doesn't have an IRQ (and hence IRQ
4114 * affinity), so use the regular blk-mq cpu mapping
4115 */
4116 map->queue_offset = qoff;
4117 if (i != HCTX_TYPE_POLL)
4118 blk_mq_map_hw_queues(map, &mrioc->pdev->dev, offset);
4119 else
4120 blk_mq_map_queues(map);
4121
4122 qoff += map->nr_queues;
4123 offset += map->nr_queues;
4124 }
4125 }
4126
4127 /**
4128 * mpi3mr_get_fw_pending_ios - Calculate pending I/O count
4129 * @mrioc: Adapter instance reference
4130 *
4131 * Calculate the pending I/Os for the controller and return.
4132 *
4133 * Return: Number of pending I/Os
4134 */
4135 static inline int mpi3mr_get_fw_pending_ios(struct mpi3mr_ioc *mrioc)
4136 {
4137 u16 i;
4138 uint pend_ios = 0;
4139
4140 for (i = 0; i < mrioc->num_op_reply_q; i++)
4141 pend_ios += atomic_read(&mrioc->op_reply_qinfo[i].pend_ios);
4142 return pend_ios;
4143 }
4144
4145 /**
4146 * mpi3mr_print_pending_host_io - print pending I/Os
4147 * @mrioc: Adapter instance reference
4148 *
4149 * Print the number of pending I/Os and each I/O's details prior
4150 * to reset, for debug purposes.
4151 *
4152 * Return: Nothing
4153 */
4154 static void mpi3mr_print_pending_host_io(struct mpi3mr_ioc *mrioc)
4155 {
4156 struct Scsi_Host *shost = mrioc->shost;
4157
4158 ioc_info(mrioc, "%s :Pending commands prior to reset: %d\n",
4159 __func__, mpi3mr_get_fw_pending_ios(mrioc));
4160 blk_mq_tagset_busy_iter(&shost->tag_set,
4161 mpi3mr_print_scmd, (void *)mrioc);
4162 }
4163
4164 /**
4165 * mpi3mr_wait_for_host_io - block for I/Os to complete
4166 * @mrioc: Adapter instance reference
4167 * @timeout: timeout in seconds
 *
4168 * Waits for pending I/Os for the given adapter to complete or
4169 * to hit the timeout.
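 *
 * The wait polls mpi3mr_get_fw_pending_ios() once per second and gives
 * up early if the controller leaves the READY state. For example,
 * mpi3mr_eh_bus_reset() uses this with a timeout of
 * MPI3MR_RAID_ERRREC_RESET_TIMEOUT seconds before checking the pending
 * I/O count.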
4170 *
4171 * Return: Nothing
4172 */
4173 void mpi3mr_wait_for_host_io(struct mpi3mr_ioc *mrioc, u32 timeout)
4174 {
4175 enum mpi3mr_iocstate iocstate;
4176 int i = 0;
4177
4178 iocstate = mpi3mr_get_iocstate(mrioc);
4179 if (iocstate != MRIOC_STATE_READY)
4180 return;
4181
4182 if (!mpi3mr_get_fw_pending_ios(mrioc))
4183 return;
4184 ioc_info(mrioc,
4185 "%s :Waiting for %d seconds prior to reset for %d I/O\n",
4186 __func__, timeout, mpi3mr_get_fw_pending_ios(mrioc));
4187
4188 for (i = 0; i < timeout; i++) {
4189 if (!mpi3mr_get_fw_pending_ios(mrioc))
4190 break;
4191 iocstate = mpi3mr_get_iocstate(mrioc);
4192 if (iocstate != MRIOC_STATE_READY)
4193 break;
4194 msleep(1000);
4195 }
4196
4197 ioc_info(mrioc, "%s :Pending I/Os after wait is: %d\n", __func__,
4198 mpi3mr_get_fw_pending_ios(mrioc));
4199 }
4200
4201 /**
4202 * mpi3mr_setup_divert_ws - Setup Divert IO flag for write same
4203 * @mrioc: Adapter instance reference
4204 * @scmd: SCSI command reference
4205 * @scsiio_req: MPI3 SCSI IO request
4206 * @scsiio_flags: Pointer to MPI3 SCSI IO Flags
4207 * @wslen: write same max length
4208 *
4209 * Gets the unmap and ndob bits and the number of blocks from a
4210 * WRITE SAME SCSI I/O and, based on these values, sets the divert
4211 * I/O flag and the reason for diverting the I/O to firmware.
4212 *
4213 * Return: Nothing
4214 */
4215 static inline void mpi3mr_setup_divert_ws(struct mpi3mr_ioc *mrioc,
4216 struct scsi_cmnd *scmd, struct mpi3_scsi_io_request *scsiio_req,
4217 u32 *scsiio_flags, u16 wslen)
4218 {
4219 u8 unmap = 0, ndob = 0;
4220 u8 opcode = scmd->cmnd[0];
4221 u32 num_blocks = 0;
4222 u16 sa = (scmd->cmnd[8] << 8) | (scmd->cmnd[9]);
4223
4224 if (opcode == WRITE_SAME_16) {
4225 unmap = scmd->cmnd[1] & 0x08;
4226 ndob = scmd->cmnd[1] & 0x01;
4227 num_blocks = get_unaligned_be32(scmd->cmnd + 10);
4228 } else if ((opcode == VARIABLE_LENGTH_CMD) && (sa == WRITE_SAME_32)) {
4229 unmap = scmd->cmnd[10] & 0x08;
4230 ndob = scmd->cmnd[10] & 0x01;
4231 num_blocks = get_unaligned_be32(scmd->cmnd + 28);
4232 } else
4233 return;
4234
4235 if ((unmap) && (ndob) && (num_blocks > wslen)) {
4236 scsiio_req->msg_flags |=
4237 MPI3_SCSIIO_MSGFLAGS_DIVERT_TO_FIRMWARE;
4238 *scsiio_flags |=
4239 MPI3_SCSIIO_FLAGS_DIVERT_REASON_WRITE_SAME_TOO_LARGE;
4240 }
4241 }
4242
4243 /**
4244 * mpi3mr_eh_host_reset - Host reset error handling callback
4245 * @scmd: SCSI command reference
4246 *
4247 * Issues a controller reset.
4248 *
4249 * Return: SUCCESS on successful reset, else FAILED
4250 */
4251 static int mpi3mr_eh_host_reset(struct scsi_cmnd *scmd)
4252 {
4253 struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host);
4254 int retval = FAILED, ret;
4255
4256 ret = mpi3mr_soft_reset_handler(mrioc,
4257 MPI3MR_RESET_FROM_EH_HOS, 1);
4258 if (ret)
4259 goto out;
4260
4261 retval = SUCCESS;
4262 out:
4263 sdev_printk(KERN_INFO, scmd->device,
4264 "Host reset is %s for scmd(%p)\n",
4265 ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
4266
4267 return retval;
4268 }
4269
4270 /**
4271 * mpi3mr_eh_bus_reset - Bus reset error handling callback
4272 * @scmd: SCSI command reference
4273 *
4274 * Checks whether pending I/Os are present for the RAID volume;
4275 * if not, there's no need to reset the adapter.
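 *
 * Only virtual disk (RAID volume) targets are considered here: the
 * handler first drains outstanding I/O via mpi3mr_wait_for_host_io()
 * and, if nothing remains pending once any in-progress reset settles,
 * reports SUCCESS without touching the adapter.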
4276 *
4277 * Return: SUCCESS on successful reset, else FAILED
4278 */
4279 static int mpi3mr_eh_bus_reset(struct scsi_cmnd *scmd)
4280 {
4281 struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host);
4282 struct mpi3mr_stgt_priv_data *stgt_priv_data;
4283 struct mpi3mr_sdev_priv_data *sdev_priv_data;
4284 u8 dev_type = MPI3_DEVICE_DEVFORM_VD;
4285 int retval = FAILED;
4286 unsigned int timeout = MPI3MR_RESET_TIMEOUT;
4287
4288 sdev_priv_data = scmd->device->hostdata;
4289 if (sdev_priv_data && sdev_priv_data->tgt_priv_data) {
4290 stgt_priv_data = sdev_priv_data->tgt_priv_data;
4291 dev_type = stgt_priv_data->dev_type;
4292 }
4293
4294 if (dev_type == MPI3_DEVICE_DEVFORM_VD) {
4295 mpi3mr_wait_for_host_io(mrioc,
4296 MPI3MR_RAID_ERRREC_RESET_TIMEOUT);
4297 if (!mpi3mr_get_fw_pending_ios(mrioc)) {
4298 while (mrioc->reset_in_progress ||
4299 mrioc->prepare_for_reset ||
4300 mrioc->block_on_pci_err) {
4301 ssleep(1);
4302 if (!timeout--) {
4303 retval = FAILED;
4304 goto out;
4305 }
4306 }
4307 retval = SUCCESS;
4308 goto out;
4309 }
4310 }
4311 if (retval == FAILED)
4312 mpi3mr_print_pending_host_io(mrioc);
4313
4314 out:
4315 sdev_printk(KERN_INFO, scmd->device,
4316 "Bus reset is %s for scmd(%p)\n",
4317 ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
4318 return retval;
4319 }
4320
4321 /**
4322 * mpi3mr_eh_target_reset - Target reset error handling callback
4323 * @scmd: SCSI command reference
4324 *
4325 * Issues a Target Reset Task Management request, verifies the
4326 * scmd is terminated successfully, and returns status accordingly.
4327 *
4328 * Return: SUCCESS on successful termination of the scmd, else
4329 * FAILED
4330 */
4331 static int mpi3mr_eh_target_reset(struct scsi_cmnd *scmd)
4332 {
4333 struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host);
4334 struct mpi3mr_stgt_priv_data *stgt_priv_data;
4335 struct mpi3mr_sdev_priv_data *sdev_priv_data;
4336 u16 dev_handle;
4337 u8 resp_code = 0;
4338 int retval = FAILED, ret = 0;
4339
4340 sdev_printk(KERN_INFO, scmd->device,
4341 "Attempting Target Reset! scmd(%p)\n", scmd);
4342 scsi_print_command(scmd);
4343
4344 sdev_priv_data = scmd->device->hostdata;
4345 if (!sdev_priv_data || !sdev_priv_data->tgt_priv_data) {
4346 sdev_printk(KERN_INFO, scmd->device,
4347 "SCSI device is not available\n");
4348 retval = SUCCESS;
4349 goto out;
4350 }
4351
4352 stgt_priv_data = sdev_priv_data->tgt_priv_data;
4353 dev_handle = stgt_priv_data->dev_handle;
4354 if (stgt_priv_data->dev_removed) {
4355 struct scmd_priv *cmd_priv = scsi_cmd_priv(scmd);
4356 sdev_printk(KERN_INFO, scmd->device,
4357 "%s:target(handle = 0x%04x) is removed, target reset is not issued\n",
4358 mrioc->name, dev_handle);
4359 if (!cmd_priv->in_lld_scope || cmd_priv->host_tag == MPI3MR_HOSTTAG_INVALID)
4360 retval = SUCCESS;
4361 else
4362 retval = FAILED;
4363 goto out;
4364 }
4365 sdev_printk(KERN_INFO, scmd->device,
4366 "Target Reset is issued to handle(0x%04x)\n",
4367 dev_handle);
4368
4369 ret = mpi3mr_issue_tm(mrioc,
4370 MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET, dev_handle,
4371 sdev_priv_data->lun_id, MPI3MR_HOSTTAG_BLK_TMS,
4372 MPI3MR_RESETTM_TIMEOUT, &mrioc->host_tm_cmds, &resp_code, scmd);
4373
4374 if (ret)
4375 goto out;
4376
4377 if (stgt_priv_data->pend_count) {
4378 sdev_printk(KERN_INFO, scmd->device,
4379 "%s: target has %d pending commands, target reset failed\n",
4380 mrioc->name, stgt_priv_data->pend_count);
4381 goto out;
4382 }
4383
4384 retval = SUCCESS;
4385 out:
4386 sdev_printk(KERN_INFO, scmd->device,
4387 "%s: target reset is %s for scmd(%p)\n", mrioc->name,
4388 ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
4389
4390 return retval;
4391 }
4392
4393 /**
4394 * mpi3mr_eh_dev_reset - Device reset error handling callback
4395 * @scmd: SCSI command reference
4396 *
4397 * Issues a LUN Reset Task Management request, verifies the
4398 * scmd is terminated successfully, and returns status accordingly.
4399 *
4400 * Return: SUCCESS on successful termination of the scmd, else
4401 * FAILED
4402 */
4403 static int mpi3mr_eh_dev_reset(struct scsi_cmnd *scmd)
4404 {
4405 struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host);
4406 struct mpi3mr_stgt_priv_data *stgt_priv_data;
4407 struct mpi3mr_sdev_priv_data *sdev_priv_data;
4408 u16 dev_handle;
4409 u8 resp_code = 0;
4410 int retval = FAILED, ret = 0;
4411
4412 sdev_printk(KERN_INFO, scmd->device,
4413 "Attempting Device(lun) Reset! scmd(%p)\n", scmd);
4414 scsi_print_command(scmd);
4415
4416 sdev_priv_data = scmd->device->hostdata;
4417 if (!sdev_priv_data || !sdev_priv_data->tgt_priv_data) {
4418 sdev_printk(KERN_INFO, scmd->device,
4419 "SCSI device is not available\n");
4420 retval = SUCCESS;
4421 goto out;
4422 }
4423
4424 stgt_priv_data = sdev_priv_data->tgt_priv_data;
4425 dev_handle = stgt_priv_data->dev_handle;
4426 if (stgt_priv_data->dev_removed) {
4427 struct scmd_priv *cmd_priv = scsi_cmd_priv(scmd);
4428 sdev_printk(KERN_INFO, scmd->device,
4429 "%s: device(handle = 0x%04x) is removed, device(LUN) reset is not issued\n",
4430 mrioc->name, dev_handle);
4431 if (!cmd_priv->in_lld_scope || cmd_priv->host_tag == MPI3MR_HOSTTAG_INVALID)
4432 retval = SUCCESS;
4433 else
4434 retval = FAILED;
4435 goto out;
4436 }
4437 sdev_printk(KERN_INFO, scmd->device,
4438 "Device(lun) Reset is issued to handle(0x%04x)\n", dev_handle);
4439
4440 ret = mpi3mr_issue_tm(mrioc,
4441 MPI3_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, dev_handle,
4442 sdev_priv_data->lun_id, MPI3MR_HOSTTAG_BLK_TMS,
4443 MPI3MR_RESETTM_TIMEOUT, &mrioc->host_tm_cmds, &resp_code, scmd);
4444
4445 if (ret)
4446 goto out;
4447
4448 if (sdev_priv_data->pend_count) {
4449 sdev_printk(KERN_INFO, scmd->device,
4450 "%s: device has %d pending commands, device(LUN) reset failed\n",
4451 mrioc->name, sdev_priv_data->pend_count);
4452 goto out;
4453 }
4454 retval = SUCCESS;
4455 out:
4456 sdev_printk(KERN_INFO, scmd->device,
4457 "%s: device(LUN) reset is %s for scmd(%p)\n", mrioc->name,
4458 ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
4459
4460 return retval;
4461 }
4462
4463 /**
4464 * mpi3mr_eh_abort - Callback function for abort error handling
4465 * @scmd: SCSI command reference
4466 *
4467 * Issues Abort Task Management if the command is in LLD scope,
4468 * verifies that it was aborted successfully, and returns status
4469 * accordingly.
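 *
 * Success is judged by the command's in_lld_scope flag: if the firmware
 * has returned the command by the time the ABORT TASK TM completes,
 * mpi3mr_clear_scmd_priv() has cleared the flag and SUCCESS is returned.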
4470 *
4471 * Return: SUCCESS if the abort was successful, otherwise FAILED
4472 */
4473 static int mpi3mr_eh_abort(struct scsi_cmnd *scmd)
4474 {
4475 struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host);
4476 struct mpi3mr_stgt_priv_data *stgt_priv_data;
4477 struct mpi3mr_sdev_priv_data *sdev_priv_data;
4478 struct scmd_priv *cmd_priv;
4479 u16 dev_handle, timeout = MPI3MR_ABORTTM_TIMEOUT;
4480 u8 resp_code = 0;
4481 int retval = FAILED, ret = 0;
4482 struct request *rq = scsi_cmd_to_rq(scmd);
4483 unsigned long scmd_age_ms = jiffies_to_msecs(jiffies - scmd->jiffies_at_alloc);
4484 unsigned long scmd_age_sec = scmd_age_ms / 1000;
4485
4486 sdev_printk(KERN_INFO, scmd->device,
4487 "%s: attempting abort task for scmd(%p)\n", mrioc->name, scmd);
4488
4489 sdev_printk(KERN_INFO, scmd->device,
4490 "%s: scmd(0x%p) is outstanding for %lus %lums, timeout %us, retries %d, allowed %d\n",
4491 mrioc->name, scmd, scmd_age_sec, scmd_age_ms % 1000, rq->timeout / HZ,
4492 scmd->retries, scmd->allowed);
4493
4494 scsi_print_command(scmd);
4495
4496 sdev_priv_data = scmd->device->hostdata;
4497 if (!sdev_priv_data || !sdev_priv_data->tgt_priv_data) {
4498 sdev_printk(KERN_INFO, scmd->device,
4499 "%s: Device not available, Skip issuing abort task\n",
4500 mrioc->name);
4501 retval = SUCCESS;
4502 goto out;
4503 }
4504
4505 stgt_priv_data = sdev_priv_data->tgt_priv_data;
4506 dev_handle = stgt_priv_data->dev_handle;
4507
4508 cmd_priv = scsi_cmd_priv(scmd);
4509 if (!cmd_priv->in_lld_scope ||
4510 cmd_priv->host_tag == MPI3MR_HOSTTAG_INVALID) {
4511 sdev_printk(KERN_INFO, scmd->device,
4512 "%s: scmd (0x%p) not in LLD scope, Skip issuing Abort Task\n",
4513 mrioc->name, scmd);
4514 retval = SUCCESS;
4515 goto out;
4516 }
4517
4518 if (stgt_priv_data->dev_removed) {
4519 sdev_printk(KERN_INFO, scmd->device,
4520 "%s: Device (handle = 0x%04x) removed, Skip issuing Abort Task\n",
4521 mrioc->name, dev_handle);
4522 retval = FAILED;
4523 goto out;
4524 }
4525
4526 ret = mpi3mr_issue_tm(mrioc, MPI3_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
4527 dev_handle, sdev_priv_data->lun_id, MPI3MR_HOSTTAG_BLK_TMS,
4528 timeout, &mrioc->host_tm_cmds, &resp_code, scmd);
4529
4530 if (ret)
4531 goto out;
4532
4533 if (cmd_priv->in_lld_scope) {
4534 sdev_printk(KERN_INFO, scmd->device,
4535 "%s: Abort task failed. scmd (0x%p) was not terminated\n",
4536 mrioc->name, scmd);
4537 goto out;
4538 }
4539
4540 retval = SUCCESS;
4541 out:
4542 sdev_printk(KERN_INFO, scmd->device,
4543 "%s: Abort Task %s for scmd (0x%p)\n", mrioc->name,
4544 ((retval == SUCCESS) ? "SUCCEEDED" : "FAILED"), scmd);
4545
4546 return retval;
4547 }
4548
4549 /**
4550 * mpi3mr_scan_start - Scan start callback handler
4551 * @shost: SCSI host reference
4552 *
4553 * Issue port enable request asynchronously.
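 *
 * Completion of the port enable is detected later, when the SCSI
 * midlayer polls mpi3mr_scan_finished().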
4554 *
4555 * Return: Nothing
4556 */
4557 static void mpi3mr_scan_start(struct Scsi_Host *shost)
4558 {
4559 struct mpi3mr_ioc *mrioc = shost_priv(shost);
4560
4561 mrioc->scan_started = 1;
4562 ioc_info(mrioc, "%s :Issuing Port Enable\n", __func__);
4563 if (mpi3mr_issue_port_enable(mrioc, 1)) {
4564 ioc_err(mrioc, "%s :Issuing port enable failed\n", __func__);
4565 mrioc->scan_started = 0;
4566 mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR;
4567 }
4568 }
4569
4570 /**
4571 * mpi3mr_scan_finished - Scan finished callback handler
4572 * @shost: SCSI host reference
4573 * @time: Jiffies from the scan start
4574 *
4575 * Checks whether the port enable is completed, timed out or
4576 * failed and sets the scan status accordingly after taking any
4577 * required recovery actions.
4578 *
4579 * Return: 1 on scan finished or timed out, 0 for in progress
4580 */
4581 static int mpi3mr_scan_finished(struct Scsi_Host *shost,
4582 unsigned long time)
4583 {
4584 struct mpi3mr_ioc *mrioc = shost_priv(shost);
4585 u32 pe_timeout = MPI3MR_PORTENABLE_TIMEOUT;
4586 u32 ioc_status = readl(&mrioc->sysif_regs->ioc_status);
4587
4588 if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) ||
4589 (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) {
4590 ioc_err(mrioc, "port enable failed due to fault or reset\n");
4591 mpi3mr_print_fault_info(mrioc);
4592 mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR;
4593 mrioc->scan_started = 0;
4594 mrioc->init_cmds.is_waiting = 0;
4595 mrioc->init_cmds.callback = NULL;
4596 mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
4597 }
4598
4599 if (time >= (pe_timeout * HZ)) {
4600 ioc_err(mrioc, "port enable failed due to timeout\n");
4601 mpi3mr_check_rh_fault_ioc(mrioc,
4602 MPI3MR_RESET_FROM_PE_TIMEOUT);
4603 mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR;
4604 mrioc->scan_started = 0;
4605 mrioc->init_cmds.is_waiting = 0;
4606 mrioc->init_cmds.callback = NULL;
4607 mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
4608 }
4609
4610 if (mrioc->scan_started)
4611 return 0;
4612
4613 if (mrioc->scan_failed) {
4614 ioc_err(mrioc,
4615 "port enable failed with status=0x%04x\n",
4616 mrioc->scan_failed);
4617 } else
4618 ioc_info(mrioc, "port enable completed successfully\n");
4619
4620 mpi3mr_start_watchdog(mrioc);
4621 mrioc->is_driver_loading = 0;
4622 mrioc->stop_bsgs = 0;
4623 return 1;
4624 }
4625
4626 /**
4627 * mpi3mr_sdev_destroy - Slave destroy callback handler
4628 * @sdev: SCSI device reference
4629 *
4630 * Cleanup and free per device(lun) private data.
4631 *
4632 * Return: Nothing.
4633 */
4634 static void mpi3mr_sdev_destroy(struct scsi_device *sdev)
4635 {
4636 struct Scsi_Host *shost;
4637 struct mpi3mr_ioc *mrioc;
4638 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data;
4639 struct mpi3mr_tgt_dev *tgt_dev = NULL;
4640 unsigned long flags;
4641 struct scsi_target *starget;
4642 struct sas_rphy *rphy = NULL;
4643
4644 if (!sdev->hostdata)
4645 return;
4646
4647 starget = scsi_target(sdev);
4648 shost = dev_to_shost(&starget->dev);
4649 mrioc = shost_priv(shost);
4650 scsi_tgt_priv_data = starget->hostdata;
4651
4652 scsi_tgt_priv_data->num_luns--;
4653
4654 spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
4655 if (starget->channel == mrioc->scsi_device_channel)
4656 tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id);
4657 else if (mrioc->sas_transport_enabled && !starget->channel) {
4658 rphy = dev_to_rphy(starget->dev.parent);
4659 tgt_dev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc,
4660 rphy->identify.sas_address, rphy);
4661 }
4662
4663 if (tgt_dev && (!scsi_tgt_priv_data->num_luns))
4664 tgt_dev->starget = NULL;
4665 if (tgt_dev)
4666 mpi3mr_tgtdev_put(tgt_dev);
4667 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
4668
4669 kfree(sdev->hostdata);
4670 sdev->hostdata = NULL;
4671 }
4672
4673 /**
4674 * mpi3mr_target_destroy - Target destroy callback handler
4675 * @starget: SCSI target reference
4676 *
4677 * Cleanup and free per target private data.
4678 *
4679 * Return: Nothing.
4680 */
4681 static void mpi3mr_target_destroy(struct scsi_target *starget)
4682 {
4683 struct Scsi_Host *shost;
4684 struct mpi3mr_ioc *mrioc;
4685 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data;
4686 struct mpi3mr_tgt_dev *tgt_dev;
4687 unsigned long flags;
4688
4689 if (!starget->hostdata)
4690 return;
4691
4692 shost = dev_to_shost(&starget->dev);
4693 mrioc = shost_priv(shost);
4694 scsi_tgt_priv_data = starget->hostdata;
4695
4696 spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
4697 tgt_dev = __mpi3mr_get_tgtdev_from_tgtpriv(mrioc, scsi_tgt_priv_data);
4698 if (tgt_dev && (tgt_dev->starget == starget) &&
4699 (tgt_dev->perst_id == starget->id))
4700 tgt_dev->starget = NULL;
4701 if (tgt_dev) {
4702 scsi_tgt_priv_data->tgt_dev = NULL;
4703 scsi_tgt_priv_data->perst_id = 0;
4704 mpi3mr_tgtdev_put(tgt_dev);
4705 mpi3mr_tgtdev_put(tgt_dev);
4706 }
4707 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
4708
4709 kfree(starget->hostdata);
4710 starget->hostdata = NULL;
4711 }
4712
4713 /**
4714 * mpi3mr_sdev_configure - Slave configure callback handler
4715 * @sdev: SCSI device reference
4716 * @lim: queue limits
4717 *
4718 * Configure queue depth, max hardware sectors and virt boundary
4719 * as required.
4720 *
4721 * Return: 0 on success, -ENXIO if the target device is not found.
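 *
 * The command timeout is also set up here: MPI3MR_SCMD_TIMEOUT for the
 * request queue and MPI3MR_EH_SCMD_TIMEOUT for error handling.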
4722 */
4723 static int mpi3mr_sdev_configure(struct scsi_device *sdev,
4724 struct queue_limits *lim)
4725 {
4726 struct scsi_target *starget;
4727 struct Scsi_Host *shost;
4728 struct mpi3mr_ioc *mrioc;
4729 struct mpi3mr_tgt_dev *tgt_dev = NULL;
4730 unsigned long flags;
4731 int retval = 0;
4732 struct sas_rphy *rphy = NULL;
4733
4734 starget = scsi_target(sdev);
4735 shost = dev_to_shost(&starget->dev);
4736 mrioc = shost_priv(shost);
4737
4738 spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
4739 if (starget->channel == mrioc->scsi_device_channel)
4740 tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id);
4741 else if (mrioc->sas_transport_enabled && !starget->channel) {
4742 rphy = dev_to_rphy(starget->dev.parent);
4743 tgt_dev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc,
4744 rphy->identify.sas_address, rphy);
4745 }
4746 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
4747 if (!tgt_dev)
4748 return -ENXIO;
4749
4750 mpi3mr_change_queue_depth(sdev, tgt_dev->q_depth);
4751
4752 sdev->eh_timeout = MPI3MR_EH_SCMD_TIMEOUT;
4753 blk_queue_rq_timeout(sdev->request_queue, MPI3MR_SCMD_TIMEOUT);
4754
4755 mpi3mr_configure_tgt_dev(tgt_dev, lim);
4756 mpi3mr_tgtdev_put(tgt_dev);
4757 return retval;
4758 }
4759
4760 /**
4761 * mpi3mr_sdev_init - Slave alloc callback handler
4762 * @sdev: SCSI device reference
4763 *
4764 * Allocate per device(lun) private data and initialize it.
4765 *
4766 * Return: 0 on success, -ENXIO if the target device is not found, -ENOMEM on memory allocation failure.
4767 */
4768 static int mpi3mr_sdev_init(struct scsi_device *sdev)
4769 {
4770 struct Scsi_Host *shost;
4771 struct mpi3mr_ioc *mrioc;
4772 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data;
4773 struct mpi3mr_tgt_dev *tgt_dev = NULL;
4774 struct mpi3mr_sdev_priv_data *scsi_dev_priv_data;
4775 unsigned long flags;
4776 struct scsi_target *starget;
4777 int retval = 0;
4778 struct sas_rphy *rphy = NULL;
4779
4780 starget = scsi_target(sdev);
4781 shost = dev_to_shost(&starget->dev);
4782 mrioc = shost_priv(shost);
4783 scsi_tgt_priv_data = starget->hostdata;
4784
4785 spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
4786
4787 if (starget->channel == mrioc->scsi_device_channel)
4788 tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id);
4789 else if (mrioc->sas_transport_enabled && !starget->channel) {
4790 rphy = dev_to_rphy(starget->dev.parent);
4791 tgt_dev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc,
4792 rphy->identify.sas_address, rphy);
4793 }
4794
4795 if (tgt_dev) {
4796 if (tgt_dev->starget == NULL)
4797 tgt_dev->starget = starget;
4798 mpi3mr_tgtdev_put(tgt_dev);
4799 retval = 0;
4800 } else {
4801 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
4802 return -ENXIO;
4803 }
4804
4805 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
4806
4807 scsi_dev_priv_data = kzalloc(sizeof(*scsi_dev_priv_data), GFP_KERNEL);
4808 if (!scsi_dev_priv_data)
4809 return -ENOMEM;
4810
4811 scsi_dev_priv_data->lun_id = sdev->lun;
4812 scsi_dev_priv_data->tgt_priv_data = scsi_tgt_priv_data;
4813 sdev->hostdata = scsi_dev_priv_data;
4814
4815 scsi_tgt_priv_data->num_luns++;
4816
4817 return retval;
4818 }
4819
4820 /**
4821 * mpi3mr_target_alloc - Target alloc callback handler
4822 * @starget: SCSI target reference
4823 *
4824 * Allocate per target private data and initialize it.
4825 *
4826 * Return: 0 on success, -ENXIO or -ENOMEM on failure.
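 *
 * Hidden devices and, on the SAS transport path, devices that are not
 * SAS/SATA end devices are rejected with -ENXIO.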
4827 */
4828 static int mpi3mr_target_alloc(struct scsi_target *starget)
4829 {
4830 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4831 struct mpi3mr_ioc *mrioc = shost_priv(shost);
4832 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data;
4833 struct mpi3mr_tgt_dev *tgt_dev;
4834 unsigned long flags;
4835 int retval = 0;
4836 struct sas_rphy *rphy = NULL;
4837
4838 scsi_tgt_priv_data = kzalloc(sizeof(*scsi_tgt_priv_data), GFP_KERNEL);
4839 if (!scsi_tgt_priv_data)
4840 return -ENOMEM;
4841
4842 starget->hostdata = scsi_tgt_priv_data;
4843
4844 spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
4845 if (starget->channel == mrioc->scsi_device_channel) {
4846 tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id);
4847 if (tgt_dev && !tgt_dev->is_hidden) {
4848 scsi_tgt_priv_data->starget = starget;
4849 scsi_tgt_priv_data->dev_handle = tgt_dev->dev_handle;
4850 scsi_tgt_priv_data->perst_id = tgt_dev->perst_id;
4851 scsi_tgt_priv_data->dev_type = tgt_dev->dev_type;
4852 scsi_tgt_priv_data->tgt_dev = tgt_dev;
4853 tgt_dev->starget = starget;
4854 atomic_set(&scsi_tgt_priv_data->block_io, 0);
4855 retval = 0;
4856 if ((tgt_dev->dev_type == MPI3_DEVICE_DEVFORM_PCIE) &&
4857 ((tgt_dev->dev_spec.pcie_inf.dev_info &
4858 MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) ==
4859 MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) &&
4860 ((tgt_dev->dev_spec.pcie_inf.dev_info &
4861 MPI3_DEVICE0_PCIE_DEVICE_INFO_PITYPE_MASK) !=
4862 MPI3_DEVICE0_PCIE_DEVICE_INFO_PITYPE_0))
4863 scsi_tgt_priv_data->dev_nvme_dif = 1;
4864 scsi_tgt_priv_data->io_throttle_enabled = tgt_dev->io_throttle_enabled;
4865 scsi_tgt_priv_data->wslen = tgt_dev->wslen;
4866 if (tgt_dev->dev_type == MPI3_DEVICE_DEVFORM_VD)
4867 scsi_tgt_priv_data->throttle_group = tgt_dev->dev_spec.vd_inf.tg;
4868 } else
4869 retval = -ENXIO;
4870 } else if (mrioc->sas_transport_enabled && !starget->channel) {
4871 rphy = dev_to_rphy(starget->dev.parent);
4872 tgt_dev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc,
4873 rphy->identify.sas_address, rphy);
4874 if (tgt_dev && !tgt_dev->is_hidden && !tgt_dev->non_stl &&
4875 (tgt_dev->dev_type == MPI3_DEVICE_DEVFORM_SAS_SATA)) {
4876 scsi_tgt_priv_data->starget = starget;
4877 scsi_tgt_priv_data->dev_handle = tgt_dev->dev_handle;
4878 scsi_tgt_priv_data->perst_id = tgt_dev->perst_id;
4879 scsi_tgt_priv_data->dev_type = tgt_dev->dev_type;
4880 scsi_tgt_priv_data->tgt_dev = tgt_dev;
4881 scsi_tgt_priv_data->io_throttle_enabled = tgt_dev->io_throttle_enabled;
4882 scsi_tgt_priv_data->wslen = tgt_dev->wslen;
4883 tgt_dev->starget = starget;
4884 atomic_set(&scsi_tgt_priv_data->block_io, 0);
4885 retval = 0;
4886 } else
4887 retval = -ENXIO;
4888 }
4889 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
4890
4891 return retval;
4892 }
4893
4894 /**
4895 * mpi3mr_check_return_unmap - Whether an unmap is allowed
4896 * @mrioc: Adapter instance reference
4897 * @scmd: SCSI Command reference
4898 *
4899 * The controller hardware cannot handle certain unmap commands
4900 * for NVMe drives; this routine checks for those, completes the
4901 * SCSI command with proper status and sense data where needed,
4902 * and returns true for the disallowed ones.
4903 *
4904 * Return: TRUE for not allowed unmap, FALSE otherwise.
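 *
 * For example, on a controller with a non-zero PCI revision, an UNMAP
 * carrying a parameter list length of 30 is truncated to 24 bytes
 * ((30 - 8) & 0xF == 6) and then sent on to the firmware.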
4905 */ 4906 static bool mpi3mr_check_return_unmap(struct mpi3mr_ioc *mrioc, 4907 struct scsi_cmnd *scmd) 4908 { 4909 unsigned char *buf; 4910 u16 param_len, desc_len, trunc_param_len; 4911 4912 trunc_param_len = param_len = get_unaligned_be16(scmd->cmnd + 7); 4913 4914 if (mrioc->pdev->revision) { 4915 if ((param_len > 24) && ((param_len - 8) & 0xF)) { 4916 trunc_param_len -= (param_len - 8) & 0xF; 4917 dprint_scsi_command(mrioc, scmd, MPI3_DEBUG_SCSI_ERROR); 4918 dprint_scsi_err(mrioc, 4919 "truncating param_len from (%d) to (%d)\n", 4920 param_len, trunc_param_len); 4921 put_unaligned_be16(trunc_param_len, scmd->cmnd + 7); 4922 dprint_scsi_command(mrioc, scmd, MPI3_DEBUG_SCSI_ERROR); 4923 } 4924 return false; 4925 } 4926 4927 if (!param_len) { 4928 ioc_warn(mrioc, 4929 "%s: cdb received with zero parameter length\n", 4930 __func__); 4931 scsi_print_command(scmd); 4932 scmd->result = DID_OK << 16; 4933 scsi_done(scmd); 4934 return true; 4935 } 4936 4937 if (param_len < 24) { 4938 ioc_warn(mrioc, 4939 "%s: cdb received with invalid param_len: %d\n", 4940 __func__, param_len); 4941 scsi_print_command(scmd); 4942 scmd->result = SAM_STAT_CHECK_CONDITION; 4943 scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 4944 0x1A, 0); 4945 scsi_done(scmd); 4946 return true; 4947 } 4948 if (param_len != scsi_bufflen(scmd)) { 4949 ioc_warn(mrioc, 4950 "%s: cdb received with param_len: %d bufflen: %d\n", 4951 __func__, param_len, scsi_bufflen(scmd)); 4952 scsi_print_command(scmd); 4953 scmd->result = SAM_STAT_CHECK_CONDITION; 4954 scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 4955 0x1A, 0); 4956 scsi_done(scmd); 4957 return true; 4958 } 4959 buf = kzalloc(scsi_bufflen(scmd), GFP_ATOMIC); 4960 if (!buf) { 4961 scsi_print_command(scmd); 4962 scmd->result = SAM_STAT_CHECK_CONDITION; 4963 scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 4964 0x55, 0x03); 4965 scsi_done(scmd); 4966 return true; 4967 } 4968 scsi_sg_copy_to_buffer(scmd, buf, scsi_bufflen(scmd)); 4969 desc_len = get_unaligned_be16(&buf[2]); 4970 4971 if (desc_len < 16) { 4972 ioc_warn(mrioc, 4973 "%s: Invalid descriptor length in param list: %d\n", 4974 __func__, desc_len); 4975 scsi_print_command(scmd); 4976 scmd->result = SAM_STAT_CHECK_CONDITION; 4977 scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 4978 0x26, 0); 4979 scsi_done(scmd); 4980 kfree(buf); 4981 return true; 4982 } 4983 4984 if (param_len > (desc_len + 8)) { 4985 trunc_param_len = desc_len + 8; 4986 scsi_print_command(scmd); 4987 dprint_scsi_err(mrioc, 4988 "truncating param_len(%d) to desc_len+8(%d)\n", 4989 param_len, trunc_param_len); 4990 put_unaligned_be16(trunc_param_len, scmd->cmnd + 7); 4991 scsi_print_command(scmd); 4992 } 4993 4994 kfree(buf); 4995 return false; 4996 } 4997 4998 /** 4999 * mpi3mr_allow_scmd_to_fw - Command is allowed during shutdown 5000 * @scmd: SCSI Command reference 5001 * 5002 * Checks whether a cdb is allowed during shutdown or not. 5003 * 5004 * Return: TRUE for allowed commands, FALSE otherwise. 5005 */ 5006 5007 inline bool mpi3mr_allow_scmd_to_fw(struct scsi_cmnd *scmd) 5008 { 5009 switch (scmd->cmnd[0]) { 5010 case SYNCHRONIZE_CACHE: 5011 case START_STOP: 5012 return true; 5013 default: 5014 return false; 5015 } 5016 } 5017 5018 /** 5019 * mpi3mr_qcmd - I/O request despatcher 5020 * @shost: SCSI Host reference 5021 * @scmd: SCSI Command reference 5022 * 5023 * Issues the SCSI Command as an MPI3 request. 
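 *
 * The fast path validates the controller and device state, builds the
 * MPI3 SCSI IO request (EEDP settings, CDB, SGLs), applies the large
 * I/O throttling and divert-to-firmware decisions, and posts the
 * request to the operational request queue picked from the block layer
 * hardware queue index.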
5024 * 5025 * Return: 0 on successful queueing of the request or if the 5026 * request is completed with failure. 5027 * SCSI_MLQUEUE_DEVICE_BUSY when the device is busy. 5028 * SCSI_MLQUEUE_HOST_BUSY when the host queue is full. 5029 */ 5030 static int mpi3mr_qcmd(struct Scsi_Host *shost, 5031 struct scsi_cmnd *scmd) 5032 { 5033 struct mpi3mr_ioc *mrioc = shost_priv(shost); 5034 struct mpi3mr_stgt_priv_data *stgt_priv_data; 5035 struct mpi3mr_sdev_priv_data *sdev_priv_data; 5036 struct scmd_priv *scmd_priv_data = NULL; 5037 struct mpi3_scsi_io_request *scsiio_req = NULL; 5038 struct op_req_qinfo *op_req_q = NULL; 5039 int retval = 0; 5040 u16 dev_handle; 5041 u16 host_tag; 5042 u32 scsiio_flags = 0, data_len_blks = 0; 5043 struct request *rq = scsi_cmd_to_rq(scmd); 5044 int iprio_class; 5045 u8 is_pcie_dev = 0; 5046 u32 tracked_io_sz = 0; 5047 u32 ioc_pend_data_len = 0, tg_pend_data_len = 0; 5048 struct mpi3mr_throttle_group_info *tg = NULL; 5049 5050 if (mrioc->unrecoverable) { 5051 scmd->result = DID_ERROR << 16; 5052 scsi_done(scmd); 5053 goto out; 5054 } 5055 5056 sdev_priv_data = scmd->device->hostdata; 5057 if (!sdev_priv_data || !sdev_priv_data->tgt_priv_data) { 5058 scmd->result = DID_NO_CONNECT << 16; 5059 scsi_done(scmd); 5060 goto out; 5061 } 5062 5063 if (mrioc->stop_drv_processing && 5064 !(mpi3mr_allow_scmd_to_fw(scmd))) { 5065 scmd->result = DID_NO_CONNECT << 16; 5066 scsi_done(scmd); 5067 goto out; 5068 } 5069 5070 stgt_priv_data = sdev_priv_data->tgt_priv_data; 5071 dev_handle = stgt_priv_data->dev_handle; 5072 5073 /* Avoid error handling escalation when device is removed or blocked */ 5074 5075 if (scmd->device->host->shost_state == SHOST_RECOVERY && 5076 scmd->cmnd[0] == TEST_UNIT_READY && 5077 (stgt_priv_data->dev_removed || (dev_handle == MPI3MR_INVALID_DEV_HANDLE))) { 5078 scsi_build_sense(scmd, 0, UNIT_ATTENTION, 0x29, 0x07); 5079 scsi_done(scmd); 5080 goto out; 5081 } 5082 5083 if (mrioc->reset_in_progress || mrioc->prepare_for_reset 5084 || mrioc->block_on_pci_err) { 5085 retval = SCSI_MLQUEUE_HOST_BUSY; 5086 goto out; 5087 } 5088 5089 if (atomic_read(&stgt_priv_data->block_io)) { 5090 if (mrioc->stop_drv_processing) { 5091 scmd->result = DID_NO_CONNECT << 16; 5092 scsi_done(scmd); 5093 goto out; 5094 } 5095 retval = SCSI_MLQUEUE_DEVICE_BUSY; 5096 goto out; 5097 } 5098 5099 if (dev_handle == MPI3MR_INVALID_DEV_HANDLE) { 5100 scmd->result = DID_NO_CONNECT << 16; 5101 scsi_done(scmd); 5102 goto out; 5103 } 5104 if (stgt_priv_data->dev_removed) { 5105 scmd->result = DID_NO_CONNECT << 16; 5106 scsi_done(scmd); 5107 goto out; 5108 } 5109 5110 if (stgt_priv_data->dev_type == MPI3_DEVICE_DEVFORM_PCIE) 5111 is_pcie_dev = 1; 5112 if ((scmd->cmnd[0] == UNMAP) && is_pcie_dev && 5113 (mrioc->pdev->device == MPI3_MFGPAGE_DEVID_SAS4116) && 5114 mpi3mr_check_return_unmap(mrioc, scmd)) 5115 goto out; 5116 5117 host_tag = mpi3mr_host_tag_for_scmd(mrioc, scmd); 5118 if (host_tag == MPI3MR_HOSTTAG_INVALID) { 5119 scmd->result = DID_ERROR << 16; 5120 scsi_done(scmd); 5121 goto out; 5122 } 5123 5124 if (scmd->sc_data_direction == DMA_FROM_DEVICE) 5125 scsiio_flags = MPI3_SCSIIO_FLAGS_DATADIRECTION_READ; 5126 else if (scmd->sc_data_direction == DMA_TO_DEVICE) 5127 scsiio_flags = MPI3_SCSIIO_FLAGS_DATADIRECTION_WRITE; 5128 else 5129 scsiio_flags = MPI3_SCSIIO_FLAGS_DATADIRECTION_NO_DATA_TRANSFER; 5130 5131 scsiio_flags |= MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_SIMPLEQ; 5132 5133 if (sdev_priv_data->ncq_prio_enable) { 5134 iprio_class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq)); 5135 if 
(iprio_class == IOPRIO_CLASS_RT) 5136 scsiio_flags |= 1 << MPI3_SCSIIO_FLAGS_CMDPRI_SHIFT; 5137 } 5138 5139 if (scmd->cmd_len > 16) 5140 scsiio_flags |= MPI3_SCSIIO_FLAGS_CDB_GREATER_THAN_16; 5141 5142 scmd_priv_data = scsi_cmd_priv(scmd); 5143 memset(scmd_priv_data->mpi3mr_scsiio_req, 0, MPI3MR_ADMIN_REQ_FRAME_SZ); 5144 scsiio_req = (struct mpi3_scsi_io_request *)scmd_priv_data->mpi3mr_scsiio_req; 5145 scsiio_req->function = MPI3_FUNCTION_SCSI_IO; 5146 scsiio_req->host_tag = cpu_to_le16(host_tag); 5147 5148 mpi3mr_setup_eedp(mrioc, scmd, scsiio_req); 5149 5150 if (stgt_priv_data->wslen) 5151 mpi3mr_setup_divert_ws(mrioc, scmd, scsiio_req, &scsiio_flags, 5152 stgt_priv_data->wslen); 5153 5154 memcpy(scsiio_req->cdb.cdb32, scmd->cmnd, scmd->cmd_len); 5155 scsiio_req->data_length = cpu_to_le32(scsi_bufflen(scmd)); 5156 scsiio_req->dev_handle = cpu_to_le16(dev_handle); 5157 scsiio_req->flags = cpu_to_le32(scsiio_flags); 5158 int_to_scsilun(sdev_priv_data->lun_id, 5159 (struct scsi_lun *)scsiio_req->lun); 5160 5161 if (mpi3mr_build_sg_scmd(mrioc, scmd, scsiio_req)) { 5162 mpi3mr_clear_scmd_priv(mrioc, scmd); 5163 retval = SCSI_MLQUEUE_HOST_BUSY; 5164 goto out; 5165 } 5166 op_req_q = &mrioc->req_qinfo[scmd_priv_data->req_q_idx]; 5167 data_len_blks = scsi_bufflen(scmd) >> 9; 5168 if ((data_len_blks >= mrioc->io_throttle_data_length) && 5169 stgt_priv_data->io_throttle_enabled) { 5170 tracked_io_sz = data_len_blks; 5171 tg = stgt_priv_data->throttle_group; 5172 if (tg) { 5173 ioc_pend_data_len = atomic_add_return(data_len_blks, 5174 &mrioc->pend_large_data_sz); 5175 tg_pend_data_len = atomic_add_return(data_len_blks, 5176 &tg->pend_large_data_sz); 5177 if (!tg->io_divert && ((ioc_pend_data_len >= 5178 mrioc->io_throttle_high) || 5179 (tg_pend_data_len >= tg->high))) { 5180 tg->io_divert = 1; 5181 tg->need_qd_reduction = 1; 5182 mpi3mr_set_io_divert_for_all_vd_in_tg(mrioc, 5183 tg, 1); 5184 mpi3mr_queue_qd_reduction_event(mrioc, tg); 5185 } 5186 } else { 5187 ioc_pend_data_len = atomic_add_return(data_len_blks, 5188 &mrioc->pend_large_data_sz); 5189 if (ioc_pend_data_len >= mrioc->io_throttle_high) 5190 stgt_priv_data->io_divert = 1; 5191 } 5192 } 5193 5194 if (stgt_priv_data->io_divert) { 5195 scsiio_req->msg_flags |= 5196 MPI3_SCSIIO_MSGFLAGS_DIVERT_TO_FIRMWARE; 5197 scsiio_flags |= MPI3_SCSIIO_FLAGS_DIVERT_REASON_IO_THROTTLING; 5198 } 5199 scsiio_req->flags |= cpu_to_le32(scsiio_flags); 5200 5201 if (mpi3mr_op_request_post(mrioc, op_req_q, 5202 scmd_priv_data->mpi3mr_scsiio_req)) { 5203 mpi3mr_clear_scmd_priv(mrioc, scmd); 5204 retval = SCSI_MLQUEUE_HOST_BUSY; 5205 if (tracked_io_sz) { 5206 atomic_sub(tracked_io_sz, &mrioc->pend_large_data_sz); 5207 if (tg) 5208 atomic_sub(tracked_io_sz, 5209 &tg->pend_large_data_sz); 5210 } 5211 goto out; 5212 } 5213 5214 out: 5215 return retval; 5216 } 5217 5218 static const struct scsi_host_template mpi3mr_driver_template = { 5219 .module = THIS_MODULE, 5220 .name = "MPI3 Storage Controller", 5221 .proc_name = MPI3MR_DRIVER_NAME, 5222 .queuecommand = mpi3mr_qcmd, 5223 .target_alloc = mpi3mr_target_alloc, 5224 .sdev_init = mpi3mr_sdev_init, 5225 .sdev_configure = mpi3mr_sdev_configure, 5226 .target_destroy = mpi3mr_target_destroy, 5227 .sdev_destroy = mpi3mr_sdev_destroy, 5228 .scan_finished = mpi3mr_scan_finished, 5229 .scan_start = mpi3mr_scan_start, 5230 .change_queue_depth = mpi3mr_change_queue_depth, 5231 .eh_abort_handler = mpi3mr_eh_abort, 5232 .eh_device_reset_handler = mpi3mr_eh_dev_reset, 5233 .eh_target_reset_handler = mpi3mr_eh_target_reset, 5234 
.eh_bus_reset_handler = mpi3mr_eh_bus_reset, 5235 .eh_host_reset_handler = mpi3mr_eh_host_reset, 5236 .bios_param = mpi3mr_bios_param, 5237 .map_queues = mpi3mr_map_queues, 5238 .mq_poll = mpi3mr_blk_mq_poll, 5239 .no_write_same = 1, 5240 .can_queue = 1, 5241 .this_id = -1, 5242 .sg_tablesize = MPI3MR_DEFAULT_SGL_ENTRIES, 5243 /* max xfer supported is 1M (2K in 512 byte sized sectors) 5244 */ 5245 .max_sectors = (MPI3MR_DEFAULT_MAX_IO_SIZE / 512), 5246 .cmd_per_lun = MPI3MR_MAX_CMDS_LUN, 5247 .max_segment_size = 0xffffffff, 5248 .track_queue_depth = 1, 5249 .cmd_size = sizeof(struct scmd_priv), 5250 .shost_groups = mpi3mr_host_groups, 5251 .sdev_groups = mpi3mr_dev_groups, 5252 }; 5253 5254 /** 5255 * mpi3mr_init_drv_cmd - Initialize internal command tracker 5256 * @cmdptr: Internal command tracker 5257 * @host_tag: Host tag used for the specific command 5258 * 5259 * Initialize the internal command tracker structure with 5260 * specified host tag. 5261 * 5262 * Return: Nothing. 5263 */ 5264 static inline void mpi3mr_init_drv_cmd(struct mpi3mr_drv_cmd *cmdptr, 5265 u16 host_tag) 5266 { 5267 mutex_init(&cmdptr->mutex); 5268 cmdptr->reply = NULL; 5269 cmdptr->state = MPI3MR_CMD_NOTUSED; 5270 cmdptr->dev_handle = MPI3MR_INVALID_DEV_HANDLE; 5271 cmdptr->host_tag = host_tag; 5272 } 5273 5274 /** 5275 * osintfc_mrioc_security_status -Check controller secure status 5276 * @pdev: PCI device instance 5277 * 5278 * Read the Device Serial Number capability from PCI config 5279 * space and decide whether the controller is secure or not. 5280 * 5281 * Return: 0 on success, non-zero on failure. 5282 */ 5283 static int 5284 osintfc_mrioc_security_status(struct pci_dev *pdev) 5285 { 5286 u32 cap_data; 5287 int base; 5288 u32 ctlr_status; 5289 u32 debug_status; 5290 int retval = 0; 5291 5292 base = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN); 5293 if (!base) { 5294 dev_err(&pdev->dev, 5295 "%s: PCI_EXT_CAP_ID_DSN is not supported\n", __func__); 5296 return -1; 5297 } 5298 5299 pci_read_config_dword(pdev, base + 4, &cap_data); 5300 5301 debug_status = cap_data & MPI3MR_CTLR_SECURE_DBG_STATUS_MASK; 5302 ctlr_status = cap_data & MPI3MR_CTLR_SECURITY_STATUS_MASK; 5303 5304 switch (ctlr_status) { 5305 case MPI3MR_INVALID_DEVICE: 5306 dev_err(&pdev->dev, 5307 "%s: Non secure ctlr (Invalid) is detected: DID: 0x%x: SVID: 0x%x: SDID: 0x%x\n", 5308 __func__, pdev->device, pdev->subsystem_vendor, 5309 pdev->subsystem_device); 5310 retval = -1; 5311 break; 5312 case MPI3MR_CONFIG_SECURE_DEVICE: 5313 if (!debug_status) 5314 dev_info(&pdev->dev, 5315 "%s: Config secure ctlr is detected\n", 5316 __func__); 5317 break; 5318 case MPI3MR_HARD_SECURE_DEVICE: 5319 break; 5320 case MPI3MR_TAMPERED_DEVICE: 5321 dev_err(&pdev->dev, 5322 "%s: Non secure ctlr (Tampered) is detected: DID: 0x%x: SVID: 0x%x: SDID: 0x%x\n", 5323 __func__, pdev->device, pdev->subsystem_vendor, 5324 pdev->subsystem_device); 5325 retval = -1; 5326 break; 5327 default: 5328 retval = -1; 5329 break; 5330 } 5331 5332 if (!retval && debug_status) { 5333 dev_err(&pdev->dev, 5334 "%s: Non secure ctlr (Secure Dbg) is detected: DID: 0x%x: SVID: 0x%x: SDID: 0x%x\n", 5335 __func__, pdev->device, pdev->subsystem_vendor, 5336 pdev->subsystem_device); 5337 retval = -1; 5338 } 5339 5340 return retval; 5341 } 5342 5343 /** 5344 * mpi3mr_probe - PCI probe callback 5345 * @pdev: PCI device instance 5346 * @id: PCI device ID details 5347 * 5348 * controller initialization routine. 
Checks the security status
5349 * of the controller and, if it is invalid or tampered, returns from
5350 * the probe without initializing the controller. Otherwise,
5351 * allocates the per adapter instance through shost_priv and
5352 * initializes controller specific data structures, initializes
5353 * the controller hardware and adds the shost to the SCSI subsystem.
5354 *
5355 * Return: 0 on success, non-zero on failure.
5356 */
5357
5358 static int
5359 mpi3mr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
5360 {
5361 struct mpi3mr_ioc *mrioc = NULL;
5362 struct Scsi_Host *shost = NULL;
5363 int retval = 0, i;
5364
5365 if (osintfc_mrioc_security_status(pdev)) {
5366 warn_non_secure_ctlr = 1;
5367 return 1; /* For Invalid and Tampered device */
5368 }
5369
5370 shost = scsi_host_alloc(&mpi3mr_driver_template,
5371 sizeof(struct mpi3mr_ioc));
5372 if (!shost) {
5373 retval = -ENODEV;
5374 goto shost_failed;
5375 }
5376
5377 mrioc = shost_priv(shost);
5378 retval = ida_alloc_range(&mrioc_ida, 0, U8_MAX, GFP_KERNEL);
5379 if (retval < 0)
5380 goto id_alloc_failed;
5381 mrioc->id = (u8)retval;
5382 sprintf(mrioc->driver_name, "%s", MPI3MR_DRIVER_NAME);
5383 sprintf(mrioc->name, "%s%d", mrioc->driver_name, mrioc->id);
5384 INIT_LIST_HEAD(&mrioc->list);
5385 spin_lock(&mrioc_list_lock);
5386 list_add_tail(&mrioc->list, &mrioc_list);
5387 spin_unlock(&mrioc_list_lock);
5388
5389 spin_lock_init(&mrioc->admin_req_lock);
5390 spin_lock_init(&mrioc->reply_free_queue_lock);
5391 spin_lock_init(&mrioc->sbq_lock);
5392 spin_lock_init(&mrioc->fwevt_lock);
5393 spin_lock_init(&mrioc->tgtdev_lock);
5394 spin_lock_init(&mrioc->watchdog_lock);
5395 spin_lock_init(&mrioc->chain_buf_lock);
5396 spin_lock_init(&mrioc->adm_req_q_bar_writeq_lock);
5397 spin_lock_init(&mrioc->adm_reply_q_bar_writeq_lock);
5398 spin_lock_init(&mrioc->sas_node_lock);
5399 spin_lock_init(&mrioc->trigger_lock);
5400
5401 INIT_LIST_HEAD(&mrioc->fwevt_list);
5402 INIT_LIST_HEAD(&mrioc->tgtdev_list);
5403 INIT_LIST_HEAD(&mrioc->delayed_rmhs_list);
5404 INIT_LIST_HEAD(&mrioc->delayed_evtack_cmds_list);
5405 INIT_LIST_HEAD(&mrioc->sas_expander_list);
5406 INIT_LIST_HEAD(&mrioc->hba_port_table_list);
5407 INIT_LIST_HEAD(&mrioc->enclosure_list);
5408
5409 mutex_init(&mrioc->reset_mutex);
5410 mpi3mr_init_drv_cmd(&mrioc->init_cmds, MPI3MR_HOSTTAG_INITCMDS);
5411 mpi3mr_init_drv_cmd(&mrioc->host_tm_cmds, MPI3MR_HOSTTAG_BLK_TMS);
5412 mpi3mr_init_drv_cmd(&mrioc->bsg_cmds, MPI3MR_HOSTTAG_BSG_CMDS);
5413 mpi3mr_init_drv_cmd(&mrioc->cfg_cmds, MPI3MR_HOSTTAG_CFG_CMDS);
5414 mpi3mr_init_drv_cmd(&mrioc->transport_cmds,
5415 MPI3MR_HOSTTAG_TRANSPORT_CMDS);
5416
5417 for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++)
5418 mpi3mr_init_drv_cmd(&mrioc->dev_rmhs_cmds[i],
5419 MPI3MR_HOSTTAG_DEVRMCMD_MIN + i);
5420
5421 for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++)
5422 mpi3mr_init_drv_cmd(&mrioc->evtack_cmds[i],
5423 MPI3MR_HOSTTAG_EVTACKCMD_MIN + i);
5424
5425 if ((pdev->device == MPI3_MFGPAGE_DEVID_SAS4116) &&
5426 !pdev->revision)
5427 mrioc->enable_segqueue = false;
5428 else
5429 mrioc->enable_segqueue = true;
5430
5431 init_waitqueue_head(&mrioc->reset_waitq);
5432 mrioc->logging_level = logging_level;
5433 mrioc->shost = shost;
5434 mrioc->pdev = pdev;
5435 mrioc->stop_bsgs = 1;
5436
5437 mrioc->max_sgl_entries = max_sgl_entries;
5438 if (max_sgl_entries > MPI3MR_MAX_SGL_ENTRIES)
5439 mrioc->max_sgl_entries = MPI3MR_MAX_SGL_ENTRIES;
5440 else if (max_sgl_entries < MPI3MR_DEFAULT_SGL_ENTRIES)
5441 mrioc->max_sgl_entries = MPI3MR_DEFAULT_SGL_ENTRIES;
5442 else {
		mrioc->max_sgl_entries /= MPI3MR_DEFAULT_SGL_ENTRIES;
		mrioc->max_sgl_entries *= MPI3MR_DEFAULT_SGL_ENTRIES;
	}

	/* init shost parameters */
	shost->max_cmd_len = MPI3MR_MAX_CDB_LENGTH;
	shost->max_lun = -1;
	shost->unique_id = mrioc->id;

	shost->max_channel = 0;
	shost->max_id = 0xFFFFFFFF;

	shost->host_tagset = 1;

	if (prot_mask >= 0)
		scsi_host_set_prot(shost, prot_mask);
	else {
		prot_mask = SHOST_DIF_TYPE1_PROTECTION
		    | SHOST_DIF_TYPE2_PROTECTION
		    | SHOST_DIF_TYPE3_PROTECTION;
		scsi_host_set_prot(shost, prot_mask);
	}

	ioc_info(mrioc,
	    "%s :host protection capabilities enabled %s%s%s%s%s%s%s\n",
	    __func__,
	    (prot_mask & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
	    (prot_mask & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
	    (prot_mask & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
	    (prot_mask & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
	    (prot_mask & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
	    (prot_mask & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
	    (prot_mask & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");

	if (prot_guard_mask)
		scsi_host_set_guard(shost, (prot_guard_mask & 3));
	else
		scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);

	mrioc->fwevt_worker_thread = alloc_ordered_workqueue(
	    "%s%d_fwevt_wrkr", 0, mrioc->driver_name, mrioc->id);
	if (!mrioc->fwevt_worker_thread) {
		ioc_err(mrioc, "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		retval = -ENODEV;
		goto fwevtthread_failed;
	}

	mrioc->is_driver_loading = 1;
	mrioc->cpu_count = num_online_cpus();
	if (mpi3mr_setup_resources(mrioc)) {
		ioc_err(mrioc, "setup resources failed\n");
		retval = -ENODEV;
		goto resource_alloc_failed;
	}
	if (mpi3mr_init_ioc(mrioc)) {
		ioc_err(mrioc, "initializing IOC failed\n");
		retval = -ENODEV;
		goto init_ioc_failed;
	}

	shost->nr_hw_queues = mrioc->num_op_reply_q;
	/* three queue maps (default, read and poll) when poll queues exist */
	if (mrioc->active_poll_qcount)
		shost->nr_maps = 3;

	shost->can_queue = mrioc->max_host_ios;
	shost->sg_tablesize = mrioc->max_sgl_entries;
	shost->max_id = mrioc->facts.max_perids + 1;

	retval = scsi_add_host(shost, &pdev->dev);
	if (retval) {
		ioc_err(mrioc, "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		goto addhost_failed;
	}

	scsi_scan_host(shost);
	mpi3mr_bsg_init(mrioc);
	return retval;

addhost_failed:
	mpi3mr_stop_watchdog(mrioc);
	mpi3mr_cleanup_ioc(mrioc);
init_ioc_failed:
	mpi3mr_free_mem(mrioc);
	mpi3mr_cleanup_resources(mrioc);
resource_alloc_failed:
	destroy_workqueue(mrioc->fwevt_worker_thread);
fwevtthread_failed:
	ida_free(&mrioc_ida, mrioc->id);
	spin_lock(&mrioc_list_lock);
	list_del(&mrioc->list);
	spin_unlock(&mrioc_list_lock);
id_alloc_failed:
	scsi_host_put(shost);
shost_failed:
	return retval;
}
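/*
 * Note: the error labels in mpi3mr_probe() unwind strictly in reverse
 * order of setup: IOC teardown, driver memory, PCI resources, the
 * firmware event workqueue, the IDA id and ioc-list entry, and finally
 * the shost reference taken by scsi_host_alloc().
 */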
/**
 * mpi3mr_remove - PCI remove callback
 * @pdev: PCI device instance
 *
 * Clean up the IOC by issuing MUR and shutdown notification.
 * Free up all memory and resources associated with the
 * controller and target devices, and unregister the shost.
 *
 * Return: Nothing.
 */
static void mpi3mr_remove(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct mpi3mr_ioc *mrioc;
	struct workqueue_struct *wq;
	unsigned long flags;
	struct mpi3mr_tgt_dev *tgtdev, *tgtdev_next;
	struct mpi3mr_hba_port *port, *hba_port_next;
	struct mpi3mr_sas_node *sas_expander, *sas_expander_next;

	if (!shost)
		return;

	mrioc = shost_priv(shost);
	while (mrioc->reset_in_progress || mrioc->is_driver_loading)
		ssleep(1);

	if (mrioc->block_on_pci_err) {
		mrioc->block_on_pci_err = false;
		scsi_unblock_requests(shost);
		mrioc->unrecoverable = 1;
	}

	if (!pci_device_is_present(mrioc->pdev) ||
	    mrioc->pci_err_recovery) {
		mrioc->unrecoverable = 1;
		mpi3mr_flush_cmds_for_unrecovered_controller(mrioc);
	}

	mpi3mr_bsg_exit(mrioc);
	mrioc->stop_drv_processing = 1;
	mpi3mr_cleanup_fwevt_list(mrioc);
	spin_lock_irqsave(&mrioc->fwevt_lock, flags);
	wq = mrioc->fwevt_worker_thread;
	mrioc->fwevt_worker_thread = NULL;
	spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);
	if (wq)
		destroy_workqueue(wq);

	if (mrioc->sas_transport_enabled)
		sas_remove_host(shost);
	else
		scsi_remove_host(shost);

	list_for_each_entry_safe(tgtdev, tgtdev_next, &mrioc->tgtdev_list,
	    list) {
		mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
		mpi3mr_tgtdev_del_from_list(mrioc, tgtdev, true);
		mpi3mr_tgtdev_put(tgtdev);
	}
	mpi3mr_stop_watchdog(mrioc);
	mpi3mr_cleanup_ioc(mrioc);
	mpi3mr_free_mem(mrioc);
	mpi3mr_cleanup_resources(mrioc);

	spin_lock_irqsave(&mrioc->sas_node_lock, flags);
	list_for_each_entry_safe_reverse(sas_expander, sas_expander_next,
	    &mrioc->sas_expander_list, list) {
		/* expander removal cannot run under the spinlock, so
		 * drop it across the call and reacquire afterwards
		 */
		spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
		mpi3mr_expander_node_remove(mrioc, sas_expander);
		spin_lock_irqsave(&mrioc->sas_node_lock, flags);
	}
	list_for_each_entry_safe(port, hba_port_next, &mrioc->hba_port_table_list, list) {
		ioc_info(mrioc,
		    "removing hba_port entry: %p port: %d from hba_port list\n",
		    port, port->port_id);
		list_del(&port->list);
		kfree(port);
	}
	spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);

	if (mrioc->sas_hba.num_phys) {
		kfree(mrioc->sas_hba.phy);
		mrioc->sas_hba.phy = NULL;
		mrioc->sas_hba.num_phys = 0;
	}

	ida_free(&mrioc_ida, mrioc->id);
	spin_lock(&mrioc_list_lock);
	list_del(&mrioc->list);
	spin_unlock(&mrioc_list_lock);

	scsi_host_put(shost);
}
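/*
 * The shutdown path below is a lighter-weight variant of remove: it
 * quiesces the controller and releases resources, but does not
 * unregister the shost or free the per-adapter memory, since the
 * system is going down anyway.
 */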
/**
 * mpi3mr_shutdown - PCI shutdown callback
 * @pdev: PCI device instance
 *
 * Free up all memory and resources associated with the
 * controller.
 *
 * Return: Nothing.
 */
static void mpi3mr_shutdown(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct mpi3mr_ioc *mrioc;
	struct workqueue_struct *wq;
	unsigned long flags;

	if (!shost)
		return;

	mrioc = shost_priv(shost);
	while (mrioc->reset_in_progress || mrioc->is_driver_loading)
		ssleep(1);

	mrioc->stop_drv_processing = 1;
	mpi3mr_cleanup_fwevt_list(mrioc);
	spin_lock_irqsave(&mrioc->fwevt_lock, flags);
	wq = mrioc->fwevt_worker_thread;
	mrioc->fwevt_worker_thread = NULL;
	spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);
	if (wq)
		destroy_workqueue(wq);

	mpi3mr_stop_watchdog(mrioc);
	mpi3mr_cleanup_ioc(mrioc);
	mpi3mr_cleanup_resources(mrioc);
}

/**
 * mpi3mr_suspend - PCI power management suspend callback
 * @dev: Device struct
 *
 * Change the power state to the given value and clean up the IOC
 * by issuing MUR and shutdown notification.
 *
 * Return: 0 always.
 */
static int __maybe_unused
mpi3mr_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct mpi3mr_ioc *mrioc;

	if (!shost)
		return 0;

	mrioc = shost_priv(shost);
	while (mrioc->reset_in_progress || mrioc->is_driver_loading)
		ssleep(1);
	mrioc->stop_drv_processing = 1;
	mpi3mr_cleanup_fwevt_list(mrioc);
	scsi_block_requests(shost);
	mpi3mr_stop_watchdog(mrioc);
	mpi3mr_cleanup_ioc(mrioc);

	ioc_info(mrioc, "pdev=0x%p, slot=%s, entering operating state\n",
	    pdev, pci_name(pdev));
	mpi3mr_cleanup_resources(mrioc);

	return 0;
}

/**
 * mpi3mr_resume - PCI power management resume callback
 * @dev: Device struct
 *
 * Restore the power state to D0, reinitialize the controller
 * and resume I/O operations to the target devices.
 *
 * Return: 0 on success, non-zero on failure.
 */
static int __maybe_unused
mpi3mr_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct mpi3mr_ioc *mrioc;
	pci_power_t device_state = pdev->current_state;
	int r;

	if (!shost)
		return 0;

	mrioc = shost_priv(shost);

	ioc_info(mrioc, "pdev=0x%p, slot=%s, previous operating state [D%d]\n",
	    pdev, pci_name(pdev), device_state);
	mrioc->pdev = pdev;
	mrioc->cpu_count = num_online_cpus();
	r = mpi3mr_setup_resources(mrioc);
	if (r) {
		ioc_info(mrioc, "%s: Setup resources failed[%d]\n",
		    __func__, r);
		return r;
	}

	mrioc->stop_drv_processing = 0;
	mpi3mr_invalidate_devhandles(mrioc);
	mpi3mr_free_enclosure_list(mrioc);
	mpi3mr_memset_buffers(mrioc);
	r = mpi3mr_reinit_ioc(mrioc, 1);
	if (r) {
		ioc_err(mrioc, "resuming controller failed[%d]\n", r);
		return r;
	}
	ssleep(MPI3MR_RESET_TOPOLOGY_SETTLE_TIME);
	scsi_unblock_requests(shost);
	mrioc->device_refresh_on = 0;
	mpi3mr_start_watchdog(mrioc);

	return 0;
}
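/*
 * PCI error recovery callbacks. The PCI error recovery core invokes
 * them in this order: error_detected, then mmio_enabled (after
 * CAN_RECOVER) or slot_reset (after NEED_RESET), and finally resume
 * once the device is functional again.
 */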
5768 * 5769 * For all of the states if there is no valid mrioc or scsi host 5770 * references in the PCI device then this function will return 5771 * the result as disconnect. 5772 * 5773 * For normal state, this function will return the result as can 5774 * recover. 5775 * 5776 * For frozen state, this function will block for any pending 5777 * controller initialization or re-initialization to complete, 5778 * stop any new interactions with the controller and return 5779 * status as reset required. 5780 * 5781 * For permanent failure state, this function will mark the 5782 * controller as unrecoverable and return status as disconnect. 5783 * 5784 * Returns: PCI_ERS_RESULT_NEED_RESET or CAN_RECOVER or 5785 * DISCONNECT based on the controller state. 5786 */ 5787 static pci_ers_result_t 5788 mpi3mr_pcierr_error_detected(struct pci_dev *pdev, pci_channel_state_t state) 5789 { 5790 struct Scsi_Host *shost; 5791 struct mpi3mr_ioc *mrioc; 5792 unsigned int timeout = MPI3MR_RESET_TIMEOUT; 5793 5794 dev_info(&pdev->dev, "%s: callback invoked state(%d)\n", __func__, 5795 state); 5796 5797 shost = pci_get_drvdata(pdev); 5798 mrioc = shost_priv(shost); 5799 5800 switch (state) { 5801 case pci_channel_io_normal: 5802 return PCI_ERS_RESULT_CAN_RECOVER; 5803 case pci_channel_io_frozen: 5804 mrioc->pci_err_recovery = true; 5805 mrioc->block_on_pci_err = true; 5806 do { 5807 if (mrioc->reset_in_progress || mrioc->is_driver_loading) 5808 ssleep(1); 5809 else 5810 break; 5811 } while (--timeout); 5812 5813 if (!timeout) { 5814 mrioc->pci_err_recovery = true; 5815 mrioc->block_on_pci_err = true; 5816 mrioc->unrecoverable = 1; 5817 mpi3mr_stop_watchdog(mrioc); 5818 mpi3mr_flush_cmds_for_unrecovered_controller(mrioc); 5819 return PCI_ERS_RESULT_DISCONNECT; 5820 } 5821 5822 scsi_block_requests(mrioc->shost); 5823 mpi3mr_stop_watchdog(mrioc); 5824 mpi3mr_cleanup_resources(mrioc); 5825 return PCI_ERS_RESULT_NEED_RESET; 5826 case pci_channel_io_perm_failure: 5827 mrioc->pci_err_recovery = true; 5828 mrioc->block_on_pci_err = true; 5829 mrioc->unrecoverable = 1; 5830 mpi3mr_stop_watchdog(mrioc); 5831 mpi3mr_flush_cmds_for_unrecovered_controller(mrioc); 5832 return PCI_ERS_RESULT_DISCONNECT; 5833 default: 5834 return PCI_ERS_RESULT_DISCONNECT; 5835 } 5836 } 5837 5838 /** 5839 * mpi3mr_pcierr_slot_reset - Post slot reset callback 5840 * @pdev: PCI device instance 5841 * 5842 * This function is called by the PCI error recovery driver 5843 * after a slot or link reset issued by it for the recovery, the 5844 * driver is expected to bring back the controller and 5845 * initialize it. 5846 * 5847 * This function restores PCI state and reinitializes controller 5848 * resources and the controller, this blocks for any pending 5849 * reset to complete. 
5850 * 5851 * Returns: PCI_ERS_RESULT_DISCONNECT on failure or 5852 * PCI_ERS_RESULT_RECOVERED 5853 */ 5854 static pci_ers_result_t mpi3mr_pcierr_slot_reset(struct pci_dev *pdev) 5855 { 5856 struct Scsi_Host *shost; 5857 struct mpi3mr_ioc *mrioc; 5858 unsigned int timeout = MPI3MR_RESET_TIMEOUT; 5859 5860 dev_info(&pdev->dev, "%s: callback invoked\n", __func__); 5861 5862 shost = pci_get_drvdata(pdev); 5863 mrioc = shost_priv(shost); 5864 5865 do { 5866 if (mrioc->reset_in_progress) 5867 ssleep(1); 5868 else 5869 break; 5870 } while (--timeout); 5871 5872 if (!timeout) 5873 goto out_failed; 5874 5875 pci_restore_state(pdev); 5876 5877 if (mpi3mr_setup_resources(mrioc)) { 5878 ioc_err(mrioc, "setup resources failed\n"); 5879 goto out_failed; 5880 } 5881 mrioc->unrecoverable = 0; 5882 mrioc->pci_err_recovery = false; 5883 5884 if (mpi3mr_soft_reset_handler(mrioc, MPI3MR_RESET_FROM_FIRMWARE, 0)) 5885 goto out_failed; 5886 5887 return PCI_ERS_RESULT_RECOVERED; 5888 5889 out_failed: 5890 mrioc->unrecoverable = 1; 5891 mrioc->block_on_pci_err = false; 5892 scsi_unblock_requests(shost); 5893 mpi3mr_start_watchdog(mrioc); 5894 return PCI_ERS_RESULT_DISCONNECT; 5895 } 5896 5897 /** 5898 * mpi3mr_pcierr_resume - PCI error recovery resume 5899 * callback 5900 * @pdev: PCI device instance 5901 * 5902 * This function enables all I/O and IOCTLs post reset issued as 5903 * part of the PCI error recovery 5904 * 5905 * Return: Nothing. 5906 */ 5907 static void mpi3mr_pcierr_resume(struct pci_dev *pdev) 5908 { 5909 struct Scsi_Host *shost; 5910 struct mpi3mr_ioc *mrioc; 5911 5912 dev_info(&pdev->dev, "%s: callback invoked\n", __func__); 5913 5914 shost = pci_get_drvdata(pdev); 5915 mrioc = shost_priv(shost); 5916 5917 if (mrioc->block_on_pci_err) { 5918 mrioc->block_on_pci_err = false; 5919 scsi_unblock_requests(shost); 5920 mpi3mr_start_watchdog(mrioc); 5921 } 5922 } 5923 5924 /** 5925 * mpi3mr_pcierr_mmio_enabled - PCI error recovery callback 5926 * @pdev: PCI device instance 5927 * 5928 * This is called only if mpi3mr_pcierr_error_detected returns 5929 * PCI_ERS_RESULT_CAN_RECOVER. 
5930 * 5931 * Return: PCI_ERS_RESULT_DISCONNECT when the controller is 5932 * unrecoverable or when the shost/mrioc reference cannot be 5933 * found, else return PCI_ERS_RESULT_RECOVERED 5934 */ 5935 static pci_ers_result_t mpi3mr_pcierr_mmio_enabled(struct pci_dev *pdev) 5936 { 5937 struct Scsi_Host *shost; 5938 struct mpi3mr_ioc *mrioc; 5939 5940 dev_info(&pdev->dev, "%s: callback invoked\n", __func__); 5941 5942 shost = pci_get_drvdata(pdev); 5943 mrioc = shost_priv(shost); 5944 5945 if (mrioc->unrecoverable) 5946 return PCI_ERS_RESULT_DISCONNECT; 5947 5948 return PCI_ERS_RESULT_RECOVERED; 5949 } 5950 5951 static const struct pci_device_id mpi3mr_pci_id_table[] = { 5952 { 5953 PCI_DEVICE_SUB(MPI3_MFGPAGE_VENDORID_BROADCOM, 5954 MPI3_MFGPAGE_DEVID_SAS4116, PCI_ANY_ID, PCI_ANY_ID) 5955 }, 5956 { 5957 PCI_DEVICE_SUB(MPI3_MFGPAGE_VENDORID_BROADCOM, 5958 MPI3_MFGPAGE_DEVID_SAS5116_MPI, PCI_ANY_ID, PCI_ANY_ID) 5959 }, 5960 { 5961 PCI_DEVICE_SUB(MPI3_MFGPAGE_VENDORID_BROADCOM, 5962 MPI3_MFGPAGE_DEVID_SAS5116_MPI_MGMT, PCI_ANY_ID, PCI_ANY_ID) 5963 }, 5964 { 0 } 5965 }; 5966 MODULE_DEVICE_TABLE(pci, mpi3mr_pci_id_table); 5967 5968 static const struct pci_error_handlers mpi3mr_err_handler = { 5969 .error_detected = mpi3mr_pcierr_error_detected, 5970 .mmio_enabled = mpi3mr_pcierr_mmio_enabled, 5971 .slot_reset = mpi3mr_pcierr_slot_reset, 5972 .resume = mpi3mr_pcierr_resume, 5973 }; 5974 5975 static SIMPLE_DEV_PM_OPS(mpi3mr_pm_ops, mpi3mr_suspend, mpi3mr_resume); 5976 5977 static struct pci_driver mpi3mr_pci_driver = { 5978 .name = MPI3MR_DRIVER_NAME, 5979 .id_table = mpi3mr_pci_id_table, 5980 .probe = mpi3mr_probe, 5981 .remove = mpi3mr_remove, 5982 .shutdown = mpi3mr_shutdown, 5983 .err_handler = &mpi3mr_err_handler, 5984 .driver.pm = &mpi3mr_pm_ops, 5985 }; 5986 5987 static ssize_t event_counter_show(struct device_driver *dd, char *buf) 5988 { 5989 return sprintf(buf, "%llu\n", atomic64_read(&event_counter)); 5990 } 5991 static DRIVER_ATTR_RO(event_counter); 5992 5993 static int __init mpi3mr_init(void) 5994 { 5995 int ret_val; 5996 5997 pr_info("Loading %s version %s\n", MPI3MR_DRIVER_NAME, 5998 MPI3MR_DRIVER_VERSION); 5999 6000 mpi3mr_transport_template = 6001 sas_attach_transport(&mpi3mr_transport_functions); 6002 if (!mpi3mr_transport_template) { 6003 pr_err("%s failed to load due to sas transport attach failure\n", 6004 MPI3MR_DRIVER_NAME); 6005 return -ENODEV; 6006 } 6007 6008 ret_val = pci_register_driver(&mpi3mr_pci_driver); 6009 if (ret_val) { 6010 pr_err("%s failed to load due to pci register driver failure\n", 6011 MPI3MR_DRIVER_NAME); 6012 goto err_pci_reg_fail; 6013 } 6014 6015 ret_val = driver_create_file(&mpi3mr_pci_driver.driver, 6016 &driver_attr_event_counter); 6017 if (ret_val) 6018 goto err_event_counter; 6019 6020 return ret_val; 6021 6022 err_event_counter: 6023 pci_unregister_driver(&mpi3mr_pci_driver); 6024 6025 err_pci_reg_fail: 6026 sas_release_transport(mpi3mr_transport_template); 6027 return ret_val; 6028 } 6029 6030 static void __exit mpi3mr_exit(void) 6031 { 6032 if (warn_non_secure_ctlr) 6033 pr_warn( 6034 "Unloading %s version %s while managing a non secure controller\n", 6035 MPI3MR_DRIVER_NAME, MPI3MR_DRIVER_VERSION); 6036 else 6037 pr_info("Unloading %s version %s\n", MPI3MR_DRIVER_NAME, 6038 MPI3MR_DRIVER_VERSION); 6039 6040 driver_remove_file(&mpi3mr_pci_driver.driver, 6041 &driver_attr_event_counter); 6042 pci_unregister_driver(&mpi3mr_pci_driver); 6043 sas_release_transport(mpi3mr_transport_template); 6044 ida_destroy(&mrioc_ida); 6045 } 6046 6047 
module_init(mpi3mr_init);
module_exit(mpi3mr_exit);