1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * Driver for Broadcom MPI3 Storage Controllers 4 * 5 * Copyright (C) 2017-2023 Broadcom Inc. 6 * (mailto: mpi3mr-linuxdrv.pdl@broadcom.com) 7 * 8 */ 9 10 #include "mpi3mr.h" 11 #include <linux/idr.h> 12 13 /* global driver scop variables */ 14 LIST_HEAD(mrioc_list); 15 DEFINE_SPINLOCK(mrioc_list_lock); 16 static DEFINE_IDA(mrioc_ida); 17 static int warn_non_secure_ctlr; 18 atomic64_t event_counter; 19 20 MODULE_AUTHOR(MPI3MR_DRIVER_AUTHOR); 21 MODULE_DESCRIPTION(MPI3MR_DRIVER_DESC); 22 MODULE_LICENSE(MPI3MR_DRIVER_LICENSE); 23 MODULE_VERSION(MPI3MR_DRIVER_VERSION); 24 25 /* Module parameters*/ 26 int prot_mask = -1; 27 module_param(prot_mask, int, 0); 28 MODULE_PARM_DESC(prot_mask, "Host protection capabilities mask, def=0x07"); 29 30 static int prot_guard_mask = 3; 31 module_param(prot_guard_mask, int, 0); 32 MODULE_PARM_DESC(prot_guard_mask, " Host protection guard mask, def=3"); 33 static int logging_level; 34 module_param(logging_level, int, 0); 35 MODULE_PARM_DESC(logging_level, 36 " bits for enabling additional logging info (default=0)"); 37 static int max_sgl_entries = MPI3MR_DEFAULT_SGL_ENTRIES; 38 module_param(max_sgl_entries, int, 0444); 39 MODULE_PARM_DESC(max_sgl_entries, 40 "Preferred max number of SG entries to be used for a single I/O\n" 41 "The actual value will be determined by the driver\n" 42 "(Minimum=256, Maximum=2048, default=256)"); 43 44 /* Forward declarations*/ 45 static void mpi3mr_send_event_ack(struct mpi3mr_ioc *mrioc, u8 event, 46 struct mpi3mr_drv_cmd *cmdparam, u32 event_ctx); 47 48 #define MPI3MR_DRIVER_EVENT_TG_QD_REDUCTION (0xFFFF) 49 50 #define MPI3_EVENT_WAIT_FOR_DEVICES_TO_REFRESH (0xFFFE) 51 52 /* 53 * SAS Log info code for a NCQ collateral abort after an NCQ error: 54 * IOC_LOGINFO_PREFIX_PL | PL_LOGINFO_CODE_SATA_NCQ_FAIL_ALL_CMDS_AFTR_ERR 55 * See: drivers/message/fusion/lsi/mpi_log_sas.h 56 */ 57 #define IOC_LOGINFO_SATA_NCQ_FAIL_AFTER_ERR 0x31080000 58 59 /** 60 * mpi3mr_host_tag_for_scmd - Get host tag for a scmd 61 * @mrioc: Adapter instance reference 62 * @scmd: SCSI command reference 63 * 64 * Calculate the host tag based on block tag for a given scmd. 65 * 66 * Return: Valid host tag or MPI3MR_HOSTTAG_INVALID. 67 */ 68 static u16 mpi3mr_host_tag_for_scmd(struct mpi3mr_ioc *mrioc, 69 struct scsi_cmnd *scmd) 70 { 71 struct scmd_priv *priv = NULL; 72 u32 unique_tag; 73 u16 host_tag, hw_queue; 74 75 unique_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd)); 76 77 hw_queue = blk_mq_unique_tag_to_hwq(unique_tag); 78 if (hw_queue >= mrioc->num_op_reply_q) 79 return MPI3MR_HOSTTAG_INVALID; 80 host_tag = blk_mq_unique_tag_to_tag(unique_tag); 81 82 if (WARN_ON(host_tag >= mrioc->max_host_ios)) 83 return MPI3MR_HOSTTAG_INVALID; 84 85 priv = scsi_cmd_priv(scmd); 86 /*host_tag 0 is invalid hence incrementing by 1*/ 87 priv->host_tag = host_tag + 1; 88 priv->scmd = scmd; 89 priv->in_lld_scope = 1; 90 priv->req_q_idx = hw_queue; 91 priv->meta_chain_idx = -1; 92 priv->chain_idx = -1; 93 priv->meta_sg_valid = 0; 94 return priv->host_tag; 95 } 96 97 /** 98 * mpi3mr_scmd_from_host_tag - Get SCSI command from host tag 99 * @mrioc: Adapter instance reference 100 * @host_tag: Host tag 101 * @qidx: Operational queue index 102 * 103 * Identify the block tag from the host tag and queue index and 104 * retrieve associated scsi command using scsi_host_find_tag(). 105 * 106 * Return: SCSI command reference or NULL. 107 */ 108 static struct scsi_cmnd *mpi3mr_scmd_from_host_tag( 109 struct mpi3mr_ioc *mrioc, u16 host_tag, u16 qidx) 110 { 111 struct scsi_cmnd *scmd = NULL; 112 struct scmd_priv *priv = NULL; 113 u32 unique_tag = host_tag - 1; 114 115 if (WARN_ON(host_tag > mrioc->max_host_ios)) 116 goto out; 117 118 unique_tag |= (qidx << BLK_MQ_UNIQUE_TAG_BITS); 119 120 scmd = scsi_host_find_tag(mrioc->shost, unique_tag); 121 if (scmd) { 122 priv = scsi_cmd_priv(scmd); 123 if (!priv->in_lld_scope) 124 scmd = NULL; 125 } 126 out: 127 return scmd; 128 } 129 130 /** 131 * mpi3mr_clear_scmd_priv - Cleanup SCSI command private date 132 * @mrioc: Adapter instance reference 133 * @scmd: SCSI command reference 134 * 135 * Invalidate the SCSI command private data to mark the command 136 * is not in LLD scope anymore. 137 * 138 * Return: Nothing. 139 */ 140 static void mpi3mr_clear_scmd_priv(struct mpi3mr_ioc *mrioc, 141 struct scsi_cmnd *scmd) 142 { 143 struct scmd_priv *priv = NULL; 144 145 priv = scsi_cmd_priv(scmd); 146 147 if (WARN_ON(priv->in_lld_scope == 0)) 148 return; 149 priv->host_tag = MPI3MR_HOSTTAG_INVALID; 150 priv->req_q_idx = 0xFFFF; 151 priv->scmd = NULL; 152 priv->in_lld_scope = 0; 153 priv->meta_sg_valid = 0; 154 if (priv->chain_idx >= 0) { 155 clear_bit(priv->chain_idx, mrioc->chain_bitmap); 156 priv->chain_idx = -1; 157 } 158 if (priv->meta_chain_idx >= 0) { 159 clear_bit(priv->meta_chain_idx, mrioc->chain_bitmap); 160 priv->meta_chain_idx = -1; 161 } 162 } 163 164 static void mpi3mr_dev_rmhs_send_tm(struct mpi3mr_ioc *mrioc, u16 handle, 165 struct mpi3mr_drv_cmd *cmdparam, u8 iou_rc); 166 static void mpi3mr_fwevt_worker(struct work_struct *work); 167 168 /** 169 * mpi3mr_fwevt_free - firmware event memory dealloctor 170 * @r: k reference pointer of the firmware event 171 * 172 * Free firmware event memory when no reference. 173 */ 174 static void mpi3mr_fwevt_free(struct kref *r) 175 { 176 kfree(container_of(r, struct mpi3mr_fwevt, ref_count)); 177 } 178 179 /** 180 * mpi3mr_fwevt_get - k reference incrementor 181 * @fwevt: Firmware event reference 182 * 183 * Increment firmware event reference count. 184 */ 185 static void mpi3mr_fwevt_get(struct mpi3mr_fwevt *fwevt) 186 { 187 kref_get(&fwevt->ref_count); 188 } 189 190 /** 191 * mpi3mr_fwevt_put - k reference decrementor 192 * @fwevt: Firmware event reference 193 * 194 * decrement firmware event reference count. 195 */ 196 static void mpi3mr_fwevt_put(struct mpi3mr_fwevt *fwevt) 197 { 198 kref_put(&fwevt->ref_count, mpi3mr_fwevt_free); 199 } 200 201 /** 202 * mpi3mr_alloc_fwevt - Allocate firmware event 203 * @len: length of firmware event data to allocate 204 * 205 * Allocate firmware event with required length and initialize 206 * the reference counter. 207 * 208 * Return: firmware event reference. 209 */ 210 static struct mpi3mr_fwevt *mpi3mr_alloc_fwevt(int len) 211 { 212 struct mpi3mr_fwevt *fwevt; 213 214 fwevt = kzalloc(sizeof(*fwevt) + len, GFP_ATOMIC); 215 if (!fwevt) 216 return NULL; 217 218 kref_init(&fwevt->ref_count); 219 return fwevt; 220 } 221 222 /** 223 * mpi3mr_fwevt_add_to_list - Add firmware event to the list 224 * @mrioc: Adapter instance reference 225 * @fwevt: Firmware event reference 226 * 227 * Add the given firmware event to the firmware event list. 228 * 229 * Return: Nothing. 230 */ 231 static void mpi3mr_fwevt_add_to_list(struct mpi3mr_ioc *mrioc, 232 struct mpi3mr_fwevt *fwevt) 233 { 234 unsigned long flags; 235 236 if (!mrioc->fwevt_worker_thread) 237 return; 238 239 spin_lock_irqsave(&mrioc->fwevt_lock, flags); 240 /* get fwevt reference count while adding it to fwevt_list */ 241 mpi3mr_fwevt_get(fwevt); 242 INIT_LIST_HEAD(&fwevt->list); 243 list_add_tail(&fwevt->list, &mrioc->fwevt_list); 244 INIT_WORK(&fwevt->work, mpi3mr_fwevt_worker); 245 /* get fwevt reference count while enqueueing it to worker queue */ 246 mpi3mr_fwevt_get(fwevt); 247 queue_work(mrioc->fwevt_worker_thread, &fwevt->work); 248 spin_unlock_irqrestore(&mrioc->fwevt_lock, flags); 249 } 250 251 /** 252 * mpi3mr_hdb_trigger_data_event - Add hdb trigger data event to 253 * the list 254 * @mrioc: Adapter instance reference 255 * @event_data: Event data 256 * 257 * Add the given hdb trigger data event to the firmware event 258 * list. 259 * 260 * Return: Nothing. 261 */ 262 void mpi3mr_hdb_trigger_data_event(struct mpi3mr_ioc *mrioc, 263 struct trigger_event_data *event_data) 264 { 265 struct mpi3mr_fwevt *fwevt; 266 u16 sz = sizeof(*event_data); 267 268 fwevt = mpi3mr_alloc_fwevt(sz); 269 if (!fwevt) { 270 ioc_warn(mrioc, "failed to queue hdb trigger data event\n"); 271 return; 272 } 273 274 fwevt->mrioc = mrioc; 275 fwevt->event_id = MPI3MR_DRIVER_EVENT_PROCESS_TRIGGER; 276 fwevt->send_ack = 0; 277 fwevt->process_evt = 1; 278 fwevt->evt_ctx = 0; 279 fwevt->event_data_size = sz; 280 memcpy(fwevt->event_data, event_data, sz); 281 282 mpi3mr_fwevt_add_to_list(mrioc, fwevt); 283 } 284 285 /** 286 * mpi3mr_fwevt_del_from_list - Delete firmware event from list 287 * @mrioc: Adapter instance reference 288 * @fwevt: Firmware event reference 289 * 290 * Delete the given firmware event from the firmware event list. 291 * 292 * Return: Nothing. 293 */ 294 static void mpi3mr_fwevt_del_from_list(struct mpi3mr_ioc *mrioc, 295 struct mpi3mr_fwevt *fwevt) 296 { 297 unsigned long flags; 298 299 spin_lock_irqsave(&mrioc->fwevt_lock, flags); 300 if (!list_empty(&fwevt->list)) { 301 list_del_init(&fwevt->list); 302 /* 303 * Put fwevt reference count after 304 * removing it from fwevt_list 305 */ 306 mpi3mr_fwevt_put(fwevt); 307 } 308 spin_unlock_irqrestore(&mrioc->fwevt_lock, flags); 309 } 310 311 /** 312 * mpi3mr_dequeue_fwevt - Dequeue firmware event from the list 313 * @mrioc: Adapter instance reference 314 * 315 * Dequeue a firmware event from the firmware event list. 316 * 317 * Return: firmware event. 318 */ 319 static struct mpi3mr_fwevt *mpi3mr_dequeue_fwevt( 320 struct mpi3mr_ioc *mrioc) 321 { 322 unsigned long flags; 323 struct mpi3mr_fwevt *fwevt = NULL; 324 325 spin_lock_irqsave(&mrioc->fwevt_lock, flags); 326 if (!list_empty(&mrioc->fwevt_list)) { 327 fwevt = list_first_entry(&mrioc->fwevt_list, 328 struct mpi3mr_fwevt, list); 329 list_del_init(&fwevt->list); 330 /* 331 * Put fwevt reference count after 332 * removing it from fwevt_list 333 */ 334 mpi3mr_fwevt_put(fwevt); 335 } 336 spin_unlock_irqrestore(&mrioc->fwevt_lock, flags); 337 338 return fwevt; 339 } 340 341 /** 342 * mpi3mr_cancel_work - cancel firmware event 343 * @fwevt: fwevt object which needs to be canceled 344 * 345 * Return: Nothing. 346 */ 347 static void mpi3mr_cancel_work(struct mpi3mr_fwevt *fwevt) 348 { 349 /* 350 * Wait on the fwevt to complete. If this returns 1, then 351 * the event was never executed. 352 * 353 * If it did execute, we wait for it to finish, and the put will 354 * happen from mpi3mr_process_fwevt() 355 */ 356 if (cancel_work_sync(&fwevt->work)) { 357 /* 358 * Put fwevt reference count after 359 * dequeuing it from worker queue 360 */ 361 mpi3mr_fwevt_put(fwevt); 362 /* 363 * Put fwevt reference count to neutralize 364 * kref_init increment 365 */ 366 mpi3mr_fwevt_put(fwevt); 367 } 368 } 369 370 /** 371 * mpi3mr_cleanup_fwevt_list - Cleanup firmware event list 372 * @mrioc: Adapter instance reference 373 * 374 * Flush all pending firmware events from the firmware event 375 * list. 376 * 377 * Return: Nothing. 378 */ 379 void mpi3mr_cleanup_fwevt_list(struct mpi3mr_ioc *mrioc) 380 { 381 struct mpi3mr_fwevt *fwevt = NULL; 382 383 if ((list_empty(&mrioc->fwevt_list) && !mrioc->current_event) || 384 !mrioc->fwevt_worker_thread) 385 return; 386 387 while ((fwevt = mpi3mr_dequeue_fwevt(mrioc))) 388 mpi3mr_cancel_work(fwevt); 389 390 if (mrioc->current_event) { 391 fwevt = mrioc->current_event; 392 /* 393 * Don't call cancel_work_sync() API for the 394 * fwevt work if the controller reset is 395 * get called as part of processing the 396 * same fwevt work (or) when worker thread is 397 * waiting for device add/remove APIs to complete. 398 * Otherwise we will see deadlock. 399 */ 400 if (current_work() == &fwevt->work || fwevt->pending_at_sml) { 401 fwevt->discard = 1; 402 return; 403 } 404 405 mpi3mr_cancel_work(fwevt); 406 } 407 } 408 409 /** 410 * mpi3mr_queue_qd_reduction_event - Queue TG QD reduction event 411 * @mrioc: Adapter instance reference 412 * @tg: Throttle group information pointer 413 * 414 * Accessor to queue on synthetically generated driver event to 415 * the event worker thread, the driver event will be used to 416 * reduce the QD of all VDs in the TG from the worker thread. 417 * 418 * Return: None. 419 */ 420 static void mpi3mr_queue_qd_reduction_event(struct mpi3mr_ioc *mrioc, 421 struct mpi3mr_throttle_group_info *tg) 422 { 423 struct mpi3mr_fwevt *fwevt; 424 u16 sz = sizeof(struct mpi3mr_throttle_group_info *); 425 426 /* 427 * If the QD reduction event is already queued due to throttle and if 428 * the QD is not restored through device info change event 429 * then dont queue further reduction events 430 */ 431 if (tg->fw_qd != tg->modified_qd) 432 return; 433 434 fwevt = mpi3mr_alloc_fwevt(sz); 435 if (!fwevt) { 436 ioc_warn(mrioc, "failed to queue TG QD reduction event\n"); 437 return; 438 } 439 *(struct mpi3mr_throttle_group_info **)fwevt->event_data = tg; 440 fwevt->mrioc = mrioc; 441 fwevt->event_id = MPI3MR_DRIVER_EVENT_TG_QD_REDUCTION; 442 fwevt->send_ack = 0; 443 fwevt->process_evt = 1; 444 fwevt->evt_ctx = 0; 445 fwevt->event_data_size = sz; 446 tg->modified_qd = max_t(u16, (tg->fw_qd * tg->qd_reduction) / 10, 8); 447 448 dprint_event_bh(mrioc, "qd reduction event queued for tg_id(%d)\n", 449 tg->id); 450 mpi3mr_fwevt_add_to_list(mrioc, fwevt); 451 } 452 453 /** 454 * mpi3mr_invalidate_devhandles -Invalidate device handles 455 * @mrioc: Adapter instance reference 456 * 457 * Invalidate the device handles in the target device structures 458 * . Called post reset prior to reinitializing the controller. 459 * 460 * Return: Nothing. 461 */ 462 void mpi3mr_invalidate_devhandles(struct mpi3mr_ioc *mrioc) 463 { 464 struct mpi3mr_tgt_dev *tgtdev; 465 struct mpi3mr_stgt_priv_data *tgt_priv; 466 467 list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) { 468 tgtdev->dev_handle = MPI3MR_INVALID_DEV_HANDLE; 469 if (tgtdev->starget && tgtdev->starget->hostdata) { 470 tgt_priv = tgtdev->starget->hostdata; 471 tgt_priv->dev_handle = MPI3MR_INVALID_DEV_HANDLE; 472 tgt_priv->io_throttle_enabled = 0; 473 tgt_priv->io_divert = 0; 474 tgt_priv->throttle_group = NULL; 475 tgt_priv->wslen = 0; 476 if (tgtdev->host_exposed) 477 atomic_set(&tgt_priv->block_io, 1); 478 } 479 } 480 } 481 482 /** 483 * mpi3mr_print_scmd - print individual SCSI command 484 * @rq: Block request 485 * @data: Adapter instance reference 486 * 487 * Print the SCSI command details if it is in LLD scope. 488 * 489 * Return: true always. 490 */ 491 static bool mpi3mr_print_scmd(struct request *rq, void *data) 492 { 493 struct mpi3mr_ioc *mrioc = (struct mpi3mr_ioc *)data; 494 struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq); 495 struct scmd_priv *priv = NULL; 496 497 if (scmd) { 498 priv = scsi_cmd_priv(scmd); 499 if (!priv->in_lld_scope) 500 goto out; 501 502 ioc_info(mrioc, "%s :Host Tag = %d, qid = %d\n", 503 __func__, priv->host_tag, priv->req_q_idx + 1); 504 scsi_print_command(scmd); 505 } 506 507 out: 508 return(true); 509 } 510 511 /** 512 * mpi3mr_flush_scmd - Flush individual SCSI command 513 * @rq: Block request 514 * @data: Adapter instance reference 515 * 516 * Return the SCSI command to the upper layers if it is in LLD 517 * scope. 518 * 519 * Return: true always. 520 */ 521 522 static bool mpi3mr_flush_scmd(struct request *rq, void *data) 523 { 524 struct mpi3mr_ioc *mrioc = (struct mpi3mr_ioc *)data; 525 struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq); 526 struct scmd_priv *priv = NULL; 527 528 if (scmd) { 529 priv = scsi_cmd_priv(scmd); 530 if (!priv->in_lld_scope) 531 goto out; 532 533 if (priv->meta_sg_valid) 534 dma_unmap_sg(&mrioc->pdev->dev, scsi_prot_sglist(scmd), 535 scsi_prot_sg_count(scmd), scmd->sc_data_direction); 536 mpi3mr_clear_scmd_priv(mrioc, scmd); 537 scsi_dma_unmap(scmd); 538 scmd->result = DID_RESET << 16; 539 scsi_print_command(scmd); 540 scsi_done(scmd); 541 mrioc->flush_io_count++; 542 } 543 544 out: 545 return(true); 546 } 547 548 /** 549 * mpi3mr_count_dev_pending - Count commands pending for a lun 550 * @rq: Block request 551 * @data: SCSI device reference 552 * 553 * This is an iterator function called for each SCSI command in 554 * a host and if the command is pending in the LLD for the 555 * specific device(lun) then device specific pending I/O counter 556 * is updated in the device structure. 557 * 558 * Return: true always. 559 */ 560 561 static bool mpi3mr_count_dev_pending(struct request *rq, void *data) 562 { 563 struct scsi_device *sdev = (struct scsi_device *)data; 564 struct mpi3mr_sdev_priv_data *sdev_priv_data = sdev->hostdata; 565 struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq); 566 struct scmd_priv *priv; 567 568 if (scmd) { 569 priv = scsi_cmd_priv(scmd); 570 if (!priv->in_lld_scope) 571 goto out; 572 if (scmd->device == sdev) 573 sdev_priv_data->pend_count++; 574 } 575 576 out: 577 return true; 578 } 579 580 /** 581 * mpi3mr_count_tgt_pending - Count commands pending for target 582 * @rq: Block request 583 * @data: SCSI target reference 584 * 585 * This is an iterator function called for each SCSI command in 586 * a host and if the command is pending in the LLD for the 587 * specific target then target specific pending I/O counter is 588 * updated in the target structure. 589 * 590 * Return: true always. 591 */ 592 593 static bool mpi3mr_count_tgt_pending(struct request *rq, void *data) 594 { 595 struct scsi_target *starget = (struct scsi_target *)data; 596 struct mpi3mr_stgt_priv_data *stgt_priv_data = starget->hostdata; 597 struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq); 598 struct scmd_priv *priv; 599 600 if (scmd) { 601 priv = scsi_cmd_priv(scmd); 602 if (!priv->in_lld_scope) 603 goto out; 604 if (scmd->device && (scsi_target(scmd->device) == starget)) 605 stgt_priv_data->pend_count++; 606 } 607 608 out: 609 return true; 610 } 611 612 /** 613 * mpi3mr_flush_host_io - Flush host I/Os 614 * @mrioc: Adapter instance reference 615 * 616 * Flush all of the pending I/Os by calling 617 * blk_mq_tagset_busy_iter() for each possible tag. This is 618 * executed post controller reset 619 * 620 * Return: Nothing. 621 */ 622 void mpi3mr_flush_host_io(struct mpi3mr_ioc *mrioc) 623 { 624 struct Scsi_Host *shost = mrioc->shost; 625 626 mrioc->flush_io_count = 0; 627 ioc_info(mrioc, "%s :Flushing Host I/O cmds post reset\n", __func__); 628 blk_mq_tagset_busy_iter(&shost->tag_set, 629 mpi3mr_flush_scmd, (void *)mrioc); 630 ioc_info(mrioc, "%s :Flushed %d Host I/O cmds\n", __func__, 631 mrioc->flush_io_count); 632 } 633 634 /** 635 * mpi3mr_flush_cmds_for_unrecovered_controller - Flush all pending cmds 636 * @mrioc: Adapter instance reference 637 * 638 * This function waits for currently running IO poll threads to 639 * exit and then flushes all host I/Os and any internal pending 640 * cmds. This is executed after controller is marked as 641 * unrecoverable. 642 * 643 * Return: Nothing. 644 */ 645 void mpi3mr_flush_cmds_for_unrecovered_controller(struct mpi3mr_ioc *mrioc) 646 { 647 struct Scsi_Host *shost = mrioc->shost; 648 int i; 649 650 if (!mrioc->unrecoverable) 651 return; 652 653 if (mrioc->op_reply_qinfo) { 654 for (i = 0; i < mrioc->num_queues; i++) { 655 while (atomic_read(&mrioc->op_reply_qinfo[i].in_use)) 656 udelay(500); 657 atomic_set(&mrioc->op_reply_qinfo[i].pend_ios, 0); 658 } 659 } 660 mrioc->flush_io_count = 0; 661 blk_mq_tagset_busy_iter(&shost->tag_set, 662 mpi3mr_flush_scmd, (void *)mrioc); 663 mpi3mr_flush_delayed_cmd_lists(mrioc); 664 mpi3mr_flush_drv_cmds(mrioc); 665 } 666 667 /** 668 * mpi3mr_alloc_tgtdev - target device allocator 669 * 670 * Allocate target device instance and initialize the reference 671 * count 672 * 673 * Return: target device instance. 674 */ 675 static struct mpi3mr_tgt_dev *mpi3mr_alloc_tgtdev(void) 676 { 677 struct mpi3mr_tgt_dev *tgtdev; 678 679 tgtdev = kzalloc(sizeof(*tgtdev), GFP_ATOMIC); 680 if (!tgtdev) 681 return NULL; 682 kref_init(&tgtdev->ref_count); 683 return tgtdev; 684 } 685 686 /** 687 * mpi3mr_tgtdev_add_to_list -Add tgtdevice to the list 688 * @mrioc: Adapter instance reference 689 * @tgtdev: Target device 690 * 691 * Add the target device to the target device list 692 * 693 * Return: Nothing. 694 */ 695 static void mpi3mr_tgtdev_add_to_list(struct mpi3mr_ioc *mrioc, 696 struct mpi3mr_tgt_dev *tgtdev) 697 { 698 unsigned long flags; 699 700 spin_lock_irqsave(&mrioc->tgtdev_lock, flags); 701 mpi3mr_tgtdev_get(tgtdev); 702 INIT_LIST_HEAD(&tgtdev->list); 703 list_add_tail(&tgtdev->list, &mrioc->tgtdev_list); 704 tgtdev->state = MPI3MR_DEV_CREATED; 705 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 706 } 707 708 /** 709 * mpi3mr_tgtdev_del_from_list -Delete tgtdevice from the list 710 * @mrioc: Adapter instance reference 711 * @tgtdev: Target device 712 * @must_delete: Must delete the target device from the list irrespective 713 * of the device state. 714 * 715 * Remove the target device from the target device list 716 * 717 * Return: Nothing. 718 */ 719 static void mpi3mr_tgtdev_del_from_list(struct mpi3mr_ioc *mrioc, 720 struct mpi3mr_tgt_dev *tgtdev, bool must_delete) 721 { 722 unsigned long flags; 723 724 spin_lock_irqsave(&mrioc->tgtdev_lock, flags); 725 if ((tgtdev->state == MPI3MR_DEV_REMOVE_HS_STARTED) || (must_delete == true)) { 726 if (!list_empty(&tgtdev->list)) { 727 list_del_init(&tgtdev->list); 728 tgtdev->state = MPI3MR_DEV_DELETED; 729 mpi3mr_tgtdev_put(tgtdev); 730 } 731 } 732 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 733 } 734 735 /** 736 * __mpi3mr_get_tgtdev_by_handle -Get tgtdev from device handle 737 * @mrioc: Adapter instance reference 738 * @handle: Device handle 739 * 740 * Accessor to retrieve target device from the device handle. 741 * Non Lock version 742 * 743 * Return: Target device reference. 744 */ 745 static struct mpi3mr_tgt_dev *__mpi3mr_get_tgtdev_by_handle( 746 struct mpi3mr_ioc *mrioc, u16 handle) 747 { 748 struct mpi3mr_tgt_dev *tgtdev; 749 750 assert_spin_locked(&mrioc->tgtdev_lock); 751 list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) 752 if (tgtdev->dev_handle == handle) 753 goto found_tgtdev; 754 return NULL; 755 756 found_tgtdev: 757 mpi3mr_tgtdev_get(tgtdev); 758 return tgtdev; 759 } 760 761 /** 762 * mpi3mr_get_tgtdev_by_handle -Get tgtdev from device handle 763 * @mrioc: Adapter instance reference 764 * @handle: Device handle 765 * 766 * Accessor to retrieve target device from the device handle. 767 * Lock version 768 * 769 * Return: Target device reference. 770 */ 771 struct mpi3mr_tgt_dev *mpi3mr_get_tgtdev_by_handle( 772 struct mpi3mr_ioc *mrioc, u16 handle) 773 { 774 struct mpi3mr_tgt_dev *tgtdev; 775 unsigned long flags; 776 777 spin_lock_irqsave(&mrioc->tgtdev_lock, flags); 778 tgtdev = __mpi3mr_get_tgtdev_by_handle(mrioc, handle); 779 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 780 return tgtdev; 781 } 782 783 /** 784 * __mpi3mr_get_tgtdev_by_perst_id -Get tgtdev from persist ID 785 * @mrioc: Adapter instance reference 786 * @persist_id: Persistent ID 787 * 788 * Accessor to retrieve target device from the Persistent ID. 789 * Non Lock version 790 * 791 * Return: Target device reference. 792 */ 793 static struct mpi3mr_tgt_dev *__mpi3mr_get_tgtdev_by_perst_id( 794 struct mpi3mr_ioc *mrioc, u16 persist_id) 795 { 796 struct mpi3mr_tgt_dev *tgtdev; 797 798 assert_spin_locked(&mrioc->tgtdev_lock); 799 list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) 800 if (tgtdev->perst_id == persist_id) 801 goto found_tgtdev; 802 return NULL; 803 804 found_tgtdev: 805 mpi3mr_tgtdev_get(tgtdev); 806 return tgtdev; 807 } 808 809 /** 810 * mpi3mr_get_tgtdev_by_perst_id -Get tgtdev from persistent ID 811 * @mrioc: Adapter instance reference 812 * @persist_id: Persistent ID 813 * 814 * Accessor to retrieve target device from the Persistent ID. 815 * Lock version 816 * 817 * Return: Target device reference. 818 */ 819 static struct mpi3mr_tgt_dev *mpi3mr_get_tgtdev_by_perst_id( 820 struct mpi3mr_ioc *mrioc, u16 persist_id) 821 { 822 struct mpi3mr_tgt_dev *tgtdev; 823 unsigned long flags; 824 825 spin_lock_irqsave(&mrioc->tgtdev_lock, flags); 826 tgtdev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, persist_id); 827 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 828 return tgtdev; 829 } 830 831 /** 832 * __mpi3mr_get_tgtdev_from_tgtpriv -Get tgtdev from tgt private 833 * @mrioc: Adapter instance reference 834 * @tgt_priv: Target private data 835 * 836 * Accessor to return target device from the target private 837 * data. Non Lock version 838 * 839 * Return: Target device reference. 840 */ 841 static struct mpi3mr_tgt_dev *__mpi3mr_get_tgtdev_from_tgtpriv( 842 struct mpi3mr_ioc *mrioc, struct mpi3mr_stgt_priv_data *tgt_priv) 843 { 844 struct mpi3mr_tgt_dev *tgtdev; 845 846 assert_spin_locked(&mrioc->tgtdev_lock); 847 tgtdev = tgt_priv->tgt_dev; 848 if (tgtdev) 849 mpi3mr_tgtdev_get(tgtdev); 850 return tgtdev; 851 } 852 853 /** 854 * mpi3mr_set_io_divert_for_all_vd_in_tg -set divert for TG VDs 855 * @mrioc: Adapter instance reference 856 * @tg: Throttle group information pointer 857 * @divert_value: 1 or 0 858 * 859 * Accessor to set io_divert flag for each device associated 860 * with the given throttle group with the given value. 861 * 862 * Return: None. 863 */ 864 static void mpi3mr_set_io_divert_for_all_vd_in_tg(struct mpi3mr_ioc *mrioc, 865 struct mpi3mr_throttle_group_info *tg, u8 divert_value) 866 { 867 unsigned long flags; 868 struct mpi3mr_tgt_dev *tgtdev; 869 struct mpi3mr_stgt_priv_data *tgt_priv; 870 871 spin_lock_irqsave(&mrioc->tgtdev_lock, flags); 872 list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) { 873 if (tgtdev->starget && tgtdev->starget->hostdata) { 874 tgt_priv = tgtdev->starget->hostdata; 875 if (tgt_priv->throttle_group == tg) 876 tgt_priv->io_divert = divert_value; 877 } 878 } 879 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 880 } 881 882 /** 883 * mpi3mr_print_device_event_notice - print notice related to post processing of 884 * device event after controller reset. 885 * 886 * @mrioc: Adapter instance reference 887 * @device_add: true for device add event and false for device removal event 888 * 889 * Return: None. 890 */ 891 void mpi3mr_print_device_event_notice(struct mpi3mr_ioc *mrioc, 892 bool device_add) 893 { 894 ioc_notice(mrioc, "Device %s was in progress before the reset and\n", 895 (device_add ? "addition" : "removal")); 896 ioc_notice(mrioc, "completed after reset, verify whether the exposed devices\n"); 897 ioc_notice(mrioc, "are matched with attached devices for correctness\n"); 898 } 899 900 /** 901 * mpi3mr_remove_tgtdev_from_host - Remove dev from upper layers 902 * @mrioc: Adapter instance reference 903 * @tgtdev: Target device structure 904 * 905 * Checks whether the device is exposed to upper layers and if it 906 * is then remove the device from upper layers by calling 907 * scsi_remove_target(). 908 * 909 * Return: 0 on success, non zero on failure. 910 */ 911 void mpi3mr_remove_tgtdev_from_host(struct mpi3mr_ioc *mrioc, 912 struct mpi3mr_tgt_dev *tgtdev) 913 { 914 struct mpi3mr_stgt_priv_data *tgt_priv; 915 916 ioc_info(mrioc, "%s :Removing handle(0x%04x), wwid(0x%016llx)\n", 917 __func__, tgtdev->dev_handle, (unsigned long long)tgtdev->wwid); 918 if (tgtdev->starget && tgtdev->starget->hostdata) { 919 tgt_priv = tgtdev->starget->hostdata; 920 atomic_set(&tgt_priv->block_io, 0); 921 tgt_priv->dev_handle = MPI3MR_INVALID_DEV_HANDLE; 922 } 923 924 if (!mrioc->sas_transport_enabled || (tgtdev->dev_type != 925 MPI3_DEVICE_DEVFORM_SAS_SATA) || tgtdev->non_stl) { 926 if (tgtdev->starget) { 927 if (mrioc->current_event) 928 mrioc->current_event->pending_at_sml = 1; 929 scsi_remove_target(&tgtdev->starget->dev); 930 tgtdev->host_exposed = 0; 931 if (mrioc->current_event) { 932 mrioc->current_event->pending_at_sml = 0; 933 if (mrioc->current_event->discard) { 934 mpi3mr_print_device_event_notice(mrioc, 935 false); 936 return; 937 } 938 } 939 } 940 } else 941 mpi3mr_remove_tgtdev_from_sas_transport(mrioc, tgtdev); 942 mpi3mr_global_trigger(mrioc, 943 MPI3_DRIVER2_GLOBALTRIGGER_DEVICE_REMOVAL_ENABLED); 944 945 ioc_info(mrioc, "%s :Removed handle(0x%04x), wwid(0x%016llx)\n", 946 __func__, tgtdev->dev_handle, (unsigned long long)tgtdev->wwid); 947 } 948 949 /** 950 * mpi3mr_report_tgtdev_to_host - Expose device to upper layers 951 * @mrioc: Adapter instance reference 952 * @perst_id: Persistent ID of the device 953 * 954 * Checks whether the device can be exposed to upper layers and 955 * if it is not then expose the device to upper layers by 956 * calling scsi_scan_target(). 957 * 958 * Return: 0 on success, non zero on failure. 959 */ 960 static int mpi3mr_report_tgtdev_to_host(struct mpi3mr_ioc *mrioc, 961 u16 perst_id) 962 { 963 int retval = 0; 964 struct mpi3mr_tgt_dev *tgtdev; 965 966 if (mrioc->reset_in_progress || mrioc->pci_err_recovery) 967 return -1; 968 969 tgtdev = mpi3mr_get_tgtdev_by_perst_id(mrioc, perst_id); 970 if (!tgtdev) { 971 retval = -1; 972 goto out; 973 } 974 if (tgtdev->is_hidden || tgtdev->host_exposed) { 975 retval = -1; 976 goto out; 977 } 978 if (!mrioc->sas_transport_enabled || (tgtdev->dev_type != 979 MPI3_DEVICE_DEVFORM_SAS_SATA) || tgtdev->non_stl){ 980 tgtdev->host_exposed = 1; 981 if (mrioc->current_event) 982 mrioc->current_event->pending_at_sml = 1; 983 scsi_scan_target(&mrioc->shost->shost_gendev, 984 mrioc->scsi_device_channel, tgtdev->perst_id, 985 SCAN_WILD_CARD, SCSI_SCAN_INITIAL); 986 if (!tgtdev->starget) 987 tgtdev->host_exposed = 0; 988 if (mrioc->current_event) { 989 mrioc->current_event->pending_at_sml = 0; 990 if (mrioc->current_event->discard) { 991 mpi3mr_print_device_event_notice(mrioc, true); 992 goto out; 993 } 994 } 995 dprint_event_bh(mrioc, 996 "exposed target device with handle(0x%04x), perst_id(%d)\n", 997 tgtdev->dev_handle, perst_id); 998 goto out; 999 } else 1000 mpi3mr_report_tgtdev_to_sas_transport(mrioc, tgtdev); 1001 out: 1002 if (tgtdev) 1003 mpi3mr_tgtdev_put(tgtdev); 1004 1005 return retval; 1006 } 1007 1008 /** 1009 * mpi3mr_change_queue_depth- Change QD callback handler 1010 * @sdev: SCSI device reference 1011 * @q_depth: Queue depth 1012 * 1013 * Validate and limit QD and call scsi_change_queue_depth. 1014 * 1015 * Return: return value of scsi_change_queue_depth 1016 */ 1017 static int mpi3mr_change_queue_depth(struct scsi_device *sdev, 1018 int q_depth) 1019 { 1020 struct scsi_target *starget = scsi_target(sdev); 1021 struct Scsi_Host *shost = dev_to_shost(&starget->dev); 1022 int retval = 0; 1023 1024 if (!sdev->tagged_supported) 1025 q_depth = 1; 1026 if (q_depth > shost->can_queue) 1027 q_depth = shost->can_queue; 1028 else if (!q_depth) 1029 q_depth = MPI3MR_DEFAULT_SDEV_QD; 1030 retval = scsi_change_queue_depth(sdev, q_depth); 1031 sdev->max_queue_depth = sdev->queue_depth; 1032 1033 return retval; 1034 } 1035 1036 static void mpi3mr_configure_nvme_dev(struct mpi3mr_tgt_dev *tgt_dev, 1037 struct queue_limits *lim) 1038 { 1039 u8 pgsz = tgt_dev->dev_spec.pcie_inf.pgsz ? : MPI3MR_DEFAULT_PGSZEXP; 1040 1041 lim->max_hw_sectors = tgt_dev->dev_spec.pcie_inf.mdts / 512; 1042 lim->virt_boundary_mask = (1 << pgsz) - 1; 1043 } 1044 1045 static void mpi3mr_configure_tgt_dev(struct mpi3mr_tgt_dev *tgt_dev, 1046 struct queue_limits *lim) 1047 { 1048 if (tgt_dev->dev_type == MPI3_DEVICE_DEVFORM_PCIE && 1049 (tgt_dev->dev_spec.pcie_inf.dev_info & 1050 MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) == 1051 MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) 1052 mpi3mr_configure_nvme_dev(tgt_dev, lim); 1053 } 1054 1055 /** 1056 * mpi3mr_update_sdev - Update SCSI device information 1057 * @sdev: SCSI device reference 1058 * @data: target device reference 1059 * 1060 * This is an iterator function called for each SCSI device in a 1061 * target to update the target specific information into each 1062 * SCSI device. 1063 * 1064 * Return: Nothing. 1065 */ 1066 static void 1067 mpi3mr_update_sdev(struct scsi_device *sdev, void *data) 1068 { 1069 struct mpi3mr_tgt_dev *tgtdev; 1070 struct queue_limits lim; 1071 1072 tgtdev = (struct mpi3mr_tgt_dev *)data; 1073 if (!tgtdev) 1074 return; 1075 1076 mpi3mr_change_queue_depth(sdev, tgtdev->q_depth); 1077 1078 lim = queue_limits_start_update(sdev->request_queue); 1079 mpi3mr_configure_tgt_dev(tgtdev, &lim); 1080 WARN_ON_ONCE(queue_limits_commit_update(sdev->request_queue, &lim)); 1081 } 1082 1083 /** 1084 * mpi3mr_refresh_tgtdevs - Refresh target device exposure 1085 * @mrioc: Adapter instance reference 1086 * 1087 * This is executed post controller reset to identify any 1088 * missing devices during reset and remove from the upper layers 1089 * or expose any newly detected device to the upper layers. 1090 * 1091 * Return: Nothing. 1092 */ 1093 static void mpi3mr_refresh_tgtdevs(struct mpi3mr_ioc *mrioc) 1094 { 1095 struct mpi3mr_tgt_dev *tgtdev, *tgtdev_next; 1096 struct mpi3mr_stgt_priv_data *tgt_priv; 1097 1098 dprint_reset(mrioc, "refresh target devices: check for removals\n"); 1099 list_for_each_entry_safe(tgtdev, tgtdev_next, &mrioc->tgtdev_list, 1100 list) { 1101 if (((tgtdev->dev_handle == MPI3MR_INVALID_DEV_HANDLE) || 1102 tgtdev->is_hidden) && 1103 tgtdev->host_exposed && tgtdev->starget && 1104 tgtdev->starget->hostdata) { 1105 tgt_priv = tgtdev->starget->hostdata; 1106 tgt_priv->dev_removed = 1; 1107 atomic_set(&tgt_priv->block_io, 0); 1108 } 1109 } 1110 1111 list_for_each_entry_safe(tgtdev, tgtdev_next, &mrioc->tgtdev_list, 1112 list) { 1113 if (tgtdev->dev_handle == MPI3MR_INVALID_DEV_HANDLE) { 1114 dprint_reset(mrioc, "removing target device with perst_id(%d)\n", 1115 tgtdev->perst_id); 1116 if (tgtdev->host_exposed) 1117 mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev); 1118 mpi3mr_tgtdev_del_from_list(mrioc, tgtdev, true); 1119 mpi3mr_tgtdev_put(tgtdev); 1120 } else if (tgtdev->is_hidden & tgtdev->host_exposed) { 1121 dprint_reset(mrioc, "hiding target device with perst_id(%d)\n", 1122 tgtdev->perst_id); 1123 mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev); 1124 } 1125 } 1126 1127 tgtdev = NULL; 1128 list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) { 1129 if ((tgtdev->dev_handle != MPI3MR_INVALID_DEV_HANDLE) && 1130 !tgtdev->is_hidden) { 1131 if (!tgtdev->host_exposed) 1132 mpi3mr_report_tgtdev_to_host(mrioc, 1133 tgtdev->perst_id); 1134 else if (tgtdev->starget) 1135 starget_for_each_device(tgtdev->starget, 1136 (void *)tgtdev, mpi3mr_update_sdev); 1137 } 1138 } 1139 } 1140 1141 /** 1142 * mpi3mr_debug_dump_devpg0 - Dump device page0 1143 * @mrioc: Adapter instance reference 1144 * @dev_pg0: Device page 0. 1145 * 1146 * Prints pertinent details of the device page 0. 1147 * 1148 * Return: Nothing. 1149 */ 1150 static void 1151 mpi3mr_debug_dump_devpg0(struct mpi3mr_ioc *mrioc, struct mpi3_device_page0 *dev_pg0) 1152 { 1153 ioc_info(mrioc, 1154 "device_pg0: handle(0x%04x), perst_id(%d), wwid(0x%016llx), encl_handle(0x%04x), slot(%d)\n", 1155 le16_to_cpu(dev_pg0->dev_handle), 1156 le16_to_cpu(dev_pg0->persistent_id), 1157 le64_to_cpu(dev_pg0->wwid), le16_to_cpu(dev_pg0->enclosure_handle), 1158 le16_to_cpu(dev_pg0->slot)); 1159 ioc_info(mrioc, "device_pg0: access_status(0x%02x), flags(0x%04x), device_form(0x%02x), queue_depth(%d)\n", 1160 dev_pg0->access_status, le16_to_cpu(dev_pg0->flags), 1161 dev_pg0->device_form, le16_to_cpu(dev_pg0->queue_depth)); 1162 ioc_info(mrioc, "device_pg0: parent_handle(0x%04x), iounit_port(%d)\n", 1163 le16_to_cpu(dev_pg0->parent_dev_handle), dev_pg0->io_unit_port); 1164 1165 switch (dev_pg0->device_form) { 1166 case MPI3_DEVICE_DEVFORM_SAS_SATA: 1167 { 1168 1169 struct mpi3_device0_sas_sata_format *sasinf = 1170 &dev_pg0->device_specific.sas_sata_format; 1171 ioc_info(mrioc, 1172 "device_pg0: sas_sata: sas_address(0x%016llx),flags(0x%04x),\n" 1173 "device_info(0x%04x), phy_num(%d), attached_phy_id(%d),negotiated_link_rate(0x%02x)\n", 1174 le64_to_cpu(sasinf->sas_address), 1175 le16_to_cpu(sasinf->flags), 1176 le16_to_cpu(sasinf->device_info), sasinf->phy_num, 1177 sasinf->attached_phy_identifier, sasinf->negotiated_link_rate); 1178 break; 1179 } 1180 case MPI3_DEVICE_DEVFORM_PCIE: 1181 { 1182 1183 struct mpi3_device0_pcie_format *pcieinf = 1184 &dev_pg0->device_specific.pcie_format; 1185 ioc_info(mrioc, 1186 "device_pg0: pcie: port_num(%d), device_info(0x%04x), mdts(%d), page_sz(0x%02x)\n", 1187 pcieinf->port_num, le16_to_cpu(pcieinf->device_info), 1188 le32_to_cpu(pcieinf->maximum_data_transfer_size), 1189 pcieinf->page_size); 1190 ioc_info(mrioc, 1191 "device_pg0: pcie: abort_timeout(%d), reset_timeout(%d) capabilities (0x%08x)\n", 1192 pcieinf->nvme_abort_to, pcieinf->controller_reset_to, 1193 le32_to_cpu(pcieinf->capabilities)); 1194 break; 1195 } 1196 case MPI3_DEVICE_DEVFORM_VD: 1197 { 1198 1199 struct mpi3_device0_vd_format *vdinf = 1200 &dev_pg0->device_specific.vd_format; 1201 1202 ioc_info(mrioc, 1203 "device_pg0: vd: state(0x%02x), raid_level(%d), flags(0x%04x),\n" 1204 "device_info(0x%04x) abort_timeout(%d), reset_timeout(%d)\n", 1205 vdinf->vd_state, vdinf->raid_level, 1206 le16_to_cpu(vdinf->flags), 1207 le16_to_cpu(vdinf->device_info), 1208 vdinf->vd_abort_to, vdinf->vd_reset_to); 1209 ioc_info(mrioc, 1210 "device_pg0: vd: tg_id(%d), high(%dMiB), low(%dMiB), qd_reduction_factor(%d)\n", 1211 vdinf->io_throttle_group, 1212 le16_to_cpu(vdinf->io_throttle_group_high), 1213 le16_to_cpu(vdinf->io_throttle_group_low), 1214 ((le16_to_cpu(vdinf->flags) & 1215 MPI3_DEVICE0_VD_FLAGS_IO_THROTTLE_GROUP_QD_MASK) >> 12)); 1216 break; 1217 1218 } 1219 default: 1220 break; 1221 } 1222 } 1223 1224 /** 1225 * mpi3mr_update_tgtdev - DevStatusChange evt bottomhalf 1226 * @mrioc: Adapter instance reference 1227 * @tgtdev: Target device internal structure 1228 * @dev_pg0: New device page0 1229 * @is_added: Flag to indicate the device is just added 1230 * 1231 * Update the information from the device page0 into the driver 1232 * cached target device structure. 1233 * 1234 * Return: Nothing. 1235 */ 1236 static void mpi3mr_update_tgtdev(struct mpi3mr_ioc *mrioc, 1237 struct mpi3mr_tgt_dev *tgtdev, struct mpi3_device_page0 *dev_pg0, 1238 bool is_added) 1239 { 1240 u16 flags = 0; 1241 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL; 1242 struct mpi3mr_enclosure_node *enclosure_dev = NULL; 1243 u8 prot_mask = 0; 1244 1245 if (mrioc->logging_level & 1246 (MPI3_DEBUG_EVENT | MPI3_DEBUG_EVENT_WORK_TASK)) 1247 mpi3mr_debug_dump_devpg0(mrioc, dev_pg0); 1248 1249 tgtdev->perst_id = le16_to_cpu(dev_pg0->persistent_id); 1250 tgtdev->dev_handle = le16_to_cpu(dev_pg0->dev_handle); 1251 tgtdev->dev_type = dev_pg0->device_form; 1252 tgtdev->io_unit_port = dev_pg0->io_unit_port; 1253 tgtdev->encl_handle = le16_to_cpu(dev_pg0->enclosure_handle); 1254 tgtdev->parent_handle = le16_to_cpu(dev_pg0->parent_dev_handle); 1255 tgtdev->slot = le16_to_cpu(dev_pg0->slot); 1256 tgtdev->q_depth = le16_to_cpu(dev_pg0->queue_depth); 1257 tgtdev->wwid = le64_to_cpu(dev_pg0->wwid); 1258 tgtdev->devpg0_flag = le16_to_cpu(dev_pg0->flags); 1259 1260 if (tgtdev->encl_handle) 1261 enclosure_dev = mpi3mr_enclosure_find_by_handle(mrioc, 1262 tgtdev->encl_handle); 1263 if (enclosure_dev) 1264 tgtdev->enclosure_logical_id = le64_to_cpu( 1265 enclosure_dev->pg0.enclosure_logical_id); 1266 1267 flags = tgtdev->devpg0_flag; 1268 1269 tgtdev->is_hidden = (flags & MPI3_DEVICE0_FLAGS_HIDDEN); 1270 1271 if (is_added == true) 1272 tgtdev->io_throttle_enabled = 1273 (flags & MPI3_DEVICE0_FLAGS_IO_THROTTLING_REQUIRED) ? 1 : 0; 1274 if (!mrioc->sas_transport_enabled) 1275 tgtdev->non_stl = 1; 1276 1277 switch (flags & MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_MASK) { 1278 case MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_256_LB: 1279 tgtdev->wslen = MPI3MR_WRITE_SAME_MAX_LEN_256_BLKS; 1280 break; 1281 case MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_2048_LB: 1282 tgtdev->wslen = MPI3MR_WRITE_SAME_MAX_LEN_2048_BLKS; 1283 break; 1284 case MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_NO_LIMIT: 1285 default: 1286 tgtdev->wslen = 0; 1287 break; 1288 } 1289 1290 if (tgtdev->starget && tgtdev->starget->hostdata) { 1291 scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *) 1292 tgtdev->starget->hostdata; 1293 scsi_tgt_priv_data->perst_id = tgtdev->perst_id; 1294 scsi_tgt_priv_data->dev_handle = tgtdev->dev_handle; 1295 scsi_tgt_priv_data->dev_type = tgtdev->dev_type; 1296 scsi_tgt_priv_data->io_throttle_enabled = 1297 tgtdev->io_throttle_enabled; 1298 if (is_added == true) 1299 atomic_set(&scsi_tgt_priv_data->block_io, 0); 1300 scsi_tgt_priv_data->wslen = tgtdev->wslen; 1301 } 1302 1303 switch (dev_pg0->access_status) { 1304 case MPI3_DEVICE0_ASTATUS_NO_ERRORS: 1305 case MPI3_DEVICE0_ASTATUS_PREPARE: 1306 case MPI3_DEVICE0_ASTATUS_NEEDS_INITIALIZATION: 1307 case MPI3_DEVICE0_ASTATUS_DEVICE_MISSING_DELAY: 1308 break; 1309 default: 1310 tgtdev->is_hidden = 1; 1311 break; 1312 } 1313 1314 switch (tgtdev->dev_type) { 1315 case MPI3_DEVICE_DEVFORM_SAS_SATA: 1316 { 1317 struct mpi3_device0_sas_sata_format *sasinf = 1318 &dev_pg0->device_specific.sas_sata_format; 1319 u16 dev_info = le16_to_cpu(sasinf->device_info); 1320 1321 tgtdev->dev_spec.sas_sata_inf.dev_info = dev_info; 1322 tgtdev->dev_spec.sas_sata_inf.sas_address = 1323 le64_to_cpu(sasinf->sas_address); 1324 tgtdev->dev_spec.sas_sata_inf.phy_id = sasinf->phy_num; 1325 tgtdev->dev_spec.sas_sata_inf.attached_phy_id = 1326 sasinf->attached_phy_identifier; 1327 tgtdev->dev_spec.sas_sata_inf.negotiated_link_rate = 1328 sasinf->negotiated_link_rate; 1329 if ((dev_info & MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_MASK) != 1330 MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_END_DEVICE) 1331 tgtdev->is_hidden = 1; 1332 else if (!(dev_info & (MPI3_SAS_DEVICE_INFO_STP_SATA_TARGET | 1333 MPI3_SAS_DEVICE_INFO_SSP_TARGET))) 1334 tgtdev->is_hidden = 1; 1335 1336 if (((tgtdev->devpg0_flag & 1337 MPI3_DEVICE0_FLAGS_ATT_METHOD_DIR_ATTACHED) 1338 && (tgtdev->devpg0_flag & 1339 MPI3_DEVICE0_FLAGS_ATT_METHOD_VIRTUAL)) || 1340 (tgtdev->parent_handle == 0xFFFF)) 1341 tgtdev->non_stl = 1; 1342 if (tgtdev->dev_spec.sas_sata_inf.hba_port) 1343 tgtdev->dev_spec.sas_sata_inf.hba_port->port_id = 1344 dev_pg0->io_unit_port; 1345 break; 1346 } 1347 case MPI3_DEVICE_DEVFORM_PCIE: 1348 { 1349 struct mpi3_device0_pcie_format *pcieinf = 1350 &dev_pg0->device_specific.pcie_format; 1351 u16 dev_info = le16_to_cpu(pcieinf->device_info); 1352 1353 tgtdev->dev_spec.pcie_inf.dev_info = dev_info; 1354 tgtdev->dev_spec.pcie_inf.capb = 1355 le32_to_cpu(pcieinf->capabilities); 1356 tgtdev->dev_spec.pcie_inf.mdts = MPI3MR_DEFAULT_MDTS; 1357 /* 2^12 = 4096 */ 1358 tgtdev->dev_spec.pcie_inf.pgsz = 12; 1359 if (dev_pg0->access_status == MPI3_DEVICE0_ASTATUS_NO_ERRORS) { 1360 tgtdev->dev_spec.pcie_inf.mdts = 1361 le32_to_cpu(pcieinf->maximum_data_transfer_size); 1362 tgtdev->dev_spec.pcie_inf.pgsz = pcieinf->page_size; 1363 tgtdev->dev_spec.pcie_inf.reset_to = 1364 max_t(u8, pcieinf->controller_reset_to, 1365 MPI3MR_INTADMCMD_TIMEOUT); 1366 tgtdev->dev_spec.pcie_inf.abort_to = 1367 max_t(u8, pcieinf->nvme_abort_to, 1368 MPI3MR_INTADMCMD_TIMEOUT); 1369 } 1370 if (tgtdev->dev_spec.pcie_inf.mdts > (1024 * 1024)) 1371 tgtdev->dev_spec.pcie_inf.mdts = (1024 * 1024); 1372 if (((dev_info & MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) != 1373 MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) && 1374 ((dev_info & MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) != 1375 MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_SCSI_DEVICE)) 1376 tgtdev->is_hidden = 1; 1377 tgtdev->non_stl = 1; 1378 if (!mrioc->shost) 1379 break; 1380 prot_mask = scsi_host_get_prot(mrioc->shost); 1381 if (prot_mask & SHOST_DIX_TYPE0_PROTECTION) { 1382 scsi_host_set_prot(mrioc->shost, prot_mask & 0x77); 1383 ioc_info(mrioc, 1384 "%s : Disabling DIX0 prot capability\n", __func__); 1385 ioc_info(mrioc, 1386 "because HBA does not support DIX0 operation on NVME drives\n"); 1387 } 1388 break; 1389 } 1390 case MPI3_DEVICE_DEVFORM_VD: 1391 { 1392 struct mpi3_device0_vd_format *vdinf = 1393 &dev_pg0->device_specific.vd_format; 1394 struct mpi3mr_throttle_group_info *tg = NULL; 1395 u16 vdinf_io_throttle_group = 1396 le16_to_cpu(vdinf->io_throttle_group); 1397 1398 tgtdev->dev_spec.vd_inf.state = vdinf->vd_state; 1399 if (vdinf->vd_state == MPI3_DEVICE0_VD_STATE_OFFLINE) 1400 tgtdev->is_hidden = 1; 1401 tgtdev->non_stl = 1; 1402 tgtdev->dev_spec.vd_inf.reset_to = 1403 max_t(u8, vdinf->vd_reset_to, 1404 MPI3MR_INTADMCMD_TIMEOUT); 1405 tgtdev->dev_spec.vd_inf.abort_to = 1406 max_t(u8, vdinf->vd_abort_to, 1407 MPI3MR_INTADMCMD_TIMEOUT); 1408 tgtdev->dev_spec.vd_inf.tg_id = vdinf_io_throttle_group; 1409 tgtdev->dev_spec.vd_inf.tg_high = 1410 le16_to_cpu(vdinf->io_throttle_group_high) * 2048; 1411 tgtdev->dev_spec.vd_inf.tg_low = 1412 le16_to_cpu(vdinf->io_throttle_group_low) * 2048; 1413 if (vdinf_io_throttle_group < mrioc->num_io_throttle_group) { 1414 tg = mrioc->throttle_groups + vdinf_io_throttle_group; 1415 tg->id = vdinf_io_throttle_group; 1416 tg->high = tgtdev->dev_spec.vd_inf.tg_high; 1417 tg->low = tgtdev->dev_spec.vd_inf.tg_low; 1418 tg->qd_reduction = 1419 tgtdev->dev_spec.vd_inf.tg_qd_reduction; 1420 if (is_added == true) 1421 tg->fw_qd = tgtdev->q_depth; 1422 tg->modified_qd = tgtdev->q_depth; 1423 } 1424 tgtdev->dev_spec.vd_inf.tg = tg; 1425 if (scsi_tgt_priv_data) 1426 scsi_tgt_priv_data->throttle_group = tg; 1427 break; 1428 } 1429 default: 1430 break; 1431 } 1432 } 1433 1434 /** 1435 * mpi3mr_devstatuschg_evt_bh - DevStatusChange evt bottomhalf 1436 * @mrioc: Adapter instance reference 1437 * @fwevt: Firmware event information. 1438 * 1439 * Process Device status Change event and based on device's new 1440 * information, either expose the device to the upper layers, or 1441 * remove the device from upper layers. 1442 * 1443 * Return: Nothing. 1444 */ 1445 static void mpi3mr_devstatuschg_evt_bh(struct mpi3mr_ioc *mrioc, 1446 struct mpi3mr_fwevt *fwevt) 1447 { 1448 u16 dev_handle = 0; 1449 u8 uhide = 0, delete = 0, cleanup = 0; 1450 struct mpi3mr_tgt_dev *tgtdev = NULL; 1451 struct mpi3_event_data_device_status_change *evtdata = 1452 (struct mpi3_event_data_device_status_change *)fwevt->event_data; 1453 1454 dev_handle = le16_to_cpu(evtdata->dev_handle); 1455 dprint_event_bh(mrioc, 1456 "processing device status change event bottom half for handle(0x%04x), rc(0x%02x)\n", 1457 dev_handle, evtdata->reason_code); 1458 switch (evtdata->reason_code) { 1459 case MPI3_EVENT_DEV_STAT_RC_HIDDEN: 1460 delete = 1; 1461 break; 1462 case MPI3_EVENT_DEV_STAT_RC_NOT_HIDDEN: 1463 uhide = 1; 1464 break; 1465 case MPI3_EVENT_DEV_STAT_RC_VD_NOT_RESPONDING: 1466 delete = 1; 1467 cleanup = 1; 1468 break; 1469 default: 1470 ioc_info(mrioc, "%s :Unhandled reason code(0x%x)\n", __func__, 1471 evtdata->reason_code); 1472 break; 1473 } 1474 1475 tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle); 1476 if (!tgtdev) { 1477 dprint_event_bh(mrioc, 1478 "processing device status change event bottom half,\n" 1479 "cannot identify target device for handle(0x%04x), rc(0x%02x)\n", 1480 dev_handle, evtdata->reason_code); 1481 goto out; 1482 } 1483 if (uhide) { 1484 tgtdev->is_hidden = 0; 1485 if (!tgtdev->host_exposed) 1486 mpi3mr_report_tgtdev_to_host(mrioc, tgtdev->perst_id); 1487 } 1488 1489 if (delete) 1490 mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev); 1491 1492 if (cleanup) { 1493 mpi3mr_tgtdev_del_from_list(mrioc, tgtdev, false); 1494 mpi3mr_tgtdev_put(tgtdev); 1495 } 1496 1497 out: 1498 if (tgtdev) 1499 mpi3mr_tgtdev_put(tgtdev); 1500 } 1501 1502 /** 1503 * mpi3mr_devinfochg_evt_bh - DeviceInfoChange evt bottomhalf 1504 * @mrioc: Adapter instance reference 1505 * @dev_pg0: New device page0 1506 * 1507 * Process Device Info Change event and based on device's new 1508 * information, either expose the device to the upper layers, or 1509 * remove the device from upper layers or update the details of 1510 * the device. 1511 * 1512 * Return: Nothing. 1513 */ 1514 static void mpi3mr_devinfochg_evt_bh(struct mpi3mr_ioc *mrioc, 1515 struct mpi3_device_page0 *dev_pg0) 1516 { 1517 struct mpi3mr_tgt_dev *tgtdev = NULL; 1518 u16 dev_handle = 0, perst_id = 0; 1519 1520 perst_id = le16_to_cpu(dev_pg0->persistent_id); 1521 dev_handle = le16_to_cpu(dev_pg0->dev_handle); 1522 dprint_event_bh(mrioc, 1523 "processing device info change event bottom half for handle(0x%04x), perst_id(%d)\n", 1524 dev_handle, perst_id); 1525 tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle); 1526 if (!tgtdev) { 1527 dprint_event_bh(mrioc, 1528 "cannot identify target device for device info\n" 1529 "change event handle(0x%04x), perst_id(%d)\n", 1530 dev_handle, perst_id); 1531 goto out; 1532 } 1533 mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0, false); 1534 if (!tgtdev->is_hidden && !tgtdev->host_exposed) 1535 mpi3mr_report_tgtdev_to_host(mrioc, perst_id); 1536 if (tgtdev->is_hidden && tgtdev->host_exposed) 1537 mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev); 1538 if (!tgtdev->is_hidden && tgtdev->host_exposed && tgtdev->starget) 1539 starget_for_each_device(tgtdev->starget, (void *)tgtdev, 1540 mpi3mr_update_sdev); 1541 out: 1542 if (tgtdev) 1543 mpi3mr_tgtdev_put(tgtdev); 1544 } 1545 1546 /** 1547 * mpi3mr_free_enclosure_list - release enclosures 1548 * @mrioc: Adapter instance reference 1549 * 1550 * Free memory allocated during encloure add. 1551 * 1552 * Return nothing. 1553 */ 1554 void mpi3mr_free_enclosure_list(struct mpi3mr_ioc *mrioc) 1555 { 1556 struct mpi3mr_enclosure_node *enclosure_dev, *enclosure_dev_next; 1557 1558 list_for_each_entry_safe(enclosure_dev, 1559 enclosure_dev_next, &mrioc->enclosure_list, list) { 1560 list_del(&enclosure_dev->list); 1561 kfree(enclosure_dev); 1562 } 1563 } 1564 1565 /** 1566 * mpi3mr_enclosure_find_by_handle - enclosure search by handle 1567 * @mrioc: Adapter instance reference 1568 * @handle: Firmware device handle of the enclosure 1569 * 1570 * This searches for enclosure device based on handle, then returns the 1571 * enclosure object. 1572 * 1573 * Return: Enclosure object reference or NULL 1574 */ 1575 struct mpi3mr_enclosure_node *mpi3mr_enclosure_find_by_handle( 1576 struct mpi3mr_ioc *mrioc, u16 handle) 1577 { 1578 struct mpi3mr_enclosure_node *enclosure_dev, *r = NULL; 1579 1580 list_for_each_entry(enclosure_dev, &mrioc->enclosure_list, list) { 1581 if (le16_to_cpu(enclosure_dev->pg0.enclosure_handle) != handle) 1582 continue; 1583 r = enclosure_dev; 1584 goto out; 1585 } 1586 out: 1587 return r; 1588 } 1589 1590 /** 1591 * mpi3mr_process_trigger_data_event_bh - Process trigger event 1592 * data 1593 * @mrioc: Adapter instance reference 1594 * @event_data: Event data 1595 * 1596 * This function releases diage buffers or issues diag fault 1597 * based on trigger conditions 1598 * 1599 * Return: Nothing 1600 */ 1601 static void mpi3mr_process_trigger_data_event_bh(struct mpi3mr_ioc *mrioc, 1602 struct trigger_event_data *event_data) 1603 { 1604 struct diag_buffer_desc *trace_hdb = event_data->trace_hdb; 1605 struct diag_buffer_desc *fw_hdb = event_data->fw_hdb; 1606 unsigned long flags; 1607 int retval = 0; 1608 u8 trigger_type = event_data->trigger_type; 1609 union mpi3mr_trigger_data *trigger_data = 1610 &event_data->trigger_specific_data; 1611 1612 if (event_data->snapdump) { 1613 if (trace_hdb) 1614 mpi3mr_set_trigger_data_in_hdb(trace_hdb, trigger_type, 1615 trigger_data, 1); 1616 if (fw_hdb) 1617 mpi3mr_set_trigger_data_in_hdb(fw_hdb, trigger_type, 1618 trigger_data, 1); 1619 mpi3mr_soft_reset_handler(mrioc, 1620 MPI3MR_RESET_FROM_TRIGGER, 1); 1621 return; 1622 } 1623 1624 if (trace_hdb) { 1625 retval = mpi3mr_issue_diag_buf_release(mrioc, trace_hdb); 1626 if (!retval) { 1627 mpi3mr_set_trigger_data_in_hdb(trace_hdb, trigger_type, 1628 trigger_data, 1); 1629 } 1630 spin_lock_irqsave(&mrioc->trigger_lock, flags); 1631 mrioc->trace_release_trigger_active = false; 1632 spin_unlock_irqrestore(&mrioc->trigger_lock, flags); 1633 } 1634 if (fw_hdb) { 1635 retval = mpi3mr_issue_diag_buf_release(mrioc, fw_hdb); 1636 if (!retval) { 1637 mpi3mr_set_trigger_data_in_hdb(fw_hdb, trigger_type, 1638 trigger_data, 1); 1639 } 1640 spin_lock_irqsave(&mrioc->trigger_lock, flags); 1641 mrioc->fw_release_trigger_active = false; 1642 spin_unlock_irqrestore(&mrioc->trigger_lock, flags); 1643 } 1644 } 1645 1646 /** 1647 * mpi3mr_encldev_add_chg_evt_debug - debug for enclosure event 1648 * @mrioc: Adapter instance reference 1649 * @encl_pg0: Enclosure page 0. 1650 * @is_added: Added event or not 1651 * 1652 * Return nothing. 1653 */ 1654 static void mpi3mr_encldev_add_chg_evt_debug(struct mpi3mr_ioc *mrioc, 1655 struct mpi3_enclosure_page0 *encl_pg0, u8 is_added) 1656 { 1657 char *reason_str = NULL; 1658 1659 if (!(mrioc->logging_level & MPI3_DEBUG_EVENT_WORK_TASK)) 1660 return; 1661 1662 if (is_added) 1663 reason_str = "enclosure added"; 1664 else 1665 reason_str = "enclosure dev status changed"; 1666 1667 ioc_info(mrioc, 1668 "%s: handle(0x%04x), enclosure logical id(0x%016llx)\n", 1669 reason_str, le16_to_cpu(encl_pg0->enclosure_handle), 1670 (unsigned long long)le64_to_cpu(encl_pg0->enclosure_logical_id)); 1671 ioc_info(mrioc, 1672 "number of slots(%d), port(%d), flags(0x%04x), present(%d)\n", 1673 le16_to_cpu(encl_pg0->num_slots), encl_pg0->io_unit_port, 1674 le16_to_cpu(encl_pg0->flags), 1675 ((le16_to_cpu(encl_pg0->flags) & 1676 MPI3_ENCLS0_FLAGS_ENCL_DEV_PRESENT_MASK) >> 4)); 1677 } 1678 1679 /** 1680 * mpi3mr_encldev_add_chg_evt_bh - Enclosure evt bottomhalf 1681 * @mrioc: Adapter instance reference 1682 * @fwevt: Firmware event reference 1683 * 1684 * Prints information about the Enclosure device status or 1685 * Enclosure add events if logging is enabled and add or remove 1686 * the enclosure from the controller's internal list of 1687 * enclosures. 1688 * 1689 * Return: Nothing. 1690 */ 1691 static void mpi3mr_encldev_add_chg_evt_bh(struct mpi3mr_ioc *mrioc, 1692 struct mpi3mr_fwevt *fwevt) 1693 { 1694 struct mpi3mr_enclosure_node *enclosure_dev = NULL; 1695 struct mpi3_enclosure_page0 *encl_pg0; 1696 u16 encl_handle; 1697 u8 added, present; 1698 1699 encl_pg0 = (struct mpi3_enclosure_page0 *) fwevt->event_data; 1700 added = (fwevt->event_id == MPI3_EVENT_ENCL_DEVICE_ADDED) ? 1 : 0; 1701 mpi3mr_encldev_add_chg_evt_debug(mrioc, encl_pg0, added); 1702 1703 1704 encl_handle = le16_to_cpu(encl_pg0->enclosure_handle); 1705 present = ((le16_to_cpu(encl_pg0->flags) & 1706 MPI3_ENCLS0_FLAGS_ENCL_DEV_PRESENT_MASK) >> 4); 1707 1708 if (encl_handle) 1709 enclosure_dev = mpi3mr_enclosure_find_by_handle(mrioc, 1710 encl_handle); 1711 if (!enclosure_dev && present) { 1712 enclosure_dev = 1713 kzalloc(sizeof(struct mpi3mr_enclosure_node), 1714 GFP_KERNEL); 1715 if (!enclosure_dev) 1716 return; 1717 list_add_tail(&enclosure_dev->list, 1718 &mrioc->enclosure_list); 1719 } 1720 if (enclosure_dev) { 1721 if (!present) { 1722 list_del(&enclosure_dev->list); 1723 kfree(enclosure_dev); 1724 } else 1725 memcpy(&enclosure_dev->pg0, encl_pg0, 1726 sizeof(enclosure_dev->pg0)); 1727 1728 } 1729 } 1730 1731 /** 1732 * mpi3mr_sastopochg_evt_debug - SASTopoChange details 1733 * @mrioc: Adapter instance reference 1734 * @event_data: SAS topology change list event data 1735 * 1736 * Prints information about the SAS topology change event. 1737 * 1738 * Return: Nothing. 1739 */ 1740 static void 1741 mpi3mr_sastopochg_evt_debug(struct mpi3mr_ioc *mrioc, 1742 struct mpi3_event_data_sas_topology_change_list *event_data) 1743 { 1744 int i; 1745 u16 handle; 1746 u8 reason_code, phy_number; 1747 char *status_str = NULL; 1748 u8 link_rate, prev_link_rate; 1749 1750 switch (event_data->exp_status) { 1751 case MPI3_EVENT_SAS_TOPO_ES_NOT_RESPONDING: 1752 status_str = "remove"; 1753 break; 1754 case MPI3_EVENT_SAS_TOPO_ES_RESPONDING: 1755 status_str = "responding"; 1756 break; 1757 case MPI3_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING: 1758 status_str = "remove delay"; 1759 break; 1760 case MPI3_EVENT_SAS_TOPO_ES_NO_EXPANDER: 1761 status_str = "direct attached"; 1762 break; 1763 default: 1764 status_str = "unknown status"; 1765 break; 1766 } 1767 ioc_info(mrioc, "%s :sas topology change: (%s)\n", 1768 __func__, status_str); 1769 ioc_info(mrioc, 1770 "%s :\texpander_handle(0x%04x), port(%d), enclosure_handle(0x%04x) start_phy(%02d), num_entries(%d)\n", 1771 __func__, le16_to_cpu(event_data->expander_dev_handle), 1772 event_data->io_unit_port, 1773 le16_to_cpu(event_data->enclosure_handle), 1774 event_data->start_phy_num, event_data->num_entries); 1775 for (i = 0; i < event_data->num_entries; i++) { 1776 handle = le16_to_cpu(event_data->phy_entry[i].attached_dev_handle); 1777 if (!handle) 1778 continue; 1779 phy_number = event_data->start_phy_num + i; 1780 reason_code = event_data->phy_entry[i].status & 1781 MPI3_EVENT_SAS_TOPO_PHY_RC_MASK; 1782 switch (reason_code) { 1783 case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING: 1784 status_str = "target remove"; 1785 break; 1786 case MPI3_EVENT_SAS_TOPO_PHY_RC_DELAY_NOT_RESPONDING: 1787 status_str = "delay target remove"; 1788 break; 1789 case MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED: 1790 status_str = "link status change"; 1791 break; 1792 case MPI3_EVENT_SAS_TOPO_PHY_RC_NO_CHANGE: 1793 status_str = "link status no change"; 1794 break; 1795 case MPI3_EVENT_SAS_TOPO_PHY_RC_RESPONDING: 1796 status_str = "target responding"; 1797 break; 1798 default: 1799 status_str = "unknown"; 1800 break; 1801 } 1802 link_rate = event_data->phy_entry[i].link_rate >> 4; 1803 prev_link_rate = event_data->phy_entry[i].link_rate & 0xF; 1804 ioc_info(mrioc, 1805 "%s :\tphy(%02d), attached_handle(0x%04x): %s: link rate: new(0x%02x), old(0x%02x)\n", 1806 __func__, phy_number, handle, status_str, link_rate, 1807 prev_link_rate); 1808 } 1809 } 1810 1811 /** 1812 * mpi3mr_sastopochg_evt_bh - SASTopologyChange evt bottomhalf 1813 * @mrioc: Adapter instance reference 1814 * @fwevt: Firmware event reference 1815 * 1816 * Prints information about the SAS topology change event and 1817 * for "not responding" event code, removes the device from the 1818 * upper layers. 1819 * 1820 * Return: Nothing. 1821 */ 1822 static void mpi3mr_sastopochg_evt_bh(struct mpi3mr_ioc *mrioc, 1823 struct mpi3mr_fwevt *fwevt) 1824 { 1825 struct mpi3_event_data_sas_topology_change_list *event_data = 1826 (struct mpi3_event_data_sas_topology_change_list *)fwevt->event_data; 1827 int i; 1828 u16 handle; 1829 u8 reason_code; 1830 u64 exp_sas_address = 0, parent_sas_address = 0; 1831 struct mpi3mr_hba_port *hba_port = NULL; 1832 struct mpi3mr_tgt_dev *tgtdev = NULL; 1833 struct mpi3mr_sas_node *sas_expander = NULL; 1834 unsigned long flags; 1835 u8 link_rate, prev_link_rate, parent_phy_number; 1836 1837 mpi3mr_sastopochg_evt_debug(mrioc, event_data); 1838 if (mrioc->sas_transport_enabled) { 1839 hba_port = mpi3mr_get_hba_port_by_id(mrioc, 1840 event_data->io_unit_port); 1841 if (le16_to_cpu(event_data->expander_dev_handle)) { 1842 spin_lock_irqsave(&mrioc->sas_node_lock, flags); 1843 sas_expander = __mpi3mr_expander_find_by_handle(mrioc, 1844 le16_to_cpu(event_data->expander_dev_handle)); 1845 if (sas_expander) { 1846 exp_sas_address = sas_expander->sas_address; 1847 hba_port = sas_expander->hba_port; 1848 } 1849 spin_unlock_irqrestore(&mrioc->sas_node_lock, flags); 1850 parent_sas_address = exp_sas_address; 1851 } else 1852 parent_sas_address = mrioc->sas_hba.sas_address; 1853 } 1854 1855 for (i = 0; i < event_data->num_entries; i++) { 1856 if (fwevt->discard) 1857 return; 1858 handle = le16_to_cpu(event_data->phy_entry[i].attached_dev_handle); 1859 if (!handle) 1860 continue; 1861 tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle); 1862 if (!tgtdev) 1863 continue; 1864 1865 reason_code = event_data->phy_entry[i].status & 1866 MPI3_EVENT_SAS_TOPO_PHY_RC_MASK; 1867 1868 switch (reason_code) { 1869 case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING: 1870 if (tgtdev->host_exposed) 1871 mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev); 1872 mpi3mr_tgtdev_del_from_list(mrioc, tgtdev, false); 1873 mpi3mr_tgtdev_put(tgtdev); 1874 break; 1875 case MPI3_EVENT_SAS_TOPO_PHY_RC_RESPONDING: 1876 case MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED: 1877 case MPI3_EVENT_SAS_TOPO_PHY_RC_NO_CHANGE: 1878 { 1879 if (!mrioc->sas_transport_enabled || tgtdev->non_stl 1880 || tgtdev->is_hidden) 1881 break; 1882 link_rate = event_data->phy_entry[i].link_rate >> 4; 1883 prev_link_rate = event_data->phy_entry[i].link_rate & 0xF; 1884 if (link_rate == prev_link_rate) 1885 break; 1886 if (!parent_sas_address) 1887 break; 1888 parent_phy_number = event_data->start_phy_num + i; 1889 mpi3mr_update_links(mrioc, parent_sas_address, handle, 1890 parent_phy_number, link_rate, hba_port); 1891 break; 1892 } 1893 default: 1894 break; 1895 } 1896 if (tgtdev) 1897 mpi3mr_tgtdev_put(tgtdev); 1898 } 1899 1900 if (mrioc->sas_transport_enabled && (event_data->exp_status == 1901 MPI3_EVENT_SAS_TOPO_ES_NOT_RESPONDING)) { 1902 if (sas_expander) 1903 mpi3mr_expander_remove(mrioc, exp_sas_address, 1904 hba_port); 1905 } 1906 } 1907 1908 /** 1909 * mpi3mr_pcietopochg_evt_debug - PCIeTopoChange details 1910 * @mrioc: Adapter instance reference 1911 * @event_data: PCIe topology change list event data 1912 * 1913 * Prints information about the PCIe topology change event. 1914 * 1915 * Return: Nothing. 1916 */ 1917 static void 1918 mpi3mr_pcietopochg_evt_debug(struct mpi3mr_ioc *mrioc, 1919 struct mpi3_event_data_pcie_topology_change_list *event_data) 1920 { 1921 int i; 1922 u16 handle; 1923 u16 reason_code; 1924 u8 port_number; 1925 char *status_str = NULL; 1926 u8 link_rate, prev_link_rate; 1927 1928 switch (event_data->switch_status) { 1929 case MPI3_EVENT_PCIE_TOPO_SS_NOT_RESPONDING: 1930 status_str = "remove"; 1931 break; 1932 case MPI3_EVENT_PCIE_TOPO_SS_RESPONDING: 1933 status_str = "responding"; 1934 break; 1935 case MPI3_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING: 1936 status_str = "remove delay"; 1937 break; 1938 case MPI3_EVENT_PCIE_TOPO_SS_NO_PCIE_SWITCH: 1939 status_str = "direct attached"; 1940 break; 1941 default: 1942 status_str = "unknown status"; 1943 break; 1944 } 1945 ioc_info(mrioc, "%s :pcie topology change: (%s)\n", 1946 __func__, status_str); 1947 ioc_info(mrioc, 1948 "%s :\tswitch_handle(0x%04x), enclosure_handle(0x%04x) start_port(%02d), num_entries(%d)\n", 1949 __func__, le16_to_cpu(event_data->switch_dev_handle), 1950 le16_to_cpu(event_data->enclosure_handle), 1951 event_data->start_port_num, event_data->num_entries); 1952 for (i = 0; i < event_data->num_entries; i++) { 1953 handle = 1954 le16_to_cpu(event_data->port_entry[i].attached_dev_handle); 1955 if (!handle) 1956 continue; 1957 port_number = event_data->start_port_num + i; 1958 reason_code = event_data->port_entry[i].port_status; 1959 switch (reason_code) { 1960 case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING: 1961 status_str = "target remove"; 1962 break; 1963 case MPI3_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING: 1964 status_str = "delay target remove"; 1965 break; 1966 case MPI3_EVENT_PCIE_TOPO_PS_PORT_CHANGED: 1967 status_str = "link status change"; 1968 break; 1969 case MPI3_EVENT_PCIE_TOPO_PS_NO_CHANGE: 1970 status_str = "link status no change"; 1971 break; 1972 case MPI3_EVENT_PCIE_TOPO_PS_RESPONDING: 1973 status_str = "target responding"; 1974 break; 1975 default: 1976 status_str = "unknown"; 1977 break; 1978 } 1979 link_rate = event_data->port_entry[i].current_port_info & 1980 MPI3_EVENT_PCIE_TOPO_PI_RATE_MASK; 1981 prev_link_rate = event_data->port_entry[i].previous_port_info & 1982 MPI3_EVENT_PCIE_TOPO_PI_RATE_MASK; 1983 ioc_info(mrioc, 1984 "%s :\tport(%02d), attached_handle(0x%04x): %s: link rate: new(0x%02x), old(0x%02x)\n", 1985 __func__, port_number, handle, status_str, link_rate, 1986 prev_link_rate); 1987 } 1988 } 1989 1990 /** 1991 * mpi3mr_pcietopochg_evt_bh - PCIeTopologyChange evt bottomhalf 1992 * @mrioc: Adapter instance reference 1993 * @fwevt: Firmware event reference 1994 * 1995 * Prints information about the PCIe topology change event and 1996 * for "not responding" event code, removes the device from the 1997 * upper layers. 1998 * 1999 * Return: Nothing. 2000 */ 2001 static void mpi3mr_pcietopochg_evt_bh(struct mpi3mr_ioc *mrioc, 2002 struct mpi3mr_fwevt *fwevt) 2003 { 2004 struct mpi3_event_data_pcie_topology_change_list *event_data = 2005 (struct mpi3_event_data_pcie_topology_change_list *)fwevt->event_data; 2006 int i; 2007 u16 handle; 2008 u8 reason_code; 2009 struct mpi3mr_tgt_dev *tgtdev = NULL; 2010 2011 mpi3mr_pcietopochg_evt_debug(mrioc, event_data); 2012 2013 for (i = 0; i < event_data->num_entries; i++) { 2014 if (fwevt->discard) 2015 return; 2016 handle = 2017 le16_to_cpu(event_data->port_entry[i].attached_dev_handle); 2018 if (!handle) 2019 continue; 2020 tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle); 2021 if (!tgtdev) 2022 continue; 2023 2024 reason_code = event_data->port_entry[i].port_status; 2025 2026 switch (reason_code) { 2027 case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING: 2028 if (tgtdev->host_exposed) 2029 mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev); 2030 mpi3mr_tgtdev_del_from_list(mrioc, tgtdev, false); 2031 mpi3mr_tgtdev_put(tgtdev); 2032 break; 2033 default: 2034 break; 2035 } 2036 if (tgtdev) 2037 mpi3mr_tgtdev_put(tgtdev); 2038 } 2039 } 2040 2041 /** 2042 * mpi3mr_logdata_evt_bh - Log data event bottomhalf 2043 * @mrioc: Adapter instance reference 2044 * @fwevt: Firmware event reference 2045 * 2046 * Extracts the event data and calls application interfacing 2047 * function to process the event further. 2048 * 2049 * Return: Nothing. 2050 */ 2051 static void mpi3mr_logdata_evt_bh(struct mpi3mr_ioc *mrioc, 2052 struct mpi3mr_fwevt *fwevt) 2053 { 2054 mpi3mr_app_save_logdata_th(mrioc, fwevt->event_data, 2055 fwevt->event_data_size); 2056 } 2057 2058 /** 2059 * mpi3mr_update_sdev_qd - Update SCSI device queue depath 2060 * @sdev: SCSI device reference 2061 * @data: Queue depth reference 2062 * 2063 * This is an iterator function called for each SCSI device in a 2064 * target to update the QD of each SCSI device. 2065 * 2066 * Return: Nothing. 2067 */ 2068 static void mpi3mr_update_sdev_qd(struct scsi_device *sdev, void *data) 2069 { 2070 u16 *q_depth = (u16 *)data; 2071 2072 scsi_change_queue_depth(sdev, (int)*q_depth); 2073 sdev->max_queue_depth = sdev->queue_depth; 2074 } 2075 2076 /** 2077 * mpi3mr_set_qd_for_all_vd_in_tg -set QD for TG VDs 2078 * @mrioc: Adapter instance reference 2079 * @tg: Throttle group information pointer 2080 * 2081 * Accessor to reduce QD for each device associated with the 2082 * given throttle group. 2083 * 2084 * Return: None. 2085 */ 2086 static void mpi3mr_set_qd_for_all_vd_in_tg(struct mpi3mr_ioc *mrioc, 2087 struct mpi3mr_throttle_group_info *tg) 2088 { 2089 unsigned long flags; 2090 struct mpi3mr_tgt_dev *tgtdev; 2091 struct mpi3mr_stgt_priv_data *tgt_priv; 2092 2093 2094 spin_lock_irqsave(&mrioc->tgtdev_lock, flags); 2095 list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) { 2096 if (tgtdev->starget && tgtdev->starget->hostdata) { 2097 tgt_priv = tgtdev->starget->hostdata; 2098 if (tgt_priv->throttle_group == tg) { 2099 dprint_event_bh(mrioc, 2100 "updating qd due to throttling for persist_id(%d) original_qd(%d), reduced_qd (%d)\n", 2101 tgt_priv->perst_id, tgtdev->q_depth, 2102 tg->modified_qd); 2103 starget_for_each_device(tgtdev->starget, 2104 (void *)&tg->modified_qd, 2105 mpi3mr_update_sdev_qd); 2106 } 2107 } 2108 } 2109 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 2110 } 2111 2112 /** 2113 * mpi3mr_fwevt_bh - Firmware event bottomhalf handler 2114 * @mrioc: Adapter instance reference 2115 * @fwevt: Firmware event reference 2116 * 2117 * Identifies the firmware event and calls corresponding bottomg 2118 * half handler and sends event acknowledgment if required. 2119 * 2120 * Return: Nothing. 2121 */ 2122 static void mpi3mr_fwevt_bh(struct mpi3mr_ioc *mrioc, 2123 struct mpi3mr_fwevt *fwevt) 2124 { 2125 struct mpi3_device_page0 *dev_pg0 = NULL; 2126 u16 perst_id, handle, dev_info; 2127 struct mpi3_device0_sas_sata_format *sasinf = NULL; 2128 unsigned int timeout; 2129 2130 mpi3mr_fwevt_del_from_list(mrioc, fwevt); 2131 mrioc->current_event = fwevt; 2132 2133 if (mrioc->stop_drv_processing) { 2134 dprint_event_bh(mrioc, "ignoring event(0x%02x) in the bottom half handler\n" 2135 "due to stop_drv_processing\n", fwevt->event_id); 2136 goto out; 2137 } 2138 2139 if (mrioc->unrecoverable) { 2140 dprint_event_bh(mrioc, 2141 "ignoring event(0x%02x) in bottom half handler due to unrecoverable controller\n", 2142 fwevt->event_id); 2143 goto out; 2144 } 2145 2146 if (!fwevt->process_evt) 2147 goto evt_ack; 2148 2149 dprint_event_bh(mrioc, "processing event(0x%02x) -(0x%08x) in the bottom half handler\n", 2150 fwevt->event_id, fwevt->evt_ctx); 2151 2152 switch (fwevt->event_id) { 2153 case MPI3_EVENT_DEVICE_ADDED: 2154 { 2155 dev_pg0 = (struct mpi3_device_page0 *)fwevt->event_data; 2156 perst_id = le16_to_cpu(dev_pg0->persistent_id); 2157 handle = le16_to_cpu(dev_pg0->dev_handle); 2158 if (perst_id != MPI3_DEVICE0_PERSISTENTID_INVALID) 2159 mpi3mr_report_tgtdev_to_host(mrioc, perst_id); 2160 else if (mrioc->sas_transport_enabled && 2161 (dev_pg0->device_form == MPI3_DEVICE_DEVFORM_SAS_SATA)) { 2162 sasinf = &dev_pg0->device_specific.sas_sata_format; 2163 dev_info = le16_to_cpu(sasinf->device_info); 2164 if (!mrioc->sas_hba.num_phys) 2165 mpi3mr_sas_host_add(mrioc); 2166 else 2167 mpi3mr_sas_host_refresh(mrioc); 2168 2169 if (mpi3mr_is_expander_device(dev_info)) 2170 mpi3mr_expander_add(mrioc, handle); 2171 } 2172 break; 2173 } 2174 case MPI3_EVENT_DEVICE_INFO_CHANGED: 2175 { 2176 dev_pg0 = (struct mpi3_device_page0 *)fwevt->event_data; 2177 perst_id = le16_to_cpu(dev_pg0->persistent_id); 2178 if (perst_id != MPI3_DEVICE0_PERSISTENTID_INVALID) 2179 mpi3mr_devinfochg_evt_bh(mrioc, dev_pg0); 2180 break; 2181 } 2182 case MPI3_EVENT_DEVICE_STATUS_CHANGE: 2183 { 2184 mpi3mr_devstatuschg_evt_bh(mrioc, fwevt); 2185 break; 2186 } 2187 case MPI3_EVENT_ENCL_DEVICE_ADDED: 2188 case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE: 2189 { 2190 mpi3mr_encldev_add_chg_evt_bh(mrioc, fwevt); 2191 break; 2192 } 2193 2194 case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST: 2195 { 2196 mpi3mr_sastopochg_evt_bh(mrioc, fwevt); 2197 break; 2198 } 2199 case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST: 2200 { 2201 mpi3mr_pcietopochg_evt_bh(mrioc, fwevt); 2202 break; 2203 } 2204 case MPI3_EVENT_LOG_DATA: 2205 { 2206 mpi3mr_logdata_evt_bh(mrioc, fwevt); 2207 break; 2208 } 2209 case MPI3MR_DRIVER_EVENT_TG_QD_REDUCTION: 2210 { 2211 struct mpi3mr_throttle_group_info *tg; 2212 2213 tg = *(struct mpi3mr_throttle_group_info **)fwevt->event_data; 2214 dprint_event_bh(mrioc, 2215 "qd reduction event processed for tg_id(%d) reduction_needed(%d)\n", 2216 tg->id, tg->need_qd_reduction); 2217 if (tg->need_qd_reduction) { 2218 mpi3mr_set_qd_for_all_vd_in_tg(mrioc, tg); 2219 tg->need_qd_reduction = 0; 2220 } 2221 break; 2222 } 2223 case MPI3_EVENT_WAIT_FOR_DEVICES_TO_REFRESH: 2224 { 2225 timeout = MPI3MR_RESET_TIMEOUT * 2; 2226 while ((mrioc->device_refresh_on || mrioc->block_on_pci_err) && 2227 !mrioc->unrecoverable && !mrioc->pci_err_recovery) { 2228 msleep(500); 2229 if (!timeout--) { 2230 mrioc->unrecoverable = 1; 2231 break; 2232 } 2233 } 2234 2235 if (mrioc->unrecoverable || mrioc->pci_err_recovery) 2236 break; 2237 2238 dprint_event_bh(mrioc, 2239 "scan for non responding and newly added devices after soft reset started\n"); 2240 if (mrioc->sas_transport_enabled) { 2241 mpi3mr_refresh_sas_ports(mrioc); 2242 mpi3mr_refresh_expanders(mrioc); 2243 } 2244 mpi3mr_refresh_tgtdevs(mrioc); 2245 ioc_info(mrioc, 2246 "scan for non responding and newly added devices after soft reset completed\n"); 2247 break; 2248 } 2249 case MPI3MR_DRIVER_EVENT_PROCESS_TRIGGER: 2250 { 2251 mpi3mr_process_trigger_data_event_bh(mrioc, 2252 (struct trigger_event_data *)fwevt->event_data); 2253 break; 2254 } 2255 default: 2256 break; 2257 } 2258 2259 evt_ack: 2260 if (fwevt->send_ack) 2261 mpi3mr_process_event_ack(mrioc, fwevt->event_id, 2262 fwevt->evt_ctx); 2263 out: 2264 /* Put fwevt reference count to neutralize kref_init increment */ 2265 mpi3mr_fwevt_put(fwevt); 2266 mrioc->current_event = NULL; 2267 } 2268 2269 /** 2270 * mpi3mr_fwevt_worker - Firmware event worker 2271 * @work: Work struct containing firmware event 2272 * 2273 * Extracts the firmware event and calls mpi3mr_fwevt_bh. 2274 * 2275 * Return: Nothing. 2276 */ 2277 static void mpi3mr_fwevt_worker(struct work_struct *work) 2278 { 2279 struct mpi3mr_fwevt *fwevt = container_of(work, struct mpi3mr_fwevt, 2280 work); 2281 mpi3mr_fwevt_bh(fwevt->mrioc, fwevt); 2282 /* 2283 * Put fwevt reference count after 2284 * dequeuing it from worker queue 2285 */ 2286 mpi3mr_fwevt_put(fwevt); 2287 } 2288 2289 /** 2290 * mpi3mr_create_tgtdev - Create and add a target device 2291 * @mrioc: Adapter instance reference 2292 * @dev_pg0: Device Page 0 data 2293 * 2294 * If the device specified by the device page 0 data is not 2295 * present in the driver's internal list, allocate the memory 2296 * for the device, populate the data and add to the list, else 2297 * update the device data. The key is persistent ID. 2298 * 2299 * Return: 0 on success, -ENOMEM on memory allocation failure 2300 */ 2301 static int mpi3mr_create_tgtdev(struct mpi3mr_ioc *mrioc, 2302 struct mpi3_device_page0 *dev_pg0) 2303 { 2304 int retval = 0; 2305 struct mpi3mr_tgt_dev *tgtdev = NULL; 2306 u16 perst_id = 0; 2307 unsigned long flags; 2308 2309 perst_id = le16_to_cpu(dev_pg0->persistent_id); 2310 if (perst_id == MPI3_DEVICE0_PERSISTENTID_INVALID) 2311 return retval; 2312 2313 spin_lock_irqsave(&mrioc->tgtdev_lock, flags); 2314 tgtdev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, perst_id); 2315 if (tgtdev) 2316 tgtdev->state = MPI3MR_DEV_CREATED; 2317 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 2318 2319 if (tgtdev) { 2320 mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0, true); 2321 mpi3mr_tgtdev_put(tgtdev); 2322 } else { 2323 tgtdev = mpi3mr_alloc_tgtdev(); 2324 if (!tgtdev) 2325 return -ENOMEM; 2326 mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0, true); 2327 mpi3mr_tgtdev_add_to_list(mrioc, tgtdev); 2328 } 2329 2330 return retval; 2331 } 2332 2333 /** 2334 * mpi3mr_flush_delayed_cmd_lists - Flush pending commands 2335 * @mrioc: Adapter instance reference 2336 * 2337 * Flush pending commands in the delayed lists due to a 2338 * controller reset or driver removal as a cleanup. 2339 * 2340 * Return: Nothing 2341 */ 2342 void mpi3mr_flush_delayed_cmd_lists(struct mpi3mr_ioc *mrioc) 2343 { 2344 struct delayed_dev_rmhs_node *_rmhs_node; 2345 struct delayed_evt_ack_node *_evtack_node; 2346 2347 dprint_reset(mrioc, "flushing delayed dev_remove_hs commands\n"); 2348 while (!list_empty(&mrioc->delayed_rmhs_list)) { 2349 _rmhs_node = list_entry(mrioc->delayed_rmhs_list.next, 2350 struct delayed_dev_rmhs_node, list); 2351 list_del(&_rmhs_node->list); 2352 kfree(_rmhs_node); 2353 } 2354 dprint_reset(mrioc, "flushing delayed event ack commands\n"); 2355 while (!list_empty(&mrioc->delayed_evtack_cmds_list)) { 2356 _evtack_node = list_entry(mrioc->delayed_evtack_cmds_list.next, 2357 struct delayed_evt_ack_node, list); 2358 list_del(&_evtack_node->list); 2359 kfree(_evtack_node); 2360 } 2361 } 2362 2363 /** 2364 * mpi3mr_dev_rmhs_complete_iou - Device removal IOUC completion 2365 * @mrioc: Adapter instance reference 2366 * @drv_cmd: Internal command tracker 2367 * 2368 * Issues a target reset TM to the firmware from the device 2369 * removal TM pend list or retry the removal handshake sequence 2370 * based on the IOU control request IOC status. 2371 * 2372 * Return: Nothing 2373 */ 2374 static void mpi3mr_dev_rmhs_complete_iou(struct mpi3mr_ioc *mrioc, 2375 struct mpi3mr_drv_cmd *drv_cmd) 2376 { 2377 u16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN; 2378 struct delayed_dev_rmhs_node *delayed_dev_rmhs = NULL; 2379 2380 if (drv_cmd->state & MPI3MR_CMD_RESET) 2381 goto clear_drv_cmd; 2382 2383 ioc_info(mrioc, 2384 "%s :dev_rmhs_iouctrl_complete:handle(0x%04x), ioc_status(0x%04x), loginfo(0x%08x)\n", 2385 __func__, drv_cmd->dev_handle, drv_cmd->ioc_status, 2386 drv_cmd->ioc_loginfo); 2387 if (drv_cmd->ioc_status != MPI3_IOCSTATUS_SUCCESS) { 2388 if (drv_cmd->retry_count < MPI3MR_DEV_RMHS_RETRY_COUNT) { 2389 drv_cmd->retry_count++; 2390 ioc_info(mrioc, 2391 "%s :dev_rmhs_iouctrl_complete: handle(0x%04x)retrying handshake retry=%d\n", 2392 __func__, drv_cmd->dev_handle, 2393 drv_cmd->retry_count); 2394 mpi3mr_dev_rmhs_send_tm(mrioc, drv_cmd->dev_handle, 2395 drv_cmd, drv_cmd->iou_rc); 2396 return; 2397 } 2398 ioc_err(mrioc, 2399 "%s :dev removal handshake failed after all retries: handle(0x%04x)\n", 2400 __func__, drv_cmd->dev_handle); 2401 } else { 2402 ioc_info(mrioc, 2403 "%s :dev removal handshake completed successfully: handle(0x%04x)\n", 2404 __func__, drv_cmd->dev_handle); 2405 clear_bit(drv_cmd->dev_handle, mrioc->removepend_bitmap); 2406 } 2407 2408 if (!list_empty(&mrioc->delayed_rmhs_list)) { 2409 delayed_dev_rmhs = list_entry(mrioc->delayed_rmhs_list.next, 2410 struct delayed_dev_rmhs_node, list); 2411 drv_cmd->dev_handle = delayed_dev_rmhs->handle; 2412 drv_cmd->retry_count = 0; 2413 drv_cmd->iou_rc = delayed_dev_rmhs->iou_rc; 2414 ioc_info(mrioc, 2415 "%s :dev_rmhs_iouctrl_complete: processing delayed TM: handle(0x%04x)\n", 2416 __func__, drv_cmd->dev_handle); 2417 mpi3mr_dev_rmhs_send_tm(mrioc, drv_cmd->dev_handle, drv_cmd, 2418 drv_cmd->iou_rc); 2419 list_del(&delayed_dev_rmhs->list); 2420 kfree(delayed_dev_rmhs); 2421 return; 2422 } 2423 2424 clear_drv_cmd: 2425 drv_cmd->state = MPI3MR_CMD_NOTUSED; 2426 drv_cmd->callback = NULL; 2427 drv_cmd->retry_count = 0; 2428 drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE; 2429 clear_bit(cmd_idx, mrioc->devrem_bitmap); 2430 } 2431 2432 /** 2433 * mpi3mr_dev_rmhs_complete_tm - Device removal TM completion 2434 * @mrioc: Adapter instance reference 2435 * @drv_cmd: Internal command tracker 2436 * 2437 * Issues a target reset TM to the firmware from the device 2438 * removal TM pend list or issue IO unit control request as 2439 * part of device removal or hidden acknowledgment handshake. 2440 * 2441 * Return: Nothing 2442 */ 2443 static void mpi3mr_dev_rmhs_complete_tm(struct mpi3mr_ioc *mrioc, 2444 struct mpi3mr_drv_cmd *drv_cmd) 2445 { 2446 struct mpi3_iounit_control_request iou_ctrl; 2447 u16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN; 2448 struct mpi3_scsi_task_mgmt_reply *tm_reply = NULL; 2449 int retval; 2450 2451 if (drv_cmd->state & MPI3MR_CMD_RESET) 2452 goto clear_drv_cmd; 2453 2454 if (drv_cmd->state & MPI3MR_CMD_REPLY_VALID) 2455 tm_reply = (struct mpi3_scsi_task_mgmt_reply *)drv_cmd->reply; 2456 2457 if (tm_reply) 2458 pr_info(IOCNAME 2459 "dev_rmhs_tr_complete:handle(0x%04x), ioc_status(0x%04x), loginfo(0x%08x), term_count(%d)\n", 2460 mrioc->name, drv_cmd->dev_handle, drv_cmd->ioc_status, 2461 drv_cmd->ioc_loginfo, 2462 le32_to_cpu(tm_reply->termination_count)); 2463 2464 pr_info(IOCNAME "Issuing IOU CTL: handle(0x%04x) dev_rmhs idx(%d)\n", 2465 mrioc->name, drv_cmd->dev_handle, cmd_idx); 2466 2467 memset(&iou_ctrl, 0, sizeof(iou_ctrl)); 2468 2469 drv_cmd->state = MPI3MR_CMD_PENDING; 2470 drv_cmd->is_waiting = 0; 2471 drv_cmd->callback = mpi3mr_dev_rmhs_complete_iou; 2472 iou_ctrl.operation = drv_cmd->iou_rc; 2473 iou_ctrl.param16[0] = cpu_to_le16(drv_cmd->dev_handle); 2474 iou_ctrl.host_tag = cpu_to_le16(drv_cmd->host_tag); 2475 iou_ctrl.function = MPI3_FUNCTION_IO_UNIT_CONTROL; 2476 2477 retval = mpi3mr_admin_request_post(mrioc, &iou_ctrl, sizeof(iou_ctrl), 2478 1); 2479 if (retval) { 2480 pr_err(IOCNAME "Issue DevRmHsTMIOUCTL: Admin post failed\n", 2481 mrioc->name); 2482 goto clear_drv_cmd; 2483 } 2484 2485 return; 2486 clear_drv_cmd: 2487 drv_cmd->state = MPI3MR_CMD_NOTUSED; 2488 drv_cmd->callback = NULL; 2489 drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE; 2490 drv_cmd->retry_count = 0; 2491 clear_bit(cmd_idx, mrioc->devrem_bitmap); 2492 } 2493 2494 /** 2495 * mpi3mr_dev_rmhs_send_tm - Issue TM for device removal 2496 * @mrioc: Adapter instance reference 2497 * @handle: Device handle 2498 * @cmdparam: Internal command tracker 2499 * @iou_rc: IO unit reason code 2500 * 2501 * Issues a target reset TM to the firmware or add it to a pend 2502 * list as part of device removal or hidden acknowledgment 2503 * handshake. 2504 * 2505 * Return: Nothing 2506 */ 2507 static void mpi3mr_dev_rmhs_send_tm(struct mpi3mr_ioc *mrioc, u16 handle, 2508 struct mpi3mr_drv_cmd *cmdparam, u8 iou_rc) 2509 { 2510 struct mpi3_scsi_task_mgmt_request tm_req; 2511 int retval = 0; 2512 u16 cmd_idx = MPI3MR_NUM_DEVRMCMD; 2513 u8 retrycount = 5; 2514 struct mpi3mr_drv_cmd *drv_cmd = cmdparam; 2515 struct delayed_dev_rmhs_node *delayed_dev_rmhs = NULL; 2516 struct mpi3mr_tgt_dev *tgtdev = NULL; 2517 unsigned long flags; 2518 2519 spin_lock_irqsave(&mrioc->tgtdev_lock, flags); 2520 tgtdev = __mpi3mr_get_tgtdev_by_handle(mrioc, handle); 2521 if (tgtdev && (iou_rc == MPI3_CTRL_OP_REMOVE_DEVICE)) 2522 tgtdev->state = MPI3MR_DEV_REMOVE_HS_STARTED; 2523 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 2524 2525 if (drv_cmd) 2526 goto issue_cmd; 2527 do { 2528 cmd_idx = find_first_zero_bit(mrioc->devrem_bitmap, 2529 MPI3MR_NUM_DEVRMCMD); 2530 if (cmd_idx < MPI3MR_NUM_DEVRMCMD) { 2531 if (!test_and_set_bit(cmd_idx, mrioc->devrem_bitmap)) 2532 break; 2533 cmd_idx = MPI3MR_NUM_DEVRMCMD; 2534 } 2535 } while (retrycount--); 2536 2537 if (cmd_idx >= MPI3MR_NUM_DEVRMCMD) { 2538 delayed_dev_rmhs = kzalloc(sizeof(*delayed_dev_rmhs), 2539 GFP_ATOMIC); 2540 if (!delayed_dev_rmhs) 2541 return; 2542 INIT_LIST_HEAD(&delayed_dev_rmhs->list); 2543 delayed_dev_rmhs->handle = handle; 2544 delayed_dev_rmhs->iou_rc = iou_rc; 2545 list_add_tail(&delayed_dev_rmhs->list, 2546 &mrioc->delayed_rmhs_list); 2547 ioc_info(mrioc, "%s :DevRmHs: tr:handle(0x%04x) is postponed\n", 2548 __func__, handle); 2549 return; 2550 } 2551 drv_cmd = &mrioc->dev_rmhs_cmds[cmd_idx]; 2552 2553 issue_cmd: 2554 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN; 2555 ioc_info(mrioc, 2556 "%s :Issuing TR TM: for devhandle 0x%04x with dev_rmhs %d\n", 2557 __func__, handle, cmd_idx); 2558 2559 memset(&tm_req, 0, sizeof(tm_req)); 2560 if (drv_cmd->state & MPI3MR_CMD_PENDING) { 2561 ioc_err(mrioc, "%s :Issue TM: Command is in use\n", __func__); 2562 goto out; 2563 } 2564 drv_cmd->state = MPI3MR_CMD_PENDING; 2565 drv_cmd->is_waiting = 0; 2566 drv_cmd->callback = mpi3mr_dev_rmhs_complete_tm; 2567 drv_cmd->dev_handle = handle; 2568 drv_cmd->iou_rc = iou_rc; 2569 tm_req.dev_handle = cpu_to_le16(handle); 2570 tm_req.task_type = MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET; 2571 tm_req.host_tag = cpu_to_le16(drv_cmd->host_tag); 2572 tm_req.task_host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INVALID); 2573 tm_req.function = MPI3_FUNCTION_SCSI_TASK_MGMT; 2574 2575 set_bit(handle, mrioc->removepend_bitmap); 2576 retval = mpi3mr_admin_request_post(mrioc, &tm_req, sizeof(tm_req), 1); 2577 if (retval) { 2578 ioc_err(mrioc, "%s :Issue DevRmHsTM: Admin Post failed\n", 2579 __func__); 2580 goto out_failed; 2581 } 2582 out: 2583 return; 2584 out_failed: 2585 drv_cmd->state = MPI3MR_CMD_NOTUSED; 2586 drv_cmd->callback = NULL; 2587 drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE; 2588 drv_cmd->retry_count = 0; 2589 clear_bit(cmd_idx, mrioc->devrem_bitmap); 2590 } 2591 2592 /** 2593 * mpi3mr_complete_evt_ack - event ack request completion 2594 * @mrioc: Adapter instance reference 2595 * @drv_cmd: Internal command tracker 2596 * 2597 * This is the completion handler for non blocking event 2598 * acknowledgment sent to the firmware and this will issue any 2599 * pending event acknowledgment request. 2600 * 2601 * Return: Nothing 2602 */ 2603 static void mpi3mr_complete_evt_ack(struct mpi3mr_ioc *mrioc, 2604 struct mpi3mr_drv_cmd *drv_cmd) 2605 { 2606 u16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN; 2607 struct delayed_evt_ack_node *delayed_evtack = NULL; 2608 2609 if (drv_cmd->state & MPI3MR_CMD_RESET) 2610 goto clear_drv_cmd; 2611 2612 if (drv_cmd->ioc_status != MPI3_IOCSTATUS_SUCCESS) { 2613 dprint_event_th(mrioc, 2614 "immediate event ack failed with ioc_status(0x%04x) log_info(0x%08x)\n", 2615 (drv_cmd->ioc_status & MPI3_IOCSTATUS_STATUS_MASK), 2616 drv_cmd->ioc_loginfo); 2617 } 2618 2619 if (!list_empty(&mrioc->delayed_evtack_cmds_list)) { 2620 delayed_evtack = 2621 list_entry(mrioc->delayed_evtack_cmds_list.next, 2622 struct delayed_evt_ack_node, list); 2623 mpi3mr_send_event_ack(mrioc, delayed_evtack->event, drv_cmd, 2624 delayed_evtack->event_ctx); 2625 list_del(&delayed_evtack->list); 2626 kfree(delayed_evtack); 2627 return; 2628 } 2629 clear_drv_cmd: 2630 drv_cmd->state = MPI3MR_CMD_NOTUSED; 2631 drv_cmd->callback = NULL; 2632 clear_bit(cmd_idx, mrioc->evtack_cmds_bitmap); 2633 } 2634 2635 /** 2636 * mpi3mr_send_event_ack - Issue event acknwoledgment request 2637 * @mrioc: Adapter instance reference 2638 * @event: MPI3 event id 2639 * @cmdparam: Internal command tracker 2640 * @event_ctx: event context 2641 * 2642 * Issues event acknowledgment request to the firmware if there 2643 * is a free command to send the event ack else it to a pend 2644 * list so that it will be processed on a completion of a prior 2645 * event acknowledgment . 2646 * 2647 * Return: Nothing 2648 */ 2649 static void mpi3mr_send_event_ack(struct mpi3mr_ioc *mrioc, u8 event, 2650 struct mpi3mr_drv_cmd *cmdparam, u32 event_ctx) 2651 { 2652 struct mpi3_event_ack_request evtack_req; 2653 int retval = 0; 2654 u8 retrycount = 5; 2655 u16 cmd_idx = MPI3MR_NUM_EVTACKCMD; 2656 struct mpi3mr_drv_cmd *drv_cmd = cmdparam; 2657 struct delayed_evt_ack_node *delayed_evtack = NULL; 2658 2659 if (drv_cmd) { 2660 dprint_event_th(mrioc, 2661 "sending delayed event ack in the top half for event(0x%02x), event_ctx(0x%08x)\n", 2662 event, event_ctx); 2663 goto issue_cmd; 2664 } 2665 dprint_event_th(mrioc, 2666 "sending event ack in the top half for event(0x%02x), event_ctx(0x%08x)\n", 2667 event, event_ctx); 2668 do { 2669 cmd_idx = find_first_zero_bit(mrioc->evtack_cmds_bitmap, 2670 MPI3MR_NUM_EVTACKCMD); 2671 if (cmd_idx < MPI3MR_NUM_EVTACKCMD) { 2672 if (!test_and_set_bit(cmd_idx, 2673 mrioc->evtack_cmds_bitmap)) 2674 break; 2675 cmd_idx = MPI3MR_NUM_EVTACKCMD; 2676 } 2677 } while (retrycount--); 2678 2679 if (cmd_idx >= MPI3MR_NUM_EVTACKCMD) { 2680 delayed_evtack = kzalloc(sizeof(*delayed_evtack), 2681 GFP_ATOMIC); 2682 if (!delayed_evtack) 2683 return; 2684 INIT_LIST_HEAD(&delayed_evtack->list); 2685 delayed_evtack->event = event; 2686 delayed_evtack->event_ctx = event_ctx; 2687 list_add_tail(&delayed_evtack->list, 2688 &mrioc->delayed_evtack_cmds_list); 2689 dprint_event_th(mrioc, 2690 "event ack in the top half for event(0x%02x), event_ctx(0x%08x) is postponed\n", 2691 event, event_ctx); 2692 return; 2693 } 2694 drv_cmd = &mrioc->evtack_cmds[cmd_idx]; 2695 2696 issue_cmd: 2697 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN; 2698 2699 memset(&evtack_req, 0, sizeof(evtack_req)); 2700 if (drv_cmd->state & MPI3MR_CMD_PENDING) { 2701 dprint_event_th(mrioc, 2702 "sending event ack failed due to command in use\n"); 2703 goto out; 2704 } 2705 drv_cmd->state = MPI3MR_CMD_PENDING; 2706 drv_cmd->is_waiting = 0; 2707 drv_cmd->callback = mpi3mr_complete_evt_ack; 2708 evtack_req.host_tag = cpu_to_le16(drv_cmd->host_tag); 2709 evtack_req.function = MPI3_FUNCTION_EVENT_ACK; 2710 evtack_req.event = event; 2711 evtack_req.event_context = cpu_to_le32(event_ctx); 2712 retval = mpi3mr_admin_request_post(mrioc, &evtack_req, 2713 sizeof(evtack_req), 1); 2714 if (retval) { 2715 dprint_event_th(mrioc, 2716 "posting event ack request is failed\n"); 2717 goto out_failed; 2718 } 2719 2720 dprint_event_th(mrioc, 2721 "event ack in the top half for event(0x%02x), event_ctx(0x%08x) is posted\n", 2722 event, event_ctx); 2723 out: 2724 return; 2725 out_failed: 2726 drv_cmd->state = MPI3MR_CMD_NOTUSED; 2727 drv_cmd->callback = NULL; 2728 clear_bit(cmd_idx, mrioc->evtack_cmds_bitmap); 2729 } 2730 2731 /** 2732 * mpi3mr_pcietopochg_evt_th - PCIETopologyChange evt tophalf 2733 * @mrioc: Adapter instance reference 2734 * @event_reply: event data 2735 * 2736 * Checks for the reason code and based on that either block I/O 2737 * to device, or unblock I/O to the device, or start the device 2738 * removal handshake with reason as remove with the firmware for 2739 * PCIe devices. 2740 * 2741 * Return: Nothing 2742 */ 2743 static void mpi3mr_pcietopochg_evt_th(struct mpi3mr_ioc *mrioc, 2744 struct mpi3_event_notification_reply *event_reply) 2745 { 2746 struct mpi3_event_data_pcie_topology_change_list *topo_evt = 2747 (struct mpi3_event_data_pcie_topology_change_list *)event_reply->event_data; 2748 int i; 2749 u16 handle; 2750 u8 reason_code; 2751 struct mpi3mr_tgt_dev *tgtdev = NULL; 2752 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL; 2753 2754 for (i = 0; i < topo_evt->num_entries; i++) { 2755 handle = le16_to_cpu(topo_evt->port_entry[i].attached_dev_handle); 2756 if (!handle) 2757 continue; 2758 reason_code = topo_evt->port_entry[i].port_status; 2759 scsi_tgt_priv_data = NULL; 2760 tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle); 2761 if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata) 2762 scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *) 2763 tgtdev->starget->hostdata; 2764 switch (reason_code) { 2765 case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING: 2766 if (scsi_tgt_priv_data) { 2767 scsi_tgt_priv_data->dev_removed = 1; 2768 scsi_tgt_priv_data->dev_removedelay = 0; 2769 atomic_set(&scsi_tgt_priv_data->block_io, 0); 2770 } 2771 mpi3mr_dev_rmhs_send_tm(mrioc, handle, NULL, 2772 MPI3_CTRL_OP_REMOVE_DEVICE); 2773 break; 2774 case MPI3_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING: 2775 if (scsi_tgt_priv_data) { 2776 scsi_tgt_priv_data->dev_removedelay = 1; 2777 atomic_inc(&scsi_tgt_priv_data->block_io); 2778 } 2779 break; 2780 case MPI3_EVENT_PCIE_TOPO_PS_RESPONDING: 2781 if (scsi_tgt_priv_data && 2782 scsi_tgt_priv_data->dev_removedelay) { 2783 scsi_tgt_priv_data->dev_removedelay = 0; 2784 atomic_dec_if_positive 2785 (&scsi_tgt_priv_data->block_io); 2786 } 2787 break; 2788 case MPI3_EVENT_PCIE_TOPO_PS_PORT_CHANGED: 2789 default: 2790 break; 2791 } 2792 if (tgtdev) 2793 mpi3mr_tgtdev_put(tgtdev); 2794 } 2795 } 2796 2797 /** 2798 * mpi3mr_sastopochg_evt_th - SASTopologyChange evt tophalf 2799 * @mrioc: Adapter instance reference 2800 * @event_reply: event data 2801 * 2802 * Checks for the reason code and based on that either block I/O 2803 * to device, or unblock I/O to the device, or start the device 2804 * removal handshake with reason as remove with the firmware for 2805 * SAS/SATA devices. 2806 * 2807 * Return: Nothing 2808 */ 2809 static void mpi3mr_sastopochg_evt_th(struct mpi3mr_ioc *mrioc, 2810 struct mpi3_event_notification_reply *event_reply) 2811 { 2812 struct mpi3_event_data_sas_topology_change_list *topo_evt = 2813 (struct mpi3_event_data_sas_topology_change_list *)event_reply->event_data; 2814 int i; 2815 u16 handle; 2816 u8 reason_code; 2817 struct mpi3mr_tgt_dev *tgtdev = NULL; 2818 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL; 2819 2820 for (i = 0; i < topo_evt->num_entries; i++) { 2821 handle = le16_to_cpu(topo_evt->phy_entry[i].attached_dev_handle); 2822 if (!handle) 2823 continue; 2824 reason_code = topo_evt->phy_entry[i].status & 2825 MPI3_EVENT_SAS_TOPO_PHY_RC_MASK; 2826 scsi_tgt_priv_data = NULL; 2827 tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle); 2828 if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata) 2829 scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *) 2830 tgtdev->starget->hostdata; 2831 switch (reason_code) { 2832 case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING: 2833 if (scsi_tgt_priv_data) { 2834 scsi_tgt_priv_data->dev_removed = 1; 2835 scsi_tgt_priv_data->dev_removedelay = 0; 2836 atomic_set(&scsi_tgt_priv_data->block_io, 0); 2837 } 2838 mpi3mr_dev_rmhs_send_tm(mrioc, handle, NULL, 2839 MPI3_CTRL_OP_REMOVE_DEVICE); 2840 break; 2841 case MPI3_EVENT_SAS_TOPO_PHY_RC_DELAY_NOT_RESPONDING: 2842 if (scsi_tgt_priv_data) { 2843 scsi_tgt_priv_data->dev_removedelay = 1; 2844 atomic_inc(&scsi_tgt_priv_data->block_io); 2845 } 2846 break; 2847 case MPI3_EVENT_SAS_TOPO_PHY_RC_RESPONDING: 2848 if (scsi_tgt_priv_data && 2849 scsi_tgt_priv_data->dev_removedelay) { 2850 scsi_tgt_priv_data->dev_removedelay = 0; 2851 atomic_dec_if_positive 2852 (&scsi_tgt_priv_data->block_io); 2853 } 2854 break; 2855 case MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED: 2856 default: 2857 break; 2858 } 2859 if (tgtdev) 2860 mpi3mr_tgtdev_put(tgtdev); 2861 } 2862 } 2863 2864 /** 2865 * mpi3mr_devstatuschg_evt_th - DeviceStatusChange evt tophalf 2866 * @mrioc: Adapter instance reference 2867 * @event_reply: event data 2868 * 2869 * Checks for the reason code and based on that either block I/O 2870 * to device, or unblock I/O to the device, or start the device 2871 * removal handshake with reason as remove/hide acknowledgment 2872 * with the firmware. 2873 * 2874 * Return: Nothing 2875 */ 2876 static void mpi3mr_devstatuschg_evt_th(struct mpi3mr_ioc *mrioc, 2877 struct mpi3_event_notification_reply *event_reply) 2878 { 2879 u16 dev_handle = 0; 2880 u8 ublock = 0, block = 0, hide = 0, delete = 0, remove = 0; 2881 struct mpi3mr_tgt_dev *tgtdev = NULL; 2882 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL; 2883 struct mpi3_event_data_device_status_change *evtdata = 2884 (struct mpi3_event_data_device_status_change *)event_reply->event_data; 2885 2886 if (mrioc->stop_drv_processing) 2887 goto out; 2888 2889 dev_handle = le16_to_cpu(evtdata->dev_handle); 2890 dprint_event_th(mrioc, 2891 "device status change event top half with rc(0x%02x) for handle(0x%04x)\n", 2892 evtdata->reason_code, dev_handle); 2893 2894 switch (evtdata->reason_code) { 2895 case MPI3_EVENT_DEV_STAT_RC_INT_DEVICE_RESET_STRT: 2896 case MPI3_EVENT_DEV_STAT_RC_INT_IT_NEXUS_RESET_STRT: 2897 block = 1; 2898 break; 2899 case MPI3_EVENT_DEV_STAT_RC_HIDDEN: 2900 delete = 1; 2901 hide = 1; 2902 break; 2903 case MPI3_EVENT_DEV_STAT_RC_VD_NOT_RESPONDING: 2904 delete = 1; 2905 remove = 1; 2906 break; 2907 case MPI3_EVENT_DEV_STAT_RC_INT_DEVICE_RESET_CMP: 2908 case MPI3_EVENT_DEV_STAT_RC_INT_IT_NEXUS_RESET_CMP: 2909 ublock = 1; 2910 break; 2911 default: 2912 break; 2913 } 2914 2915 tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle); 2916 if (!tgtdev) { 2917 dprint_event_th(mrioc, 2918 "processing device status change event could not identify device for handle(0x%04x)\n", 2919 dev_handle); 2920 goto out; 2921 } 2922 if (hide) 2923 tgtdev->is_hidden = hide; 2924 if (tgtdev->starget && tgtdev->starget->hostdata) { 2925 scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *) 2926 tgtdev->starget->hostdata; 2927 if (block) 2928 atomic_inc(&scsi_tgt_priv_data->block_io); 2929 if (delete) 2930 scsi_tgt_priv_data->dev_removed = 1; 2931 if (ublock) 2932 atomic_dec_if_positive(&scsi_tgt_priv_data->block_io); 2933 } 2934 if (remove) 2935 mpi3mr_dev_rmhs_send_tm(mrioc, dev_handle, NULL, 2936 MPI3_CTRL_OP_REMOVE_DEVICE); 2937 if (hide) 2938 mpi3mr_dev_rmhs_send_tm(mrioc, dev_handle, NULL, 2939 MPI3_CTRL_OP_HIDDEN_ACK); 2940 2941 out: 2942 if (tgtdev) 2943 mpi3mr_tgtdev_put(tgtdev); 2944 } 2945 2946 /** 2947 * mpi3mr_preparereset_evt_th - Prepare for reset event tophalf 2948 * @mrioc: Adapter instance reference 2949 * @event_reply: event data 2950 * 2951 * Blocks and unblocks host level I/O based on the reason code 2952 * 2953 * Return: Nothing 2954 */ 2955 static void mpi3mr_preparereset_evt_th(struct mpi3mr_ioc *mrioc, 2956 struct mpi3_event_notification_reply *event_reply) 2957 { 2958 struct mpi3_event_data_prepare_for_reset *evtdata = 2959 (struct mpi3_event_data_prepare_for_reset *)event_reply->event_data; 2960 2961 if (evtdata->reason_code == MPI3_EVENT_PREPARE_RESET_RC_START) { 2962 dprint_event_th(mrioc, 2963 "prepare for reset event top half with rc=start\n"); 2964 if (mrioc->prepare_for_reset) 2965 return; 2966 scsi_block_requests(mrioc->shost); 2967 mrioc->prepare_for_reset = 1; 2968 mrioc->prepare_for_reset_timeout_counter = 0; 2969 } else if (evtdata->reason_code == MPI3_EVENT_PREPARE_RESET_RC_ABORT) { 2970 dprint_event_th(mrioc, 2971 "prepare for reset top half with rc=abort\n"); 2972 mrioc->prepare_for_reset = 0; 2973 scsi_unblock_requests(mrioc->shost); 2974 mrioc->prepare_for_reset_timeout_counter = 0; 2975 } 2976 if ((event_reply->msg_flags & MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_MASK) 2977 == MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_REQUIRED) 2978 mpi3mr_send_event_ack(mrioc, event_reply->event, NULL, 2979 le32_to_cpu(event_reply->event_context)); 2980 } 2981 2982 /** 2983 * mpi3mr_energypackchg_evt_th - Energy pack change evt tophalf 2984 * @mrioc: Adapter instance reference 2985 * @event_reply: event data 2986 * 2987 * Identifies the new shutdown timeout value and update. 2988 * 2989 * Return: Nothing 2990 */ 2991 static void mpi3mr_energypackchg_evt_th(struct mpi3mr_ioc *mrioc, 2992 struct mpi3_event_notification_reply *event_reply) 2993 { 2994 struct mpi3_event_data_energy_pack_change *evtdata = 2995 (struct mpi3_event_data_energy_pack_change *)event_reply->event_data; 2996 u16 shutdown_timeout = le16_to_cpu(evtdata->shutdown_timeout); 2997 2998 if (shutdown_timeout <= 0) { 2999 dprint_event_th(mrioc, 3000 "%s :Invalid Shutdown Timeout received = %d\n", 3001 __func__, shutdown_timeout); 3002 return; 3003 } 3004 3005 dprint_event_th(mrioc, 3006 "%s :Previous Shutdown Timeout Value = %d New Shutdown Timeout Value = %d\n", 3007 __func__, mrioc->facts.shutdown_timeout, shutdown_timeout); 3008 mrioc->facts.shutdown_timeout = shutdown_timeout; 3009 } 3010 3011 /** 3012 * mpi3mr_cablemgmt_evt_th - Cable management event tophalf 3013 * @mrioc: Adapter instance reference 3014 * @event_reply: event data 3015 * 3016 * Displays Cable manegemt event details. 3017 * 3018 * Return: Nothing 3019 */ 3020 static void mpi3mr_cablemgmt_evt_th(struct mpi3mr_ioc *mrioc, 3021 struct mpi3_event_notification_reply *event_reply) 3022 { 3023 struct mpi3_event_data_cable_management *evtdata = 3024 (struct mpi3_event_data_cable_management *)event_reply->event_data; 3025 3026 switch (evtdata->status) { 3027 case MPI3_EVENT_CABLE_MGMT_STATUS_INSUFFICIENT_POWER: 3028 { 3029 ioc_info(mrioc, "An active cable with receptacle_id %d cannot be powered.\n" 3030 "Devices connected to this cable are not detected.\n" 3031 "This cable requires %d mW of power.\n", 3032 evtdata->receptacle_id, 3033 le32_to_cpu(evtdata->active_cable_power_requirement)); 3034 break; 3035 } 3036 case MPI3_EVENT_CABLE_MGMT_STATUS_DEGRADED: 3037 { 3038 ioc_info(mrioc, "A cable with receptacle_id %d is not running at optimal speed\n", 3039 evtdata->receptacle_id); 3040 break; 3041 } 3042 default: 3043 break; 3044 } 3045 } 3046 3047 /** 3048 * mpi3mr_add_event_wait_for_device_refresh - Add Wait for Device Refresh Event 3049 * @mrioc: Adapter instance reference 3050 * 3051 * Add driver specific event to make sure that the driver won't process the 3052 * events until all the devices are refreshed during soft reset. 3053 * 3054 * Return: Nothing 3055 */ 3056 void mpi3mr_add_event_wait_for_device_refresh(struct mpi3mr_ioc *mrioc) 3057 { 3058 struct mpi3mr_fwevt *fwevt = NULL; 3059 3060 fwevt = mpi3mr_alloc_fwevt(0); 3061 if (!fwevt) { 3062 dprint_event_th(mrioc, 3063 "failed to schedule bottom half handler for event(0x%02x)\n", 3064 MPI3_EVENT_WAIT_FOR_DEVICES_TO_REFRESH); 3065 return; 3066 } 3067 fwevt->mrioc = mrioc; 3068 fwevt->event_id = MPI3_EVENT_WAIT_FOR_DEVICES_TO_REFRESH; 3069 fwevt->send_ack = 0; 3070 fwevt->process_evt = 1; 3071 fwevt->evt_ctx = 0; 3072 fwevt->event_data_size = 0; 3073 mpi3mr_fwevt_add_to_list(mrioc, fwevt); 3074 } 3075 3076 /** 3077 * mpi3mr_os_handle_events - Firmware event handler 3078 * @mrioc: Adapter instance reference 3079 * @event_reply: event data 3080 * 3081 * Identifies whether the event has to be handled and acknowledged, 3082 * and either processes the event in the top-half and/or schedule a 3083 * bottom-half through mpi3mr_fwevt_worker(). 3084 * 3085 * Return: Nothing 3086 */ 3087 void mpi3mr_os_handle_events(struct mpi3mr_ioc *mrioc, 3088 struct mpi3_event_notification_reply *event_reply) 3089 { 3090 u16 evt_type, sz; 3091 struct mpi3mr_fwevt *fwevt = NULL; 3092 bool ack_req = 0, process_evt_bh = 0; 3093 3094 if (mrioc->stop_drv_processing) 3095 return; 3096 3097 if ((event_reply->msg_flags & MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_MASK) 3098 == MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_REQUIRED) 3099 ack_req = 1; 3100 3101 evt_type = event_reply->event; 3102 mpi3mr_event_trigger(mrioc, event_reply->event); 3103 3104 switch (evt_type) { 3105 case MPI3_EVENT_DEVICE_ADDED: 3106 { 3107 struct mpi3_device_page0 *dev_pg0 = 3108 (struct mpi3_device_page0 *)event_reply->event_data; 3109 if (mpi3mr_create_tgtdev(mrioc, dev_pg0)) 3110 dprint_event_th(mrioc, 3111 "failed to process device added event for handle(0x%04x),\n" 3112 "perst_id(%d) in the event top half handler\n", 3113 le16_to_cpu(dev_pg0->dev_handle), 3114 le16_to_cpu(dev_pg0->persistent_id)); 3115 else 3116 process_evt_bh = 1; 3117 break; 3118 } 3119 case MPI3_EVENT_DEVICE_STATUS_CHANGE: 3120 { 3121 process_evt_bh = 1; 3122 mpi3mr_devstatuschg_evt_th(mrioc, event_reply); 3123 break; 3124 } 3125 case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST: 3126 { 3127 process_evt_bh = 1; 3128 mpi3mr_sastopochg_evt_th(mrioc, event_reply); 3129 break; 3130 } 3131 case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST: 3132 { 3133 process_evt_bh = 1; 3134 mpi3mr_pcietopochg_evt_th(mrioc, event_reply); 3135 break; 3136 } 3137 case MPI3_EVENT_PREPARE_FOR_RESET: 3138 { 3139 mpi3mr_preparereset_evt_th(mrioc, event_reply); 3140 ack_req = 0; 3141 break; 3142 } 3143 case MPI3_EVENT_DIAGNOSTIC_BUFFER_STATUS_CHANGE: 3144 { 3145 mpi3mr_hdbstatuschg_evt_th(mrioc, event_reply); 3146 break; 3147 } 3148 case MPI3_EVENT_DEVICE_INFO_CHANGED: 3149 case MPI3_EVENT_LOG_DATA: 3150 3151 sz = event_reply->event_data_length * 4; 3152 mpi3mr_app_save_logdata_th(mrioc, 3153 (char *)event_reply->event_data, sz); 3154 break; 3155 case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE: 3156 case MPI3_EVENT_ENCL_DEVICE_ADDED: 3157 { 3158 process_evt_bh = 1; 3159 break; 3160 } 3161 case MPI3_EVENT_ENERGY_PACK_CHANGE: 3162 { 3163 mpi3mr_energypackchg_evt_th(mrioc, event_reply); 3164 break; 3165 } 3166 case MPI3_EVENT_CABLE_MGMT: 3167 { 3168 mpi3mr_cablemgmt_evt_th(mrioc, event_reply); 3169 break; 3170 } 3171 case MPI3_EVENT_SAS_DISCOVERY: 3172 case MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR: 3173 case MPI3_EVENT_SAS_BROADCAST_PRIMITIVE: 3174 case MPI3_EVENT_PCIE_ENUMERATION: 3175 break; 3176 default: 3177 ioc_info(mrioc, "%s :event 0x%02x is not handled\n", 3178 __func__, evt_type); 3179 break; 3180 } 3181 if (process_evt_bh || ack_req) { 3182 dprint_event_th(mrioc, 3183 "scheduling bottom half handler for event(0x%02x) - (0x%08x), ack_required=%d\n", 3184 evt_type, le32_to_cpu(event_reply->event_context), ack_req); 3185 sz = event_reply->event_data_length * 4; 3186 fwevt = mpi3mr_alloc_fwevt(sz); 3187 if (!fwevt) { 3188 dprint_event_th(mrioc, 3189 "failed to schedule bottom half handler for\n" 3190 "event(0x%02x), ack_required=%d\n", evt_type, ack_req); 3191 return; 3192 } 3193 3194 memcpy(fwevt->event_data, event_reply->event_data, sz); 3195 fwevt->mrioc = mrioc; 3196 fwevt->event_id = evt_type; 3197 fwevt->send_ack = ack_req; 3198 fwevt->process_evt = process_evt_bh; 3199 fwevt->evt_ctx = le32_to_cpu(event_reply->event_context); 3200 mpi3mr_fwevt_add_to_list(mrioc, fwevt); 3201 } 3202 } 3203 3204 /** 3205 * mpi3mr_setup_eedp - Setup EEDP information in MPI3 SCSI IO 3206 * @mrioc: Adapter instance reference 3207 * @scmd: SCSI command reference 3208 * @scsiio_req: MPI3 SCSI IO request 3209 * 3210 * Identifies the protection information flags from the SCSI 3211 * command and set appropriate flags in the MPI3 SCSI IO 3212 * request. 3213 * 3214 * Return: Nothing 3215 */ 3216 static void mpi3mr_setup_eedp(struct mpi3mr_ioc *mrioc, 3217 struct scsi_cmnd *scmd, struct mpi3_scsi_io_request *scsiio_req) 3218 { 3219 u16 eedp_flags = 0; 3220 unsigned char prot_op = scsi_get_prot_op(scmd); 3221 3222 switch (prot_op) { 3223 case SCSI_PROT_NORMAL: 3224 return; 3225 case SCSI_PROT_READ_STRIP: 3226 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK_REMOVE; 3227 break; 3228 case SCSI_PROT_WRITE_INSERT: 3229 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_INSERT; 3230 break; 3231 case SCSI_PROT_READ_INSERT: 3232 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_INSERT; 3233 scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID; 3234 break; 3235 case SCSI_PROT_WRITE_STRIP: 3236 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK_REMOVE; 3237 scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID; 3238 break; 3239 case SCSI_PROT_READ_PASS: 3240 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK; 3241 scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID; 3242 break; 3243 case SCSI_PROT_WRITE_PASS: 3244 if (scmd->prot_flags & SCSI_PROT_IP_CHECKSUM) { 3245 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK_REGEN; 3246 scsiio_req->sgl[0].eedp.application_tag_translation_mask = 3247 0xffff; 3248 } else 3249 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK; 3250 3251 scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID; 3252 break; 3253 default: 3254 return; 3255 } 3256 3257 if (scmd->prot_flags & SCSI_PROT_GUARD_CHECK) 3258 eedp_flags |= MPI3_EEDPFLAGS_CHK_GUARD; 3259 3260 if (scmd->prot_flags & SCSI_PROT_IP_CHECKSUM) 3261 eedp_flags |= MPI3_EEDPFLAGS_HOST_GUARD_IP_CHKSUM; 3262 3263 if (scmd->prot_flags & SCSI_PROT_REF_CHECK) { 3264 eedp_flags |= MPI3_EEDPFLAGS_CHK_REF_TAG | 3265 MPI3_EEDPFLAGS_INCR_PRI_REF_TAG; 3266 scsiio_req->cdb.eedp32.primary_reference_tag = 3267 cpu_to_be32(scsi_prot_ref_tag(scmd)); 3268 } 3269 3270 if (scmd->prot_flags & SCSI_PROT_REF_INCREMENT) 3271 eedp_flags |= MPI3_EEDPFLAGS_INCR_PRI_REF_TAG; 3272 3273 eedp_flags |= MPI3_EEDPFLAGS_ESC_MODE_APPTAG_DISABLE; 3274 3275 switch (scsi_prot_interval(scmd)) { 3276 case 512: 3277 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_512; 3278 break; 3279 case 520: 3280 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_520; 3281 break; 3282 case 4080: 3283 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4080; 3284 break; 3285 case 4088: 3286 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4088; 3287 break; 3288 case 4096: 3289 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4096; 3290 break; 3291 case 4104: 3292 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4104; 3293 break; 3294 case 4160: 3295 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4160; 3296 break; 3297 default: 3298 break; 3299 } 3300 3301 scsiio_req->sgl[0].eedp.eedp_flags = cpu_to_le16(eedp_flags); 3302 scsiio_req->sgl[0].eedp.flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_EXTENDED; 3303 } 3304 3305 /** 3306 * mpi3mr_build_sense_buffer - Map sense information 3307 * @desc: Sense type 3308 * @buf: Sense buffer to populate 3309 * @key: Sense key 3310 * @asc: Additional sense code 3311 * @ascq: Additional sense code qualifier 3312 * 3313 * Maps the given sense information into either descriptor or 3314 * fixed format sense data. 3315 * 3316 * Return: Nothing 3317 */ 3318 static inline void mpi3mr_build_sense_buffer(int desc, u8 *buf, u8 key, 3319 u8 asc, u8 ascq) 3320 { 3321 if (desc) { 3322 buf[0] = 0x72; /* descriptor, current */ 3323 buf[1] = key; 3324 buf[2] = asc; 3325 buf[3] = ascq; 3326 buf[7] = 0; 3327 } else { 3328 buf[0] = 0x70; /* fixed, current */ 3329 buf[2] = key; 3330 buf[7] = 0xa; 3331 buf[12] = asc; 3332 buf[13] = ascq; 3333 } 3334 } 3335 3336 /** 3337 * mpi3mr_map_eedp_error - Map EEDP errors from IOC status 3338 * @scmd: SCSI command reference 3339 * @ioc_status: status of MPI3 request 3340 * 3341 * Maps the EEDP error status of the SCSI IO request to sense 3342 * data. 3343 * 3344 * Return: Nothing 3345 */ 3346 static void mpi3mr_map_eedp_error(struct scsi_cmnd *scmd, 3347 u16 ioc_status) 3348 { 3349 u8 ascq = 0; 3350 3351 switch (ioc_status) { 3352 case MPI3_IOCSTATUS_EEDP_GUARD_ERROR: 3353 ascq = 0x01; 3354 break; 3355 case MPI3_IOCSTATUS_EEDP_APP_TAG_ERROR: 3356 ascq = 0x02; 3357 break; 3358 case MPI3_IOCSTATUS_EEDP_REF_TAG_ERROR: 3359 ascq = 0x03; 3360 break; 3361 default: 3362 ascq = 0x00; 3363 break; 3364 } 3365 3366 mpi3mr_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 3367 0x10, ascq); 3368 scmd->result = (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION; 3369 } 3370 3371 /** 3372 * mpi3mr_process_op_reply_desc - reply descriptor handler 3373 * @mrioc: Adapter instance reference 3374 * @reply_desc: Operational reply descriptor 3375 * @reply_dma: place holder for reply DMA address 3376 * @qidx: Operational queue index 3377 * 3378 * Process the operational reply descriptor and identifies the 3379 * descriptor type. Based on the descriptor map the MPI3 request 3380 * status to a SCSI command status and calls scsi_done call 3381 * back. 3382 * 3383 * Return: Nothing 3384 */ 3385 void mpi3mr_process_op_reply_desc(struct mpi3mr_ioc *mrioc, 3386 struct mpi3_default_reply_descriptor *reply_desc, u64 *reply_dma, u16 qidx) 3387 { 3388 u16 reply_desc_type, host_tag = 0; 3389 u16 ioc_status = MPI3_IOCSTATUS_SUCCESS; 3390 u32 ioc_loginfo = 0; 3391 struct mpi3_status_reply_descriptor *status_desc = NULL; 3392 struct mpi3_address_reply_descriptor *addr_desc = NULL; 3393 struct mpi3_success_reply_descriptor *success_desc = NULL; 3394 struct mpi3_scsi_io_reply *scsi_reply = NULL; 3395 struct scsi_cmnd *scmd = NULL; 3396 struct scmd_priv *priv = NULL; 3397 u8 *sense_buf = NULL; 3398 u8 scsi_state = 0, scsi_status = 0, sense_state = 0; 3399 u32 xfer_count = 0, sense_count = 0, resp_data = 0; 3400 u16 dev_handle = 0xFFFF; 3401 struct scsi_sense_hdr sshdr; 3402 struct mpi3mr_stgt_priv_data *stgt_priv_data = NULL; 3403 struct mpi3mr_sdev_priv_data *sdev_priv_data = NULL; 3404 u32 ioc_pend_data_len = 0, tg_pend_data_len = 0, data_len_blks = 0; 3405 struct mpi3mr_throttle_group_info *tg = NULL; 3406 u8 throttle_enabled_dev = 0; 3407 3408 *reply_dma = 0; 3409 reply_desc_type = le16_to_cpu(reply_desc->reply_flags) & 3410 MPI3_REPLY_DESCRIPT_FLAGS_TYPE_MASK; 3411 switch (reply_desc_type) { 3412 case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_STATUS: 3413 status_desc = (struct mpi3_status_reply_descriptor *)reply_desc; 3414 host_tag = le16_to_cpu(status_desc->host_tag); 3415 ioc_status = le16_to_cpu(status_desc->ioc_status); 3416 if (ioc_status & 3417 MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL) 3418 ioc_loginfo = le32_to_cpu(status_desc->ioc_log_info); 3419 ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK; 3420 mpi3mr_reply_trigger(mrioc, ioc_status, ioc_loginfo); 3421 break; 3422 case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_ADDRESS_REPLY: 3423 addr_desc = (struct mpi3_address_reply_descriptor *)reply_desc; 3424 *reply_dma = le64_to_cpu(addr_desc->reply_frame_address); 3425 scsi_reply = mpi3mr_get_reply_virt_addr(mrioc, 3426 *reply_dma); 3427 if (!scsi_reply) { 3428 panic("%s: scsi_reply is NULL, this shouldn't happen\n", 3429 mrioc->name); 3430 goto out; 3431 } 3432 host_tag = le16_to_cpu(scsi_reply->host_tag); 3433 ioc_status = le16_to_cpu(scsi_reply->ioc_status); 3434 scsi_status = scsi_reply->scsi_status; 3435 scsi_state = scsi_reply->scsi_state; 3436 dev_handle = le16_to_cpu(scsi_reply->dev_handle); 3437 sense_state = (scsi_state & MPI3_SCSI_STATE_SENSE_MASK); 3438 xfer_count = le32_to_cpu(scsi_reply->transfer_count); 3439 sense_count = le32_to_cpu(scsi_reply->sense_count); 3440 resp_data = le32_to_cpu(scsi_reply->response_data); 3441 sense_buf = mpi3mr_get_sensebuf_virt_addr(mrioc, 3442 le64_to_cpu(scsi_reply->sense_data_buffer_address)); 3443 if (ioc_status & 3444 MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL) 3445 ioc_loginfo = le32_to_cpu(scsi_reply->ioc_log_info); 3446 ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK; 3447 if (sense_state == MPI3_SCSI_STATE_SENSE_BUFF_Q_EMPTY) 3448 panic("%s: Ran out of sense buffers\n", mrioc->name); 3449 if (sense_buf) { 3450 scsi_normalize_sense(sense_buf, sense_count, &sshdr); 3451 mpi3mr_scsisense_trigger(mrioc, sshdr.sense_key, 3452 sshdr.asc, sshdr.ascq); 3453 } 3454 mpi3mr_reply_trigger(mrioc, ioc_status, ioc_loginfo); 3455 break; 3456 case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SUCCESS: 3457 success_desc = (struct mpi3_success_reply_descriptor *)reply_desc; 3458 host_tag = le16_to_cpu(success_desc->host_tag); 3459 break; 3460 default: 3461 break; 3462 } 3463 scmd = mpi3mr_scmd_from_host_tag(mrioc, host_tag, qidx); 3464 if (!scmd) { 3465 panic("%s: Cannot Identify scmd for host_tag 0x%x\n", 3466 mrioc->name, host_tag); 3467 goto out; 3468 } 3469 priv = scsi_cmd_priv(scmd); 3470 3471 data_len_blks = scsi_bufflen(scmd) >> 9; 3472 sdev_priv_data = scmd->device->hostdata; 3473 if (sdev_priv_data) { 3474 stgt_priv_data = sdev_priv_data->tgt_priv_data; 3475 if (stgt_priv_data) { 3476 tg = stgt_priv_data->throttle_group; 3477 throttle_enabled_dev = 3478 stgt_priv_data->io_throttle_enabled; 3479 dev_handle = stgt_priv_data->dev_handle; 3480 } 3481 } 3482 if (unlikely((data_len_blks >= mrioc->io_throttle_data_length) && 3483 throttle_enabled_dev)) { 3484 ioc_pend_data_len = atomic_sub_return(data_len_blks, 3485 &mrioc->pend_large_data_sz); 3486 if (tg) { 3487 tg_pend_data_len = atomic_sub_return(data_len_blks, 3488 &tg->pend_large_data_sz); 3489 if (tg->io_divert && ((ioc_pend_data_len <= 3490 mrioc->io_throttle_low) && 3491 (tg_pend_data_len <= tg->low))) { 3492 tg->io_divert = 0; 3493 mpi3mr_set_io_divert_for_all_vd_in_tg( 3494 mrioc, tg, 0); 3495 } 3496 } else { 3497 if (ioc_pend_data_len <= mrioc->io_throttle_low) 3498 stgt_priv_data->io_divert = 0; 3499 } 3500 } else if (unlikely((stgt_priv_data && stgt_priv_data->io_divert))) { 3501 ioc_pend_data_len = atomic_read(&mrioc->pend_large_data_sz); 3502 if (!tg) { 3503 if (ioc_pend_data_len <= mrioc->io_throttle_low) 3504 stgt_priv_data->io_divert = 0; 3505 3506 } else if (ioc_pend_data_len <= mrioc->io_throttle_low) { 3507 tg_pend_data_len = atomic_read(&tg->pend_large_data_sz); 3508 if (tg->io_divert && (tg_pend_data_len <= tg->low)) { 3509 tg->io_divert = 0; 3510 mpi3mr_set_io_divert_for_all_vd_in_tg( 3511 mrioc, tg, 0); 3512 } 3513 } 3514 } 3515 3516 if (success_desc) { 3517 scmd->result = DID_OK << 16; 3518 goto out_success; 3519 } 3520 3521 scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_count); 3522 if (ioc_status == MPI3_IOCSTATUS_SCSI_DATA_UNDERRUN && 3523 xfer_count == 0 && (scsi_status == MPI3_SCSI_STATUS_BUSY || 3524 scsi_status == MPI3_SCSI_STATUS_RESERVATION_CONFLICT || 3525 scsi_status == MPI3_SCSI_STATUS_TASK_SET_FULL)) 3526 ioc_status = MPI3_IOCSTATUS_SUCCESS; 3527 3528 if ((sense_state == MPI3_SCSI_STATE_SENSE_VALID) && sense_count && 3529 sense_buf) { 3530 u32 sz = min_t(u32, SCSI_SENSE_BUFFERSIZE, sense_count); 3531 3532 memcpy(scmd->sense_buffer, sense_buf, sz); 3533 } 3534 3535 switch (ioc_status) { 3536 case MPI3_IOCSTATUS_BUSY: 3537 case MPI3_IOCSTATUS_INSUFFICIENT_RESOURCES: 3538 scmd->result = SAM_STAT_BUSY; 3539 break; 3540 case MPI3_IOCSTATUS_SCSI_DEVICE_NOT_THERE: 3541 scmd->result = DID_NO_CONNECT << 16; 3542 break; 3543 case MPI3_IOCSTATUS_SCSI_IOC_TERMINATED: 3544 if (ioc_loginfo == IOC_LOGINFO_SATA_NCQ_FAIL_AFTER_ERR) { 3545 /* 3546 * This is a ATA NCQ command aborted due to another NCQ 3547 * command failure. We must retry this command 3548 * immediately but without incrementing its retry 3549 * counter. 3550 */ 3551 WARN_ON_ONCE(xfer_count != 0); 3552 scmd->result = DID_IMM_RETRY << 16; 3553 } else { 3554 scmd->result = DID_SOFT_ERROR << 16; 3555 } 3556 break; 3557 case MPI3_IOCSTATUS_SCSI_TASK_TERMINATED: 3558 case MPI3_IOCSTATUS_SCSI_EXT_TERMINATED: 3559 scmd->result = DID_RESET << 16; 3560 break; 3561 case MPI3_IOCSTATUS_SCSI_RESIDUAL_MISMATCH: 3562 if ((xfer_count == 0) || (scmd->underflow > xfer_count)) 3563 scmd->result = DID_SOFT_ERROR << 16; 3564 else 3565 scmd->result = (DID_OK << 16) | scsi_status; 3566 break; 3567 case MPI3_IOCSTATUS_SCSI_DATA_UNDERRUN: 3568 scmd->result = (DID_OK << 16) | scsi_status; 3569 if (sense_state == MPI3_SCSI_STATE_SENSE_VALID) 3570 break; 3571 if (xfer_count < scmd->underflow) { 3572 if (scsi_status == SAM_STAT_BUSY) 3573 scmd->result = SAM_STAT_BUSY; 3574 else 3575 scmd->result = DID_SOFT_ERROR << 16; 3576 } else if ((scsi_state & (MPI3_SCSI_STATE_NO_SCSI_STATUS)) || 3577 (sense_state != MPI3_SCSI_STATE_SENSE_NOT_AVAILABLE)) 3578 scmd->result = DID_SOFT_ERROR << 16; 3579 else if (scsi_state & MPI3_SCSI_STATE_TERMINATED) 3580 scmd->result = DID_RESET << 16; 3581 break; 3582 case MPI3_IOCSTATUS_SCSI_DATA_OVERRUN: 3583 scsi_set_resid(scmd, 0); 3584 fallthrough; 3585 case MPI3_IOCSTATUS_SCSI_RECOVERED_ERROR: 3586 case MPI3_IOCSTATUS_SUCCESS: 3587 scmd->result = (DID_OK << 16) | scsi_status; 3588 if ((scsi_state & (MPI3_SCSI_STATE_NO_SCSI_STATUS)) || 3589 (sense_state == MPI3_SCSI_STATE_SENSE_FAILED) || 3590 (sense_state == MPI3_SCSI_STATE_SENSE_BUFF_Q_EMPTY)) 3591 scmd->result = DID_SOFT_ERROR << 16; 3592 else if (scsi_state & MPI3_SCSI_STATE_TERMINATED) 3593 scmd->result = DID_RESET << 16; 3594 break; 3595 case MPI3_IOCSTATUS_EEDP_GUARD_ERROR: 3596 case MPI3_IOCSTATUS_EEDP_REF_TAG_ERROR: 3597 case MPI3_IOCSTATUS_EEDP_APP_TAG_ERROR: 3598 mpi3mr_map_eedp_error(scmd, ioc_status); 3599 break; 3600 case MPI3_IOCSTATUS_SCSI_PROTOCOL_ERROR: 3601 case MPI3_IOCSTATUS_INVALID_FUNCTION: 3602 case MPI3_IOCSTATUS_INVALID_SGL: 3603 case MPI3_IOCSTATUS_INTERNAL_ERROR: 3604 case MPI3_IOCSTATUS_INVALID_FIELD: 3605 case MPI3_IOCSTATUS_INVALID_STATE: 3606 case MPI3_IOCSTATUS_SCSI_IO_DATA_ERROR: 3607 case MPI3_IOCSTATUS_SCSI_TASK_MGMT_FAILED: 3608 case MPI3_IOCSTATUS_INSUFFICIENT_POWER: 3609 default: 3610 scmd->result = DID_SOFT_ERROR << 16; 3611 break; 3612 } 3613 3614 if (scmd->result != (DID_OK << 16) && (scmd->cmnd[0] != ATA_12) && 3615 (scmd->cmnd[0] != ATA_16) && 3616 mrioc->logging_level & MPI3_DEBUG_SCSI_ERROR) { 3617 ioc_info(mrioc, "%s :scmd->result 0x%x\n", __func__, 3618 scmd->result); 3619 scsi_print_command(scmd); 3620 ioc_info(mrioc, 3621 "%s :Command issued to handle 0x%02x returned with error 0x%04x loginfo 0x%08x, qid %d\n", 3622 __func__, dev_handle, ioc_status, ioc_loginfo, 3623 priv->req_q_idx + 1); 3624 ioc_info(mrioc, 3625 " host_tag %d scsi_state 0x%02x scsi_status 0x%02x, xfer_cnt %d resp_data 0x%x\n", 3626 host_tag, scsi_state, scsi_status, xfer_count, resp_data); 3627 if (sense_buf) { 3628 scsi_normalize_sense(sense_buf, sense_count, &sshdr); 3629 ioc_info(mrioc, 3630 "%s :sense_count 0x%x, sense_key 0x%x ASC 0x%x, ASCQ 0x%x\n", 3631 __func__, sense_count, sshdr.sense_key, 3632 sshdr.asc, sshdr.ascq); 3633 } 3634 } 3635 out_success: 3636 if (priv->meta_sg_valid) { 3637 dma_unmap_sg(&mrioc->pdev->dev, scsi_prot_sglist(scmd), 3638 scsi_prot_sg_count(scmd), scmd->sc_data_direction); 3639 } 3640 mpi3mr_clear_scmd_priv(mrioc, scmd); 3641 scsi_dma_unmap(scmd); 3642 scsi_done(scmd); 3643 out: 3644 if (sense_buf) 3645 mpi3mr_repost_sense_buf(mrioc, 3646 le64_to_cpu(scsi_reply->sense_data_buffer_address)); 3647 } 3648 3649 /** 3650 * mpi3mr_get_chain_idx - get free chain buffer index 3651 * @mrioc: Adapter instance reference 3652 * 3653 * Try to get a free chain buffer index from the free pool. 3654 * 3655 * Return: -1 on failure or the free chain buffer index 3656 */ 3657 static int mpi3mr_get_chain_idx(struct mpi3mr_ioc *mrioc) 3658 { 3659 u8 retry_count = 5; 3660 int cmd_idx = -1; 3661 unsigned long flags; 3662 3663 spin_lock_irqsave(&mrioc->chain_buf_lock, flags); 3664 do { 3665 cmd_idx = find_first_zero_bit(mrioc->chain_bitmap, 3666 mrioc->chain_buf_count); 3667 if (cmd_idx < mrioc->chain_buf_count) { 3668 set_bit(cmd_idx, mrioc->chain_bitmap); 3669 break; 3670 } 3671 cmd_idx = -1; 3672 } while (retry_count--); 3673 spin_unlock_irqrestore(&mrioc->chain_buf_lock, flags); 3674 return cmd_idx; 3675 } 3676 3677 /** 3678 * mpi3mr_prepare_sg_scmd - build scatter gather list 3679 * @mrioc: Adapter instance reference 3680 * @scmd: SCSI command reference 3681 * @scsiio_req: MPI3 SCSI IO request 3682 * 3683 * This function maps SCSI command's data and protection SGEs to 3684 * MPI request SGEs. If required additional 4K chain buffer is 3685 * used to send the SGEs. 3686 * 3687 * Return: 0 on success, -ENOMEM on dma_map_sg failure 3688 */ 3689 static int mpi3mr_prepare_sg_scmd(struct mpi3mr_ioc *mrioc, 3690 struct scsi_cmnd *scmd, struct mpi3_scsi_io_request *scsiio_req) 3691 { 3692 dma_addr_t chain_dma; 3693 struct scatterlist *sg_scmd; 3694 void *sg_local, *chain; 3695 u32 chain_length; 3696 int sges_left, chain_idx; 3697 u32 sges_in_segment; 3698 u8 simple_sgl_flags; 3699 u8 simple_sgl_flags_last; 3700 u8 last_chain_sgl_flags; 3701 struct chain_element *chain_req; 3702 struct scmd_priv *priv = NULL; 3703 u32 meta_sg = le32_to_cpu(scsiio_req->flags) & 3704 MPI3_SCSIIO_FLAGS_DMAOPERATION_HOST_PI; 3705 3706 priv = scsi_cmd_priv(scmd); 3707 3708 simple_sgl_flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_SIMPLE | 3709 MPI3_SGE_FLAGS_DLAS_SYSTEM; 3710 simple_sgl_flags_last = simple_sgl_flags | 3711 MPI3_SGE_FLAGS_END_OF_LIST; 3712 last_chain_sgl_flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_LAST_CHAIN | 3713 MPI3_SGE_FLAGS_DLAS_SYSTEM; 3714 3715 if (meta_sg) 3716 sg_local = &scsiio_req->sgl[MPI3_SCSIIO_METASGL_INDEX]; 3717 else 3718 sg_local = &scsiio_req->sgl; 3719 3720 if (!scsiio_req->data_length && !meta_sg) { 3721 mpi3mr_build_zero_len_sge(sg_local); 3722 return 0; 3723 } 3724 3725 if (meta_sg) { 3726 sg_scmd = scsi_prot_sglist(scmd); 3727 sges_left = dma_map_sg(&mrioc->pdev->dev, 3728 scsi_prot_sglist(scmd), 3729 scsi_prot_sg_count(scmd), 3730 scmd->sc_data_direction); 3731 priv->meta_sg_valid = 1; /* To unmap meta sg DMA */ 3732 } else { 3733 /* 3734 * Some firmware versions byte-swap the REPORT ZONES command 3735 * reply from ATA-ZAC devices by directly accessing in the host 3736 * buffer. This does not respect the default command DMA 3737 * direction and causes IOMMU page faults on some architectures 3738 * with an IOMMU enforcing write mappings (e.g. AMD hosts). 3739 * Avoid such issue by making the REPORT ZONES buffer mapping 3740 * bi-directional. 3741 */ 3742 if (scmd->cmnd[0] == ZBC_IN && scmd->cmnd[1] == ZI_REPORT_ZONES) 3743 scmd->sc_data_direction = DMA_BIDIRECTIONAL; 3744 sg_scmd = scsi_sglist(scmd); 3745 sges_left = scsi_dma_map(scmd); 3746 } 3747 3748 if (sges_left < 0) { 3749 sdev_printk(KERN_ERR, scmd->device, 3750 "scsi_dma_map failed: request for %d bytes!\n", 3751 scsi_bufflen(scmd)); 3752 return -ENOMEM; 3753 } 3754 if (sges_left > mrioc->max_sgl_entries) { 3755 sdev_printk(KERN_ERR, scmd->device, 3756 "scsi_dma_map returned unsupported sge count %d!\n", 3757 sges_left); 3758 return -ENOMEM; 3759 } 3760 3761 sges_in_segment = (mrioc->facts.op_req_sz - 3762 offsetof(struct mpi3_scsi_io_request, sgl)) / sizeof(struct mpi3_sge_common); 3763 3764 if (scsiio_req->sgl[0].eedp.flags == 3765 MPI3_SGE_FLAGS_ELEMENT_TYPE_EXTENDED && !meta_sg) { 3766 sg_local += sizeof(struct mpi3_sge_common); 3767 sges_in_segment--; 3768 /* Reserve 1st segment (scsiio_req->sgl[0]) for eedp */ 3769 } 3770 3771 if (scsiio_req->msg_flags == 3772 MPI3_SCSIIO_MSGFLAGS_METASGL_VALID && !meta_sg) { 3773 sges_in_segment--; 3774 /* Reserve last segment (scsiio_req->sgl[3]) for meta sg */ 3775 } 3776 3777 if (meta_sg) 3778 sges_in_segment = 1; 3779 3780 if (sges_left <= sges_in_segment) 3781 goto fill_in_last_segment; 3782 3783 /* fill in main message segment when there is a chain following */ 3784 while (sges_in_segment > 1) { 3785 mpi3mr_add_sg_single(sg_local, simple_sgl_flags, 3786 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd)); 3787 sg_scmd = sg_next(sg_scmd); 3788 sg_local += sizeof(struct mpi3_sge_common); 3789 sges_left--; 3790 sges_in_segment--; 3791 } 3792 3793 chain_idx = mpi3mr_get_chain_idx(mrioc); 3794 if (chain_idx < 0) 3795 return -1; 3796 chain_req = &mrioc->chain_sgl_list[chain_idx]; 3797 if (meta_sg) 3798 priv->meta_chain_idx = chain_idx; 3799 else 3800 priv->chain_idx = chain_idx; 3801 3802 chain = chain_req->addr; 3803 chain_dma = chain_req->dma_addr; 3804 sges_in_segment = sges_left; 3805 chain_length = sges_in_segment * sizeof(struct mpi3_sge_common); 3806 3807 mpi3mr_add_sg_single(sg_local, last_chain_sgl_flags, 3808 chain_length, chain_dma); 3809 3810 sg_local = chain; 3811 3812 fill_in_last_segment: 3813 while (sges_left > 0) { 3814 if (sges_left == 1) 3815 mpi3mr_add_sg_single(sg_local, 3816 simple_sgl_flags_last, sg_dma_len(sg_scmd), 3817 sg_dma_address(sg_scmd)); 3818 else 3819 mpi3mr_add_sg_single(sg_local, simple_sgl_flags, 3820 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd)); 3821 sg_scmd = sg_next(sg_scmd); 3822 sg_local += sizeof(struct mpi3_sge_common); 3823 sges_left--; 3824 } 3825 3826 return 0; 3827 } 3828 3829 /** 3830 * mpi3mr_build_sg_scmd - build scatter gather list for SCSI IO 3831 * @mrioc: Adapter instance reference 3832 * @scmd: SCSI command reference 3833 * @scsiio_req: MPI3 SCSI IO request 3834 * 3835 * This function calls mpi3mr_prepare_sg_scmd for constructing 3836 * both data SGEs and protection information SGEs in the MPI 3837 * format from the SCSI Command as appropriate . 3838 * 3839 * Return: return value of mpi3mr_prepare_sg_scmd. 3840 */ 3841 static int mpi3mr_build_sg_scmd(struct mpi3mr_ioc *mrioc, 3842 struct scsi_cmnd *scmd, struct mpi3_scsi_io_request *scsiio_req) 3843 { 3844 int ret; 3845 3846 ret = mpi3mr_prepare_sg_scmd(mrioc, scmd, scsiio_req); 3847 if (ret) 3848 return ret; 3849 3850 if (scsiio_req->msg_flags == MPI3_SCSIIO_MSGFLAGS_METASGL_VALID) { 3851 /* There is a valid meta sg */ 3852 scsiio_req->flags |= 3853 cpu_to_le32(MPI3_SCSIIO_FLAGS_DMAOPERATION_HOST_PI); 3854 ret = mpi3mr_prepare_sg_scmd(mrioc, scmd, scsiio_req); 3855 } 3856 3857 return ret; 3858 } 3859 3860 /** 3861 * mpi3mr_tm_response_name - get TM response as a string 3862 * @resp_code: TM response code 3863 * 3864 * Convert known task management response code as a readable 3865 * string. 3866 * 3867 * Return: response code string. 3868 */ 3869 static const char *mpi3mr_tm_response_name(u8 resp_code) 3870 { 3871 char *desc; 3872 3873 switch (resp_code) { 3874 case MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE: 3875 desc = "task management request completed"; 3876 break; 3877 case MPI3_SCSITASKMGMT_RSPCODE_INVALID_FRAME: 3878 desc = "invalid frame"; 3879 break; 3880 case MPI3_SCSITASKMGMT_RSPCODE_TM_FUNCTION_NOT_SUPPORTED: 3881 desc = "task management request not supported"; 3882 break; 3883 case MPI3_SCSITASKMGMT_RSPCODE_TM_FAILED: 3884 desc = "task management request failed"; 3885 break; 3886 case MPI3_SCSITASKMGMT_RSPCODE_TM_SUCCEEDED: 3887 desc = "task management request succeeded"; 3888 break; 3889 case MPI3_SCSITASKMGMT_RSPCODE_TM_INVALID_LUN: 3890 desc = "invalid LUN"; 3891 break; 3892 case MPI3_SCSITASKMGMT_RSPCODE_TM_OVERLAPPED_TAG: 3893 desc = "overlapped tag attempted"; 3894 break; 3895 case MPI3_SCSITASKMGMT_RSPCODE_IO_QUEUED_ON_IOC: 3896 desc = "task queued, however not sent to target"; 3897 break; 3898 case MPI3_SCSITASKMGMT_RSPCODE_TM_NVME_DENIED: 3899 desc = "task management request denied by NVMe device"; 3900 break; 3901 default: 3902 desc = "unknown"; 3903 break; 3904 } 3905 3906 return desc; 3907 } 3908 3909 inline void mpi3mr_poll_pend_io_completions(struct mpi3mr_ioc *mrioc) 3910 { 3911 int i; 3912 int num_of_reply_queues = 3913 mrioc->num_op_reply_q + mrioc->op_reply_q_offset; 3914 3915 for (i = mrioc->op_reply_q_offset; i < num_of_reply_queues; i++) 3916 mpi3mr_process_op_reply_q(mrioc, 3917 mrioc->intr_info[i].op_reply_q); 3918 } 3919 3920 /** 3921 * mpi3mr_issue_tm - Issue Task Management request 3922 * @mrioc: Adapter instance reference 3923 * @tm_type: Task Management type 3924 * @handle: Device handle 3925 * @lun: lun ID 3926 * @htag: Host tag of the TM request 3927 * @timeout: TM timeout value 3928 * @drv_cmd: Internal command tracker 3929 * @resp_code: Response code place holder 3930 * @scmd: SCSI command 3931 * 3932 * Issues a Task Management Request to the controller for a 3933 * specified target, lun and command and wait for its completion 3934 * and check TM response. Recover the TM if it timed out by 3935 * issuing controller reset. 3936 * 3937 * Return: 0 on success, non-zero on errors 3938 */ 3939 int mpi3mr_issue_tm(struct mpi3mr_ioc *mrioc, u8 tm_type, 3940 u16 handle, uint lun, u16 htag, ulong timeout, 3941 struct mpi3mr_drv_cmd *drv_cmd, 3942 u8 *resp_code, struct scsi_cmnd *scmd) 3943 { 3944 struct mpi3_scsi_task_mgmt_request tm_req; 3945 struct mpi3_scsi_task_mgmt_reply *tm_reply = NULL; 3946 int retval = 0; 3947 struct mpi3mr_tgt_dev *tgtdev = NULL; 3948 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL; 3949 struct scmd_priv *cmd_priv = NULL; 3950 struct scsi_device *sdev = NULL; 3951 struct mpi3mr_sdev_priv_data *sdev_priv_data = NULL; 3952 3953 ioc_info(mrioc, "%s :Issue TM: TM type (0x%x) for devhandle 0x%04x\n", 3954 __func__, tm_type, handle); 3955 if (mrioc->unrecoverable) { 3956 retval = -1; 3957 ioc_err(mrioc, "%s :Issue TM: Unrecoverable controller\n", 3958 __func__); 3959 goto out; 3960 } 3961 3962 memset(&tm_req, 0, sizeof(tm_req)); 3963 mutex_lock(&drv_cmd->mutex); 3964 if (drv_cmd->state & MPI3MR_CMD_PENDING) { 3965 retval = -1; 3966 ioc_err(mrioc, "%s :Issue TM: Command is in use\n", __func__); 3967 mutex_unlock(&drv_cmd->mutex); 3968 goto out; 3969 } 3970 if (mrioc->reset_in_progress) { 3971 retval = -1; 3972 ioc_err(mrioc, "%s :Issue TM: Reset in progress\n", __func__); 3973 mutex_unlock(&drv_cmd->mutex); 3974 goto out; 3975 } 3976 if (mrioc->block_on_pci_err) { 3977 retval = -1; 3978 dprint_tm(mrioc, "sending task management failed due to\n" 3979 "pci error recovery in progress\n"); 3980 mutex_unlock(&drv_cmd->mutex); 3981 goto out; 3982 } 3983 3984 drv_cmd->state = MPI3MR_CMD_PENDING; 3985 drv_cmd->is_waiting = 1; 3986 drv_cmd->callback = NULL; 3987 tm_req.dev_handle = cpu_to_le16(handle); 3988 tm_req.task_type = tm_type; 3989 tm_req.host_tag = cpu_to_le16(htag); 3990 3991 int_to_scsilun(lun, (struct scsi_lun *)tm_req.lun); 3992 tm_req.function = MPI3_FUNCTION_SCSI_TASK_MGMT; 3993 3994 tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle); 3995 3996 if (scmd) { 3997 if (tm_type == MPI3_SCSITASKMGMT_TASKTYPE_ABORT_TASK) { 3998 cmd_priv = scsi_cmd_priv(scmd); 3999 if (!cmd_priv) 4000 goto out_unlock; 4001 4002 struct op_req_qinfo *op_req_q; 4003 4004 op_req_q = &mrioc->req_qinfo[cmd_priv->req_q_idx]; 4005 tm_req.task_host_tag = cpu_to_le16(cmd_priv->host_tag); 4006 tm_req.task_request_queue_id = 4007 cpu_to_le16(op_req_q->qid); 4008 } 4009 sdev = scmd->device; 4010 sdev_priv_data = sdev->hostdata; 4011 scsi_tgt_priv_data = ((sdev_priv_data) ? 4012 sdev_priv_data->tgt_priv_data : NULL); 4013 } else { 4014 if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata) 4015 scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *) 4016 tgtdev->starget->hostdata; 4017 } 4018 4019 if (scsi_tgt_priv_data) 4020 atomic_inc(&scsi_tgt_priv_data->block_io); 4021 4022 if (tgtdev) { 4023 if (tgtdev->dev_type == MPI3_DEVICE_DEVFORM_PCIE) 4024 timeout = cmd_priv ? tgtdev->dev_spec.pcie_inf.abort_to 4025 : tgtdev->dev_spec.pcie_inf.reset_to; 4026 else if (tgtdev->dev_type == MPI3_DEVICE_DEVFORM_VD) 4027 timeout = cmd_priv ? tgtdev->dev_spec.vd_inf.abort_to 4028 : tgtdev->dev_spec.vd_inf.reset_to; 4029 } 4030 4031 init_completion(&drv_cmd->done); 4032 retval = mpi3mr_admin_request_post(mrioc, &tm_req, sizeof(tm_req), 1); 4033 if (retval) { 4034 ioc_err(mrioc, "%s :Issue TM: Admin Post failed\n", __func__); 4035 goto out_unlock; 4036 } 4037 wait_for_completion_timeout(&drv_cmd->done, (timeout * HZ)); 4038 4039 if (!(drv_cmd->state & MPI3MR_CMD_COMPLETE)) { 4040 drv_cmd->is_waiting = 0; 4041 retval = -1; 4042 if (!(drv_cmd->state & MPI3MR_CMD_RESET)) { 4043 dprint_tm(mrioc, 4044 "task management request timed out after %ld seconds\n", 4045 timeout); 4046 if (mrioc->logging_level & MPI3_DEBUG_TM) 4047 dprint_dump_req(&tm_req, sizeof(tm_req)/4); 4048 mpi3mr_soft_reset_handler(mrioc, 4049 MPI3MR_RESET_FROM_TM_TIMEOUT, 1); 4050 } 4051 goto out_unlock; 4052 } 4053 4054 if (!(drv_cmd->state & MPI3MR_CMD_REPLY_VALID)) { 4055 dprint_tm(mrioc, "invalid task management reply message\n"); 4056 retval = -1; 4057 goto out_unlock; 4058 } 4059 4060 tm_reply = (struct mpi3_scsi_task_mgmt_reply *)drv_cmd->reply; 4061 4062 switch (drv_cmd->ioc_status) { 4063 case MPI3_IOCSTATUS_SUCCESS: 4064 *resp_code = le32_to_cpu(tm_reply->response_data) & 4065 MPI3MR_RI_MASK_RESPCODE; 4066 break; 4067 case MPI3_IOCSTATUS_SCSI_IOC_TERMINATED: 4068 *resp_code = MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE; 4069 break; 4070 default: 4071 dprint_tm(mrioc, 4072 "task management request to handle(0x%04x) is failed with ioc_status(0x%04x) log_info(0x%08x)\n", 4073 handle, drv_cmd->ioc_status, drv_cmd->ioc_loginfo); 4074 retval = -1; 4075 goto out_unlock; 4076 } 4077 4078 switch (*resp_code) { 4079 case MPI3_SCSITASKMGMT_RSPCODE_TM_SUCCEEDED: 4080 case MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE: 4081 break; 4082 case MPI3_SCSITASKMGMT_RSPCODE_IO_QUEUED_ON_IOC: 4083 if (tm_type != MPI3_SCSITASKMGMT_TASKTYPE_QUERY_TASK) 4084 retval = -1; 4085 break; 4086 default: 4087 retval = -1; 4088 break; 4089 } 4090 4091 dprint_tm(mrioc, 4092 "task management request type(%d) completed for handle(0x%04x) with ioc_status(0x%04x), log_info(0x%08x), termination_count(%d), response:%s(0x%x)\n", 4093 tm_type, handle, drv_cmd->ioc_status, drv_cmd->ioc_loginfo, 4094 le32_to_cpu(tm_reply->termination_count), 4095 mpi3mr_tm_response_name(*resp_code), *resp_code); 4096 4097 if (!retval) { 4098 mpi3mr_ioc_disable_intr(mrioc); 4099 mpi3mr_poll_pend_io_completions(mrioc); 4100 mpi3mr_ioc_enable_intr(mrioc); 4101 mpi3mr_poll_pend_io_completions(mrioc); 4102 mpi3mr_process_admin_reply_q(mrioc); 4103 } 4104 switch (tm_type) { 4105 case MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET: 4106 if (!scsi_tgt_priv_data) 4107 break; 4108 scsi_tgt_priv_data->pend_count = 0; 4109 blk_mq_tagset_busy_iter(&mrioc->shost->tag_set, 4110 mpi3mr_count_tgt_pending, 4111 (void *)scsi_tgt_priv_data->starget); 4112 break; 4113 case MPI3_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET: 4114 if (!sdev_priv_data) 4115 break; 4116 sdev_priv_data->pend_count = 0; 4117 blk_mq_tagset_busy_iter(&mrioc->shost->tag_set, 4118 mpi3mr_count_dev_pending, (void *)sdev); 4119 break; 4120 default: 4121 break; 4122 } 4123 mpi3mr_global_trigger(mrioc, 4124 MPI3_DRIVER2_GLOBALTRIGGER_TASK_MANAGEMENT_ENABLED); 4125 4126 out_unlock: 4127 drv_cmd->state = MPI3MR_CMD_NOTUSED; 4128 mutex_unlock(&drv_cmd->mutex); 4129 if (scsi_tgt_priv_data) 4130 atomic_dec_if_positive(&scsi_tgt_priv_data->block_io); 4131 if (tgtdev) 4132 mpi3mr_tgtdev_put(tgtdev); 4133 out: 4134 return retval; 4135 } 4136 4137 /** 4138 * mpi3mr_bios_param - BIOS param callback 4139 * @sdev: SCSI device reference 4140 * @unused: gendisk reference 4141 * @capacity: Capacity in logical sectors 4142 * @params: Parameter array 4143 * 4144 * Just the parameters with heads/secots/cylinders. 4145 * 4146 * Return: 0 always 4147 */ 4148 static int mpi3mr_bios_param(struct scsi_device *sdev, 4149 struct gendisk *unused, sector_t capacity, int params[]) 4150 { 4151 int heads; 4152 int sectors; 4153 sector_t cylinders; 4154 ulong dummy; 4155 4156 heads = 64; 4157 sectors = 32; 4158 4159 dummy = heads * sectors; 4160 cylinders = capacity; 4161 sector_div(cylinders, dummy); 4162 4163 if ((ulong)capacity >= 0x200000) { 4164 heads = 255; 4165 sectors = 63; 4166 dummy = heads * sectors; 4167 cylinders = capacity; 4168 sector_div(cylinders, dummy); 4169 } 4170 4171 params[0] = heads; 4172 params[1] = sectors; 4173 params[2] = cylinders; 4174 return 0; 4175 } 4176 4177 /** 4178 * mpi3mr_map_queues - Map queues callback handler 4179 * @shost: SCSI host reference 4180 * 4181 * Maps default and poll queues. 4182 * 4183 * Return: return zero. 4184 */ 4185 static void mpi3mr_map_queues(struct Scsi_Host *shost) 4186 { 4187 struct mpi3mr_ioc *mrioc = shost_priv(shost); 4188 int i, qoff, offset; 4189 struct blk_mq_queue_map *map = NULL; 4190 4191 offset = mrioc->op_reply_q_offset; 4192 4193 for (i = 0, qoff = 0; i < HCTX_MAX_TYPES; i++) { 4194 map = &shost->tag_set.map[i]; 4195 4196 map->nr_queues = 0; 4197 4198 if (i == HCTX_TYPE_DEFAULT) 4199 map->nr_queues = mrioc->default_qcount; 4200 else if (i == HCTX_TYPE_POLL) 4201 map->nr_queues = mrioc->active_poll_qcount; 4202 4203 if (!map->nr_queues) { 4204 BUG_ON(i == HCTX_TYPE_DEFAULT); 4205 continue; 4206 } 4207 4208 /* 4209 * The poll queue(s) doesn't have an IRQ (and hence IRQ 4210 * affinity), so use the regular blk-mq cpu mapping 4211 */ 4212 map->queue_offset = qoff; 4213 if (i != HCTX_TYPE_POLL) 4214 blk_mq_map_hw_queues(map, &mrioc->pdev->dev, offset); 4215 else 4216 blk_mq_map_queues(map); 4217 4218 qoff += map->nr_queues; 4219 offset += map->nr_queues; 4220 } 4221 } 4222 4223 /** 4224 * mpi3mr_get_fw_pending_ios - Calculate pending I/O count 4225 * @mrioc: Adapter instance reference 4226 * 4227 * Calculate the pending I/Os for the controller and return. 4228 * 4229 * Return: Number of pending I/Os 4230 */ 4231 static inline int mpi3mr_get_fw_pending_ios(struct mpi3mr_ioc *mrioc) 4232 { 4233 u16 i; 4234 uint pend_ios = 0; 4235 4236 for (i = 0; i < mrioc->num_op_reply_q; i++) 4237 pend_ios += atomic_read(&mrioc->op_reply_qinfo[i].pend_ios); 4238 return pend_ios; 4239 } 4240 4241 /** 4242 * mpi3mr_print_pending_host_io - print pending I/Os 4243 * @mrioc: Adapter instance reference 4244 * 4245 * Print number of pending I/Os and each I/O details prior to 4246 * reset for debug purpose. 4247 * 4248 * Return: Nothing 4249 */ 4250 static void mpi3mr_print_pending_host_io(struct mpi3mr_ioc *mrioc) 4251 { 4252 struct Scsi_Host *shost = mrioc->shost; 4253 4254 ioc_info(mrioc, "%s :Pending commands prior to reset: %d\n", 4255 __func__, mpi3mr_get_fw_pending_ios(mrioc)); 4256 blk_mq_tagset_busy_iter(&shost->tag_set, 4257 mpi3mr_print_scmd, (void *)mrioc); 4258 } 4259 4260 /** 4261 * mpi3mr_wait_for_host_io - block for I/Os to complete 4262 * @mrioc: Adapter instance reference 4263 * @timeout: time out in seconds 4264 * Waits for pending I/Os for the given adapter to complete or 4265 * to hit the timeout. 4266 * 4267 * Return: Nothing 4268 */ 4269 void mpi3mr_wait_for_host_io(struct mpi3mr_ioc *mrioc, u32 timeout) 4270 { 4271 enum mpi3mr_iocstate iocstate; 4272 int i = 0; 4273 4274 iocstate = mpi3mr_get_iocstate(mrioc); 4275 if (iocstate != MRIOC_STATE_READY) 4276 return; 4277 4278 if (!mpi3mr_get_fw_pending_ios(mrioc)) 4279 return; 4280 ioc_info(mrioc, 4281 "%s :Waiting for %d seconds prior to reset for %d I/O\n", 4282 __func__, timeout, mpi3mr_get_fw_pending_ios(mrioc)); 4283 4284 for (i = 0; i < timeout; i++) { 4285 if (!mpi3mr_get_fw_pending_ios(mrioc)) 4286 break; 4287 iocstate = mpi3mr_get_iocstate(mrioc); 4288 if (iocstate != MRIOC_STATE_READY) 4289 break; 4290 msleep(1000); 4291 } 4292 4293 ioc_info(mrioc, "%s :Pending I/Os after wait is: %d\n", __func__, 4294 mpi3mr_get_fw_pending_ios(mrioc)); 4295 } 4296 4297 /** 4298 * mpi3mr_setup_divert_ws - Setup Divert IO flag for write same 4299 * @mrioc: Adapter instance reference 4300 * @scmd: SCSI command reference 4301 * @scsiio_req: MPI3 SCSI IO request 4302 * @scsiio_flags: Pointer to MPI3 SCSI IO Flags 4303 * @wslen: write same max length 4304 * 4305 * Gets values of unmap, ndob and number of blocks from write 4306 * same scsi io and based on these values it sets divert IO flag 4307 * and reason for diverting IO to firmware. 4308 * 4309 * Return: Nothing 4310 */ 4311 static inline void mpi3mr_setup_divert_ws(struct mpi3mr_ioc *mrioc, 4312 struct scsi_cmnd *scmd, struct mpi3_scsi_io_request *scsiio_req, 4313 u32 *scsiio_flags, u16 wslen) 4314 { 4315 u8 unmap = 0, ndob = 0; 4316 u8 opcode = scmd->cmnd[0]; 4317 u32 num_blocks = 0; 4318 u16 sa = (scmd->cmnd[8] << 8) | (scmd->cmnd[9]); 4319 4320 if (opcode == WRITE_SAME_16) { 4321 unmap = scmd->cmnd[1] & 0x08; 4322 ndob = scmd->cmnd[1] & 0x01; 4323 num_blocks = get_unaligned_be32(scmd->cmnd + 10); 4324 } else if ((opcode == VARIABLE_LENGTH_CMD) && (sa == WRITE_SAME_32)) { 4325 unmap = scmd->cmnd[10] & 0x08; 4326 ndob = scmd->cmnd[10] & 0x01; 4327 num_blocks = get_unaligned_be32(scmd->cmnd + 28); 4328 } else 4329 return; 4330 4331 if ((unmap) && (ndob) && (num_blocks > wslen)) { 4332 scsiio_req->msg_flags |= 4333 MPI3_SCSIIO_MSGFLAGS_DIVERT_TO_FIRMWARE; 4334 *scsiio_flags |= 4335 MPI3_SCSIIO_FLAGS_DIVERT_REASON_WRITE_SAME_TOO_LARGE; 4336 } 4337 } 4338 4339 /** 4340 * mpi3mr_eh_host_reset - Host reset error handling callback 4341 * @scmd: SCSI command reference 4342 * 4343 * Issue controller reset 4344 * 4345 * Return: SUCCESS of successful reset else FAILED 4346 */ 4347 static int mpi3mr_eh_host_reset(struct scsi_cmnd *scmd) 4348 { 4349 struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host); 4350 int retval = FAILED, ret; 4351 4352 ret = mpi3mr_soft_reset_handler(mrioc, 4353 MPI3MR_RESET_FROM_EH_HOS, 1); 4354 if (ret) 4355 goto out; 4356 4357 retval = SUCCESS; 4358 out: 4359 sdev_printk(KERN_INFO, scmd->device, 4360 "Host reset is %s for scmd(%p)\n", 4361 ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd); 4362 4363 return retval; 4364 } 4365 4366 /** 4367 * mpi3mr_eh_bus_reset - Bus reset error handling callback 4368 * @scmd: SCSI command reference 4369 * 4370 * Checks whether pending I/Os are present for the RAID volume; 4371 * if not there's no need to reset the adapter. 4372 * 4373 * Return: SUCCESS of successful reset else FAILED 4374 */ 4375 static int mpi3mr_eh_bus_reset(struct scsi_cmnd *scmd) 4376 { 4377 struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host); 4378 struct mpi3mr_stgt_priv_data *stgt_priv_data; 4379 struct mpi3mr_sdev_priv_data *sdev_priv_data; 4380 u8 dev_type = MPI3_DEVICE_DEVFORM_VD; 4381 int retval = FAILED; 4382 unsigned int timeout = MPI3MR_RESET_TIMEOUT; 4383 4384 sdev_priv_data = scmd->device->hostdata; 4385 if (sdev_priv_data && sdev_priv_data->tgt_priv_data) { 4386 stgt_priv_data = sdev_priv_data->tgt_priv_data; 4387 dev_type = stgt_priv_data->dev_type; 4388 } 4389 4390 if (dev_type == MPI3_DEVICE_DEVFORM_VD) { 4391 mpi3mr_wait_for_host_io(mrioc, 4392 MPI3MR_RAID_ERRREC_RESET_TIMEOUT); 4393 if (!mpi3mr_get_fw_pending_ios(mrioc)) { 4394 while (mrioc->reset_in_progress || 4395 mrioc->prepare_for_reset || 4396 mrioc->block_on_pci_err) { 4397 ssleep(1); 4398 if (!timeout--) { 4399 retval = FAILED; 4400 goto out; 4401 } 4402 } 4403 retval = SUCCESS; 4404 goto out; 4405 } 4406 } 4407 if (retval == FAILED) 4408 mpi3mr_print_pending_host_io(mrioc); 4409 4410 out: 4411 sdev_printk(KERN_INFO, scmd->device, 4412 "Bus reset is %s for scmd(%p)\n", 4413 ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd); 4414 return retval; 4415 } 4416 4417 /** 4418 * mpi3mr_eh_target_reset - Target reset error handling callback 4419 * @scmd: SCSI command reference 4420 * 4421 * Issue Target reset Task Management and verify the scmd is 4422 * terminated successfully and return status accordingly. 4423 * 4424 * Return: SUCCESS of successful termination of the scmd else 4425 * FAILED 4426 */ 4427 static int mpi3mr_eh_target_reset(struct scsi_cmnd *scmd) 4428 { 4429 struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host); 4430 struct mpi3mr_stgt_priv_data *stgt_priv_data; 4431 struct mpi3mr_sdev_priv_data *sdev_priv_data; 4432 u16 dev_handle; 4433 u8 resp_code = 0; 4434 int retval = FAILED, ret = 0; 4435 4436 sdev_printk(KERN_INFO, scmd->device, 4437 "Attempting Target Reset! scmd(%p)\n", scmd); 4438 scsi_print_command(scmd); 4439 4440 sdev_priv_data = scmd->device->hostdata; 4441 if (!sdev_priv_data || !sdev_priv_data->tgt_priv_data) { 4442 sdev_printk(KERN_INFO, scmd->device, 4443 "SCSI device is not available\n"); 4444 retval = SUCCESS; 4445 goto out; 4446 } 4447 4448 stgt_priv_data = sdev_priv_data->tgt_priv_data; 4449 dev_handle = stgt_priv_data->dev_handle; 4450 if (stgt_priv_data->dev_removed) { 4451 struct scmd_priv *cmd_priv = scsi_cmd_priv(scmd); 4452 sdev_printk(KERN_INFO, scmd->device, 4453 "%s:target(handle = 0x%04x) is removed, target reset is not issued\n", 4454 mrioc->name, dev_handle); 4455 if (!cmd_priv->in_lld_scope || cmd_priv->host_tag == MPI3MR_HOSTTAG_INVALID) 4456 retval = SUCCESS; 4457 else 4458 retval = FAILED; 4459 goto out; 4460 } 4461 sdev_printk(KERN_INFO, scmd->device, 4462 "Target Reset is issued to handle(0x%04x)\n", 4463 dev_handle); 4464 4465 ret = mpi3mr_issue_tm(mrioc, 4466 MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET, dev_handle, 4467 sdev_priv_data->lun_id, MPI3MR_HOSTTAG_BLK_TMS, 4468 MPI3MR_RESETTM_TIMEOUT, &mrioc->host_tm_cmds, &resp_code, scmd); 4469 4470 if (ret) 4471 goto out; 4472 4473 if (stgt_priv_data->pend_count) { 4474 sdev_printk(KERN_INFO, scmd->device, 4475 "%s: target has %d pending commands, target reset is failed\n", 4476 mrioc->name, stgt_priv_data->pend_count); 4477 goto out; 4478 } 4479 4480 retval = SUCCESS; 4481 out: 4482 sdev_printk(KERN_INFO, scmd->device, 4483 "%s: target reset is %s for scmd(%p)\n", mrioc->name, 4484 ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd); 4485 4486 return retval; 4487 } 4488 4489 /** 4490 * mpi3mr_eh_dev_reset- Device reset error handling callback 4491 * @scmd: SCSI command reference 4492 * 4493 * Issue lun reset Task Management and verify the scmd is 4494 * terminated successfully and return status accordingly. 4495 * 4496 * Return: SUCCESS of successful termination of the scmd else 4497 * FAILED 4498 */ 4499 static int mpi3mr_eh_dev_reset(struct scsi_cmnd *scmd) 4500 { 4501 struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host); 4502 struct mpi3mr_stgt_priv_data *stgt_priv_data; 4503 struct mpi3mr_sdev_priv_data *sdev_priv_data; 4504 u16 dev_handle; 4505 u8 resp_code = 0; 4506 int retval = FAILED, ret = 0; 4507 4508 sdev_printk(KERN_INFO, scmd->device, 4509 "Attempting Device(lun) Reset! scmd(%p)\n", scmd); 4510 scsi_print_command(scmd); 4511 4512 sdev_priv_data = scmd->device->hostdata; 4513 if (!sdev_priv_data || !sdev_priv_data->tgt_priv_data) { 4514 sdev_printk(KERN_INFO, scmd->device, 4515 "SCSI device is not available\n"); 4516 retval = SUCCESS; 4517 goto out; 4518 } 4519 4520 stgt_priv_data = sdev_priv_data->tgt_priv_data; 4521 dev_handle = stgt_priv_data->dev_handle; 4522 if (stgt_priv_data->dev_removed) { 4523 struct scmd_priv *cmd_priv = scsi_cmd_priv(scmd); 4524 sdev_printk(KERN_INFO, scmd->device, 4525 "%s: device(handle = 0x%04x) is removed, device(LUN) reset is not issued\n", 4526 mrioc->name, dev_handle); 4527 if (!cmd_priv->in_lld_scope || cmd_priv->host_tag == MPI3MR_HOSTTAG_INVALID) 4528 retval = SUCCESS; 4529 else 4530 retval = FAILED; 4531 goto out; 4532 } 4533 sdev_printk(KERN_INFO, scmd->device, 4534 "Device(lun) Reset is issued to handle(0x%04x)\n", dev_handle); 4535 4536 ret = mpi3mr_issue_tm(mrioc, 4537 MPI3_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, dev_handle, 4538 sdev_priv_data->lun_id, MPI3MR_HOSTTAG_BLK_TMS, 4539 MPI3MR_RESETTM_TIMEOUT, &mrioc->host_tm_cmds, &resp_code, scmd); 4540 4541 if (ret) 4542 goto out; 4543 4544 if (sdev_priv_data->pend_count) { 4545 sdev_printk(KERN_INFO, scmd->device, 4546 "%s: device has %d pending commands, device(LUN) reset is failed\n", 4547 mrioc->name, sdev_priv_data->pend_count); 4548 goto out; 4549 } 4550 retval = SUCCESS; 4551 out: 4552 sdev_printk(KERN_INFO, scmd->device, 4553 "%s: device(LUN) reset is %s for scmd(%p)\n", mrioc->name, 4554 ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd); 4555 4556 return retval; 4557 } 4558 4559 /** 4560 * mpi3mr_eh_abort - Callback function for abort error handling 4561 * @scmd: SCSI command reference 4562 * 4563 * Issues Abort Task Management if the command is in LLD scope 4564 * and verifies if it is aborted successfully, and return status 4565 * accordingly. 4566 * 4567 * Return: SUCCESS if the abort was successful, otherwise FAILED 4568 */ 4569 static int mpi3mr_eh_abort(struct scsi_cmnd *scmd) 4570 { 4571 struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host); 4572 struct mpi3mr_stgt_priv_data *stgt_priv_data; 4573 struct mpi3mr_sdev_priv_data *sdev_priv_data; 4574 struct scmd_priv *cmd_priv; 4575 u16 dev_handle, timeout = MPI3MR_ABORTTM_TIMEOUT; 4576 u8 resp_code = 0; 4577 int retval = FAILED, ret = 0; 4578 struct request *rq = scsi_cmd_to_rq(scmd); 4579 unsigned long scmd_age_ms = jiffies_to_msecs(jiffies - scmd->jiffies_at_alloc); 4580 unsigned long scmd_age_sec = scmd_age_ms / HZ; 4581 4582 sdev_printk(KERN_INFO, scmd->device, 4583 "%s: attempting abort task for scmd(%p)\n", mrioc->name, scmd); 4584 4585 sdev_printk(KERN_INFO, scmd->device, 4586 "%s: scmd(0x%p) is outstanding for %lus %lums, timeout %us, retries %d, allowed %d\n", 4587 mrioc->name, scmd, scmd_age_sec, scmd_age_ms % HZ, rq->timeout / HZ, 4588 scmd->retries, scmd->allowed); 4589 4590 scsi_print_command(scmd); 4591 4592 sdev_priv_data = scmd->device->hostdata; 4593 if (!sdev_priv_data || !sdev_priv_data->tgt_priv_data) { 4594 sdev_printk(KERN_INFO, scmd->device, 4595 "%s: Device not available, Skip issuing abort task\n", 4596 mrioc->name); 4597 retval = SUCCESS; 4598 goto out; 4599 } 4600 4601 stgt_priv_data = sdev_priv_data->tgt_priv_data; 4602 dev_handle = stgt_priv_data->dev_handle; 4603 4604 cmd_priv = scsi_cmd_priv(scmd); 4605 if (!cmd_priv->in_lld_scope || 4606 cmd_priv->host_tag == MPI3MR_HOSTTAG_INVALID) { 4607 sdev_printk(KERN_INFO, scmd->device, 4608 "%s: scmd (0x%p) not in LLD scope, Skip issuing Abort Task\n", 4609 mrioc->name, scmd); 4610 retval = SUCCESS; 4611 goto out; 4612 } 4613 4614 if (stgt_priv_data->dev_removed) { 4615 sdev_printk(KERN_INFO, scmd->device, 4616 "%s: Device (handle = 0x%04x) removed, Skip issuing Abort Task\n", 4617 mrioc->name, dev_handle); 4618 retval = FAILED; 4619 goto out; 4620 } 4621 4622 ret = mpi3mr_issue_tm(mrioc, MPI3_SCSITASKMGMT_TASKTYPE_ABORT_TASK, 4623 dev_handle, sdev_priv_data->lun_id, MPI3MR_HOSTTAG_BLK_TMS, 4624 timeout, &mrioc->host_tm_cmds, &resp_code, scmd); 4625 4626 if (ret) 4627 goto out; 4628 4629 if (cmd_priv->in_lld_scope) { 4630 sdev_printk(KERN_INFO, scmd->device, 4631 "%s: Abort task failed. scmd (0x%p) was not terminated\n", 4632 mrioc->name, scmd); 4633 goto out; 4634 } 4635 4636 retval = SUCCESS; 4637 out: 4638 sdev_printk(KERN_INFO, scmd->device, 4639 "%s: Abort Task %s for scmd (0x%p)\n", mrioc->name, 4640 ((retval == SUCCESS) ? "SUCCEEDED" : "FAILED"), scmd); 4641 4642 return retval; 4643 } 4644 4645 /** 4646 * mpi3mr_scan_start - Scan start callback handler 4647 * @shost: SCSI host reference 4648 * 4649 * Issue port enable request asynchronously. 4650 * 4651 * Return: Nothing 4652 */ 4653 static void mpi3mr_scan_start(struct Scsi_Host *shost) 4654 { 4655 struct mpi3mr_ioc *mrioc = shost_priv(shost); 4656 4657 mrioc->scan_started = 1; 4658 ioc_info(mrioc, "%s :Issuing Port Enable\n", __func__); 4659 if (mpi3mr_issue_port_enable(mrioc, 1)) { 4660 ioc_err(mrioc, "%s :Issuing port enable failed\n", __func__); 4661 mrioc->scan_started = 0; 4662 mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR; 4663 } 4664 } 4665 4666 /** 4667 * mpi3mr_scan_finished - Scan finished callback handler 4668 * @shost: SCSI host reference 4669 * @time: Jiffies from the scan start 4670 * 4671 * Checks whether the port enable is completed or timedout or 4672 * failed and set the scan status accordingly after taking any 4673 * recovery if required. 4674 * 4675 * Return: 1 on scan finished or timed out, 0 for in progress 4676 */ 4677 static int mpi3mr_scan_finished(struct Scsi_Host *shost, 4678 unsigned long time) 4679 { 4680 struct mpi3mr_ioc *mrioc = shost_priv(shost); 4681 u32 pe_timeout = MPI3MR_PORTENABLE_TIMEOUT; 4682 u32 ioc_status = readl(&mrioc->sysif_regs->ioc_status); 4683 4684 if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) || 4685 (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) { 4686 ioc_err(mrioc, "port enable failed due to fault or reset\n"); 4687 mpi3mr_print_fault_info(mrioc); 4688 mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR; 4689 mrioc->scan_started = 0; 4690 mrioc->init_cmds.is_waiting = 0; 4691 mrioc->init_cmds.callback = NULL; 4692 mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; 4693 } 4694 4695 if (time >= (pe_timeout * HZ)) { 4696 ioc_err(mrioc, "port enable failed due to time out\n"); 4697 mpi3mr_check_rh_fault_ioc(mrioc, 4698 MPI3MR_RESET_FROM_PE_TIMEOUT); 4699 mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR; 4700 mrioc->scan_started = 0; 4701 mrioc->init_cmds.is_waiting = 0; 4702 mrioc->init_cmds.callback = NULL; 4703 mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; 4704 } 4705 4706 if (mrioc->scan_started) 4707 return 0; 4708 4709 if (mrioc->scan_failed) { 4710 ioc_err(mrioc, 4711 "port enable failed with status=0x%04x\n", 4712 mrioc->scan_failed); 4713 } else 4714 ioc_info(mrioc, "port enable is successfully completed\n"); 4715 4716 mpi3mr_start_watchdog(mrioc); 4717 mrioc->is_driver_loading = 0; 4718 mrioc->stop_bsgs = 0; 4719 return 1; 4720 } 4721 4722 /** 4723 * mpi3mr_sdev_destroy - Slave destroy callback handler 4724 * @sdev: SCSI device reference 4725 * 4726 * Cleanup and free per device(lun) private data. 4727 * 4728 * Return: Nothing. 4729 */ 4730 static void mpi3mr_sdev_destroy(struct scsi_device *sdev) 4731 { 4732 struct Scsi_Host *shost; 4733 struct mpi3mr_ioc *mrioc; 4734 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data; 4735 struct mpi3mr_tgt_dev *tgt_dev = NULL; 4736 unsigned long flags; 4737 struct scsi_target *starget; 4738 struct sas_rphy *rphy = NULL; 4739 4740 if (!sdev->hostdata) 4741 return; 4742 4743 starget = scsi_target(sdev); 4744 shost = dev_to_shost(&starget->dev); 4745 mrioc = shost_priv(shost); 4746 scsi_tgt_priv_data = starget->hostdata; 4747 4748 scsi_tgt_priv_data->num_luns--; 4749 4750 spin_lock_irqsave(&mrioc->tgtdev_lock, flags); 4751 if (starget->channel == mrioc->scsi_device_channel) 4752 tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id); 4753 else if (mrioc->sas_transport_enabled && !starget->channel) { 4754 rphy = dev_to_rphy(starget->dev.parent); 4755 tgt_dev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc, 4756 rphy->identify.sas_address, rphy); 4757 } 4758 4759 if (tgt_dev && (!scsi_tgt_priv_data->num_luns)) 4760 tgt_dev->starget = NULL; 4761 if (tgt_dev) 4762 mpi3mr_tgtdev_put(tgt_dev); 4763 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 4764 4765 kfree(sdev->hostdata); 4766 sdev->hostdata = NULL; 4767 } 4768 4769 /** 4770 * mpi3mr_target_destroy - Target destroy callback handler 4771 * @starget: SCSI target reference 4772 * 4773 * Cleanup and free per target private data. 4774 * 4775 * Return: Nothing. 4776 */ 4777 static void mpi3mr_target_destroy(struct scsi_target *starget) 4778 { 4779 struct Scsi_Host *shost; 4780 struct mpi3mr_ioc *mrioc; 4781 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data; 4782 struct mpi3mr_tgt_dev *tgt_dev; 4783 unsigned long flags; 4784 4785 if (!starget->hostdata) 4786 return; 4787 4788 shost = dev_to_shost(&starget->dev); 4789 mrioc = shost_priv(shost); 4790 scsi_tgt_priv_data = starget->hostdata; 4791 4792 spin_lock_irqsave(&mrioc->tgtdev_lock, flags); 4793 tgt_dev = __mpi3mr_get_tgtdev_from_tgtpriv(mrioc, scsi_tgt_priv_data); 4794 if (tgt_dev && (tgt_dev->starget == starget) && 4795 (tgt_dev->perst_id == starget->id)) 4796 tgt_dev->starget = NULL; 4797 if (tgt_dev) { 4798 scsi_tgt_priv_data->tgt_dev = NULL; 4799 scsi_tgt_priv_data->perst_id = 0; 4800 mpi3mr_tgtdev_put(tgt_dev); 4801 mpi3mr_tgtdev_put(tgt_dev); 4802 } 4803 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 4804 4805 kfree(starget->hostdata); 4806 starget->hostdata = NULL; 4807 } 4808 4809 /** 4810 * mpi3mr_sdev_configure - Slave configure callback handler 4811 * @sdev: SCSI device reference 4812 * @lim: queue limits 4813 * 4814 * Configure queue depth, max hardware sectors and virt boundary 4815 * as required 4816 * 4817 * Return: 0 always. 4818 */ 4819 static int mpi3mr_sdev_configure(struct scsi_device *sdev, 4820 struct queue_limits *lim) 4821 { 4822 struct scsi_target *starget; 4823 struct Scsi_Host *shost; 4824 struct mpi3mr_ioc *mrioc; 4825 struct mpi3mr_tgt_dev *tgt_dev = NULL; 4826 unsigned long flags; 4827 int retval = 0; 4828 struct sas_rphy *rphy = NULL; 4829 4830 starget = scsi_target(sdev); 4831 shost = dev_to_shost(&starget->dev); 4832 mrioc = shost_priv(shost); 4833 4834 spin_lock_irqsave(&mrioc->tgtdev_lock, flags); 4835 if (starget->channel == mrioc->scsi_device_channel) 4836 tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id); 4837 else if (mrioc->sas_transport_enabled && !starget->channel) { 4838 rphy = dev_to_rphy(starget->dev.parent); 4839 tgt_dev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc, 4840 rphy->identify.sas_address, rphy); 4841 } 4842 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 4843 if (!tgt_dev) 4844 return -ENXIO; 4845 4846 mpi3mr_change_queue_depth(sdev, tgt_dev->q_depth); 4847 4848 sdev->eh_timeout = MPI3MR_EH_SCMD_TIMEOUT; 4849 blk_queue_rq_timeout(sdev->request_queue, MPI3MR_SCMD_TIMEOUT); 4850 4851 mpi3mr_configure_tgt_dev(tgt_dev, lim); 4852 mpi3mr_tgtdev_put(tgt_dev); 4853 return retval; 4854 } 4855 4856 /** 4857 * mpi3mr_sdev_init -Slave alloc callback handler 4858 * @sdev: SCSI device reference 4859 * 4860 * Allocate per device(lun) private data and initialize it. 4861 * 4862 * Return: 0 on success -ENOMEM on memory allocation failure. 4863 */ 4864 static int mpi3mr_sdev_init(struct scsi_device *sdev) 4865 { 4866 struct Scsi_Host *shost; 4867 struct mpi3mr_ioc *mrioc; 4868 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data; 4869 struct mpi3mr_tgt_dev *tgt_dev = NULL; 4870 struct mpi3mr_sdev_priv_data *scsi_dev_priv_data; 4871 unsigned long flags; 4872 struct scsi_target *starget; 4873 int retval = 0; 4874 struct sas_rphy *rphy = NULL; 4875 4876 starget = scsi_target(sdev); 4877 shost = dev_to_shost(&starget->dev); 4878 mrioc = shost_priv(shost); 4879 scsi_tgt_priv_data = starget->hostdata; 4880 4881 spin_lock_irqsave(&mrioc->tgtdev_lock, flags); 4882 4883 if (starget->channel == mrioc->scsi_device_channel) 4884 tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id); 4885 else if (mrioc->sas_transport_enabled && !starget->channel) { 4886 rphy = dev_to_rphy(starget->dev.parent); 4887 tgt_dev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc, 4888 rphy->identify.sas_address, rphy); 4889 } 4890 4891 if (tgt_dev) { 4892 if (tgt_dev->starget == NULL) 4893 tgt_dev->starget = starget; 4894 mpi3mr_tgtdev_put(tgt_dev); 4895 retval = 0; 4896 } else { 4897 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 4898 return -ENXIO; 4899 } 4900 4901 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 4902 4903 scsi_dev_priv_data = kzalloc(sizeof(*scsi_dev_priv_data), GFP_KERNEL); 4904 if (!scsi_dev_priv_data) 4905 return -ENOMEM; 4906 4907 scsi_dev_priv_data->lun_id = sdev->lun; 4908 scsi_dev_priv_data->tgt_priv_data = scsi_tgt_priv_data; 4909 sdev->hostdata = scsi_dev_priv_data; 4910 4911 scsi_tgt_priv_data->num_luns++; 4912 4913 return retval; 4914 } 4915 4916 /** 4917 * mpi3mr_target_alloc - Target alloc callback handler 4918 * @starget: SCSI target reference 4919 * 4920 * Allocate per target private data and initialize it. 4921 * 4922 * Return: 0 on success -ENOMEM on memory allocation failure. 4923 */ 4924 static int mpi3mr_target_alloc(struct scsi_target *starget) 4925 { 4926 struct Scsi_Host *shost = dev_to_shost(&starget->dev); 4927 struct mpi3mr_ioc *mrioc = shost_priv(shost); 4928 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data; 4929 struct mpi3mr_tgt_dev *tgt_dev; 4930 unsigned long flags; 4931 int retval = 0; 4932 struct sas_rphy *rphy = NULL; 4933 4934 scsi_tgt_priv_data = kzalloc(sizeof(*scsi_tgt_priv_data), GFP_KERNEL); 4935 if (!scsi_tgt_priv_data) 4936 return -ENOMEM; 4937 4938 starget->hostdata = scsi_tgt_priv_data; 4939 4940 spin_lock_irqsave(&mrioc->tgtdev_lock, flags); 4941 if (starget->channel == mrioc->scsi_device_channel) { 4942 tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id); 4943 if (tgt_dev && !tgt_dev->is_hidden && tgt_dev->non_stl) { 4944 scsi_tgt_priv_data->starget = starget; 4945 scsi_tgt_priv_data->dev_handle = tgt_dev->dev_handle; 4946 scsi_tgt_priv_data->perst_id = tgt_dev->perst_id; 4947 scsi_tgt_priv_data->dev_type = tgt_dev->dev_type; 4948 scsi_tgt_priv_data->tgt_dev = tgt_dev; 4949 tgt_dev->starget = starget; 4950 atomic_set(&scsi_tgt_priv_data->block_io, 0); 4951 retval = 0; 4952 if ((tgt_dev->dev_type == MPI3_DEVICE_DEVFORM_PCIE) && 4953 ((tgt_dev->dev_spec.pcie_inf.dev_info & 4954 MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) == 4955 MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) && 4956 ((tgt_dev->dev_spec.pcie_inf.dev_info & 4957 MPI3_DEVICE0_PCIE_DEVICE_INFO_PITYPE_MASK) != 4958 MPI3_DEVICE0_PCIE_DEVICE_INFO_PITYPE_0)) 4959 scsi_tgt_priv_data->dev_nvme_dif = 1; 4960 scsi_tgt_priv_data->io_throttle_enabled = tgt_dev->io_throttle_enabled; 4961 scsi_tgt_priv_data->wslen = tgt_dev->wslen; 4962 if (tgt_dev->dev_type == MPI3_DEVICE_DEVFORM_VD) 4963 scsi_tgt_priv_data->throttle_group = tgt_dev->dev_spec.vd_inf.tg; 4964 } else 4965 retval = -ENXIO; 4966 } else if (mrioc->sas_transport_enabled && !starget->channel) { 4967 rphy = dev_to_rphy(starget->dev.parent); 4968 tgt_dev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc, 4969 rphy->identify.sas_address, rphy); 4970 if (tgt_dev && !tgt_dev->is_hidden && !tgt_dev->non_stl && 4971 (tgt_dev->dev_type == MPI3_DEVICE_DEVFORM_SAS_SATA)) { 4972 scsi_tgt_priv_data->starget = starget; 4973 scsi_tgt_priv_data->dev_handle = tgt_dev->dev_handle; 4974 scsi_tgt_priv_data->perst_id = tgt_dev->perst_id; 4975 scsi_tgt_priv_data->dev_type = tgt_dev->dev_type; 4976 scsi_tgt_priv_data->tgt_dev = tgt_dev; 4977 scsi_tgt_priv_data->io_throttle_enabled = tgt_dev->io_throttle_enabled; 4978 scsi_tgt_priv_data->wslen = tgt_dev->wslen; 4979 tgt_dev->starget = starget; 4980 atomic_set(&scsi_tgt_priv_data->block_io, 0); 4981 retval = 0; 4982 } else 4983 retval = -ENXIO; 4984 } 4985 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 4986 4987 return retval; 4988 } 4989 4990 /** 4991 * mpi3mr_check_return_unmap - Whether an unmap is allowed 4992 * @mrioc: Adapter instance reference 4993 * @scmd: SCSI Command reference 4994 * 4995 * The controller hardware cannot handle certain unmap commands 4996 * for NVMe drives, this routine checks those and return true 4997 * and completes the SCSI command with proper status and sense 4998 * data. 4999 * 5000 * Return: TRUE for not allowed unmap, FALSE otherwise. 5001 */ 5002 static bool mpi3mr_check_return_unmap(struct mpi3mr_ioc *mrioc, 5003 struct scsi_cmnd *scmd) 5004 { 5005 unsigned char *buf; 5006 u16 param_len, desc_len, trunc_param_len; 5007 5008 trunc_param_len = param_len = get_unaligned_be16(scmd->cmnd + 7); 5009 5010 if (mrioc->pdev->revision) { 5011 if ((param_len > 24) && ((param_len - 8) & 0xF)) { 5012 trunc_param_len -= (param_len - 8) & 0xF; 5013 dprint_scsi_command(mrioc, scmd, MPI3_DEBUG_SCSI_ERROR); 5014 dprint_scsi_err(mrioc, 5015 "truncating param_len from (%d) to (%d)\n", 5016 param_len, trunc_param_len); 5017 put_unaligned_be16(trunc_param_len, scmd->cmnd + 7); 5018 dprint_scsi_command(mrioc, scmd, MPI3_DEBUG_SCSI_ERROR); 5019 } 5020 return false; 5021 } 5022 5023 if (!param_len) { 5024 ioc_warn(mrioc, 5025 "%s: cdb received with zero parameter length\n", 5026 __func__); 5027 scsi_print_command(scmd); 5028 scmd->result = DID_OK << 16; 5029 scsi_done(scmd); 5030 return true; 5031 } 5032 5033 if (param_len < 24) { 5034 ioc_warn(mrioc, 5035 "%s: cdb received with invalid param_len: %d\n", 5036 __func__, param_len); 5037 scsi_print_command(scmd); 5038 scmd->result = SAM_STAT_CHECK_CONDITION; 5039 scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 5040 0x1A, 0); 5041 scsi_done(scmd); 5042 return true; 5043 } 5044 if (param_len != scsi_bufflen(scmd)) { 5045 ioc_warn(mrioc, 5046 "%s: cdb received with param_len: %d bufflen: %d\n", 5047 __func__, param_len, scsi_bufflen(scmd)); 5048 scsi_print_command(scmd); 5049 scmd->result = SAM_STAT_CHECK_CONDITION; 5050 scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 5051 0x1A, 0); 5052 scsi_done(scmd); 5053 return true; 5054 } 5055 buf = kzalloc(scsi_bufflen(scmd), GFP_ATOMIC); 5056 if (!buf) { 5057 scsi_print_command(scmd); 5058 scmd->result = SAM_STAT_CHECK_CONDITION; 5059 scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 5060 0x55, 0x03); 5061 scsi_done(scmd); 5062 return true; 5063 } 5064 scsi_sg_copy_to_buffer(scmd, buf, scsi_bufflen(scmd)); 5065 desc_len = get_unaligned_be16(&buf[2]); 5066 5067 if (desc_len < 16) { 5068 ioc_warn(mrioc, 5069 "%s: Invalid descriptor length in param list: %d\n", 5070 __func__, desc_len); 5071 scsi_print_command(scmd); 5072 scmd->result = SAM_STAT_CHECK_CONDITION; 5073 scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 5074 0x26, 0); 5075 scsi_done(scmd); 5076 kfree(buf); 5077 return true; 5078 } 5079 5080 if (param_len > (desc_len + 8)) { 5081 trunc_param_len = desc_len + 8; 5082 scsi_print_command(scmd); 5083 dprint_scsi_err(mrioc, 5084 "truncating param_len(%d) to desc_len+8(%d)\n", 5085 param_len, trunc_param_len); 5086 put_unaligned_be16(trunc_param_len, scmd->cmnd + 7); 5087 scsi_print_command(scmd); 5088 } 5089 5090 kfree(buf); 5091 return false; 5092 } 5093 5094 /** 5095 * mpi3mr_allow_scmd_to_fw - Command is allowed during shutdown 5096 * @scmd: SCSI Command reference 5097 * 5098 * Checks whether a cdb is allowed during shutdown or not. 5099 * 5100 * Return: TRUE for allowed commands, FALSE otherwise. 5101 */ 5102 5103 inline bool mpi3mr_allow_scmd_to_fw(struct scsi_cmnd *scmd) 5104 { 5105 switch (scmd->cmnd[0]) { 5106 case SYNCHRONIZE_CACHE: 5107 case START_STOP: 5108 return true; 5109 default: 5110 return false; 5111 } 5112 } 5113 5114 /** 5115 * mpi3mr_qcmd - I/O request despatcher 5116 * @shost: SCSI Host reference 5117 * @scmd: SCSI Command reference 5118 * 5119 * Issues the SCSI Command as an MPI3 request. 5120 * 5121 * Return: 0 on successful queueing of the request or if the 5122 * request is completed with failure. 5123 * SCSI_MLQUEUE_DEVICE_BUSY when the device is busy. 5124 * SCSI_MLQUEUE_HOST_BUSY when the host queue is full. 5125 */ 5126 static enum scsi_qc_status mpi3mr_qcmd(struct Scsi_Host *shost, 5127 struct scsi_cmnd *scmd) 5128 { 5129 struct mpi3mr_ioc *mrioc = shost_priv(shost); 5130 struct mpi3mr_stgt_priv_data *stgt_priv_data; 5131 struct mpi3mr_sdev_priv_data *sdev_priv_data; 5132 struct scmd_priv *scmd_priv_data = NULL; 5133 struct mpi3_scsi_io_request *scsiio_req = NULL; 5134 struct op_req_qinfo *op_req_q = NULL; 5135 int retval = 0; 5136 u16 dev_handle; 5137 u16 host_tag; 5138 u32 scsiio_flags = 0, data_len_blks = 0; 5139 struct request *rq = scsi_cmd_to_rq(scmd); 5140 int iprio_class; 5141 u8 is_pcie_dev = 0; 5142 u32 tracked_io_sz = 0; 5143 u32 ioc_pend_data_len = 0, tg_pend_data_len = 0; 5144 struct mpi3mr_throttle_group_info *tg = NULL; 5145 5146 if (mrioc->unrecoverable) { 5147 scmd->result = DID_ERROR << 16; 5148 scsi_done(scmd); 5149 goto out; 5150 } 5151 5152 sdev_priv_data = scmd->device->hostdata; 5153 if (!sdev_priv_data || !sdev_priv_data->tgt_priv_data) { 5154 scmd->result = DID_NO_CONNECT << 16; 5155 scsi_done(scmd); 5156 goto out; 5157 } 5158 5159 if (mrioc->stop_drv_processing && 5160 !(mpi3mr_allow_scmd_to_fw(scmd))) { 5161 scmd->result = DID_NO_CONNECT << 16; 5162 scsi_done(scmd); 5163 goto out; 5164 } 5165 5166 stgt_priv_data = sdev_priv_data->tgt_priv_data; 5167 dev_handle = stgt_priv_data->dev_handle; 5168 5169 /* Avoid error handling escalation when device is removed or blocked */ 5170 5171 if (scmd->device->host->shost_state == SHOST_RECOVERY && 5172 scmd->cmnd[0] == TEST_UNIT_READY && 5173 (stgt_priv_data->dev_removed || (dev_handle == MPI3MR_INVALID_DEV_HANDLE))) { 5174 scsi_build_sense(scmd, 0, UNIT_ATTENTION, 0x29, 0x07); 5175 scsi_done(scmd); 5176 goto out; 5177 } 5178 5179 if (mrioc->reset_in_progress || mrioc->prepare_for_reset 5180 || mrioc->block_on_pci_err) { 5181 retval = SCSI_MLQUEUE_HOST_BUSY; 5182 goto out; 5183 } 5184 5185 if (atomic_read(&stgt_priv_data->block_io)) { 5186 if (mrioc->stop_drv_processing) { 5187 scmd->result = DID_NO_CONNECT << 16; 5188 scsi_done(scmd); 5189 goto out; 5190 } 5191 retval = SCSI_MLQUEUE_DEVICE_BUSY; 5192 goto out; 5193 } 5194 5195 if (dev_handle == MPI3MR_INVALID_DEV_HANDLE) { 5196 scmd->result = DID_NO_CONNECT << 16; 5197 scsi_done(scmd); 5198 goto out; 5199 } 5200 if (stgt_priv_data->dev_removed) { 5201 scmd->result = DID_NO_CONNECT << 16; 5202 scsi_done(scmd); 5203 goto out; 5204 } 5205 5206 if (stgt_priv_data->dev_type == MPI3_DEVICE_DEVFORM_PCIE) 5207 is_pcie_dev = 1; 5208 if ((scmd->cmnd[0] == UNMAP) && is_pcie_dev && 5209 (mrioc->pdev->device == MPI3_MFGPAGE_DEVID_SAS4116) && 5210 mpi3mr_check_return_unmap(mrioc, scmd)) 5211 goto out; 5212 5213 host_tag = mpi3mr_host_tag_for_scmd(mrioc, scmd); 5214 if (host_tag == MPI3MR_HOSTTAG_INVALID) { 5215 scmd->result = DID_ERROR << 16; 5216 scsi_done(scmd); 5217 goto out; 5218 } 5219 5220 if (scmd->sc_data_direction == DMA_FROM_DEVICE) 5221 scsiio_flags = MPI3_SCSIIO_FLAGS_DATADIRECTION_READ; 5222 else if (scmd->sc_data_direction == DMA_TO_DEVICE) 5223 scsiio_flags = MPI3_SCSIIO_FLAGS_DATADIRECTION_WRITE; 5224 else 5225 scsiio_flags = MPI3_SCSIIO_FLAGS_DATADIRECTION_NO_DATA_TRANSFER; 5226 5227 scsiio_flags |= MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_SIMPLEQ; 5228 5229 if (sdev_priv_data->ncq_prio_enable) { 5230 iprio_class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq)); 5231 if (iprio_class == IOPRIO_CLASS_RT) 5232 scsiio_flags |= 1 << MPI3_SCSIIO_FLAGS_CMDPRI_SHIFT; 5233 } 5234 5235 if (scmd->cmd_len > 16) 5236 scsiio_flags |= MPI3_SCSIIO_FLAGS_CDB_GREATER_THAN_16; 5237 5238 scmd_priv_data = scsi_cmd_priv(scmd); 5239 memset(scmd_priv_data->mpi3mr_scsiio_req, 0, MPI3MR_ADMIN_REQ_FRAME_SZ); 5240 scsiio_req = (struct mpi3_scsi_io_request *)scmd_priv_data->mpi3mr_scsiio_req; 5241 scsiio_req->function = MPI3_FUNCTION_SCSI_IO; 5242 scsiio_req->host_tag = cpu_to_le16(host_tag); 5243 5244 mpi3mr_setup_eedp(mrioc, scmd, scsiio_req); 5245 5246 if (stgt_priv_data->wslen) 5247 mpi3mr_setup_divert_ws(mrioc, scmd, scsiio_req, &scsiio_flags, 5248 stgt_priv_data->wslen); 5249 5250 memcpy(scsiio_req->cdb.cdb32, scmd->cmnd, scmd->cmd_len); 5251 scsiio_req->data_length = cpu_to_le32(scsi_bufflen(scmd)); 5252 scsiio_req->dev_handle = cpu_to_le16(dev_handle); 5253 scsiio_req->flags = cpu_to_le32(scsiio_flags); 5254 int_to_scsilun(sdev_priv_data->lun_id, 5255 (struct scsi_lun *)scsiio_req->lun); 5256 5257 if (mpi3mr_build_sg_scmd(mrioc, scmd, scsiio_req)) { 5258 mpi3mr_clear_scmd_priv(mrioc, scmd); 5259 retval = SCSI_MLQUEUE_HOST_BUSY; 5260 goto out; 5261 } 5262 op_req_q = &mrioc->req_qinfo[scmd_priv_data->req_q_idx]; 5263 data_len_blks = scsi_bufflen(scmd) >> 9; 5264 if ((data_len_blks >= mrioc->io_throttle_data_length) && 5265 stgt_priv_data->io_throttle_enabled) { 5266 tracked_io_sz = data_len_blks; 5267 tg = stgt_priv_data->throttle_group; 5268 if (tg) { 5269 ioc_pend_data_len = atomic_add_return(data_len_blks, 5270 &mrioc->pend_large_data_sz); 5271 tg_pend_data_len = atomic_add_return(data_len_blks, 5272 &tg->pend_large_data_sz); 5273 if (!tg->io_divert && ((ioc_pend_data_len >= 5274 mrioc->io_throttle_high) || 5275 (tg_pend_data_len >= tg->high))) { 5276 tg->io_divert = 1; 5277 tg->need_qd_reduction = 1; 5278 mpi3mr_set_io_divert_for_all_vd_in_tg(mrioc, 5279 tg, 1); 5280 mpi3mr_queue_qd_reduction_event(mrioc, tg); 5281 } 5282 } else { 5283 ioc_pend_data_len = atomic_add_return(data_len_blks, 5284 &mrioc->pend_large_data_sz); 5285 if (ioc_pend_data_len >= mrioc->io_throttle_high) 5286 stgt_priv_data->io_divert = 1; 5287 } 5288 } 5289 5290 if (stgt_priv_data->io_divert) { 5291 scsiio_req->msg_flags |= 5292 MPI3_SCSIIO_MSGFLAGS_DIVERT_TO_FIRMWARE; 5293 scsiio_flags |= MPI3_SCSIIO_FLAGS_DIVERT_REASON_IO_THROTTLING; 5294 } 5295 scsiio_req->flags |= cpu_to_le32(scsiio_flags); 5296 5297 if (mpi3mr_op_request_post(mrioc, op_req_q, 5298 scmd_priv_data->mpi3mr_scsiio_req)) { 5299 mpi3mr_clear_scmd_priv(mrioc, scmd); 5300 retval = SCSI_MLQUEUE_HOST_BUSY; 5301 if (tracked_io_sz) { 5302 atomic_sub(tracked_io_sz, &mrioc->pend_large_data_sz); 5303 if (tg) 5304 atomic_sub(tracked_io_sz, 5305 &tg->pend_large_data_sz); 5306 } 5307 goto out; 5308 } 5309 5310 out: 5311 return retval; 5312 } 5313 5314 static const struct scsi_host_template mpi3mr_driver_template = { 5315 .module = THIS_MODULE, 5316 .name = "MPI3 Storage Controller", 5317 .proc_name = MPI3MR_DRIVER_NAME, 5318 .queuecommand = mpi3mr_qcmd, 5319 .target_alloc = mpi3mr_target_alloc, 5320 .sdev_init = mpi3mr_sdev_init, 5321 .sdev_configure = mpi3mr_sdev_configure, 5322 .target_destroy = mpi3mr_target_destroy, 5323 .sdev_destroy = mpi3mr_sdev_destroy, 5324 .scan_finished = mpi3mr_scan_finished, 5325 .scan_start = mpi3mr_scan_start, 5326 .change_queue_depth = mpi3mr_change_queue_depth, 5327 .eh_abort_handler = mpi3mr_eh_abort, 5328 .eh_device_reset_handler = mpi3mr_eh_dev_reset, 5329 .eh_target_reset_handler = mpi3mr_eh_target_reset, 5330 .eh_bus_reset_handler = mpi3mr_eh_bus_reset, 5331 .eh_host_reset_handler = mpi3mr_eh_host_reset, 5332 .bios_param = mpi3mr_bios_param, 5333 .map_queues = mpi3mr_map_queues, 5334 .mq_poll = mpi3mr_blk_mq_poll, 5335 .no_write_same = 1, 5336 .can_queue = 1, 5337 .this_id = -1, 5338 .sg_tablesize = MPI3MR_DEFAULT_SGL_ENTRIES, 5339 /* max xfer supported is 1M (2K in 512 byte sized sectors) 5340 */ 5341 .max_sectors = (MPI3MR_DEFAULT_MAX_IO_SIZE / 512), 5342 .cmd_per_lun = MPI3MR_MAX_CMDS_LUN, 5343 .max_segment_size = 0xffffffff, 5344 .track_queue_depth = 1, 5345 .cmd_size = sizeof(struct scmd_priv), 5346 .shost_groups = mpi3mr_host_groups, 5347 .sdev_groups = mpi3mr_dev_groups, 5348 }; 5349 5350 /** 5351 * mpi3mr_init_drv_cmd - Initialize internal command tracker 5352 * @cmdptr: Internal command tracker 5353 * @host_tag: Host tag used for the specific command 5354 * 5355 * Initialize the internal command tracker structure with 5356 * specified host tag. 5357 * 5358 * Return: Nothing. 5359 */ 5360 static inline void mpi3mr_init_drv_cmd(struct mpi3mr_drv_cmd *cmdptr, 5361 u16 host_tag) 5362 { 5363 mutex_init(&cmdptr->mutex); 5364 cmdptr->reply = NULL; 5365 cmdptr->state = MPI3MR_CMD_NOTUSED; 5366 cmdptr->dev_handle = MPI3MR_INVALID_DEV_HANDLE; 5367 cmdptr->host_tag = host_tag; 5368 } 5369 5370 /** 5371 * osintfc_mrioc_security_status -Check controller secure status 5372 * @pdev: PCI device instance 5373 * 5374 * Read the Device Serial Number capability from PCI config 5375 * space and decide whether the controller is secure or not. 5376 * 5377 * Return: 0 on success, non-zero on failure. 5378 */ 5379 static int 5380 osintfc_mrioc_security_status(struct pci_dev *pdev) 5381 { 5382 u32 cap_data; 5383 int base; 5384 u32 ctlr_status; 5385 u32 debug_status; 5386 int retval = 0; 5387 5388 base = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN); 5389 if (!base) { 5390 dev_err(&pdev->dev, 5391 "%s: PCI_EXT_CAP_ID_DSN is not supported\n", __func__); 5392 return -1; 5393 } 5394 5395 pci_read_config_dword(pdev, base + 4, &cap_data); 5396 5397 debug_status = cap_data & MPI3MR_CTLR_SECURE_DBG_STATUS_MASK; 5398 ctlr_status = cap_data & MPI3MR_CTLR_SECURITY_STATUS_MASK; 5399 5400 switch (ctlr_status) { 5401 case MPI3MR_INVALID_DEVICE: 5402 dev_err(&pdev->dev, 5403 "%s: Non secure ctlr (Invalid) is detected: DID: 0x%x: SVID: 0x%x: SDID: 0x%x\n", 5404 __func__, pdev->device, pdev->subsystem_vendor, 5405 pdev->subsystem_device); 5406 retval = -1; 5407 break; 5408 case MPI3MR_CONFIG_SECURE_DEVICE: 5409 if (!debug_status) 5410 dev_info(&pdev->dev, 5411 "%s: Config secure ctlr is detected\n", 5412 __func__); 5413 break; 5414 case MPI3MR_HARD_SECURE_DEVICE: 5415 break; 5416 case MPI3MR_TAMPERED_DEVICE: 5417 dev_err(&pdev->dev, 5418 "%s: Non secure ctlr (Tampered) is detected: DID: 0x%x: SVID: 0x%x: SDID: 0x%x\n", 5419 __func__, pdev->device, pdev->subsystem_vendor, 5420 pdev->subsystem_device); 5421 retval = -1; 5422 break; 5423 default: 5424 retval = -1; 5425 break; 5426 } 5427 5428 if (!retval && debug_status) { 5429 dev_err(&pdev->dev, 5430 "%s: Non secure ctlr (Secure Dbg) is detected: DID: 0x%x: SVID: 0x%x: SDID: 0x%x\n", 5431 __func__, pdev->device, pdev->subsystem_vendor, 5432 pdev->subsystem_device); 5433 retval = -1; 5434 } 5435 5436 return retval; 5437 } 5438 5439 /** 5440 * mpi3mr_probe - PCI probe callback 5441 * @pdev: PCI device instance 5442 * @id: PCI device ID details 5443 * 5444 * controller initialization routine. Checks the security status 5445 * of the controller and if it is invalid or tampered return the 5446 * probe without initializing the controller. Otherwise, 5447 * allocate per adapter instance through shost_priv and 5448 * initialize controller specific data structures, initializae 5449 * the controller hardware, add shost to the SCSI subsystem. 5450 * 5451 * Return: 0 on success, non-zero on failure. 5452 */ 5453 5454 static int 5455 mpi3mr_probe(struct pci_dev *pdev, const struct pci_device_id *id) 5456 { 5457 struct mpi3mr_ioc *mrioc = NULL; 5458 struct Scsi_Host *shost = NULL; 5459 int retval = 0, i; 5460 5461 if (osintfc_mrioc_security_status(pdev)) { 5462 warn_non_secure_ctlr = 1; 5463 return 1; /* For Invalid and Tampered device */ 5464 } 5465 5466 shost = scsi_host_alloc(&mpi3mr_driver_template, 5467 sizeof(struct mpi3mr_ioc)); 5468 if (!shost) { 5469 retval = -ENODEV; 5470 goto shost_failed; 5471 } 5472 5473 mrioc = shost_priv(shost); 5474 retval = ida_alloc_range(&mrioc_ida, 0, U8_MAX, GFP_KERNEL); 5475 if (retval < 0) 5476 goto id_alloc_failed; 5477 mrioc->id = (u8)retval; 5478 strscpy(mrioc->driver_name, MPI3MR_DRIVER_NAME, 5479 sizeof(mrioc->driver_name)); 5480 scnprintf(mrioc->name, sizeof(mrioc->name), 5481 "%s%u", mrioc->driver_name, mrioc->id); 5482 INIT_LIST_HEAD(&mrioc->list); 5483 spin_lock(&mrioc_list_lock); 5484 list_add_tail(&mrioc->list, &mrioc_list); 5485 spin_unlock(&mrioc_list_lock); 5486 5487 spin_lock_init(&mrioc->admin_req_lock); 5488 spin_lock_init(&mrioc->reply_free_queue_lock); 5489 spin_lock_init(&mrioc->sbq_lock); 5490 spin_lock_init(&mrioc->fwevt_lock); 5491 spin_lock_init(&mrioc->tgtdev_lock); 5492 spin_lock_init(&mrioc->watchdog_lock); 5493 spin_lock_init(&mrioc->chain_buf_lock); 5494 spin_lock_init(&mrioc->adm_req_q_bar_writeq_lock); 5495 spin_lock_init(&mrioc->adm_reply_q_bar_writeq_lock); 5496 spin_lock_init(&mrioc->sas_node_lock); 5497 spin_lock_init(&mrioc->trigger_lock); 5498 5499 INIT_LIST_HEAD(&mrioc->fwevt_list); 5500 INIT_LIST_HEAD(&mrioc->tgtdev_list); 5501 INIT_LIST_HEAD(&mrioc->delayed_rmhs_list); 5502 INIT_LIST_HEAD(&mrioc->delayed_evtack_cmds_list); 5503 INIT_LIST_HEAD(&mrioc->sas_expander_list); 5504 INIT_LIST_HEAD(&mrioc->hba_port_table_list); 5505 INIT_LIST_HEAD(&mrioc->enclosure_list); 5506 5507 mutex_init(&mrioc->reset_mutex); 5508 mpi3mr_init_drv_cmd(&mrioc->init_cmds, MPI3MR_HOSTTAG_INITCMDS); 5509 mpi3mr_init_drv_cmd(&mrioc->host_tm_cmds, MPI3MR_HOSTTAG_BLK_TMS); 5510 mpi3mr_init_drv_cmd(&mrioc->bsg_cmds, MPI3MR_HOSTTAG_BSG_CMDS); 5511 mpi3mr_init_drv_cmd(&mrioc->cfg_cmds, MPI3MR_HOSTTAG_CFG_CMDS); 5512 mpi3mr_init_drv_cmd(&mrioc->transport_cmds, 5513 MPI3MR_HOSTTAG_TRANSPORT_CMDS); 5514 5515 for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) 5516 mpi3mr_init_drv_cmd(&mrioc->dev_rmhs_cmds[i], 5517 MPI3MR_HOSTTAG_DEVRMCMD_MIN + i); 5518 5519 for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) 5520 mpi3mr_init_drv_cmd(&mrioc->evtack_cmds[i], 5521 MPI3MR_HOSTTAG_EVTACKCMD_MIN + i); 5522 5523 if ((pdev->device == MPI3_MFGPAGE_DEVID_SAS4116) && 5524 !pdev->revision) 5525 mrioc->enable_segqueue = false; 5526 else 5527 mrioc->enable_segqueue = true; 5528 5529 init_waitqueue_head(&mrioc->reset_waitq); 5530 mrioc->logging_level = logging_level; 5531 mrioc->shost = shost; 5532 mrioc->pdev = pdev; 5533 mrioc->stop_bsgs = 1; 5534 5535 mrioc->max_sgl_entries = max_sgl_entries; 5536 if (max_sgl_entries > MPI3MR_MAX_SGL_ENTRIES) 5537 mrioc->max_sgl_entries = MPI3MR_MAX_SGL_ENTRIES; 5538 else if (max_sgl_entries < MPI3MR_DEFAULT_SGL_ENTRIES) 5539 mrioc->max_sgl_entries = MPI3MR_DEFAULT_SGL_ENTRIES; 5540 else { 5541 mrioc->max_sgl_entries /= MPI3MR_DEFAULT_SGL_ENTRIES; 5542 mrioc->max_sgl_entries *= MPI3MR_DEFAULT_SGL_ENTRIES; 5543 } 5544 5545 /* init shost parameters */ 5546 shost->max_cmd_len = MPI3MR_MAX_CDB_LENGTH; 5547 shost->max_lun = -1; 5548 shost->unique_id = mrioc->id; 5549 5550 shost->max_channel = 0; 5551 shost->max_id = 0xFFFFFFFF; 5552 5553 shost->host_tagset = 1; 5554 5555 if (prot_mask >= 0) 5556 scsi_host_set_prot(shost, prot_mask); 5557 else { 5558 prot_mask = SHOST_DIF_TYPE1_PROTECTION 5559 | SHOST_DIF_TYPE2_PROTECTION 5560 | SHOST_DIF_TYPE3_PROTECTION; 5561 scsi_host_set_prot(shost, prot_mask); 5562 } 5563 5564 ioc_info(mrioc, 5565 "%s :host protection capabilities enabled %s%s%s%s%s%s%s\n", 5566 __func__, 5567 (prot_mask & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "", 5568 (prot_mask & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "", 5569 (prot_mask & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "", 5570 (prot_mask & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "", 5571 (prot_mask & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "", 5572 (prot_mask & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "", 5573 (prot_mask & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : ""); 5574 5575 if (prot_guard_mask) 5576 scsi_host_set_guard(shost, (prot_guard_mask & 3)); 5577 else 5578 scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC); 5579 5580 mrioc->fwevt_worker_thread = alloc_ordered_workqueue( 5581 "%s%d_fwevt_wrkr", 0, mrioc->driver_name, mrioc->id); 5582 if (!mrioc->fwevt_worker_thread) { 5583 ioc_err(mrioc, "failure at %s:%d/%s()!\n", 5584 __FILE__, __LINE__, __func__); 5585 retval = -ENODEV; 5586 goto fwevtthread_failed; 5587 } 5588 5589 mrioc->is_driver_loading = 1; 5590 mrioc->cpu_count = num_online_cpus(); 5591 if (mpi3mr_setup_resources(mrioc)) { 5592 ioc_err(mrioc, "setup resources failed\n"); 5593 retval = -ENODEV; 5594 goto resource_alloc_failed; 5595 } 5596 if (mpi3mr_init_ioc(mrioc)) { 5597 ioc_err(mrioc, "initializing IOC failed\n"); 5598 retval = -ENODEV; 5599 goto init_ioc_failed; 5600 } 5601 5602 shost->nr_hw_queues = mrioc->num_op_reply_q; 5603 if (mrioc->active_poll_qcount) 5604 shost->nr_maps = 3; 5605 5606 shost->can_queue = mrioc->max_host_ios; 5607 shost->sg_tablesize = mrioc->max_sgl_entries; 5608 shost->max_id = mrioc->facts.max_perids + 1; 5609 5610 retval = scsi_add_host(shost, &pdev->dev); 5611 if (retval) { 5612 ioc_err(mrioc, "failure at %s:%d/%s()!\n", 5613 __FILE__, __LINE__, __func__); 5614 goto addhost_failed; 5615 } 5616 5617 scsi_scan_host(shost); 5618 mpi3mr_bsg_init(mrioc); 5619 return retval; 5620 5621 addhost_failed: 5622 mpi3mr_stop_watchdog(mrioc); 5623 mpi3mr_cleanup_ioc(mrioc); 5624 init_ioc_failed: 5625 mpi3mr_free_mem(mrioc); 5626 mpi3mr_cleanup_resources(mrioc); 5627 resource_alloc_failed: 5628 destroy_workqueue(mrioc->fwevt_worker_thread); 5629 fwevtthread_failed: 5630 ida_free(&mrioc_ida, mrioc->id); 5631 spin_lock(&mrioc_list_lock); 5632 list_del(&mrioc->list); 5633 spin_unlock(&mrioc_list_lock); 5634 id_alloc_failed: 5635 scsi_host_put(shost); 5636 shost_failed: 5637 return retval; 5638 } 5639 5640 /** 5641 * mpi3mr_remove - PCI remove callback 5642 * @pdev: PCI device instance 5643 * 5644 * Cleanup the IOC by issuing MUR and shutdown notification. 5645 * Free up all memory and resources associated with the 5646 * controllerand target devices, unregister the shost. 5647 * 5648 * Return: Nothing. 5649 */ 5650 static void mpi3mr_remove(struct pci_dev *pdev) 5651 { 5652 struct Scsi_Host *shost = pci_get_drvdata(pdev); 5653 struct mpi3mr_ioc *mrioc; 5654 struct workqueue_struct *wq; 5655 unsigned long flags; 5656 struct mpi3mr_tgt_dev *tgtdev, *tgtdev_next; 5657 struct mpi3mr_hba_port *port, *hba_port_next; 5658 struct mpi3mr_sas_node *sas_expander, *sas_expander_next; 5659 5660 if (!shost) 5661 return; 5662 5663 mrioc = shost_priv(shost); 5664 while (mrioc->reset_in_progress || mrioc->is_driver_loading) 5665 ssleep(1); 5666 5667 if (mrioc->block_on_pci_err) { 5668 mrioc->block_on_pci_err = false; 5669 scsi_unblock_requests(shost); 5670 mrioc->unrecoverable = 1; 5671 } 5672 5673 if (!pci_device_is_present(mrioc->pdev) || 5674 mrioc->pci_err_recovery) { 5675 mrioc->unrecoverable = 1; 5676 mpi3mr_flush_cmds_for_unrecovered_controller(mrioc); 5677 } 5678 5679 mpi3mr_bsg_exit(mrioc); 5680 mrioc->stop_drv_processing = 1; 5681 mpi3mr_cleanup_fwevt_list(mrioc); 5682 spin_lock_irqsave(&mrioc->fwevt_lock, flags); 5683 wq = mrioc->fwevt_worker_thread; 5684 mrioc->fwevt_worker_thread = NULL; 5685 spin_unlock_irqrestore(&mrioc->fwevt_lock, flags); 5686 if (wq) 5687 destroy_workqueue(wq); 5688 5689 if (mrioc->sas_transport_enabled) 5690 sas_remove_host(shost); 5691 else 5692 scsi_remove_host(shost); 5693 5694 list_for_each_entry_safe(tgtdev, tgtdev_next, &mrioc->tgtdev_list, 5695 list) { 5696 mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev); 5697 mpi3mr_tgtdev_del_from_list(mrioc, tgtdev, true); 5698 mpi3mr_tgtdev_put(tgtdev); 5699 } 5700 mpi3mr_stop_watchdog(mrioc); 5701 mpi3mr_cleanup_ioc(mrioc); 5702 mpi3mr_free_mem(mrioc); 5703 mpi3mr_cleanup_resources(mrioc); 5704 5705 spin_lock_irqsave(&mrioc->sas_node_lock, flags); 5706 list_for_each_entry_safe_reverse(sas_expander, sas_expander_next, 5707 &mrioc->sas_expander_list, list) { 5708 spin_unlock_irqrestore(&mrioc->sas_node_lock, flags); 5709 mpi3mr_expander_node_remove(mrioc, sas_expander); 5710 spin_lock_irqsave(&mrioc->sas_node_lock, flags); 5711 } 5712 list_for_each_entry_safe(port, hba_port_next, &mrioc->hba_port_table_list, list) { 5713 ioc_info(mrioc, 5714 "removing hba_port entry: %p port: %d from hba_port list\n", 5715 port, port->port_id); 5716 list_del(&port->list); 5717 kfree(port); 5718 } 5719 spin_unlock_irqrestore(&mrioc->sas_node_lock, flags); 5720 5721 if (mrioc->sas_hba.num_phys) { 5722 kfree(mrioc->sas_hba.phy); 5723 mrioc->sas_hba.phy = NULL; 5724 mrioc->sas_hba.num_phys = 0; 5725 } 5726 5727 ida_free(&mrioc_ida, mrioc->id); 5728 spin_lock(&mrioc_list_lock); 5729 list_del(&mrioc->list); 5730 spin_unlock(&mrioc_list_lock); 5731 5732 scsi_host_put(shost); 5733 } 5734 5735 /** 5736 * mpi3mr_shutdown - PCI shutdown callback 5737 * @pdev: PCI device instance 5738 * 5739 * Free up all memory and resources associated with the 5740 * controller 5741 * 5742 * Return: Nothing. 5743 */ 5744 static void mpi3mr_shutdown(struct pci_dev *pdev) 5745 { 5746 struct Scsi_Host *shost = pci_get_drvdata(pdev); 5747 struct mpi3mr_ioc *mrioc; 5748 struct workqueue_struct *wq; 5749 unsigned long flags; 5750 5751 if (!shost) 5752 return; 5753 5754 mrioc = shost_priv(shost); 5755 while (mrioc->reset_in_progress || mrioc->is_driver_loading) 5756 ssleep(1); 5757 5758 mrioc->stop_drv_processing = 1; 5759 mpi3mr_cleanup_fwevt_list(mrioc); 5760 spin_lock_irqsave(&mrioc->fwevt_lock, flags); 5761 wq = mrioc->fwevt_worker_thread; 5762 mrioc->fwevt_worker_thread = NULL; 5763 spin_unlock_irqrestore(&mrioc->fwevt_lock, flags); 5764 if (wq) 5765 destroy_workqueue(wq); 5766 5767 mpi3mr_stop_watchdog(mrioc); 5768 mpi3mr_cleanup_ioc(mrioc); 5769 mpi3mr_cleanup_resources(mrioc); 5770 } 5771 5772 /** 5773 * mpi3mr_suspend - PCI power management suspend callback 5774 * @dev: Device struct 5775 * 5776 * Change the power state to the given value and cleanup the IOC 5777 * by issuing MUR and shutdown notification 5778 * 5779 * Return: 0 always. 5780 */ 5781 static int __maybe_unused 5782 mpi3mr_suspend(struct device *dev) 5783 { 5784 struct pci_dev *pdev = to_pci_dev(dev); 5785 struct Scsi_Host *shost = pci_get_drvdata(pdev); 5786 struct mpi3mr_ioc *mrioc; 5787 5788 if (!shost) 5789 return 0; 5790 5791 mrioc = shost_priv(shost); 5792 while (mrioc->reset_in_progress || mrioc->is_driver_loading) 5793 ssleep(1); 5794 mrioc->stop_drv_processing = 1; 5795 mpi3mr_cleanup_fwevt_list(mrioc); 5796 scsi_block_requests(shost); 5797 mpi3mr_stop_watchdog(mrioc); 5798 mpi3mr_cleanup_ioc(mrioc); 5799 5800 ioc_info(mrioc, "pdev=0x%p, slot=%s, entering operating state\n", 5801 pdev, pci_name(pdev)); 5802 mpi3mr_cleanup_resources(mrioc); 5803 5804 return 0; 5805 } 5806 5807 /** 5808 * mpi3mr_resume - PCI power management resume callback 5809 * @dev: Device struct 5810 * 5811 * Restore the power state to D0 and reinitialize the controller 5812 * and resume I/O operations to the target devices 5813 * 5814 * Return: 0 on success, non-zero on failure 5815 */ 5816 static int __maybe_unused 5817 mpi3mr_resume(struct device *dev) 5818 { 5819 struct pci_dev *pdev = to_pci_dev(dev); 5820 struct Scsi_Host *shost = pci_get_drvdata(pdev); 5821 struct mpi3mr_ioc *mrioc; 5822 pci_power_t device_state = pdev->current_state; 5823 int r; 5824 5825 if (!shost) 5826 return 0; 5827 5828 mrioc = shost_priv(shost); 5829 5830 ioc_info(mrioc, "pdev=0x%p, slot=%s, previous operating state [D%d]\n", 5831 pdev, pci_name(pdev), device_state); 5832 mrioc->pdev = pdev; 5833 mrioc->cpu_count = num_online_cpus(); 5834 r = mpi3mr_setup_resources(mrioc); 5835 if (r) { 5836 ioc_info(mrioc, "%s: Setup resources failed[%d]\n", 5837 __func__, r); 5838 return r; 5839 } 5840 5841 mrioc->stop_drv_processing = 0; 5842 mpi3mr_invalidate_devhandles(mrioc); 5843 mpi3mr_free_enclosure_list(mrioc); 5844 mpi3mr_memset_buffers(mrioc); 5845 r = mpi3mr_reinit_ioc(mrioc, 1); 5846 if (r) { 5847 ioc_err(mrioc, "resuming controller failed[%d]\n", r); 5848 return r; 5849 } 5850 ssleep(MPI3MR_RESET_TOPOLOGY_SETTLE_TIME); 5851 scsi_unblock_requests(shost); 5852 mrioc->device_refresh_on = 0; 5853 mpi3mr_start_watchdog(mrioc); 5854 5855 return 0; 5856 } 5857 5858 /** 5859 * mpi3mr_pcierr_error_detected - PCI error detected callback 5860 * @pdev: PCI device instance 5861 * @state: channel state 5862 * 5863 * This function is called by the PCI error recovery driver and 5864 * based on the state passed the driver decides what actions to 5865 * be recommended back to PCI driver. 5866 * 5867 * For all of the states if there is no valid mrioc or scsi host 5868 * references in the PCI device then this function will return 5869 * the result as disconnect. 5870 * 5871 * For normal state, this function will return the result as can 5872 * recover. 5873 * 5874 * For frozen state, this function will block for any pending 5875 * controller initialization or re-initialization to complete, 5876 * stop any new interactions with the controller and return 5877 * status as reset required. 5878 * 5879 * For permanent failure state, this function will mark the 5880 * controller as unrecoverable and return status as disconnect. 5881 * 5882 * Returns: PCI_ERS_RESULT_NEED_RESET or CAN_RECOVER or 5883 * DISCONNECT based on the controller state. 5884 */ 5885 static pci_ers_result_t 5886 mpi3mr_pcierr_error_detected(struct pci_dev *pdev, pci_channel_state_t state) 5887 { 5888 struct Scsi_Host *shost; 5889 struct mpi3mr_ioc *mrioc; 5890 unsigned int timeout = MPI3MR_RESET_TIMEOUT; 5891 5892 dev_info(&pdev->dev, "%s: callback invoked state(%d)\n", __func__, 5893 state); 5894 5895 shost = pci_get_drvdata(pdev); 5896 mrioc = shost_priv(shost); 5897 5898 switch (state) { 5899 case pci_channel_io_normal: 5900 return PCI_ERS_RESULT_CAN_RECOVER; 5901 case pci_channel_io_frozen: 5902 mrioc->pci_err_recovery = true; 5903 mrioc->block_on_pci_err = true; 5904 do { 5905 if (mrioc->reset_in_progress || mrioc->is_driver_loading) 5906 ssleep(1); 5907 else 5908 break; 5909 } while (--timeout); 5910 5911 if (!timeout) { 5912 mrioc->pci_err_recovery = true; 5913 mrioc->block_on_pci_err = true; 5914 mrioc->unrecoverable = 1; 5915 mpi3mr_stop_watchdog(mrioc); 5916 mpi3mr_flush_cmds_for_unrecovered_controller(mrioc); 5917 return PCI_ERS_RESULT_DISCONNECT; 5918 } 5919 5920 scsi_block_requests(mrioc->shost); 5921 mpi3mr_stop_watchdog(mrioc); 5922 mpi3mr_cleanup_resources(mrioc); 5923 return PCI_ERS_RESULT_NEED_RESET; 5924 case pci_channel_io_perm_failure: 5925 mrioc->pci_err_recovery = true; 5926 mrioc->block_on_pci_err = true; 5927 mrioc->unrecoverable = 1; 5928 mpi3mr_stop_watchdog(mrioc); 5929 mpi3mr_flush_cmds_for_unrecovered_controller(mrioc); 5930 return PCI_ERS_RESULT_DISCONNECT; 5931 default: 5932 return PCI_ERS_RESULT_DISCONNECT; 5933 } 5934 } 5935 5936 /** 5937 * mpi3mr_pcierr_slot_reset - Post slot reset callback 5938 * @pdev: PCI device instance 5939 * 5940 * This function is called by the PCI error recovery driver 5941 * after a slot or link reset issued by it for the recovery, the 5942 * driver is expected to bring back the controller and 5943 * initialize it. 5944 * 5945 * This function restores PCI state and reinitializes controller 5946 * resources and the controller, this blocks for any pending 5947 * reset to complete. 5948 * 5949 * Returns: PCI_ERS_RESULT_DISCONNECT on failure or 5950 * PCI_ERS_RESULT_RECOVERED 5951 */ 5952 static pci_ers_result_t mpi3mr_pcierr_slot_reset(struct pci_dev *pdev) 5953 { 5954 struct Scsi_Host *shost; 5955 struct mpi3mr_ioc *mrioc; 5956 unsigned int timeout = MPI3MR_RESET_TIMEOUT; 5957 5958 dev_info(&pdev->dev, "%s: callback invoked\n", __func__); 5959 5960 shost = pci_get_drvdata(pdev); 5961 mrioc = shost_priv(shost); 5962 5963 do { 5964 if (mrioc->reset_in_progress) 5965 ssleep(1); 5966 else 5967 break; 5968 } while (--timeout); 5969 5970 if (!timeout) 5971 goto out_failed; 5972 5973 pci_restore_state(pdev); 5974 5975 if (mpi3mr_setup_resources(mrioc)) { 5976 ioc_err(mrioc, "setup resources failed\n"); 5977 goto out_failed; 5978 } 5979 mrioc->unrecoverable = 0; 5980 mrioc->pci_err_recovery = false; 5981 5982 if (mpi3mr_soft_reset_handler(mrioc, MPI3MR_RESET_FROM_FIRMWARE, 0)) 5983 goto out_failed; 5984 5985 return PCI_ERS_RESULT_RECOVERED; 5986 5987 out_failed: 5988 mrioc->unrecoverable = 1; 5989 mrioc->block_on_pci_err = false; 5990 scsi_unblock_requests(shost); 5991 mpi3mr_start_watchdog(mrioc); 5992 return PCI_ERS_RESULT_DISCONNECT; 5993 } 5994 5995 /** 5996 * mpi3mr_pcierr_resume - PCI error recovery resume 5997 * callback 5998 * @pdev: PCI device instance 5999 * 6000 * This function enables all I/O and IOCTLs post reset issued as 6001 * part of the PCI error recovery 6002 * 6003 * Return: Nothing. 6004 */ 6005 static void mpi3mr_pcierr_resume(struct pci_dev *pdev) 6006 { 6007 struct Scsi_Host *shost; 6008 struct mpi3mr_ioc *mrioc; 6009 6010 dev_info(&pdev->dev, "%s: callback invoked\n", __func__); 6011 6012 shost = pci_get_drvdata(pdev); 6013 mrioc = shost_priv(shost); 6014 6015 if (mrioc->block_on_pci_err) { 6016 mrioc->block_on_pci_err = false; 6017 scsi_unblock_requests(shost); 6018 mpi3mr_start_watchdog(mrioc); 6019 } 6020 } 6021 6022 /** 6023 * mpi3mr_pcierr_mmio_enabled - PCI error recovery callback 6024 * @pdev: PCI device instance 6025 * 6026 * This is called only if mpi3mr_pcierr_error_detected returns 6027 * PCI_ERS_RESULT_CAN_RECOVER. 6028 * 6029 * Return: PCI_ERS_RESULT_DISCONNECT when the controller is 6030 * unrecoverable or when the shost/mrioc reference cannot be 6031 * found, else return PCI_ERS_RESULT_RECOVERED 6032 */ 6033 static pci_ers_result_t mpi3mr_pcierr_mmio_enabled(struct pci_dev *pdev) 6034 { 6035 struct Scsi_Host *shost; 6036 struct mpi3mr_ioc *mrioc; 6037 6038 dev_info(&pdev->dev, "%s: callback invoked\n", __func__); 6039 6040 shost = pci_get_drvdata(pdev); 6041 mrioc = shost_priv(shost); 6042 6043 if (mrioc->unrecoverable) 6044 return PCI_ERS_RESULT_DISCONNECT; 6045 6046 return PCI_ERS_RESULT_RECOVERED; 6047 } 6048 6049 static const struct pci_device_id mpi3mr_pci_id_table[] = { 6050 { 6051 PCI_DEVICE_SUB(MPI3_MFGPAGE_VENDORID_BROADCOM, 6052 MPI3_MFGPAGE_DEVID_SAS4116, PCI_ANY_ID, PCI_ANY_ID) 6053 }, 6054 { 6055 PCI_DEVICE_SUB(MPI3_MFGPAGE_VENDORID_BROADCOM, 6056 MPI3_MFGPAGE_DEVID_SAS5116_MPI, PCI_ANY_ID, PCI_ANY_ID) 6057 }, 6058 { 6059 PCI_DEVICE_SUB(MPI3_MFGPAGE_VENDORID_BROADCOM, 6060 MPI3_MFGPAGE_DEVID_SAS5116_MPI_MGMT, PCI_ANY_ID, PCI_ANY_ID) 6061 }, 6062 { 0 } 6063 }; 6064 MODULE_DEVICE_TABLE(pci, mpi3mr_pci_id_table); 6065 6066 static const struct pci_error_handlers mpi3mr_err_handler = { 6067 .error_detected = mpi3mr_pcierr_error_detected, 6068 .mmio_enabled = mpi3mr_pcierr_mmio_enabled, 6069 .slot_reset = mpi3mr_pcierr_slot_reset, 6070 .resume = mpi3mr_pcierr_resume, 6071 }; 6072 6073 static SIMPLE_DEV_PM_OPS(mpi3mr_pm_ops, mpi3mr_suspend, mpi3mr_resume); 6074 6075 static struct pci_driver mpi3mr_pci_driver = { 6076 .name = MPI3MR_DRIVER_NAME, 6077 .id_table = mpi3mr_pci_id_table, 6078 .probe = mpi3mr_probe, 6079 .remove = mpi3mr_remove, 6080 .shutdown = mpi3mr_shutdown, 6081 .err_handler = &mpi3mr_err_handler, 6082 .driver = { 6083 .probe_type = PROBE_PREFER_ASYNCHRONOUS, 6084 .pm = &mpi3mr_pm_ops, 6085 }, 6086 }; 6087 6088 static ssize_t event_counter_show(struct device_driver *dd, char *buf) 6089 { 6090 return sprintf(buf, "%llu\n", atomic64_read(&event_counter)); 6091 } 6092 static DRIVER_ATTR_RO(event_counter); 6093 6094 static int __init mpi3mr_init(void) 6095 { 6096 int ret_val; 6097 6098 pr_info("Loading %s version %s\n", MPI3MR_DRIVER_NAME, 6099 MPI3MR_DRIVER_VERSION); 6100 6101 mpi3mr_transport_template = 6102 sas_attach_transport(&mpi3mr_transport_functions); 6103 if (!mpi3mr_transport_template) { 6104 pr_err("%s failed to load due to sas transport attach failure\n", 6105 MPI3MR_DRIVER_NAME); 6106 return -ENODEV; 6107 } 6108 6109 ret_val = pci_register_driver(&mpi3mr_pci_driver); 6110 if (ret_val) { 6111 pr_err("%s failed to load due to pci register driver failure\n", 6112 MPI3MR_DRIVER_NAME); 6113 goto err_pci_reg_fail; 6114 } 6115 6116 ret_val = driver_create_file(&mpi3mr_pci_driver.driver, 6117 &driver_attr_event_counter); 6118 if (ret_val) 6119 goto err_event_counter; 6120 6121 return ret_val; 6122 6123 err_event_counter: 6124 pci_unregister_driver(&mpi3mr_pci_driver); 6125 6126 err_pci_reg_fail: 6127 sas_release_transport(mpi3mr_transport_template); 6128 return ret_val; 6129 } 6130 6131 static void __exit mpi3mr_exit(void) 6132 { 6133 if (warn_non_secure_ctlr) 6134 pr_warn( 6135 "Unloading %s version %s while managing a non secure controller\n", 6136 MPI3MR_DRIVER_NAME, MPI3MR_DRIVER_VERSION); 6137 else 6138 pr_info("Unloading %s version %s\n", MPI3MR_DRIVER_NAME, 6139 MPI3MR_DRIVER_VERSION); 6140 6141 driver_remove_file(&mpi3mr_pci_driver.driver, 6142 &driver_attr_event_counter); 6143 pci_unregister_driver(&mpi3mr_pci_driver); 6144 sas_release_transport(mpi3mr_transport_template); 6145 ida_destroy(&mrioc_ida); 6146 } 6147 6148 module_init(mpi3mr_init); 6149 module_exit(mpi3mr_exit); 6150