/*
 *  Linux MegaRAID driver for SAS based RAID controllers
 *
 *  Copyright (c) 2009-2013  LSI Corporation
 *  Copyright (c) 2013-2014  Avago Technologies
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version 2
 *  of the License, or (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 *  FILE: megaraid_sas_fusion.c
 *
 *  Authors: Avago Technologies
 *           Sumant Patro
 *           Adam Radford
 *           Kashyap Desai <kashyap.desai@avagotech.com>
 *           Sumit Saxena <sumit.saxena@avagotech.com>
 *
 *  Send feedback to: megaraidlinux.pdl@avagotech.com
 *
 *  Mail to: Avago Technologies, 350 West Trimble Road, Building 90,
 *  San Jose, California 95131
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/list.h>
#include <linux/moduleparam.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/uio.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
#include <linux/compat.h>
#include <linux/blkdev.h>
#include <linux/mutex.h>
#include <linux/poll.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_dbg.h>
#include <linux/dmi.h>

#include "megaraid_sas_fusion.h"
#include "megaraid_sas.h"


extern void megasas_free_cmds(struct megasas_instance *instance);
extern struct megasas_cmd *megasas_get_cmd(struct megasas_instance
					   *instance);
extern void
megasas_complete_cmd(struct megasas_instance *instance,
		     struct megasas_cmd *cmd, u8 alt_status);
int
wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
	      int seconds);

void
megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd);
int megasas_alloc_cmds(struct megasas_instance *instance);
int
megasas_clear_intr_fusion(struct megasas_register_set __iomem *regs);
int
megasas_issue_polled(struct megasas_instance *instance,
		     struct megasas_cmd *cmd);
void
megasas_check_and_restore_queue_depth(struct megasas_instance *instance);

int megasas_transition_to_ready(struct megasas_instance *instance, int ocr);
void megaraid_sas_kill_hba(struct megasas_instance *instance);

extern u32 megasas_dbg_lvl;
void megasas_sriov_heartbeat_handler(unsigned long instance_addr);
int megasas_sriov_start_heartbeat(struct megasas_instance *instance,
				  int initial);
void megasas_start_timer(struct megasas_instance *instance,
			 struct timer_list *timer,
			 void *fn, unsigned long interval);
extern struct megasas_mgmt_info megasas_mgmt_info;
extern unsigned int resetwaittime;
extern unsigned int dual_qdepth_disable;
static void megasas_free_rdpq_fusion(struct megasas_instance *instance);
static void megasas_free_reply_fusion(struct megasas_instance *instance);



/**
 * megasas_enable_intr_fusion - Enables interrupts
 * @regs: MFI register set
 */
void
megasas_enable_intr_fusion(struct megasas_instance *instance)
{
	struct megasas_register_set __iomem *regs;
	regs = instance->reg_set;

	instance->mask_interrupts = 0;
	/* For Thunderbolt/Invader also clear intr on enable */
	writel(~0, &regs->outbound_intr_status);
	readl(&regs->outbound_intr_status);

	writel(~MFI_FUSION_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);

	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_mask);
}

/**
 * megasas_disable_intr_fusion - Disables interrupt
 * @regs: MFI register set
 */
void
megasas_disable_intr_fusion(struct megasas_instance *instance)
{
	u32 mask = 0xFFFFFFFF;
	u32 status;
	struct megasas_register_set __iomem *regs;
	regs = instance->reg_set;
	instance->mask_interrupts = 1;

	writel(mask, &regs->outbound_intr_mask);
	/* Dummy readl to force pci flush */
	status = readl(&regs->outbound_intr_mask);
}

int
megasas_clear_intr_fusion(struct megasas_register_set __iomem *regs)
{
	u32 status;
	/*
	 * Check if it is our interrupt
	 */
	status = readl(&regs->outbound_intr_status);

	if (status & 1) {
		writel(status, &regs->outbound_intr_status);
		readl(&regs->outbound_intr_status);
		return 1;
	}
	if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK))
		return 0;

	return 1;
}

/**
 * megasas_get_cmd_fusion - Get a command from the free pool
 * @instance:	Adapter soft state
 *
 * Returns a blk_tag indexed mpt frame
 */
inline struct megasas_cmd_fusion *megasas_get_cmd_fusion(struct megasas_instance
						  *instance, u32 blk_tag)
{
	struct fusion_context *fusion;

	fusion = instance->ctrl_context;
	return fusion->cmd_list[blk_tag];
}

/**
 * megasas_return_cmd_fusion - Return a cmd to free command pool
 * @instance:	Adapter soft state
 * @cmd:	Command packet to be returned to free command pool
 */
inline void megasas_return_cmd_fusion(struct megasas_instance *instance,
	struct megasas_cmd_fusion *cmd)
{
	cmd->scmd = NULL;
	memset(cmd->io_request, 0, sizeof(struct MPI2_RAID_SCSI_IO_REQUEST));
}

/**
 * megasas_fire_cmd_fusion - Sends command to the FW
 */
static void
megasas_fire_cmd_fusion(struct megasas_instance *instance,
		union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc)
{
#if defined(writeq) && defined(CONFIG_64BIT)
	u64 req_data = (((u64)le32_to_cpu(req_desc->u.high) << 32) |
		le32_to_cpu(req_desc->u.low));

	writeq(req_data, &instance->reg_set->inbound_low_queue_port);
#else
	unsigned long flags;

	spin_lock_irqsave(&instance->hba_lock, flags);
	writel(le32_to_cpu(req_desc->u.low),
		&instance->reg_set->inbound_low_queue_port);
	writel(le32_to_cpu(req_desc->u.high),
		&instance->reg_set->inbound_high_queue_port);
	mmiowb();
	spin_unlock_irqrestore(&instance->hba_lock, flags);
#endif
}

/**
 * megasas_fusion_update_can_queue - Do all Adapter Queue depth related calculations here
 * @instance:		Adapter soft state
 * fw_boot_context:	Whether this function is called during probe or after OCR
 *
 * This function is only for fusion controllers.
 * Update host can_queue if firmware downgrades the max supported firmware commands.
 * The firmware upgrade case is skipped because the underlying firmware has
 * more resources than are exposed to the OS.
 *
 */
static void
megasas_fusion_update_can_queue(struct megasas_instance *instance, int fw_boot_context)
{
	u16 cur_max_fw_cmds = 0;
	u16 ldio_threshold = 0;
	struct megasas_register_set __iomem *reg_set;

	reg_set = instance->reg_set;

	cur_max_fw_cmds = readl(&instance->reg_set->outbound_scratch_pad_3) & 0x00FFFF;

	if (dual_qdepth_disable || !cur_max_fw_cmds)
		cur_max_fw_cmds = instance->instancet->read_fw_status_reg(reg_set) & 0x00FFFF;
	else
		ldio_threshold =
			(instance->instancet->read_fw_status_reg(reg_set) & 0x00FFFF) - MEGASAS_FUSION_IOCTL_CMDS;

	dev_info(&instance->pdev->dev,
			"Current firmware maximum commands: %d\t LDIO threshold: %d\n",
			cur_max_fw_cmds, ldio_threshold);

	if (fw_boot_context == OCR_CONTEXT) {
		cur_max_fw_cmds = cur_max_fw_cmds - 1;
		if (cur_max_fw_cmds <= instance->max_fw_cmds) {
			instance->cur_can_queue =
				cur_max_fw_cmds - (MEGASAS_FUSION_INTERNAL_CMDS +
						MEGASAS_FUSION_IOCTL_CMDS);
			instance->host->can_queue = instance->cur_can_queue;
			instance->ldio_threshold = ldio_threshold;
		}
	} else {
		instance->max_fw_cmds = cur_max_fw_cmds;
		instance->ldio_threshold = ldio_threshold;

		if (!instance->is_rdpq)
			instance->max_fw_cmds = min_t(u16, instance->max_fw_cmds, 1024);

		/*
		 * Reduce the max supported cmds by 1. This is to ensure that the
		 * reply_q_sz (1 more than the max cmd that driver may send)
		 * does not exceed max cmds that the FW can support
		 */
		instance->max_fw_cmds = instance->max_fw_cmds-1;

		instance->max_scsi_cmds = instance->max_fw_cmds -
				(MEGASAS_FUSION_INTERNAL_CMDS +
				MEGASAS_FUSION_IOCTL_CMDS);
		instance->cur_can_queue = instance->max_scsi_cmds;
	}
}
/**
 * megasas_free_cmds_fusion - Free all the cmds in the free cmd pool
 * @instance:	Adapter soft state
 */
void
megasas_free_cmds_fusion(struct megasas_instance *instance)
{
	int i;
	struct fusion_context *fusion = instance->ctrl_context;
	struct megasas_cmd_fusion *cmd;

	/* SG, Sense */
	for (i = 0; i < instance->max_fw_cmds; i++) {
		cmd = fusion->cmd_list[i];
		if (cmd) {
			if (cmd->sg_frame)
				pci_pool_free(fusion->sg_dma_pool, cmd->sg_frame,
					      cmd->sg_frame_phys_addr);
			if (cmd->sense)
				pci_pool_free(fusion->sense_dma_pool, cmd->sense,
					      cmd->sense_phys_addr);
		}
	}

	if (fusion->sg_dma_pool) {
		pci_pool_destroy(fusion->sg_dma_pool);
		fusion->sg_dma_pool = NULL;
	}
	if (fusion->sense_dma_pool) {
		pci_pool_destroy(fusion->sense_dma_pool);
		fusion->sense_dma_pool = NULL;
	}


	/* Reply Frame, Desc*/
	if (instance->is_rdpq)
		megasas_free_rdpq_fusion(instance);
	else
		megasas_free_reply_fusion(instance);

	/* Request Frame, Desc*/
	if (fusion->req_frames_desc)
		dma_free_coherent(&instance->pdev->dev,
			fusion->request_alloc_sz, fusion->req_frames_desc,
			fusion->req_frames_desc_phys);
	if (fusion->io_request_frames)
		pci_pool_free(fusion->io_request_frames_pool,
			fusion->io_request_frames,
			fusion->io_request_frames_phys);
	if (fusion->io_request_frames_pool) {
		pci_pool_destroy(fusion->io_request_frames_pool);
		fusion->io_request_frames_pool = NULL;
	}


	/* cmd_list */
	for (i = 0; i < instance->max_fw_cmds; i++)
		kfree(fusion->cmd_list[i]);

	kfree(fusion->cmd_list);
}

/**
 * megasas_create_sg_sense_fusion - Creates DMA pool for cmd frames
 * @instance:	Adapter soft state
 *
 */
static int megasas_create_sg_sense_fusion(struct megasas_instance *instance)
{
	int i;
	u32 max_cmd;
	struct fusion_context *fusion;
	struct megasas_cmd_fusion *cmd;

	fusion = instance->ctrl_context;
	max_cmd = instance->max_fw_cmds;


	fusion->sg_dma_pool =
			pci_pool_create("mr_sg", instance->pdev,
				instance->max_chain_frame_sz, 4, 0);
	/* SCSI_SENSE_BUFFERSIZE = 96 bytes */
	fusion->sense_dma_pool =
			pci_pool_create("mr_sense", instance->pdev,
				SCSI_SENSE_BUFFERSIZE, 64, 0);

	if (!fusion->sense_dma_pool || !fusion->sg_dma_pool) {
		dev_err(&instance->pdev->dev,
			"Failed from %s %d\n", __func__, __LINE__);
		return -ENOMEM;
	}

	/*
	 * Allocate and attach a frame to each of the commands in cmd_list
	 */
	for (i = 0; i < max_cmd; i++) {
		cmd = fusion->cmd_list[i];
		cmd->sg_frame = pci_pool_alloc(fusion->sg_dma_pool,
					GFP_KERNEL, &cmd->sg_frame_phys_addr);

		cmd->sense = pci_pool_alloc(fusion->sense_dma_pool,
					GFP_KERNEL, &cmd->sense_phys_addr);
		if (!cmd->sg_frame || !cmd->sense) {
			dev_err(&instance->pdev->dev,
				"Failed from %s %d\n", __func__, __LINE__);
			return -ENOMEM;
		}
	}
	return 0;
}

int
megasas_alloc_cmdlist_fusion(struct megasas_instance *instance)
{
	u32 max_cmd, i;
	struct fusion_context *fusion;

	fusion = instance->ctrl_context;

	max_cmd = instance->max_fw_cmds;

	/*
	 * fusion->cmd_list is an array of struct megasas_cmd_fusion pointers.
	 * Allocate the dynamic array first and then allocate individual
	 * commands.
	 */
	fusion->cmd_list = kzalloc(sizeof(struct megasas_cmd_fusion *) * max_cmd,
				   GFP_KERNEL);
	if (!fusion->cmd_list) {
		dev_err(&instance->pdev->dev,
			"Failed from %s %d\n", __func__, __LINE__);
		return -ENOMEM;
	}

	for (i = 0; i < max_cmd; i++) {
		fusion->cmd_list[i] = kzalloc(sizeof(struct megasas_cmd_fusion),
					      GFP_KERNEL);
		if (!fusion->cmd_list[i]) {
			dev_err(&instance->pdev->dev,
				"Failed from %s %d\n", __func__, __LINE__);
			return -ENOMEM;
		}
	}
	return 0;
}
int
megasas_alloc_request_fusion(struct megasas_instance *instance)
{
	struct fusion_context *fusion;

	fusion = instance->ctrl_context;

	fusion->req_frames_desc =
		dma_alloc_coherent(&instance->pdev->dev,
			fusion->request_alloc_sz,
			&fusion->req_frames_desc_phys, GFP_KERNEL);
	if (!fusion->req_frames_desc) {
		dev_err(&instance->pdev->dev,
			"Failed from %s %d\n", __func__, __LINE__);
		return -ENOMEM;
	}

	fusion->io_request_frames_pool =
			pci_pool_create("mr_ioreq", instance->pdev,
				fusion->io_frames_alloc_sz, 16, 0);

	if (!fusion->io_request_frames_pool) {
		dev_err(&instance->pdev->dev,
			"Failed from %s %d\n", __func__, __LINE__);
		return -ENOMEM;
	}

	fusion->io_request_frames =
			pci_pool_alloc(fusion->io_request_frames_pool,
				GFP_KERNEL, &fusion->io_request_frames_phys);
	if (!fusion->io_request_frames) {
		dev_err(&instance->pdev->dev,
			"Failed from %s %d\n", __func__, __LINE__);
		return -ENOMEM;
	}
	return 0;
}

int
megasas_alloc_reply_fusion(struct megasas_instance *instance)
{
	int i, count;
	struct fusion_context *fusion;
	union MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
	fusion = instance->ctrl_context;

	count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
	fusion->reply_frames_desc_pool =
			pci_pool_create("mr_reply", instance->pdev,
				fusion->reply_alloc_sz * count, 16, 0);

	if (!fusion->reply_frames_desc_pool) {
		dev_err(&instance->pdev->dev,
			"Failed from %s %d\n", __func__, __LINE__);
		return -ENOMEM;
	}

	fusion->reply_frames_desc[0] =
		pci_pool_alloc(fusion->reply_frames_desc_pool,
			GFP_KERNEL, &fusion->reply_frames_desc_phys[0]);
	if (!fusion->reply_frames_desc[0]) {
		dev_err(&instance->pdev->dev,
			"Failed from %s %d\n", __func__, __LINE__);
		return -ENOMEM;
	}
	reply_desc = fusion->reply_frames_desc[0];
	for (i = 0; i < fusion->reply_q_depth * count; i++, reply_desc++)
		reply_desc->Words = cpu_to_le64(ULLONG_MAX);

	/* This is not rdpq mode, but the driver still populates the
	 * reply_frames_desc array to use the same msix index in the ISR path.
	 */
	for (i = 0; i < (count - 1); i++)
		fusion->reply_frames_desc[i + 1] =
			fusion->reply_frames_desc[i] +
			(fusion->reply_alloc_sz)/sizeof(union MPI2_REPLY_DESCRIPTORS_UNION);

	return 0;
}

int
megasas_alloc_rdpq_fusion(struct megasas_instance *instance)
{
	int i, j, count;
	struct fusion_context *fusion;
	union MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;

	fusion = instance->ctrl_context;

	fusion->rdpq_virt = pci_alloc_consistent(instance->pdev,
				sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY) * MAX_MSIX_QUEUES_FUSION,
				&fusion->rdpq_phys);
	if (!fusion->rdpq_virt) {
		dev_err(&instance->pdev->dev,
			"Failed from %s %d\n", __func__, __LINE__);
		return -ENOMEM;
	}

	memset(fusion->rdpq_virt, 0,
			sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY) * MAX_MSIX_QUEUES_FUSION);
	count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
	fusion->reply_frames_desc_pool = pci_pool_create("mr_rdpq",
			instance->pdev, fusion->reply_alloc_sz, 16, 0);

	if (!fusion->reply_frames_desc_pool) {
		dev_err(&instance->pdev->dev,
			"Failed from %s %d\n", __func__, __LINE__);
		return -ENOMEM;
	}

	for (i = 0; i < count; i++) {
		fusion->reply_frames_desc[i] =
				pci_pool_alloc(fusion->reply_frames_desc_pool,
					GFP_KERNEL, &fusion->reply_frames_desc_phys[i]);
		if (!fusion->reply_frames_desc[i]) {
			dev_err(&instance->pdev->dev,
				"Failed from %s %d\n", __func__, __LINE__);
			return -ENOMEM;
		}

		fusion->rdpq_virt[i].RDPQBaseAddress =
			fusion->reply_frames_desc_phys[i];

		reply_desc = fusion->reply_frames_desc[i];
		for (j = 0; j < fusion->reply_q_depth; j++, reply_desc++)
			reply_desc->Words = cpu_to_le64(ULLONG_MAX);
	}
	return 0;
}

static void
megasas_free_rdpq_fusion(struct megasas_instance *instance) {

	int i;
	struct fusion_context *fusion;

	fusion = instance->ctrl_context;

	for (i = 0; i < MAX_MSIX_QUEUES_FUSION; i++) {
		if (fusion->reply_frames_desc[i])
			pci_pool_free(fusion->reply_frames_desc_pool,
				fusion->reply_frames_desc[i],
				fusion->reply_frames_desc_phys[i]);
	}

	if (fusion->reply_frames_desc_pool)
		pci_pool_destroy(fusion->reply_frames_desc_pool);

	if (fusion->rdpq_virt)
		pci_free_consistent(instance->pdev,
			sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY) * MAX_MSIX_QUEUES_FUSION,
			fusion->rdpq_virt, fusion->rdpq_phys);
}

static void
megasas_free_reply_fusion(struct megasas_instance *instance) {

	struct fusion_context *fusion;

	fusion = instance->ctrl_context;

	if (fusion->reply_frames_desc[0])
		pci_pool_free(fusion->reply_frames_desc_pool,
			fusion->reply_frames_desc[0],
			fusion->reply_frames_desc_phys[0]);

	if (fusion->reply_frames_desc_pool)
		pci_pool_destroy(fusion->reply_frames_desc_pool);

}


/**
 * megasas_alloc_cmds_fusion - Allocates the command packets
 * @instance:	Adapter soft state
 *
 *
 * Each frame has a 32-bit field called context. This context is used to get
 * back the megasas_cmd_fusion from the frame when a frame gets completed.
 * In this driver, the 32 bit values are the indices into an array cmd_list.
 * This array is used only to look up the megasas_cmd_fusion given the context.
 * The free commands themselves are maintained in a linked list called cmd_pool.
 *
 * cmds are formed in the io_request and sg_frame members of the
 * megasas_cmd_fusion. The context field is used to get a request descriptor
 * and is used as SMID of the cmd.
 * SMID value range is from 1 to max_fw_cmds.
 */
int
megasas_alloc_cmds_fusion(struct megasas_instance *instance)
{
	int i;
	struct fusion_context *fusion;
	struct megasas_cmd_fusion *cmd;
	u32 offset;
	dma_addr_t io_req_base_phys;
	u8 *io_req_base;


	fusion = instance->ctrl_context;

	if (megasas_alloc_cmdlist_fusion(instance))
		goto fail_exit;

	if (megasas_alloc_request_fusion(instance))
		goto fail_exit;

	if (instance->is_rdpq) {
		if (megasas_alloc_rdpq_fusion(instance))
			goto fail_exit;
	} else
		if (megasas_alloc_reply_fusion(instance))
			goto fail_exit;

	/* The first 256 bytes (SMID 0) is not used. Don't add to the cmd list */
	io_req_base = fusion->io_request_frames + MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
	io_req_base_phys = fusion->io_request_frames_phys + MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;

	/*
	 * Add all the commands to command pool (fusion->cmd_pool)
	 */

	/* SMID 0 is reserved. Set SMID/index from 1 */
	for (i = 0; i < instance->max_fw_cmds; i++) {
		cmd = fusion->cmd_list[i];
		offset = MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * i;
		memset(cmd, 0, sizeof(struct megasas_cmd_fusion));
		cmd->index = i + 1;
		cmd->scmd = NULL;
		cmd->sync_cmd_idx = (i >= instance->max_scsi_cmds) ?
				(i - instance->max_scsi_cmds) :
				(u32)ULONG_MAX; /* Set to Invalid */
		cmd->instance = instance;
		cmd->io_request =
			(struct MPI2_RAID_SCSI_IO_REQUEST *)
			(io_req_base + offset);
		memset(cmd->io_request, 0,
		       sizeof(struct MPI2_RAID_SCSI_IO_REQUEST));
		cmd->io_request_phys_addr = io_req_base_phys + offset;
	}

	if (megasas_create_sg_sense_fusion(instance))
		goto fail_exit;

	return 0;

fail_exit:
	megasas_free_cmds_fusion(instance);
	return -ENOMEM;
}

/**
 * wait_and_poll - Issues a polling command
 * @instance:	Adapter soft state
 * @cmd:	Command packet to be issued
 *
 * For polling, MFI requires the cmd_status to be set to 0xFF before posting.
 */
int
wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
	int seconds)
{
	int i;
	struct megasas_header *frame_hdr = &cmd->frame->hdr;
	struct fusion_context *fusion;

	u32 msecs = seconds * 1000;

	fusion = instance->ctrl_context;
	/*
	 * Wait for cmd_status to change
	 */
	for (i = 0; (i < msecs) && (frame_hdr->cmd_status == 0xff); i += 20) {
		rmb();
		msleep(20);
	}

	if (frame_hdr->cmd_status == MFI_STAT_INVALID_STATUS)
		return DCMD_TIMEOUT;
	else if (frame_hdr->cmd_status == MFI_STAT_OK)
		return DCMD_SUCCESS;
	else
		return DCMD_FAILED;
}

/**
 * megasas_ioc_init_fusion - Initializes the FW
 * @instance:	Adapter soft state
 *
 * Issues the IOC Init cmd
 */
int
megasas_ioc_init_fusion(struct megasas_instance *instance)
{
	struct megasas_init_frame *init_frame;
	struct MPI2_IOC_INIT_REQUEST *IOCInitMessage = NULL;
	dma_addr_t ioc_init_handle;
	struct megasas_cmd *cmd;
	u8 ret, cur_rdpq_mode;
	struct fusion_context *fusion;
	union MEGASAS_REQUEST_DESCRIPTOR_UNION req_desc;
	int i;
	struct megasas_header *frame_hdr;
	const char *sys_info;
	MFI_CAPABILITIES *drv_ops;
	u32 scratch_pad_2;

	fusion = instance->ctrl_context;

	cmd = megasas_get_cmd(instance);

	if (!cmd) {
		dev_err(&instance->pdev->dev, "Could not allocate cmd for INIT Frame\n");
		ret = 1;
		goto fail_get_cmd;
	}

	scratch_pad_2 = readl
		(&instance->reg_set->outbound_scratch_pad_2);

	cur_rdpq_mode = (scratch_pad_2 & MR_RDPQ_MODE_OFFSET) ? 1 : 0;

	if (instance->is_rdpq && !cur_rdpq_mode) {
		dev_err(&instance->pdev->dev, "Firmware downgrade *NOT SUPPORTED*"
			" from RDPQ mode to non RDPQ mode\n");
		ret = 1;
		goto fail_fw_init;
	}

	IOCInitMessage =
		dma_alloc_coherent(&instance->pdev->dev,
				   sizeof(struct MPI2_IOC_INIT_REQUEST),
				   &ioc_init_handle, GFP_KERNEL);

	if (!IOCInitMessage) {
		dev_err(&instance->pdev->dev, "Could not allocate memory for "
			"IOCInitMessage\n");
		ret = 1;
		goto fail_fw_init;
	}

	memset(IOCInitMessage, 0, sizeof(struct MPI2_IOC_INIT_REQUEST));

	IOCInitMessage->Function = MPI2_FUNCTION_IOC_INIT;
	IOCInitMessage->WhoInit = MPI2_WHOINIT_HOST_DRIVER;
	IOCInitMessage->MsgVersion = cpu_to_le16(MPI2_VERSION);
	IOCInitMessage->HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION);
	IOCInitMessage->SystemRequestFrameSize = cpu_to_le16(MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4);

	IOCInitMessage->ReplyDescriptorPostQueueDepth = cpu_to_le16(fusion->reply_q_depth);
	IOCInitMessage->ReplyDescriptorPostQueueAddress = instance->is_rdpq ?
			cpu_to_le64(fusion->rdpq_phys) :
			cpu_to_le64(fusion->reply_frames_desc_phys[0]);
	IOCInitMessage->MsgFlags = instance->is_rdpq ?
			MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE : 0;
	IOCInitMessage->SystemRequestFrameBaseAddress = cpu_to_le64(fusion->io_request_frames_phys);
	IOCInitMessage->HostMSIxVectors = instance->msix_vectors;
	init_frame = (struct megasas_init_frame *)cmd->frame;
	memset(init_frame, 0, MEGAMFI_FRAME_SIZE);

	frame_hdr = &cmd->frame->hdr;
	frame_hdr->cmd_status = 0xFF;
	frame_hdr->flags = cpu_to_le16(
		le16_to_cpu(frame_hdr->flags) |
		MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);

	init_frame->cmd = MFI_CMD_INIT;
	init_frame->cmd_status = 0xFF;

	drv_ops = (MFI_CAPABILITIES *) &(init_frame->driver_operations);

	/* driver supports Extended MSIX */
	if (fusion->adapter_type == INVADER_SERIES)
		drv_ops->mfi_capabilities.support_additional_msix = 1;
	/* driver supports HA / Remote LUN over Fast Path interface */
	drv_ops->mfi_capabilities.support_fp_remote_lun = 1;

	drv_ops->mfi_capabilities.support_max_255lds = 1;
	drv_ops->mfi_capabilities.support_ndrive_r1_lb = 1;
	drv_ops->mfi_capabilities.security_protocol_cmds_fw = 1;

	if (instance->max_chain_frame_sz > MEGASAS_CHAIN_FRAME_SZ_MIN)
		drv_ops->mfi_capabilities.support_ext_io_size = 1;

	drv_ops->mfi_capabilities.support_fp_rlbypass = 1;
	if (!dual_qdepth_disable)
		drv_ops->mfi_capabilities.support_ext_queue_depth = 1;

	drv_ops->mfi_capabilities.support_qd_throttling = 1;
	/* Convert capability to LE32 */
	cpu_to_le32s((u32 *)&init_frame->driver_operations.mfi_capabilities);

	sys_info = dmi_get_system_info(DMI_PRODUCT_UUID);
	if (instance->system_info_buf && sys_info) {
		memcpy(instance->system_info_buf->systemId, sys_info,
			strlen(sys_info) > 64 ? 64 : strlen(sys_info));
		instance->system_info_buf->systemIdLength =
			strlen(sys_info) > 64 ? 64 : strlen(sys_info);
		init_frame->system_info_lo = instance->system_info_h;
		init_frame->system_info_hi = 0;
	}

	init_frame->queue_info_new_phys_addr_hi =
		cpu_to_le32(upper_32_bits(ioc_init_handle));
	init_frame->queue_info_new_phys_addr_lo =
		cpu_to_le32(lower_32_bits(ioc_init_handle));
	init_frame->data_xfer_len = cpu_to_le32(sizeof(struct MPI2_IOC_INIT_REQUEST));

	req_desc.u.low = cpu_to_le32(lower_32_bits(cmd->frame_phys_addr));
	req_desc.u.high = cpu_to_le32(upper_32_bits(cmd->frame_phys_addr));
	req_desc.MFAIo.RequestFlags =
		(MEGASAS_REQ_DESCRIPT_FLAGS_MFA <<
		MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);

	/*
	 * disable the intr before firing the init frame
	 */
	instance->instancet->disable_intr(instance);

	for (i = 0; i < (10 * 1000); i += 20) {
		if (readl(&instance->reg_set->doorbell) & 1)
			msleep(20);
		else
			break;
	}

	megasas_fire_cmd_fusion(instance, &req_desc);

	wait_and_poll(instance, cmd, MFI_POLL_TIMEOUT_SECS);

	frame_hdr = &cmd->frame->hdr;
	if (frame_hdr->cmd_status != 0) {
		ret = 1;
		goto fail_fw_init;
	}
	dev_err(&instance->pdev->dev, "Init cmd success\n");

	ret = 0;

fail_fw_init:
	megasas_return_cmd(instance, cmd);
	if (IOCInitMessage)
		dma_free_coherent(&instance->pdev->dev,
				  sizeof(struct MPI2_IOC_INIT_REQUEST),
				  IOCInitMessage, ioc_init_handle);
fail_get_cmd:
	return ret;
}

/**
 * megasas_sync_pd_seq_num - JBOD SEQ MAP
 * @instance:	Adapter soft state
 * @pend:	set to 1 if it is a pended JBOD map.
 *
 * Issue the JBOD map to the firmware. If it is a pended command,
 * issue the command and return. If it is the first instance of the JBOD map,
 * issue the command and wait for its completion.
 */
int
megasas_sync_pd_seq_num(struct megasas_instance *instance, bool pend) {
	int ret = 0;
	u32 pd_seq_map_sz;
	struct megasas_cmd *cmd;
	struct megasas_dcmd_frame *dcmd;
	struct fusion_context *fusion = instance->ctrl_context;
	struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
	dma_addr_t pd_seq_h;

	pd_sync = (void *)fusion->pd_seq_sync[(instance->pd_seq_map_id & 1)];
	pd_seq_h = fusion->pd_seq_phys[(instance->pd_seq_map_id & 1)];
	pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
			(sizeof(struct MR_PD_CFG_SEQ) *
			(MAX_PHYSICAL_DEVICES - 1));

	cmd = megasas_get_cmd(instance);
	if (!cmd) {
		dev_err(&instance->pdev->dev,
			"Could not get mfi cmd. Fail from %s %d\n",
			__func__, __LINE__);
		return -ENOMEM;
	}

	dcmd = &cmd->frame->dcmd;

	memset(pd_sync, 0, pd_seq_map_sz);
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0xFF;
	dcmd->sge_count = 1;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = cpu_to_le32(pd_seq_map_sz);
	dcmd->opcode = cpu_to_le32(MR_DCMD_SYSTEM_PD_MAP_GET_INFO);
	dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(pd_seq_h);
	dcmd->sgl.sge32[0].length = cpu_to_le32(pd_seq_map_sz);

	if (pend) {
		dcmd->mbox.b[0] = MEGASAS_DCMD_MBOX_PEND_FLAG;
		dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_WRITE);
		instance->jbod_seq_cmd = cmd;
		instance->instancet->issue_dcmd(instance, cmd);
		return 0;
	}

	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);

	/* Below code is only for non pended DCMD */
	if (instance->ctrl_context && !instance->mask_interrupts)
		ret = megasas_issue_blocked_cmd(instance, cmd,
			MFI_IO_TIMEOUT_SECS);
	else
		ret = megasas_issue_polled(instance, cmd);

	if (le32_to_cpu(pd_sync->count) > MAX_PHYSICAL_DEVICES) {
		dev_warn(&instance->pdev->dev,
			"driver supports max %d JBOD, but FW reports %d\n",
			MAX_PHYSICAL_DEVICES, le32_to_cpu(pd_sync->count));
		ret = -EINVAL;
	}

	if (ret == DCMD_TIMEOUT && instance->ctrl_context)
		megaraid_sas_kill_hba(instance);

	if (ret == DCMD_SUCCESS)
		instance->pd_seq_map_id++;

	megasas_return_cmd(instance, cmd);
	return ret;
}

/*
 * megasas_get_ld_map_info - Returns FW's ld_map structure
 * @instance:	Adapter soft state
 * @pend:	Pend the command or not
 * Issues an internal command (DCMD) to get the FW's controller PD
 * list structure.  This information is mainly used to find out SYSTEM
 * supported by the FW.
 * dcmd.mbox value setting for MR_DCMD_LD_MAP_GET_INFO
 * dcmd.mbox.b[0]	- number of LDs being sync'd
 * dcmd.mbox.b[1]	- 0 - complete command immediately.
 *			- 1 - pend till config change
 * dcmd.mbox.b[2]	- 0 - supports max 64 lds and uses legacy MR_FW_RAID_MAP
 *			- 1 - supports max MAX_LOGICAL_DRIVES_EXT lds and
 *			      uses extended struct MR_FW_RAID_MAP_EXT
 */
static int
megasas_get_ld_map_info(struct megasas_instance *instance)
{
	int ret = 0;
	struct megasas_cmd *cmd;
	struct megasas_dcmd_frame *dcmd;
	void *ci;
	dma_addr_t ci_h = 0;
	u32 size_map_info;
	struct fusion_context *fusion;

	cmd = megasas_get_cmd(instance);

	if (!cmd) {
		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get cmd for map info\n");
		return -ENOMEM;
	}

	fusion = instance->ctrl_context;

	if (!fusion) {
		megasas_return_cmd(instance, cmd);
		return -ENXIO;
	}

	dcmd = &cmd->frame->dcmd;

	size_map_info = fusion->current_map_sz;

	ci = (void *) fusion->ld_map[(instance->map_id & 1)];
	ci_h = fusion->ld_map_phys[(instance->map_id & 1)];

	if (!ci) {
		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc mem for ld_map_info\n");
		megasas_return_cmd(instance, cmd);
		return -ENOMEM;
	}

	memset(ci, 0, fusion->max_map_sz);
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
#if VD_EXT_DEBUG
	dev_dbg(&instance->pdev->dev,
		"%s sending MR_DCMD_LD_MAP_GET_INFO with size %d\n",
		__func__, cpu_to_le32(size_map_info));
#endif
	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0xFF;
	dcmd->sge_count = 1;
	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = cpu_to_le32(size_map_info);
	dcmd->opcode = cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO);
	dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
	dcmd->sgl.sge32[0].length = cpu_to_le32(size_map_info);

	if (instance->ctrl_context && !instance->mask_interrupts)
		ret = megasas_issue_blocked_cmd(instance, cmd,
			MFI_IO_TIMEOUT_SECS);
	else
		ret = megasas_issue_polled(instance, cmd);

	if (ret == DCMD_TIMEOUT && instance->ctrl_context)
		megaraid_sas_kill_hba(instance);

	megasas_return_cmd(instance, cmd);

	return ret;
}

u8
megasas_get_map_info(struct megasas_instance *instance)
{
	struct fusion_context *fusion = instance->ctrl_context;

	fusion->fast_path_io = 0;
	if (!megasas_get_ld_map_info(instance)) {
		if (MR_ValidateMapInfo(instance)) {
			fusion->fast_path_io = 1;
			return 0;
		}
	}
	return 1;
}

/*
 * megasas_sync_map_info - Returns FW's ld_map structure
 * @instance:	Adapter soft state
 *
 * Issues an internal command (DCMD) to get the FW's controller PD
 * list structure.  This information is mainly used to find out SYSTEM
 * supported by the FW.
 */
int
megasas_sync_map_info(struct megasas_instance *instance)
{
	int ret = 0, i;
	struct megasas_cmd *cmd;
	struct megasas_dcmd_frame *dcmd;
	u32 size_sync_info, num_lds;
	struct fusion_context *fusion;
	struct MR_LD_TARGET_SYNC *ci = NULL;
	struct MR_DRV_RAID_MAP_ALL *map;
	struct MR_LD_RAID *raid;
	struct MR_LD_TARGET_SYNC *ld_sync;
	dma_addr_t ci_h = 0;
	u32 size_map_info;

	cmd = megasas_get_cmd(instance);

	if (!cmd) {
		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get cmd for sync info\n");
		return -ENOMEM;
	}

	fusion = instance->ctrl_context;

	if (!fusion) {
		megasas_return_cmd(instance, cmd);
		return 1;
	}

	map = fusion->ld_drv_map[instance->map_id & 1];

	num_lds = le16_to_cpu(map->raidMap.ldCount);

	dcmd = &cmd->frame->dcmd;

	size_sync_info = sizeof(struct MR_LD_TARGET_SYNC) *num_lds;

	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	ci = (struct MR_LD_TARGET_SYNC *)
		fusion->ld_map[(instance->map_id - 1) & 1];
	memset(ci, 0, fusion->max_map_sz);

	ci_h = fusion->ld_map_phys[(instance->map_id - 1) & 1];

	ld_sync = (struct MR_LD_TARGET_SYNC *)ci;

	for (i = 0; i < num_lds; i++, ld_sync++) {
		raid = MR_LdRaidGet(i, map);
		ld_sync->targetId = MR_GetLDTgtId(i, map);
		ld_sync->seqNum = raid->seqNum;
	}

	size_map_info = fusion->current_map_sz;

	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0xFF;
	dcmd->sge_count = 1;
	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_WRITE);
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = cpu_to_le32(size_map_info);
	dcmd->mbox.b[0] = num_lds;
	dcmd->mbox.b[1] = MEGASAS_DCMD_MBOX_PEND_FLAG;
	dcmd->opcode = cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO);
	dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
	dcmd->sgl.sge32[0].length = cpu_to_le32(size_map_info);

	instance->map_update_cmd = cmd;

	instance->instancet->issue_dcmd(instance, cmd);

	return ret;
}

/*
 * megasas_display_intel_branding - Display branding string
 * @instance: per adapter object
 *
 * Return nothing.
 */
static void
megasas_display_intel_branding(struct megasas_instance *instance)
{
	if (instance->pdev->subsystem_vendor != PCI_VENDOR_ID_INTEL)
		return;

	switch (instance->pdev->device) {
	case PCI_DEVICE_ID_LSI_INVADER:
		switch (instance->pdev->subsystem_device) {
		case MEGARAID_INTEL_RS3DC080_SSDID:
			dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
				instance->host->host_no,
				MEGARAID_INTEL_RS3DC080_BRANDING);
			break;
		case MEGARAID_INTEL_RS3DC040_SSDID:
			dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
				instance->host->host_no,
				MEGARAID_INTEL_RS3DC040_BRANDING);
			break;
		case MEGARAID_INTEL_RS3SC008_SSDID:
			dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
				instance->host->host_no,
				MEGARAID_INTEL_RS3SC008_BRANDING);
			break;
		case MEGARAID_INTEL_RS3MC044_SSDID:
			dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
				instance->host->host_no,
				MEGARAID_INTEL_RS3MC044_BRANDING);
			break;
		default:
			break;
		}
		break;
	case PCI_DEVICE_ID_LSI_FURY:
		switch (instance->pdev->subsystem_device) {
		case MEGARAID_INTEL_RS3WC080_SSDID:
			dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
				instance->host->host_no,
				MEGARAID_INTEL_RS3WC080_BRANDING);
			break;
		case MEGARAID_INTEL_RS3WC040_SSDID:
			dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
				instance->host->host_no,
				MEGARAID_INTEL_RS3WC040_BRANDING);
			break;
		default:
			break;
		}
		break;
	case PCI_DEVICE_ID_LSI_CUTLASS_52:
	case PCI_DEVICE_ID_LSI_CUTLASS_53:
		switch (instance->pdev->subsystem_device) {
		case MEGARAID_INTEL_RMS3BC160_SSDID:
			dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
				instance->host->host_no,
				MEGARAID_INTEL_RMS3BC160_BRANDING);
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}
}

/**
 * megasas_init_adapter_fusion - Initializes the FW
 * @instance:	Adapter soft state
 *
 * This is the main function for initializing firmware.
 */
u32
megasas_init_adapter_fusion(struct megasas_instance *instance)
{
	struct megasas_register_set __iomem *reg_set;
	struct fusion_context *fusion;
	u32 max_cmd, scratch_pad_2;
	int i = 0, count;

	fusion = instance->ctrl_context;

	reg_set = instance->reg_set;

	megasas_fusion_update_can_queue(instance, PROBE_CONTEXT);

	/*
	 * Reduce the max supported cmds by 1. This is to ensure that the
	 * reply_q_sz (1 more than the max cmd that driver may send)
	 * does not exceed max cmds that the FW can support
	 */
	instance->max_fw_cmds = instance->max_fw_cmds-1;

	/*
	 * Only Driver's internal DCMDs and IOCTL DCMDs need to have MFI frames
	 */
	instance->max_mfi_cmds =
		MEGASAS_FUSION_INTERNAL_CMDS + MEGASAS_FUSION_IOCTL_CMDS;

	max_cmd = instance->max_fw_cmds;

	fusion->reply_q_depth = 2 * (((max_cmd + 1 + 15)/16)*16);

	fusion->request_alloc_sz =
		sizeof(union MEGASAS_REQUEST_DESCRIPTOR_UNION) *max_cmd;
	fusion->reply_alloc_sz = sizeof(union MPI2_REPLY_DESCRIPTORS_UNION)
		*(fusion->reply_q_depth);
	fusion->io_frames_alloc_sz = MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE +
		(MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE *
		 (max_cmd + 1)); /* Extra 1 for SMID 0 */

	scratch_pad_2 = readl(&instance->reg_set->outbound_scratch_pad_2);
	/* If scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK is set,
	 * the Firmware supports an extended IO chain frame which is 4 times
	 * larger than legacy Firmware.
	 * Legacy Firmware - Frame size is (8 * 128) = 1K
	 * 1M IO Firmware  - Frame size is (8 * 128 * 4) = 4K
	 */
	if (scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK)
		instance->max_chain_frame_sz =
			((scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_MASK) >>
			MEGASAS_MAX_CHAIN_SHIFT) * MEGASAS_1MB_IO;
	else
		instance->max_chain_frame_sz =
			((scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_MASK) >>
			MEGASAS_MAX_CHAIN_SHIFT) * MEGASAS_256K_IO;

	if (instance->max_chain_frame_sz < MEGASAS_CHAIN_FRAME_SZ_MIN) {
		dev_warn(&instance->pdev->dev, "frame size %d invalid, fall back to legacy max frame size %d\n",
			instance->max_chain_frame_sz,
			MEGASAS_CHAIN_FRAME_SZ_MIN);
		instance->max_chain_frame_sz = MEGASAS_CHAIN_FRAME_SZ_MIN;
	}

	fusion->max_sge_in_main_msg =
		(MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE
			- offsetof(struct MPI2_RAID_SCSI_IO_REQUEST, SGL))/16;

	fusion->max_sge_in_chain =
		instance->max_chain_frame_sz
			/ sizeof(union MPI2_SGE_IO_UNION);

	instance->max_num_sge =
		rounddown_pow_of_two(fusion->max_sge_in_main_msg
			+ fusion->max_sge_in_chain - 2);

	/* Used for pass thru MFI frame (DCMD) */
	fusion->chain_offset_mfi_pthru =
		offsetof(struct MPI2_RAID_SCSI_IO_REQUEST, SGL)/16;

	fusion->chain_offset_io_request =
		(MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
		 sizeof(union MPI2_SGE_IO_UNION))/16;

	count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
	for (i = 0 ; i < count; i++)
		fusion->last_reply_idx[i] = 0;

	/*
	 * For fusion adapters, 3 commands for IOCTL and 5 commands
	 * for driver's internal DCMDs.
	 */
	instance->max_scsi_cmds = instance->max_fw_cmds -
				(MEGASAS_FUSION_INTERNAL_CMDS +
				MEGASAS_FUSION_IOCTL_CMDS);
	sema_init(&instance->ioctl_sem, MEGASAS_FUSION_IOCTL_CMDS);

	/*
	 * Allocate memory for descriptors
	 * Create a pool of commands
	 */
	if (megasas_alloc_cmds(instance))
		goto fail_alloc_mfi_cmds;
	if (megasas_alloc_cmds_fusion(instance))
		goto fail_alloc_cmds;

	if (megasas_ioc_init_fusion(instance))
		goto fail_ioc_init;

	megasas_display_intel_branding(instance);
	if (megasas_get_ctrl_info(instance)) {
		dev_err(&instance->pdev->dev,
			"Could not get controller info. Fail from %s %d\n",
			__func__, __LINE__);
		goto fail_ioc_init;
	}

	instance->flag_ieee = 1;
	fusion->fast_path_io = 0;

	fusion->drv_map_pages = get_order(fusion->drv_map_sz);
	for (i = 0; i < 2; i++) {
		fusion->ld_map[i] = NULL;
		fusion->ld_drv_map[i] = (void *)__get_free_pages(GFP_KERNEL,
			fusion->drv_map_pages);
		if (!fusion->ld_drv_map[i]) {
			dev_err(&instance->pdev->dev, "Could not allocate "
				"memory for local map info for %d pages\n",
				fusion->drv_map_pages);
			if (i == 1)
				free_pages((ulong)fusion->ld_drv_map[0],
					fusion->drv_map_pages);
			goto fail_ioc_init;
		}
		memset(fusion->ld_drv_map[i], 0,
			((1 << PAGE_SHIFT) << fusion->drv_map_pages));
	}

	for (i = 0; i < 2; i++) {
		fusion->ld_map[i] = dma_alloc_coherent(&instance->pdev->dev,
						       fusion->max_map_sz,
						       &fusion->ld_map_phys[i],
						       GFP_KERNEL);
		if (!fusion->ld_map[i]) {
			dev_err(&instance->pdev->dev, "Could not allocate memory "
				"for map info\n");
			goto fail_map_info;
		}
	}

	if (!megasas_get_map_info(instance))
		megasas_sync_map_info(instance);

	return 0;

fail_map_info:
	if (i == 1)
		dma_free_coherent(&instance->pdev->dev, fusion->max_map_sz,
				  fusion->ld_map[0], fusion->ld_map_phys[0]);
fail_ioc_init:
	megasas_free_cmds_fusion(instance);
fail_alloc_cmds:
	megasas_free_cmds(instance);
fail_alloc_mfi_cmds:
	return 1;
}

/**
 * map_cmd_status - Maps FW cmd status to OS cmd status
 * @cmd:	Pointer to cmd
 * @status:	status of cmd returned by FW
 * @ext_status:	ext status of cmd returned by FW
 */

void
map_cmd_status(struct megasas_cmd_fusion *cmd, u8 status, u8 ext_status)
{

	switch (status) {

	case MFI_STAT_OK:
		cmd->scmd->result = DID_OK << 16;
		break;

	case MFI_STAT_SCSI_IO_FAILED:
	case MFI_STAT_LD_INIT_IN_PROGRESS:
		cmd->scmd->result = (DID_ERROR << 16) | ext_status;
		break;

	case MFI_STAT_SCSI_DONE_WITH_ERROR:

		cmd->scmd->result = (DID_OK << 16) | ext_status;
		if (ext_status == SAM_STAT_CHECK_CONDITION) {
			memset(cmd->scmd->sense_buffer, 0,
			       SCSI_SENSE_BUFFERSIZE);
			memcpy(cmd->scmd->sense_buffer, cmd->sense,
			       SCSI_SENSE_BUFFERSIZE);
			cmd->scmd->result |= DRIVER_SENSE << 24;
		}
		break;

	case MFI_STAT_LD_OFFLINE:
	case MFI_STAT_DEVICE_NOT_FOUND:
		cmd->scmd->result = DID_BAD_TARGET << 16;
		break;
	case MFI_STAT_CONFIG_SEQ_MISMATCH:
		cmd->scmd->result = DID_IMM_RETRY << 16;
		break;
	default:
		dev_printk(KERN_DEBUG, &cmd->instance->pdev->dev, "FW status %#x\n", status);
		cmd->scmd->result = DID_ERROR << 16;
		break;
	}
}

/**
 * megasas_make_sgl_fusion - Prepares 32-bit SGL
 * @instance:	Adapter soft state
 * @scp:	SCSI command from the mid-layer
 * @sgl_ptr:	SGL to be filled in
 * @cmd:	cmd we are working on
 *
 * If successful, this function returns the number of SG elements.
 */
static int
megasas_make_sgl_fusion(struct megasas_instance *instance,
			struct scsi_cmnd *scp,
			struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr,
			struct megasas_cmd_fusion *cmd)
{
	int i, sg_processed, sge_count;
	struct scatterlist *os_sgl;
	struct fusion_context *fusion;

	fusion = instance->ctrl_context;

	if (fusion->adapter_type == INVADER_SERIES) {
		struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr_end = sgl_ptr;
		sgl_ptr_end += fusion->max_sge_in_main_msg - 1;
		sgl_ptr_end->Flags = 0;
	}

	sge_count = scsi_dma_map(scp);

	BUG_ON(sge_count < 0);

	if (sge_count > instance->max_num_sge || !sge_count)
		return sge_count;

	scsi_for_each_sg(scp, os_sgl, sge_count, i) {
		sgl_ptr->Length = cpu_to_le32(sg_dma_len(os_sgl));
		sgl_ptr->Address = cpu_to_le64(sg_dma_address(os_sgl));
		sgl_ptr->Flags = 0;
		if (fusion->adapter_type == INVADER_SERIES)
			if (i == sge_count - 1)
				sgl_ptr->Flags = IEEE_SGE_FLAGS_END_OF_LIST;
		sgl_ptr++;

		sg_processed = i + 1;

		if ((sg_processed == (fusion->max_sge_in_main_msg - 1)) &&
		    (sge_count > fusion->max_sge_in_main_msg)) {

			struct MPI25_IEEE_SGE_CHAIN64 *sg_chain;
			if (fusion->adapter_type == INVADER_SERIES) {
				if ((le16_to_cpu(cmd->io_request->IoFlags) &
					MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) !=
					MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH)
					cmd->io_request->ChainOffset =
						fusion->
						chain_offset_io_request;
				else
					cmd->io_request->ChainOffset = 0;
			} else
				cmd->io_request->ChainOffset =
					fusion->chain_offset_io_request;

			sg_chain = sgl_ptr;
			/* Prepare chain element */
			sg_chain->NextChainOffset = 0;
			if (fusion->adapter_type == INVADER_SERIES)
				sg_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT;
			else
				sg_chain->Flags =
					(IEEE_SGE_FLAGS_CHAIN_ELEMENT |
					 MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR);
			sg_chain->Length = cpu_to_le32((sizeof(union MPI2_SGE_IO_UNION) * (sge_count - sg_processed)));
			sg_chain->Address = cpu_to_le64(cmd->sg_frame_phys_addr);

			sgl_ptr =
				(struct MPI25_IEEE_SGE_CHAIN64 *)cmd->sg_frame;
			memset(sgl_ptr, 0, instance->max_chain_frame_sz);
		}
	}

	return sge_count;
}

/**
 * megasas_set_pd_lba - Sets PD LBA
 * @cdb:	CDB
 * @cdb_len:	cdb length
 * @start_blk:	Start block of IO
 *
 * Used to set the PD LBA in CDB for FP IOs
 */
void
megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len,
		   struct IO_REQUEST_INFO *io_info, struct scsi_cmnd *scp,
		   struct MR_DRV_RAID_MAP_ALL *local_map_ptr, u32 ref_tag)
{
	struct MR_LD_RAID *raid;
	u32 ld;
	u64 start_blk = io_info->pdBlock;
	u8 *cdb = io_request->CDB.CDB32;
	u32 num_blocks = io_info->numBlocks;
	u8 opcode = 0, flagvals = 0, groupnum = 0, control = 0;

	/* Check if T10 PI (DIF) is enabled for this LD */
	ld = MR_TargetIdToLdGet(io_info->ldTgtId, local_map_ptr);
	raid = MR_LdRaidGet(ld, local_map_ptr);
	if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER) {
		memset(cdb, 0, sizeof(io_request->CDB.CDB32));
		cdb[0] = MEGASAS_SCSI_VARIABLE_LENGTH_CMD;
		cdb[7] = MEGASAS_SCSI_ADDL_CDB_LEN;

		if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
			cdb[9] = MEGASAS_SCSI_SERVICE_ACTION_READ32;
		else
			cdb[9] = MEGASAS_SCSI_SERVICE_ACTION_WRITE32;
		cdb[10] = MEGASAS_RD_WR_PROTECT_CHECK_ALL;

		/* LBA */
		cdb[12] = (u8)((start_blk >> 56) & 0xff);
		cdb[13] = (u8)((start_blk >> 48) & 0xff);
		cdb[14] = (u8)((start_blk >> 40) & 0xff);
		cdb[15] = (u8)((start_blk >> 32) & 0xff);
		cdb[16] = (u8)((start_blk >> 24) & 0xff);
		cdb[17] = (u8)((start_blk >> 16) & 0xff);
		cdb[18] = (u8)((start_blk >> 8) & 0xff);
		cdb[19] = (u8)(start_blk & 0xff);

		/* Logical block reference tag */
		io_request->CDB.EEDP32.PrimaryReferenceTag =
			cpu_to_be32(ref_tag);
		io_request->CDB.EEDP32.PrimaryApplicationTagMask = cpu_to_be16(0xffff);
		io_request->IoFlags = cpu_to_le16(32); /* Specify 32-byte cdb */

		/* Transfer length */
		cdb[28] = (u8)((num_blocks >> 24) & 0xff);
		cdb[29] = (u8)((num_blocks >> 16) & 0xff);
		cdb[30] = (u8)((num_blocks >> 8) & 0xff);
		cdb[31] = (u8)(num_blocks & 0xff);

		/* set SCSI IO EEDPFlags */
		if (scp->sc_data_direction == PCI_DMA_FROMDEVICE) {
			io_request->EEDPFlags = cpu_to_le16(
				MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
				MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
				MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP |
				MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG |
				MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
		} else {
			io_request->EEDPFlags = cpu_to_le16(
				MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
				MPI2_SCSIIO_EEDPFLAGS_INSERT_OP);
		}
		io_request->Control |= cpu_to_le32((0x4 << 26));
		io_request->EEDPBlockSize = cpu_to_le32(scp->device->sector_size);
	} else {
		/* Some drives don't support 16/12 byte CDB's, convert to 10 */
		if (((cdb_len == 12) || (cdb_len == 16)) &&
		    (start_blk <= 0xffffffff)) {
			if (cdb_len == 16) {
				opcode = cdb[0] == READ_16 ? READ_10 : WRITE_10;
				flagvals = cdb[1];
				groupnum = cdb[14];
				control = cdb[15];
			} else {
				opcode = cdb[0] == READ_12 ? READ_10 : WRITE_10;
				flagvals = cdb[1];
				groupnum = cdb[10];
				control = cdb[11];
			}

			memset(cdb, 0, sizeof(io_request->CDB.CDB32));

			cdb[0] = opcode;
			cdb[1] = flagvals;
			cdb[6] = groupnum;
			cdb[9] = control;

			/* Transfer length */
			cdb[8] = (u8)(num_blocks & 0xff);
			cdb[7] = (u8)((num_blocks >> 8) & 0xff);

			io_request->IoFlags = cpu_to_le16(10); /* Specify 10-byte cdb */
			cdb_len = 10;
		} else if ((cdb_len < 16) && (start_blk > 0xffffffff)) {
			/* Convert to 16 byte CDB for large LBA's */
			switch (cdb_len) {
			case 6:
				opcode = cdb[0] == READ_6 ? READ_16 : WRITE_16;
				control = cdb[5];
				break;
			case 10:
				opcode =
					cdb[0] == READ_10 ? READ_16 : WRITE_16;
				flagvals = cdb[1];
				groupnum = cdb[6];
				control = cdb[9];
				break;
			case 12:
				opcode =
					cdb[0] == READ_12 ? READ_16 : WRITE_16;
				flagvals = cdb[1];
				groupnum = cdb[10];
				control = cdb[11];
				break;
			}

			memset(cdb, 0, sizeof(io_request->CDB.CDB32));

			cdb[0] = opcode;
			cdb[1] = flagvals;
			cdb[14] = groupnum;
			cdb[15] = control;

			/* Transfer length */
			cdb[13] = (u8)(num_blocks & 0xff);
			cdb[12] = (u8)((num_blocks >> 8) & 0xff);
			cdb[11] = (u8)((num_blocks >> 16) & 0xff);
			cdb[10] = (u8)((num_blocks >> 24) & 0xff);

			io_request->IoFlags = cpu_to_le16(16); /* Specify 16-byte cdb */
			cdb_len = 16;
		}

		/* Normal case, just load LBA here */
		switch (cdb_len) {
		case 6:
		{
			u8 val = cdb[1] & 0xE0;
			cdb[3] = (u8)(start_blk & 0xff);
			cdb[2] = (u8)((start_blk >> 8) & 0xff);
			cdb[1] = val | ((u8)(start_blk >> 16) & 0x1f);
			break;
		}
		case 10:
			cdb[5] = (u8)(start_blk & 0xff);
			cdb[4] = (u8)((start_blk >> 8) & 0xff);
			cdb[3] = (u8)((start_blk >> 16) & 0xff);
			cdb[2] = (u8)((start_blk >> 24) & 0xff);
			break;
		case 12:
			cdb[5] = (u8)(start_blk & 0xff);
			cdb[4] = (u8)((start_blk >> 8) & 0xff);
			cdb[3] = (u8)((start_blk >> 16) & 0xff);
			cdb[2] = (u8)((start_blk >> 24) & 0xff);
			break;
		case 16:
			cdb[9] = (u8)(start_blk & 0xff);
			cdb[8] = (u8)((start_blk >> 8) & 0xff);
			cdb[7] = (u8)((start_blk >> 16) & 0xff);
			cdb[6] = (u8)((start_blk >> 24) & 0xff);
			cdb[5] = (u8)((start_blk >> 32) & 0xff);
			cdb[4] = (u8)((start_blk >> 40) & 0xff);
			cdb[3] = (u8)((start_blk >> 48) & 0xff);
			cdb[2] = (u8)((start_blk >> 56) & 0xff);
			break;
		}
	}
}

/**
 * megasas_build_ldio_fusion - Prepares IOs to devices
 * @instance:	Adapter soft state
 * @scp:	SCSI command
 * @cmd:	Command to be prepared
 *
 * Prepares the io_request and chain elements (sg_frame) for IO
 * The IO can be for PD (Fast Path) or LD
 */
void
megasas_build_ldio_fusion(struct megasas_instance *instance,
			  struct scsi_cmnd *scp,
			  struct megasas_cmd_fusion *cmd)
{
	u8 fp_possible;
	u32 start_lba_lo, start_lba_hi, device_id, datalength = 0;
	struct MPI2_RAID_SCSI_IO_REQUEST *io_request;
	union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
	struct IO_REQUEST_INFO io_info;
	struct fusion_context *fusion;
	struct MR_DRV_RAID_MAP_ALL *local_map_ptr;
	u8 *raidLUN;

	device_id = MEGASAS_DEV_INDEX(scp);

	fusion = instance->ctrl_context;

	io_request = cmd->io_request;
	io_request->RaidContext.VirtualDiskTgtId = cpu_to_le16(device_id);
	io_request->RaidContext.status = 0;
	io_request->RaidContext.exStatus = 0;

	req_desc = (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)cmd->request_desc;

	start_lba_lo = 0;
	start_lba_hi = 0;
	fp_possible = 0;

	/*
	 * 6-byte READ(0x08) or WRITE(0x0A) cdb
	 */
	if (scp->cmd_len == 6) {
		datalength = (u32) scp->cmnd[4];
		start_lba_lo = ((u32) scp->cmnd[1] << 16) |
			((u32) scp->cmnd[2] << 8) | (u32) scp->cmnd[3];

		start_lba_lo &= 0x1FFFFF;
	}

	/*
	 * 10-byte READ(0x28) or WRITE(0x2A) cdb
	 */
	else if (scp->cmd_len == 10) {
		datalength = (u32) scp->cmnd[8] |
			((u32) scp->cmnd[7] << 8);
		start_lba_lo = ((u32) scp->cmnd[2] << 24) |
			((u32) scp->cmnd[3] << 16) |
			((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5];
	}

	/*
	 * 12-byte READ(0xA8) or WRITE(0xAA) cdb
	 */
	else if (scp->cmd_len == 12) {
		datalength = ((u32) scp->cmnd[6] << 24) |
			((u32) scp->cmnd[7] << 16) |
			((u32) scp->cmnd[8] << 8) | (u32) scp->cmnd[9];
		start_lba_lo = ((u32) scp->cmnd[2] << 24) |
			((u32) scp->cmnd[3] << 16) |
			((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5];
	}

	/*
	 * 16-byte READ(0x88) or WRITE(0x8A) cdb
	 */
	else if (scp->cmd_len == 16) {
		datalength = ((u32) scp->cmnd[10] << 24) |
			((u32) scp->cmnd[11] << 16) |
			((u32) scp->cmnd[12] << 8) | (u32) scp->cmnd[13];
		start_lba_lo = ((u32) scp->cmnd[6] << 24) |
			((u32) scp->cmnd[7] << 16) |
			((u32) scp->cmnd[8] << 8) | (u32) scp->cmnd[9];

		start_lba_hi = ((u32) scp->cmnd[2] << 24) |
			((u32) scp->cmnd[3] << 16) |
			((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5];
	}

	memset(&io_info, 0, sizeof(struct IO_REQUEST_INFO));
	io_info.ldStartBlock = ((u64)start_lba_hi << 32) | start_lba_lo;
	io_info.numBlocks = datalength;
	io_info.ldTgtId = device_id;
	io_request->DataLength = cpu_to_le32(scsi_bufflen(scp));

	if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
		io_info.isRead = 1;

	local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];

	if ((MR_TargetIdToLdGet(device_id, local_map_ptr) >=
		instance->fw_supported_vd_count) || (!fusion->fast_path_io)) {
		io_request->RaidContext.regLockFlags = 0;
		fp_possible = 0;
	} else {
		if (MR_BuildRaidContext(instance, &io_info,
					&io_request->RaidContext,
					local_map_ptr, &raidLUN))
			fp_possible = io_info.fpOkForIo;
	}

	/* Use raw_smp_processor_id() for now until cmd->request->cpu is CPU
	   id by default, not CPU group id, otherwise all MSI-X queues won't
	   be utilized */
	cmd->request_desc->SCSIIO.MSIxIndex = instance->msix_vectors ?
		raw_smp_processor_id() % instance->msix_vectors : 0;

	if (fp_possible) {
		megasas_set_pd_lba(io_request, scp->cmd_len, &io_info, scp,
				   local_map_ptr, start_lba_lo);
		io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
		cmd->request_desc->SCSIIO.RequestFlags =
			(MPI2_REQ_DESCRIPT_FLAGS_FP_IO
			 << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
		if (fusion->adapter_type == INVADER_SERIES) {
			if (io_request->RaidContext.regLockFlags ==
			    REGION_TYPE_UNUSED)
				cmd->request_desc->SCSIIO.RequestFlags =
					(MEGASAS_REQ_DESCRIPT_FLAGS_NO_LOCK <<
					MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
			io_request->RaidContext.Type = MPI2_TYPE_CUDA;
			io_request->RaidContext.nseg = 0x1;
			io_request->IoFlags |= cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH);
			io_request->RaidContext.regLockFlags |=
				(MR_RL_FLAGS_GRANT_DESTINATION_CUDA |
				 MR_RL_FLAGS_SEQ_NUM_ENABLE);
		}
		if ((fusion->load_balance_info[device_id].loadBalanceFlag) &&
		    (io_info.isRead)) {
			io_info.devHandle =
				get_updated_dev_handle(instance,
					&fusion->load_balance_info[device_id],
					&io_info);
			scp->SCp.Status |= MEGASAS_LOAD_BALANCE_FLAG;
			cmd->pd_r1_lb = io_info.pd_after_lb;
		} else
			scp->SCp.Status &= ~MEGASAS_LOAD_BALANCE_FLAG;

		if ((raidLUN[0] == 1) &&
			(local_map_ptr->raidMap.devHndlInfo[io_info.pd_after_lb].validHandles > 1)) {
			instance->dev_handle = !(instance->dev_handle);
			io_info.devHandle =
				local_map_ptr->raidMap.devHndlInfo[io_info.pd_after_lb].devHandle[instance->dev_handle];
		}

		cmd->request_desc->SCSIIO.DevHandle = io_info.devHandle;
		io_request->DevHandle = io_info.devHandle;
		/* populate the LUN field */
		memcpy(io_request->LUN, raidLUN, 8);
	} else {
		io_request->RaidContext.timeoutValue =
			cpu_to_le16(local_map_ptr->raidMap.fpPdIoTimeoutSec);
		cmd->request_desc->SCSIIO.RequestFlags =
			(MEGASAS_REQ_DESCRIPT_FLAGS_LD_IO
			 << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
		if (fusion->adapter_type == INVADER_SERIES) {
			if (io_info.do_fp_rlbypass ||
			(io_request->RaidContext.regLockFlags == REGION_TYPE_UNUSED))
				cmd->request_desc->SCSIIO.RequestFlags =
					(MEGASAS_REQ_DESCRIPT_FLAGS_NO_LOCK <<
					MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
			io_request->RaidContext.Type = MPI2_TYPE_CUDA;
			io_request->RaidContext.regLockFlags |=
				(MR_RL_FLAGS_GRANT_DESTINATION_CPU0 |
				 MR_RL_FLAGS_SEQ_NUM_ENABLE);
			io_request->RaidContext.nseg = 0x1;
		}
		io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST;
		io_request->DevHandle = cpu_to_le16(device_id);
	} /* Not FP */
}

/**
 * megasas_build_ld_nonrw_fusion - prepares non rw ios for virtual disk
 * @instance:	Adapter soft state
 * @scp:	SCSI command
 * @cmd:	Command to be prepared
 *
 * Prepares the io_request frame for non-rw io cmds for vd.
1870 */ 1871 static void megasas_build_ld_nonrw_fusion(struct megasas_instance *instance, 1872 struct scsi_cmnd *scmd, struct megasas_cmd_fusion *cmd) 1873 { 1874 u32 device_id; 1875 struct MPI2_RAID_SCSI_IO_REQUEST *io_request; 1876 u16 pd_index = 0; 1877 struct MR_DRV_RAID_MAP_ALL *local_map_ptr; 1878 struct fusion_context *fusion = instance->ctrl_context; 1879 u8 span, physArm; 1880 __le16 devHandle; 1881 u32 ld, arRef, pd; 1882 struct MR_LD_RAID *raid; 1883 struct RAID_CONTEXT *pRAID_Context; 1884 u8 fp_possible = 1; 1885 1886 io_request = cmd->io_request; 1887 device_id = MEGASAS_DEV_INDEX(scmd); 1888 pd_index = MEGASAS_PD_INDEX(scmd); 1889 local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)]; 1890 io_request->DataLength = cpu_to_le32(scsi_bufflen(scmd)); 1891 /* get RAID_Context pointer */ 1892 pRAID_Context = &io_request->RaidContext; 1893 /* Check with FW team */ 1894 pRAID_Context->VirtualDiskTgtId = cpu_to_le16(device_id); 1895 pRAID_Context->regLockRowLBA = 0; 1896 pRAID_Context->regLockLength = 0; 1897 1898 if (fusion->fast_path_io && ( 1899 device_id < instance->fw_supported_vd_count)) { 1900 1901 ld = MR_TargetIdToLdGet(device_id, local_map_ptr); 1902 if (ld >= instance->fw_supported_vd_count) 1903 fp_possible = 0; 1904 1905 raid = MR_LdRaidGet(ld, local_map_ptr); 1906 if (!(raid->capability.fpNonRWCapable)) 1907 fp_possible = 0; 1908 } else 1909 fp_possible = 0; 1910 1911 if (!fp_possible) { 1912 io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST; 1913 io_request->DevHandle = cpu_to_le16(device_id); 1914 io_request->LUN[1] = scmd->device->lun; 1915 pRAID_Context->timeoutValue = 1916 cpu_to_le16 (scmd->request->timeout / HZ); 1917 cmd->request_desc->SCSIIO.RequestFlags = 1918 (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << 1919 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 1920 } else { 1921 1922 /* set RAID context values */ 1923 pRAID_Context->configSeqNum = raid->seqNum; 1924 pRAID_Context->regLockFlags = REGION_TYPE_SHARED_READ; 1925 pRAID_Context->timeoutValue = cpu_to_le16(raid->fpIoTimeoutForLd); 1926 1927 /* get the DevHandle for the PD (since this is 1928 fpNonRWCapable, this is a single disk RAID0) */ 1929 span = physArm = 0; 1930 arRef = MR_LdSpanArrayGet(ld, span, local_map_ptr); 1931 pd = MR_ArPdGet(arRef, physArm, local_map_ptr); 1932 devHandle = MR_PdDevHandleGet(pd, local_map_ptr); 1933 1934 /* build request descriptor */ 1935 cmd->request_desc->SCSIIO.RequestFlags = 1936 (MPI2_REQ_DESCRIPT_FLAGS_FP_IO << 1937 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 1938 cmd->request_desc->SCSIIO.DevHandle = devHandle; 1939 1940 /* populate the LUN field */ 1941 memcpy(io_request->LUN, raid->LUN, 8); 1942 1943 /* build the raidScsiIO structure */ 1944 io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST; 1945 io_request->DevHandle = devHandle; 1946 } 1947 } 1948 1949 /** 1950 * megasas_build_syspd_fusion - prepares rw/non-rw ios for syspd 1951 * @instance: Adapter soft state 1952 * @scp: SCSI command 1953 * @cmd: Command to be prepared 1954 * @fp_possible: parameter to detect fast path or firmware path io. 
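 *
 * The caller (megasas_build_io_fusion) decides @fp_possible for system PDs
 * roughly as sketched below: non read/write commands are forced down the
 * firmware path when secure JBOD is supported, everything else is allowed
 * to attempt the fast path:
 *
 *	fp_possible = !(instance->secure_jbod_support &&
 *			cmd_type == NON_READ_WRITE_SYSPDIO);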
1955 * 1956 * Prepares the io_request frame for rw/non-rw io cmds for syspds 1957 */ 1958 static void 1959 megasas_build_syspd_fusion(struct megasas_instance *instance, 1960 struct scsi_cmnd *scmd, struct megasas_cmd_fusion *cmd, u8 fp_possible) 1961 { 1962 u32 device_id; 1963 struct MPI2_RAID_SCSI_IO_REQUEST *io_request; 1964 u16 pd_index = 0; 1965 u16 os_timeout_value; 1966 u16 timeout_limit; 1967 struct MR_DRV_RAID_MAP_ALL *local_map_ptr; 1968 struct RAID_CONTEXT *pRAID_Context; 1969 struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync; 1970 struct fusion_context *fusion = instance->ctrl_context; 1971 pd_sync = (void *)fusion->pd_seq_sync[(instance->pd_seq_map_id - 1) & 1]; 1972 1973 device_id = MEGASAS_DEV_INDEX(scmd); 1974 pd_index = MEGASAS_PD_INDEX(scmd); 1975 os_timeout_value = scmd->request->timeout / HZ; 1976 1977 io_request = cmd->io_request; 1978 /* get RAID_Context pointer */ 1979 pRAID_Context = &io_request->RaidContext; 1980 pRAID_Context->regLockFlags = 0; 1981 pRAID_Context->regLockRowLBA = 0; 1982 pRAID_Context->regLockLength = 0; 1983 io_request->DataLength = cpu_to_le32(scsi_bufflen(scmd)); 1984 io_request->LUN[1] = scmd->device->lun; 1985 pRAID_Context->RAIDFlags = MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD 1986 << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT; 1987 1988 /* If FW supports PD sequence number */ 1989 if (instance->use_seqnum_jbod_fp && 1990 instance->pd_list[pd_index].driveType == TYPE_DISK) { 1991 /* TgtId must be incremented by 255 as jbod seq number is index 1992 * below raid map 1993 */ 1994 pRAID_Context->VirtualDiskTgtId = 1995 cpu_to_le16(device_id + (MAX_PHYSICAL_DEVICES - 1)); 1996 pRAID_Context->configSeqNum = pd_sync->seq[pd_index].seqNum; 1997 io_request->DevHandle = pd_sync->seq[pd_index].devHandle; 1998 pRAID_Context->regLockFlags |= 1999 (MR_RL_FLAGS_SEQ_NUM_ENABLE|MR_RL_FLAGS_GRANT_DESTINATION_CUDA); 2000 } else if (fusion->fast_path_io) { 2001 pRAID_Context->VirtualDiskTgtId = cpu_to_le16(device_id); 2002 pRAID_Context->configSeqNum = 0; 2003 local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)]; 2004 io_request->DevHandle = 2005 local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl; 2006 } else { 2007 /* Want to send all IO via FW path */ 2008 pRAID_Context->VirtualDiskTgtId = cpu_to_le16(device_id); 2009 pRAID_Context->configSeqNum = 0; 2010 io_request->DevHandle = cpu_to_le16(0xFFFF); 2011 } 2012 2013 cmd->request_desc->SCSIIO.DevHandle = io_request->DevHandle; 2014 cmd->request_desc->SCSIIO.MSIxIndex = 2015 instance->msix_vectors ? 2016 (raw_smp_processor_id() % instance->msix_vectors) : 0; 2017 2018 2019 if (!fp_possible) { 2020 /* system pd firmware path */ 2021 io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST; 2022 cmd->request_desc->SCSIIO.RequestFlags = 2023 (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << 2024 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 2025 pRAID_Context->timeoutValue = cpu_to_le16(os_timeout_value); 2026 pRAID_Context->VirtualDiskTgtId = cpu_to_le16(device_id); 2027 } else { 2028 /* system pd Fast Path */ 2029 io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST; 2030 timeout_limit = (scmd->device->type == TYPE_DISK) ? 2031 255 : 0xFFFF; 2032 pRAID_Context->timeoutValue = 2033 cpu_to_le16((os_timeout_value > timeout_limit) ? 
2034 timeout_limit : os_timeout_value); 2035 if (fusion->adapter_type == INVADER_SERIES) { 2036 pRAID_Context->Type = MPI2_TYPE_CUDA; 2037 pRAID_Context->nseg = 0x1; 2038 io_request->IoFlags |= 2039 cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH); 2040 } 2041 cmd->request_desc->SCSIIO.RequestFlags = 2042 (MPI2_REQ_DESCRIPT_FLAGS_FP_IO << 2043 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 2044 } 2045 } 2046 2047 /** 2048 * megasas_build_io_fusion - Prepares IOs to devices 2049 * @instance: Adapter soft state 2050 * @scp: SCSI command 2051 * @cmd: Command to be prepared 2052 * 2053 * Invokes helper functions to prepare request frames 2054 * and sets flags appropriate for IO/Non-IO cmd 2055 */ 2056 int 2057 megasas_build_io_fusion(struct megasas_instance *instance, 2058 struct scsi_cmnd *scp, 2059 struct megasas_cmd_fusion *cmd) 2060 { 2061 u16 sge_count; 2062 u8 cmd_type; 2063 struct MPI2_RAID_SCSI_IO_REQUEST *io_request = cmd->io_request; 2064 2065 /* Zero out some fields so they don't get reused */ 2066 memset(io_request->LUN, 0x0, 8); 2067 io_request->CDB.EEDP32.PrimaryReferenceTag = 0; 2068 io_request->CDB.EEDP32.PrimaryApplicationTagMask = 0; 2069 io_request->EEDPFlags = 0; 2070 io_request->Control = 0; 2071 io_request->EEDPBlockSize = 0; 2072 io_request->ChainOffset = 0; 2073 io_request->RaidContext.RAIDFlags = 0; 2074 io_request->RaidContext.Type = 0; 2075 io_request->RaidContext.nseg = 0; 2076 2077 memcpy(io_request->CDB.CDB32, scp->cmnd, scp->cmd_len); 2078 /* 2079 * Just the CDB length,rest of the Flags are zero 2080 * This will be modified for FP in build_ldio_fusion 2081 */ 2082 io_request->IoFlags = cpu_to_le16(scp->cmd_len); 2083 2084 switch (cmd_type = megasas_cmd_type(scp)) { 2085 case READ_WRITE_LDIO: 2086 megasas_build_ldio_fusion(instance, scp, cmd); 2087 break; 2088 case NON_READ_WRITE_LDIO: 2089 megasas_build_ld_nonrw_fusion(instance, scp, cmd); 2090 break; 2091 case READ_WRITE_SYSPDIO: 2092 case NON_READ_WRITE_SYSPDIO: 2093 if (instance->secure_jbod_support && 2094 (cmd_type == NON_READ_WRITE_SYSPDIO)) 2095 megasas_build_syspd_fusion(instance, scp, cmd, 0); 2096 else 2097 megasas_build_syspd_fusion(instance, scp, cmd, 1); 2098 break; 2099 default: 2100 break; 2101 } 2102 2103 /* 2104 * Construct SGL 2105 */ 2106 2107 sge_count = 2108 megasas_make_sgl_fusion(instance, scp, 2109 (struct MPI25_IEEE_SGE_CHAIN64 *) 2110 &io_request->SGL, cmd); 2111 2112 if (sge_count > instance->max_num_sge) { 2113 dev_err(&instance->pdev->dev, "Error. sge_count (0x%x) exceeds " 2114 "max (0x%x) allowed\n", sge_count, 2115 instance->max_num_sge); 2116 return 1; 2117 } 2118 2119 /* numSGE store lower 8 bit of sge_count. 
2120 * numSGEExt store higher 8 bit of sge_count 2121 */ 2122 io_request->RaidContext.numSGE = sge_count; 2123 io_request->RaidContext.numSGEExt = (u8)(sge_count >> 8); 2124 2125 io_request->SGLFlags = cpu_to_le16(MPI2_SGE_FLAGS_64_BIT_ADDRESSING); 2126 2127 if (scp->sc_data_direction == PCI_DMA_TODEVICE) 2128 io_request->Control |= cpu_to_le32(MPI2_SCSIIO_CONTROL_WRITE); 2129 else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE) 2130 io_request->Control |= cpu_to_le32(MPI2_SCSIIO_CONTROL_READ); 2131 2132 io_request->SGLOffset0 = 2133 offsetof(struct MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4; 2134 2135 io_request->SenseBufferLowAddress = cpu_to_le32(cmd->sense_phys_addr); 2136 io_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE; 2137 2138 cmd->scmd = scp; 2139 scp->SCp.ptr = (char *)cmd; 2140 2141 return 0; 2142 } 2143 2144 union MEGASAS_REQUEST_DESCRIPTOR_UNION * 2145 megasas_get_request_descriptor(struct megasas_instance *instance, u16 index) 2146 { 2147 u8 *p; 2148 struct fusion_context *fusion; 2149 2150 if (index >= instance->max_fw_cmds) { 2151 dev_err(&instance->pdev->dev, "Invalid SMID (0x%x)request for " 2152 "descriptor for scsi%d\n", index, 2153 instance->host->host_no); 2154 return NULL; 2155 } 2156 fusion = instance->ctrl_context; 2157 p = fusion->req_frames_desc 2158 +sizeof(union MEGASAS_REQUEST_DESCRIPTOR_UNION) *index; 2159 2160 return (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)p; 2161 } 2162 2163 /** 2164 * megasas_build_and_issue_cmd_fusion -Main routine for building and 2165 * issuing non IOCTL cmd 2166 * @instance: Adapter soft state 2167 * @scmd: pointer to scsi cmd from OS 2168 */ 2169 static u32 2170 megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance, 2171 struct scsi_cmnd *scmd) 2172 { 2173 struct megasas_cmd_fusion *cmd; 2174 union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc; 2175 u32 index; 2176 struct fusion_context *fusion; 2177 2178 fusion = instance->ctrl_context; 2179 2180 if ((megasas_cmd_type(scmd) == READ_WRITE_LDIO) && 2181 instance->ldio_threshold && 2182 (atomic_inc_return(&instance->ldio_outstanding) > 2183 instance->ldio_threshold)) { 2184 atomic_dec(&instance->ldio_outstanding); 2185 return SCSI_MLQUEUE_DEVICE_BUSY; 2186 } 2187 2188 cmd = megasas_get_cmd_fusion(instance, scmd->request->tag); 2189 2190 index = cmd->index; 2191 2192 req_desc = megasas_get_request_descriptor(instance, index-1); 2193 if (!req_desc) 2194 return SCSI_MLQUEUE_HOST_BUSY; 2195 2196 req_desc->Words = 0; 2197 cmd->request_desc = req_desc; 2198 2199 if (megasas_build_io_fusion(instance, scmd, cmd)) { 2200 megasas_return_cmd_fusion(instance, cmd); 2201 dev_err(&instance->pdev->dev, "Error building command\n"); 2202 cmd->request_desc = NULL; 2203 return SCSI_MLQUEUE_HOST_BUSY; 2204 } 2205 2206 req_desc = cmd->request_desc; 2207 req_desc->SCSIIO.SMID = cpu_to_le16(index); 2208 2209 if (cmd->io_request->ChainOffset != 0 && 2210 cmd->io_request->ChainOffset != 0xF) 2211 dev_err(&instance->pdev->dev, "The chain offset value is not " 2212 "correct : %x\n", cmd->io_request->ChainOffset); 2213 2214 /* 2215 * Issue the command to the FW 2216 */ 2217 atomic_inc(&instance->fw_outstanding); 2218 2219 megasas_fire_cmd_fusion(instance, req_desc); 2220 2221 return 0; 2222 } 2223 2224 /** 2225 * complete_cmd_fusion - Completes command 2226 * @instance: Adapter soft state 2227 * Completes all commands that is in reply descriptor queue 2228 */ 2229 int 2230 complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex) 2231 { 2232 union MPI2_REPLY_DESCRIPTORS_UNION *desc; 2233 
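	/*
	 * In outline (summarizing the loop below): reply descriptors for this
	 * MSI-x queue are consumed starting at last_reply_idx[MSIxIndex]; a
	 * descriptor whose Words are still ULLONG_MAX is unused and ends the
	 * walk.  The SMID in each completed descriptor indexes
	 * cmd_list[smid - 1].  After every THRESHOLD_REPLY_COUNT completions,
	 * and once more at the end, the reply post host index register is
	 * updated, e.g. on non-Invader parts:
	 *
	 *	writel((MSIxIndex << 24) | fusion->last_reply_idx[MSIxIndex],
	 *	       instance->reply_post_host_index_addr[0]);
	 */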
struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *reply_desc; 2234 struct MPI2_RAID_SCSI_IO_REQUEST *scsi_io_req; 2235 struct fusion_context *fusion; 2236 struct megasas_cmd *cmd_mfi; 2237 struct megasas_cmd_fusion *cmd_fusion; 2238 u16 smid, num_completed; 2239 u8 reply_descript_type; 2240 u32 status, extStatus, device_id; 2241 union desc_value d_val; 2242 struct LD_LOAD_BALANCE_INFO *lbinfo; 2243 int threshold_reply_count = 0; 2244 struct scsi_cmnd *scmd_local = NULL; 2245 struct MR_TASK_MANAGE_REQUEST *mr_tm_req; 2246 struct MPI2_SCSI_TASK_MANAGE_REQUEST *mpi_tm_req; 2247 2248 fusion = instance->ctrl_context; 2249 2250 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) 2251 return IRQ_HANDLED; 2252 2253 desc = fusion->reply_frames_desc[MSIxIndex] + 2254 fusion->last_reply_idx[MSIxIndex]; 2255 2256 reply_desc = (struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc; 2257 2258 d_val.word = desc->Words; 2259 2260 reply_descript_type = reply_desc->ReplyFlags & 2261 MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK; 2262 2263 if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) 2264 return IRQ_NONE; 2265 2266 num_completed = 0; 2267 2268 while (d_val.u.low != cpu_to_le32(UINT_MAX) && 2269 d_val.u.high != cpu_to_le32(UINT_MAX)) { 2270 smid = le16_to_cpu(reply_desc->SMID); 2271 2272 cmd_fusion = fusion->cmd_list[smid - 1]; 2273 2274 scsi_io_req = 2275 (struct MPI2_RAID_SCSI_IO_REQUEST *) 2276 cmd_fusion->io_request; 2277 2278 if (cmd_fusion->scmd) 2279 cmd_fusion->scmd->SCp.ptr = NULL; 2280 2281 scmd_local = cmd_fusion->scmd; 2282 status = scsi_io_req->RaidContext.status; 2283 extStatus = scsi_io_req->RaidContext.exStatus; 2284 2285 switch (scsi_io_req->Function) { 2286 case MPI2_FUNCTION_SCSI_TASK_MGMT: 2287 mr_tm_req = (struct MR_TASK_MANAGE_REQUEST *) 2288 cmd_fusion->io_request; 2289 mpi_tm_req = (struct MPI2_SCSI_TASK_MANAGE_REQUEST *) 2290 &mr_tm_req->TmRequest; 2291 dev_dbg(&instance->pdev->dev, "TM completion:" 2292 "type: 0x%x TaskMID: 0x%x\n", 2293 mpi_tm_req->TaskType, mpi_tm_req->TaskMID); 2294 complete(&cmd_fusion->done); 2295 break; 2296 case MPI2_FUNCTION_SCSI_IO_REQUEST: /*Fast Path IO.*/ 2297 /* Update load balancing info */ 2298 device_id = MEGASAS_DEV_INDEX(scmd_local); 2299 lbinfo = &fusion->load_balance_info[device_id]; 2300 if (cmd_fusion->scmd->SCp.Status & 2301 MEGASAS_LOAD_BALANCE_FLAG) { 2302 atomic_dec(&lbinfo->scsi_pending_cmds[cmd_fusion->pd_r1_lb]); 2303 cmd_fusion->scmd->SCp.Status &= 2304 ~MEGASAS_LOAD_BALANCE_FLAG; 2305 } 2306 if (reply_descript_type == 2307 MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS) { 2308 if (megasas_dbg_lvl == 5) 2309 dev_err(&instance->pdev->dev, "\nFAST Path " 2310 "IO Success\n"); 2311 } 2312 /* Fall thru and complete IO */ 2313 case MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST: /* LD-IO Path */ 2314 /* Map the FW Cmd Status */ 2315 map_cmd_status(cmd_fusion, status, extStatus); 2316 scsi_io_req->RaidContext.status = 0; 2317 scsi_io_req->RaidContext.exStatus = 0; 2318 if (megasas_cmd_type(scmd_local) == READ_WRITE_LDIO) 2319 atomic_dec(&instance->ldio_outstanding); 2320 megasas_return_cmd_fusion(instance, cmd_fusion); 2321 scsi_dma_unmap(scmd_local); 2322 scmd_local->scsi_done(scmd_local); 2323 atomic_dec(&instance->fw_outstanding); 2324 2325 break; 2326 case MEGASAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST: /*MFI command */ 2327 cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx]; 2328 2329 /* Poll mode. Dummy free. 2330 * In case of Interrupt mode, caller has reverse check. 
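 * (Polled here means the DCMD was issued with
 * MFI_FRAME_DONT_POST_IN_REPLY_QUEUE set, so the issuer is expected to
 * poll the MFI frame status itself, e.g. via wait_and_poll(); this path
 * only returns the frame to the pool.)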
2331 */ 2332 if (cmd_mfi->flags & DRV_DCMD_POLLED_MODE) { 2333 cmd_mfi->flags &= ~DRV_DCMD_POLLED_MODE; 2334 megasas_return_cmd(instance, cmd_mfi); 2335 } else 2336 megasas_complete_cmd(instance, cmd_mfi, DID_OK); 2337 break; 2338 } 2339 2340 fusion->last_reply_idx[MSIxIndex]++; 2341 if (fusion->last_reply_idx[MSIxIndex] >= 2342 fusion->reply_q_depth) 2343 fusion->last_reply_idx[MSIxIndex] = 0; 2344 2345 desc->Words = cpu_to_le64(ULLONG_MAX); 2346 num_completed++; 2347 threshold_reply_count++; 2348 2349 /* Get the next reply descriptor */ 2350 if (!fusion->last_reply_idx[MSIxIndex]) 2351 desc = fusion->reply_frames_desc[MSIxIndex]; 2352 else 2353 desc++; 2354 2355 reply_desc = 2356 (struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc; 2357 2358 d_val.word = desc->Words; 2359 2360 reply_descript_type = reply_desc->ReplyFlags & 2361 MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK; 2362 2363 if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) 2364 break; 2365 /* 2366 * Write to reply post host index register after completing threshold 2367 * number of reply counts and still there are more replies in reply queue 2368 * pending to be completed 2369 */ 2370 if (threshold_reply_count >= THRESHOLD_REPLY_COUNT) { 2371 if (fusion->adapter_type == INVADER_SERIES) 2372 writel(((MSIxIndex & 0x7) << 24) | 2373 fusion->last_reply_idx[MSIxIndex], 2374 instance->reply_post_host_index_addr[MSIxIndex/8]); 2375 else 2376 writel((MSIxIndex << 24) | 2377 fusion->last_reply_idx[MSIxIndex], 2378 instance->reply_post_host_index_addr[0]); 2379 threshold_reply_count = 0; 2380 } 2381 } 2382 2383 if (!num_completed) 2384 return IRQ_NONE; 2385 2386 wmb(); 2387 if (fusion->adapter_type == INVADER_SERIES) 2388 writel(((MSIxIndex & 0x7) << 24) | 2389 fusion->last_reply_idx[MSIxIndex], 2390 instance->reply_post_host_index_addr[MSIxIndex/8]); 2391 else 2392 writel((MSIxIndex << 24) | 2393 fusion->last_reply_idx[MSIxIndex], 2394 instance->reply_post_host_index_addr[0]); 2395 megasas_check_and_restore_queue_depth(instance); 2396 return IRQ_HANDLED; 2397 } 2398 2399 /** 2400 * megasas_complete_cmd_dpc_fusion - Completes command 2401 * @instance: Adapter soft state 2402 * 2403 * Tasklet to complete cmds 2404 */ 2405 void 2406 megasas_complete_cmd_dpc_fusion(unsigned long instance_addr) 2407 { 2408 struct megasas_instance *instance = 2409 (struct megasas_instance *)instance_addr; 2410 unsigned long flags; 2411 u32 count, MSIxIndex; 2412 2413 count = instance->msix_vectors > 0 ? 
instance->msix_vectors : 1;
2414
2415 /* If we have already declared adapter dead, do not complete cmds */
2416 spin_lock_irqsave(&instance->hba_lock, flags);
2417 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
2418 spin_unlock_irqrestore(&instance->hba_lock, flags);
2419 return;
2420 }
2421 spin_unlock_irqrestore(&instance->hba_lock, flags);
2422
2423 for (MSIxIndex = 0 ; MSIxIndex < count; MSIxIndex++)
2424 complete_cmd_fusion(instance, MSIxIndex);
2425 }
2426
2427 /**
2428 * megasas_isr_fusion - isr entry point
2429 */
2430 irqreturn_t megasas_isr_fusion(int irq, void *devp)
2431 {
2432 struct megasas_irq_context *irq_context = devp;
2433 struct megasas_instance *instance = irq_context->instance;
2434 u32 mfiStatus, fw_state, dma_state;
2435
2436 if (instance->mask_interrupts)
2437 return IRQ_NONE;
2438
2439 if (!instance->msix_vectors) {
2440 mfiStatus = instance->instancet->clear_intr(instance->reg_set);
2441 if (!mfiStatus)
2442 return IRQ_NONE;
2443 }
2444
2445 /* If we are resetting, bail */
2446 if (test_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags)) {
2447 instance->instancet->clear_intr(instance->reg_set);
2448 return IRQ_HANDLED;
2449 }
2450
2451 if (!complete_cmd_fusion(instance, irq_context->MSIxIndex)) {
2452 instance->instancet->clear_intr(instance->reg_set);
2453 /* If we didn't complete any commands, check for FW fault */
2454 fw_state = instance->instancet->read_fw_status_reg(
2455 instance->reg_set) & MFI_STATE_MASK;
2456 dma_state = instance->instancet->read_fw_status_reg
2457 (instance->reg_set) & MFI_STATE_DMADONE;
2458 if (instance->crash_dump_drv_support &&
2459 instance->crash_dump_app_support) {
2460 /* Start collecting crash, if DMA bit is done */
2461 if ((fw_state == MFI_STATE_FAULT) && dma_state)
2462 schedule_work(&instance->crash_init);
2463 else if (fw_state == MFI_STATE_FAULT)
2464 schedule_work(&instance->work_init);
2465 } else if (fw_state == MFI_STATE_FAULT) {
2466 dev_warn(&instance->pdev->dev, "Iop2SysDoorbellInt "
2467 "for scsi%d\n", instance->host->host_no);
2468 schedule_work(&instance->work_init);
2469 }
2470 }
2471
2472 return IRQ_HANDLED;
2473 }
2474
2475 /**
2476 * build_mpt_mfi_pass_thru - builds a cmd for MFI Pass thru
2477 * @instance: Adapter soft state
2478 * @mfi_cmd: megasas_cmd pointer
2479 *
2480 */
2481 u8
2482 build_mpt_mfi_pass_thru(struct megasas_instance *instance,
2483 struct megasas_cmd *mfi_cmd)
2484 {
2485 struct MPI25_IEEE_SGE_CHAIN64 *mpi25_ieee_chain;
2486 struct MPI2_RAID_SCSI_IO_REQUEST *io_req;
2487 struct megasas_cmd_fusion *cmd;
2488 struct fusion_context *fusion;
2489 struct megasas_header *frame_hdr = &mfi_cmd->frame->hdr;
2490
2491 fusion = instance->ctrl_context;
2492
2493 cmd = megasas_get_cmd_fusion(instance,
2494 instance->max_scsi_cmds + mfi_cmd->index);
2495
2496 /* Save the smid. To be used for returning the cmd */
2497 mfi_cmd->context.smid = cmd->index;
2498
2499 /*
2500 * For cmds where the flag is set, store the flag and check
2501 * on completion.
For cmds with this flag, don't call 2502 * megasas_complete_cmd 2503 */ 2504 2505 if (frame_hdr->flags & cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE)) 2506 mfi_cmd->flags |= DRV_DCMD_POLLED_MODE; 2507 2508 io_req = cmd->io_request; 2509 2510 if (fusion->adapter_type == INVADER_SERIES) { 2511 struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr_end = 2512 (struct MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL; 2513 sgl_ptr_end += fusion->max_sge_in_main_msg - 1; 2514 sgl_ptr_end->Flags = 0; 2515 } 2516 2517 mpi25_ieee_chain = 2518 (struct MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL.IeeeChain; 2519 2520 io_req->Function = MEGASAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST; 2521 io_req->SGLOffset0 = offsetof(struct MPI2_RAID_SCSI_IO_REQUEST, 2522 SGL) / 4; 2523 io_req->ChainOffset = fusion->chain_offset_mfi_pthru; 2524 2525 mpi25_ieee_chain->Address = cpu_to_le64(mfi_cmd->frame_phys_addr); 2526 2527 mpi25_ieee_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT | 2528 MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR; 2529 2530 mpi25_ieee_chain->Length = cpu_to_le32(instance->max_chain_frame_sz); 2531 2532 return 0; 2533 } 2534 2535 /** 2536 * build_mpt_cmd - Calls helper function to build a cmd MFI Pass thru cmd 2537 * @instance: Adapter soft state 2538 * @cmd: mfi cmd to build 2539 * 2540 */ 2541 union MEGASAS_REQUEST_DESCRIPTOR_UNION * 2542 build_mpt_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd) 2543 { 2544 union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc; 2545 u16 index; 2546 2547 if (build_mpt_mfi_pass_thru(instance, cmd)) { 2548 dev_err(&instance->pdev->dev, "Couldn't build MFI pass thru cmd\n"); 2549 return NULL; 2550 } 2551 2552 index = cmd->context.smid; 2553 2554 req_desc = megasas_get_request_descriptor(instance, index - 1); 2555 2556 if (!req_desc) 2557 return NULL; 2558 2559 req_desc->Words = 0; 2560 req_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << 2561 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 2562 2563 req_desc->SCSIIO.SMID = cpu_to_le16(index); 2564 2565 return req_desc; 2566 } 2567 2568 /** 2569 * megasas_issue_dcmd_fusion - Issues a MFI Pass thru cmd 2570 * @instance: Adapter soft state 2571 * @cmd: mfi cmd pointer 2572 * 2573 */ 2574 int 2575 megasas_issue_dcmd_fusion(struct megasas_instance *instance, 2576 struct megasas_cmd *cmd) 2577 { 2578 union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc; 2579 2580 req_desc = build_mpt_cmd(instance, cmd); 2581 if (!req_desc) { 2582 dev_info(&instance->pdev->dev, "Failed from %s %d\n", 2583 __func__, __LINE__); 2584 return DCMD_NOT_FIRED; 2585 } 2586 2587 megasas_fire_cmd_fusion(instance, req_desc); 2588 return DCMD_SUCCESS; 2589 } 2590 2591 /** 2592 * megasas_release_fusion - Reverses the FW initialization 2593 * @instance: Adapter soft state 2594 */ 2595 void 2596 megasas_release_fusion(struct megasas_instance *instance) 2597 { 2598 megasas_free_cmds(instance); 2599 megasas_free_cmds_fusion(instance); 2600 2601 iounmap(instance->reg_set); 2602 2603 pci_release_selected_regions(instance->pdev, instance->bar); 2604 } 2605 2606 /** 2607 * megasas_read_fw_status_reg_fusion - returns the current FW status value 2608 * @regs: MFI register set 2609 */ 2610 static u32 2611 megasas_read_fw_status_reg_fusion(struct megasas_register_set __iomem *regs) 2612 { 2613 return readl(&(regs)->outbound_scratch_pad); 2614 } 2615 2616 /** 2617 * megasas_alloc_host_crash_buffer - Host buffers for Crash dump collection from Firmware 2618 * @instance: Controller's soft instance 2619 * return: Number of allocated host crash buffers 2620 */ 2621 static void 2622 
megasas_alloc_host_crash_buffer(struct megasas_instance *instance) 2623 { 2624 unsigned int i; 2625 2626 instance->crash_buf_pages = get_order(CRASH_DMA_BUF_SIZE); 2627 for (i = 0; i < MAX_CRASH_DUMP_SIZE; i++) { 2628 instance->crash_buf[i] = (void *)__get_free_pages(GFP_KERNEL, 2629 instance->crash_buf_pages); 2630 if (!instance->crash_buf[i]) { 2631 dev_info(&instance->pdev->dev, "Firmware crash dump " 2632 "memory allocation failed at index %d\n", i); 2633 break; 2634 } 2635 memset(instance->crash_buf[i], 0, 2636 ((1 << PAGE_SHIFT) << instance->crash_buf_pages)); 2637 } 2638 instance->drv_buf_alloc = i; 2639 } 2640 2641 /** 2642 * megasas_free_host_crash_buffer - Host buffers for Crash dump collection from Firmware 2643 * @instance: Controller's soft instance 2644 */ 2645 void 2646 megasas_free_host_crash_buffer(struct megasas_instance *instance) 2647 { 2648 unsigned int i 2649 ; 2650 for (i = 0; i < instance->drv_buf_alloc; i++) { 2651 if (instance->crash_buf[i]) 2652 free_pages((ulong)instance->crash_buf[i], 2653 instance->crash_buf_pages); 2654 } 2655 instance->drv_buf_index = 0; 2656 instance->drv_buf_alloc = 0; 2657 instance->fw_crash_state = UNAVAILABLE; 2658 instance->fw_crash_buffer_size = 0; 2659 } 2660 2661 /** 2662 * megasas_adp_reset_fusion - For controller reset 2663 * @regs: MFI register set 2664 */ 2665 static int 2666 megasas_adp_reset_fusion(struct megasas_instance *instance, 2667 struct megasas_register_set __iomem *regs) 2668 { 2669 u32 host_diag, abs_state, retry; 2670 2671 /* Now try to reset the chip */ 2672 writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &instance->reg_set->fusion_seq_offset); 2673 writel(MPI2_WRSEQ_1ST_KEY_VALUE, &instance->reg_set->fusion_seq_offset); 2674 writel(MPI2_WRSEQ_2ND_KEY_VALUE, &instance->reg_set->fusion_seq_offset); 2675 writel(MPI2_WRSEQ_3RD_KEY_VALUE, &instance->reg_set->fusion_seq_offset); 2676 writel(MPI2_WRSEQ_4TH_KEY_VALUE, &instance->reg_set->fusion_seq_offset); 2677 writel(MPI2_WRSEQ_5TH_KEY_VALUE, &instance->reg_set->fusion_seq_offset); 2678 writel(MPI2_WRSEQ_6TH_KEY_VALUE, &instance->reg_set->fusion_seq_offset); 2679 2680 /* Check that the diag write enable (DRWE) bit is on */ 2681 host_diag = readl(&instance->reg_set->fusion_host_diag); 2682 retry = 0; 2683 while (!(host_diag & HOST_DIAG_WRITE_ENABLE)) { 2684 msleep(100); 2685 host_diag = readl(&instance->reg_set->fusion_host_diag); 2686 if (retry++ == 100) { 2687 dev_warn(&instance->pdev->dev, 2688 "Host diag unlock failed from %s %d\n", 2689 __func__, __LINE__); 2690 break; 2691 } 2692 } 2693 if (!(host_diag & HOST_DIAG_WRITE_ENABLE)) 2694 return -1; 2695 2696 /* Send chip reset command */ 2697 writel(host_diag | HOST_DIAG_RESET_ADAPTER, 2698 &instance->reg_set->fusion_host_diag); 2699 msleep(3000); 2700 2701 /* Make sure reset adapter bit is cleared */ 2702 host_diag = readl(&instance->reg_set->fusion_host_diag); 2703 retry = 0; 2704 while (host_diag & HOST_DIAG_RESET_ADAPTER) { 2705 msleep(100); 2706 host_diag = readl(&instance->reg_set->fusion_host_diag); 2707 if (retry++ == 1000) { 2708 dev_warn(&instance->pdev->dev, 2709 "Diag reset adapter never cleared %s %d\n", 2710 __func__, __LINE__); 2711 break; 2712 } 2713 } 2714 if (host_diag & HOST_DIAG_RESET_ADAPTER) 2715 return -1; 2716 2717 abs_state = instance->instancet->read_fw_status_reg(instance->reg_set) 2718 & MFI_STATE_MASK; 2719 retry = 0; 2720 2721 while ((abs_state <= MFI_STATE_FW_INIT) && (retry++ < 1000)) { 2722 msleep(100); 2723 abs_state = instance->instancet-> 2724 read_fw_status_reg(instance->reg_set) & 
MFI_STATE_MASK; 2725 } 2726 if (abs_state <= MFI_STATE_FW_INIT) { 2727 dev_warn(&instance->pdev->dev, 2728 "fw state < MFI_STATE_FW_INIT, state = 0x%x %s %d\n", 2729 abs_state, __func__, __LINE__); 2730 return -1; 2731 } 2732 2733 return 0; 2734 } 2735 2736 /** 2737 * megasas_check_reset_fusion - For controller reset check 2738 * @regs: MFI register set 2739 */ 2740 static int 2741 megasas_check_reset_fusion(struct megasas_instance *instance, 2742 struct megasas_register_set __iomem *regs) 2743 { 2744 return 0; 2745 } 2746 2747 /* This function waits for outstanding commands on fusion to complete */ 2748 int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance, 2749 int reason, int *convert) 2750 { 2751 int i, outstanding, retval = 0, hb_seconds_missed = 0; 2752 u32 fw_state; 2753 2754 for (i = 0; i < resetwaittime; i++) { 2755 /* Check if firmware is in fault state */ 2756 fw_state = instance->instancet->read_fw_status_reg( 2757 instance->reg_set) & MFI_STATE_MASK; 2758 if (fw_state == MFI_STATE_FAULT) { 2759 dev_warn(&instance->pdev->dev, "Found FW in FAULT state," 2760 " will reset adapter scsi%d.\n", 2761 instance->host->host_no); 2762 retval = 1; 2763 goto out; 2764 } 2765 2766 if (reason == MFI_IO_TIMEOUT_OCR) { 2767 dev_info(&instance->pdev->dev, 2768 "MFI IO is timed out, initiating OCR\n"); 2769 retval = 1; 2770 goto out; 2771 } 2772 2773 /* If SR-IOV VF mode & heartbeat timeout, don't wait */ 2774 if (instance->requestorId && !reason) { 2775 retval = 1; 2776 goto out; 2777 } 2778 2779 /* If SR-IOV VF mode & I/O timeout, check for HB timeout */ 2780 if (instance->requestorId && reason) { 2781 if (instance->hb_host_mem->HB.fwCounter != 2782 instance->hb_host_mem->HB.driverCounter) { 2783 instance->hb_host_mem->HB.driverCounter = 2784 instance->hb_host_mem->HB.fwCounter; 2785 hb_seconds_missed = 0; 2786 } else { 2787 hb_seconds_missed++; 2788 if (hb_seconds_missed == 2789 (MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF/HZ)) { 2790 dev_warn(&instance->pdev->dev, "SR-IOV:" 2791 " Heartbeat never completed " 2792 " while polling during I/O " 2793 " timeout handling for " 2794 "scsi%d.\n", 2795 instance->host->host_no); 2796 *convert = 1; 2797 retval = 1; 2798 goto out; 2799 } 2800 } 2801 } 2802 2803 outstanding = atomic_read(&instance->fw_outstanding); 2804 if (!outstanding) 2805 goto out; 2806 2807 if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) { 2808 dev_notice(&instance->pdev->dev, "[%2d]waiting for %d " 2809 "commands to complete for scsi%d\n", i, 2810 outstanding, instance->host->host_no); 2811 megasas_complete_cmd_dpc_fusion( 2812 (unsigned long)instance); 2813 } 2814 msleep(1000); 2815 } 2816 2817 if (atomic_read(&instance->fw_outstanding)) { 2818 dev_err(&instance->pdev->dev, "pending commands remain after waiting, " 2819 "will reset adapter scsi%d.\n", 2820 instance->host->host_no); 2821 retval = 1; 2822 } 2823 out: 2824 return retval; 2825 } 2826 2827 void megasas_reset_reply_desc(struct megasas_instance *instance) 2828 { 2829 int i, j, count; 2830 struct fusion_context *fusion; 2831 union MPI2_REPLY_DESCRIPTORS_UNION *reply_desc; 2832 2833 fusion = instance->ctrl_context; 2834 count = instance->msix_vectors > 0 ? 
instance->msix_vectors : 1;
2835 for (i = 0 ; i < count ; i++) {
2836 fusion->last_reply_idx[i] = 0;
2837 reply_desc = fusion->reply_frames_desc[i];
2838 for (j = 0 ; j < fusion->reply_q_depth; j++, reply_desc++)
2839 reply_desc->Words = cpu_to_le64(ULLONG_MAX);
2840 }
2841 }
2842
2843 /*
2844 * megasas_refire_mgmt_cmd : Re-fire management commands
2845 * @instance: Controller's soft instance
2846 */
2847 void megasas_refire_mgmt_cmd(struct megasas_instance *instance)
2848 {
2849 int j;
2850 struct megasas_cmd_fusion *cmd_fusion;
2851 struct fusion_context *fusion;
2852 struct megasas_cmd *cmd_mfi;
2853 union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
2854 u16 smid;
2855 bool refire_cmd = 0;
2856
2857 fusion = instance->ctrl_context;
2858
2859 /* Re-fire management commands.
2860 * Do not traverse complete MPT frame pool. Start from max_scsi_cmds.
2861 */
2862 for (j = instance->max_scsi_cmds ; j < instance->max_fw_cmds; j++) {
2863 cmd_fusion = fusion->cmd_list[j];
2864 cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx];
2865 smid = le16_to_cpu(cmd_mfi->context.smid);
2866
2867 if (!smid)
2868 continue;
2869 req_desc = megasas_get_request_descriptor
2870 (instance, smid - 1);
2871 refire_cmd = req_desc && ((cmd_mfi->frame->dcmd.opcode !=
2872 cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO)) &&
2873 (cmd_mfi->frame->dcmd.opcode !=
2874 cpu_to_le32(MR_DCMD_SYSTEM_PD_MAP_GET_INFO)))
2875 && !(cmd_mfi->flags & DRV_DCMD_SKIP_REFIRE);
2876 if (refire_cmd)
2877 megasas_fire_cmd_fusion(instance, req_desc);
2878 else
2879 megasas_return_cmd(instance, cmd_mfi);
2880 }
2881 }
2882
2883 /*
2884 * megasas_track_scsiio : Track SCSI IOs outstanding to a SCSI device
2885 * @instance: per adapter struct
2886 * @channel: the channel assigned by the OS
2887 * @id: the id assigned by the OS
2888 *
2889 * Returns SUCCESS if no IOs pending to SCSI device, else return FAILED
2890 */
2891
2892 static int megasas_track_scsiio(struct megasas_instance *instance,
2893 int id, int channel)
2894 {
2895 int i, found = 0;
2896 struct megasas_cmd_fusion *cmd_fusion;
2897 struct fusion_context *fusion;
2898 fusion = instance->ctrl_context;
2899
2900 for (i = 0 ; i < instance->max_scsi_cmds; i++) {
2901 cmd_fusion = fusion->cmd_list[i];
2902 if (cmd_fusion->scmd &&
2903 (cmd_fusion->scmd->device->id == id &&
2904 cmd_fusion->scmd->device->channel == channel)) {
2905 dev_info(&instance->pdev->dev,
2906 "SCSI commands pending to target "
2907 "channel %d id %d \tSMID: 0x%x\n",
2908 channel, id, cmd_fusion->index);
2909 scsi_print_command(cmd_fusion->scmd);
2910 found = 1;
2911 break;
2912 }
2913 }
2914
2915 return found ? FAILED : SUCCESS;
2916 }
2917
2918 /**
2919 * megasas_tm_response_code - translation of device response code
2920 * @instance: per adapter object
2921 * @mpi_reply: MPI reply returned by firmware
2922 *
2923 * Return nothing.
2924 */ 2925 static void 2926 megasas_tm_response_code(struct megasas_instance *instance, 2927 struct MPI2_SCSI_TASK_MANAGE_REPLY *mpi_reply) 2928 { 2929 char *desc; 2930 2931 switch (mpi_reply->ResponseCode) { 2932 case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE: 2933 desc = "task management request completed"; 2934 break; 2935 case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME: 2936 desc = "invalid frame"; 2937 break; 2938 case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED: 2939 desc = "task management request not supported"; 2940 break; 2941 case MPI2_SCSITASKMGMT_RSP_TM_FAILED: 2942 desc = "task management request failed"; 2943 break; 2944 case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED: 2945 desc = "task management request succeeded"; 2946 break; 2947 case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN: 2948 desc = "invalid lun"; 2949 break; 2950 case 0xA: 2951 desc = "overlapped tag attempted"; 2952 break; 2953 case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC: 2954 desc = "task queued, however not sent to target"; 2955 break; 2956 default: 2957 desc = "unknown"; 2958 break; 2959 } 2960 dev_dbg(&instance->pdev->dev, "response_code(%01x): %s\n", 2961 mpi_reply->ResponseCode, desc); 2962 dev_dbg(&instance->pdev->dev, 2963 "TerminationCount/DevHandle/Function/TaskType/IOCStat/IOCLoginfo" 2964 " 0x%x/0x%x/0x%x/0x%x/0x%x/0x%x\n", 2965 mpi_reply->TerminationCount, mpi_reply->DevHandle, 2966 mpi_reply->Function, mpi_reply->TaskType, 2967 mpi_reply->IOCStatus, mpi_reply->IOCLogInfo); 2968 } 2969 2970 /** 2971 * megasas_issue_tm - main routine for sending tm requests 2972 * @instance: per adapter struct 2973 * @device_handle: device handle 2974 * @channel: the channel assigned by the OS 2975 * @id: the id assigned by the OS 2976 * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in megaraid_sas_fusion.c) 2977 * @smid_task: smid assigned to the task 2978 * @m_type: TM_MUTEX_ON or TM_MUTEX_OFF 2979 * Context: user 2980 * 2981 * MegaRaid use MPT interface for Task Magement request. 2982 * A generic API for sending task management requests to firmware. 2983 * 2984 * Return SUCCESS or FAILED. 2985 */ 2986 static int 2987 megasas_issue_tm(struct megasas_instance *instance, u16 device_handle, 2988 uint channel, uint id, u16 smid_task, u8 type) 2989 { 2990 struct MR_TASK_MANAGE_REQUEST *mr_request; 2991 struct MPI2_SCSI_TASK_MANAGE_REQUEST *mpi_request; 2992 unsigned long timeleft; 2993 struct megasas_cmd_fusion *cmd_fusion; 2994 struct megasas_cmd *cmd_mfi; 2995 union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc; 2996 struct fusion_context *fusion; 2997 struct megasas_cmd_fusion *scsi_lookup; 2998 int rc; 2999 struct MPI2_SCSI_TASK_MANAGE_REPLY *mpi_reply; 3000 3001 fusion = instance->ctrl_context; 3002 3003 cmd_mfi = megasas_get_cmd(instance); 3004 3005 if (!cmd_mfi) { 3006 dev_err(&instance->pdev->dev, "Failed from %s %d\n", 3007 __func__, __LINE__); 3008 return -ENOMEM; 3009 } 3010 3011 cmd_fusion = megasas_get_cmd_fusion(instance, 3012 instance->max_scsi_cmds + cmd_mfi->index); 3013 3014 /* Save the smid. 
To be used for returning the cmd */ 3015 cmd_mfi->context.smid = cmd_fusion->index; 3016 3017 req_desc = megasas_get_request_descriptor(instance, 3018 (cmd_fusion->index - 1)); 3019 if (!req_desc) { 3020 dev_err(&instance->pdev->dev, "Failed from %s %d\n", 3021 __func__, __LINE__); 3022 megasas_return_cmd(instance, cmd_mfi); 3023 return -ENOMEM; 3024 } 3025 3026 cmd_fusion->request_desc = req_desc; 3027 req_desc->Words = 0; 3028 3029 scsi_lookup = fusion->cmd_list[smid_task - 1]; 3030 3031 mr_request = (struct MR_TASK_MANAGE_REQUEST *) cmd_fusion->io_request; 3032 memset(mr_request, 0, sizeof(struct MR_TASK_MANAGE_REQUEST)); 3033 mpi_request = (struct MPI2_SCSI_TASK_MANAGE_REQUEST *) &mr_request->TmRequest; 3034 mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT; 3035 mpi_request->DevHandle = cpu_to_le16(device_handle); 3036 mpi_request->TaskType = type; 3037 mpi_request->TaskMID = cpu_to_le16(smid_task); 3038 mpi_request->LUN[1] = 0; 3039 3040 3041 req_desc = cmd_fusion->request_desc; 3042 req_desc->HighPriority.SMID = cpu_to_le16(cmd_fusion->index); 3043 req_desc->HighPriority.RequestFlags = 3044 (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY << 3045 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 3046 req_desc->HighPriority.MSIxIndex = 0; 3047 req_desc->HighPriority.LMID = 0; 3048 req_desc->HighPriority.Reserved1 = 0; 3049 3050 if (channel < MEGASAS_MAX_PD_CHANNELS) 3051 mr_request->tmReqFlags.isTMForPD = 1; 3052 else 3053 mr_request->tmReqFlags.isTMForLD = 1; 3054 3055 init_completion(&cmd_fusion->done); 3056 megasas_fire_cmd_fusion(instance, req_desc); 3057 3058 timeleft = wait_for_completion_timeout(&cmd_fusion->done, 50 * HZ); 3059 3060 if (!timeleft) { 3061 dev_err(&instance->pdev->dev, 3062 "task mgmt type 0x%x timed out\n", type); 3063 cmd_mfi->flags |= DRV_DCMD_SKIP_REFIRE; 3064 mutex_unlock(&instance->reset_mutex); 3065 rc = megasas_reset_fusion(instance->host, MFI_IO_TIMEOUT_OCR); 3066 mutex_lock(&instance->reset_mutex); 3067 return rc; 3068 } 3069 3070 mpi_reply = (struct MPI2_SCSI_TASK_MANAGE_REPLY *) &mr_request->TMReply; 3071 megasas_tm_response_code(instance, mpi_reply); 3072 3073 megasas_return_cmd(instance, cmd_mfi); 3074 rc = SUCCESS; 3075 switch (type) { 3076 case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK: 3077 if (scsi_lookup->scmd == NULL) 3078 break; 3079 else { 3080 instance->instancet->disable_intr(instance); 3081 msleep(1000); 3082 megasas_complete_cmd_dpc_fusion 3083 ((unsigned long)instance); 3084 instance->instancet->enable_intr(instance); 3085 if (scsi_lookup->scmd == NULL) 3086 break; 3087 } 3088 rc = FAILED; 3089 break; 3090 3091 case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET: 3092 if ((channel == 0xFFFFFFFF) && (id == 0xFFFFFFFF)) 3093 break; 3094 instance->instancet->disable_intr(instance); 3095 msleep(1000); 3096 megasas_complete_cmd_dpc_fusion 3097 ((unsigned long)instance); 3098 rc = megasas_track_scsiio(instance, id, channel); 3099 instance->instancet->enable_intr(instance); 3100 3101 break; 3102 case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET: 3103 case MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK: 3104 break; 3105 default: 3106 rc = FAILED; 3107 break; 3108 } 3109 3110 return rc; 3111 3112 } 3113 3114 /* 3115 * megasas_fusion_smid_lookup : Look for fusion command correpspodning to SCSI 3116 * @instance: per adapter struct 3117 * 3118 * Return Non Zero index, if SMID found in outstanding commands 3119 */ 3120 static u16 megasas_fusion_smid_lookup(struct scsi_cmnd *scmd) 3121 { 3122 int i, ret = 0; 3123 struct megasas_instance *instance; 3124 struct megasas_cmd_fusion 
*cmd_fusion; 3125 struct fusion_context *fusion; 3126 3127 instance = (struct megasas_instance *)scmd->device->host->hostdata; 3128 3129 fusion = instance->ctrl_context; 3130 3131 for (i = 0; i < instance->max_scsi_cmds; i++) { 3132 cmd_fusion = fusion->cmd_list[i]; 3133 if (cmd_fusion->scmd && (cmd_fusion->scmd == scmd)) { 3134 scmd_printk(KERN_NOTICE, scmd, "Abort request is for" 3135 " SMID: %d\n", cmd_fusion->index); 3136 ret = cmd_fusion->index; 3137 break; 3138 } 3139 } 3140 3141 return ret; 3142 } 3143 3144 /* 3145 * megasas_get_tm_devhandle - Get devhandle for TM request 3146 * @sdev- OS provided scsi device 3147 * 3148 * Returns- devhandle/targetID of SCSI device 3149 */ 3150 static u16 megasas_get_tm_devhandle(struct scsi_device *sdev) 3151 { 3152 u16 pd_index = 0; 3153 u32 device_id; 3154 struct megasas_instance *instance; 3155 struct fusion_context *fusion; 3156 struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync; 3157 u16 devhandle = (u16)ULONG_MAX; 3158 3159 instance = (struct megasas_instance *)sdev->host->hostdata; 3160 fusion = instance->ctrl_context; 3161 3162 if (sdev->channel < MEGASAS_MAX_PD_CHANNELS) { 3163 if (instance->use_seqnum_jbod_fp) { 3164 pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + 3165 sdev->id; 3166 pd_sync = (void *)fusion->pd_seq_sync 3167 [(instance->pd_seq_map_id - 1) & 1]; 3168 devhandle = pd_sync->seq[pd_index].devHandle; 3169 } else 3170 sdev_printk(KERN_ERR, sdev, "Firmware expose tmCapable" 3171 " without JBOD MAP support from %s %d\n", __func__, __LINE__); 3172 } else { 3173 device_id = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) 3174 + sdev->id; 3175 devhandle = device_id; 3176 } 3177 3178 return devhandle; 3179 } 3180 3181 /* 3182 * megasas_task_abort_fusion : SCSI task abort function for fusion adapters 3183 * @scmd : pointer to scsi command object 3184 * 3185 * Return SUCCESS, if command aborted else FAILED 3186 */ 3187 3188 int megasas_task_abort_fusion(struct scsi_cmnd *scmd) 3189 { 3190 struct megasas_instance *instance; 3191 u16 smid, devhandle; 3192 struct fusion_context *fusion; 3193 int ret; 3194 struct MR_PRIV_DEVICE *mr_device_priv_data; 3195 mr_device_priv_data = scmd->device->hostdata; 3196 3197 3198 instance = (struct megasas_instance *)scmd->device->host->hostdata; 3199 fusion = instance->ctrl_context; 3200 3201 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) { 3202 dev_err(&instance->pdev->dev, "Controller is not OPERATIONAL," 3203 "SCSI host:%d\n", instance->host->host_no); 3204 ret = FAILED; 3205 return ret; 3206 } 3207 3208 if (!mr_device_priv_data) { 3209 sdev_printk(KERN_INFO, scmd->device, "device been deleted! " 3210 "scmd(%p)\n", scmd); 3211 scmd->result = DID_NO_CONNECT << 16; 3212 ret = SUCCESS; 3213 goto out; 3214 } 3215 3216 3217 if (!mr_device_priv_data->is_tm_capable) { 3218 ret = FAILED; 3219 goto out; 3220 } 3221 3222 mutex_lock(&instance->reset_mutex); 3223 3224 smid = megasas_fusion_smid_lookup(scmd); 3225 3226 if (!smid) { 3227 ret = SUCCESS; 3228 scmd_printk(KERN_NOTICE, scmd, "Command for which abort is" 3229 " issued is not found in oustanding commands\n"); 3230 mutex_unlock(&instance->reset_mutex); 3231 goto out; 3232 } 3233 3234 devhandle = megasas_get_tm_devhandle(scmd->device); 3235 3236 if (devhandle == (u16)ULONG_MAX) { 3237 ret = SUCCESS; 3238 sdev_printk(KERN_INFO, scmd->device, 3239 "task abort issued for invalid devhandle\n"); 3240 mutex_unlock(&instance->reset_mutex); 3241 goto out; 3242 } 3243 sdev_printk(KERN_INFO, scmd->device, 3244 "attempting task abort! 
scmd(%p) tm_dev_handle 0x%x\n", 3245 scmd, devhandle); 3246 3247 mr_device_priv_data->tm_busy = 1; 3248 ret = megasas_issue_tm(instance, devhandle, 3249 scmd->device->channel, scmd->device->id, smid, 3250 MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK); 3251 mr_device_priv_data->tm_busy = 0; 3252 3253 mutex_unlock(&instance->reset_mutex); 3254 out: 3255 sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(%p)\n", 3256 ((ret == SUCCESS) ? "SUCCESS" : "FAILED"), scmd); 3257 3258 return ret; 3259 } 3260 3261 /* 3262 * megasas_reset_target_fusion : target reset function for fusion adapters 3263 * scmd: SCSI command pointer 3264 * 3265 * Returns SUCCESS if all commands associated with target aborted else FAILED 3266 */ 3267 3268 int megasas_reset_target_fusion(struct scsi_cmnd *scmd) 3269 { 3270 3271 struct megasas_instance *instance; 3272 int ret = FAILED; 3273 u16 devhandle; 3274 struct fusion_context *fusion; 3275 struct MR_PRIV_DEVICE *mr_device_priv_data; 3276 mr_device_priv_data = scmd->device->hostdata; 3277 3278 instance = (struct megasas_instance *)scmd->device->host->hostdata; 3279 fusion = instance->ctrl_context; 3280 3281 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) { 3282 dev_err(&instance->pdev->dev, "Controller is not OPERATIONAL," 3283 "SCSI host:%d\n", instance->host->host_no); 3284 ret = FAILED; 3285 return ret; 3286 } 3287 3288 if (!mr_device_priv_data) { 3289 sdev_printk(KERN_INFO, scmd->device, "device been deleted! " 3290 "scmd(%p)\n", scmd); 3291 scmd->result = DID_NO_CONNECT << 16; 3292 ret = SUCCESS; 3293 goto out; 3294 } 3295 3296 3297 if (!mr_device_priv_data->is_tm_capable) { 3298 ret = FAILED; 3299 goto out; 3300 } 3301 3302 mutex_lock(&instance->reset_mutex); 3303 devhandle = megasas_get_tm_devhandle(scmd->device); 3304 3305 if (devhandle == (u16)ULONG_MAX) { 3306 ret = SUCCESS; 3307 sdev_printk(KERN_INFO, scmd->device, 3308 "target reset issued for invalid devhandle\n"); 3309 mutex_unlock(&instance->reset_mutex); 3310 goto out; 3311 } 3312 3313 sdev_printk(KERN_INFO, scmd->device, 3314 "attempting target reset! scmd(%p) tm_dev_handle 0x%x\n", 3315 scmd, devhandle); 3316 mr_device_priv_data->tm_busy = 1; 3317 ret = megasas_issue_tm(instance, devhandle, 3318 scmd->device->channel, scmd->device->id, 0, 3319 MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET); 3320 mr_device_priv_data->tm_busy = 0; 3321 mutex_unlock(&instance->reset_mutex); 3322 out: 3323 scmd_printk(KERN_NOTICE, scmd, "megasas: target reset %s!!\n", 3324 (ret == SUCCESS) ? 
"SUCCESS" : "FAILED"); 3325 3326 return ret; 3327 } 3328 3329 /*SRIOV get other instance in cluster if any*/ 3330 struct megasas_instance *megasas_get_peer_instance(struct megasas_instance *instance) 3331 { 3332 int i; 3333 3334 for (i = 0; i < MAX_MGMT_ADAPTERS; i++) { 3335 if (megasas_mgmt_info.instance[i] && 3336 (megasas_mgmt_info.instance[i] != instance) && 3337 megasas_mgmt_info.instance[i]->requestorId && 3338 megasas_mgmt_info.instance[i]->peerIsPresent && 3339 (memcmp((megasas_mgmt_info.instance[i]->clusterId), 3340 instance->clusterId, MEGASAS_CLUSTER_ID_SIZE) == 0)) 3341 return megasas_mgmt_info.instance[i]; 3342 } 3343 return NULL; 3344 } 3345 3346 /* Check for a second path that is currently UP */ 3347 int megasas_check_mpio_paths(struct megasas_instance *instance, 3348 struct scsi_cmnd *scmd) 3349 { 3350 struct megasas_instance *peer_instance = NULL; 3351 int retval = (DID_RESET << 16); 3352 3353 if (instance->peerIsPresent) { 3354 peer_instance = megasas_get_peer_instance(instance); 3355 if ((peer_instance) && 3356 (atomic_read(&peer_instance->adprecovery) == 3357 MEGASAS_HBA_OPERATIONAL)) 3358 retval = (DID_NO_CONNECT << 16); 3359 } 3360 return retval; 3361 } 3362 3363 /* Core fusion reset function */ 3364 int megasas_reset_fusion(struct Scsi_Host *shost, int reason) 3365 { 3366 int retval = SUCCESS, i, convert = 0; 3367 struct megasas_instance *instance; 3368 struct megasas_cmd_fusion *cmd_fusion; 3369 struct fusion_context *fusion; 3370 u32 abs_state, status_reg, reset_adapter; 3371 u32 io_timeout_in_crash_mode = 0; 3372 struct scsi_cmnd *scmd_local = NULL; 3373 struct scsi_device *sdev; 3374 3375 instance = (struct megasas_instance *)shost->hostdata; 3376 fusion = instance->ctrl_context; 3377 3378 mutex_lock(&instance->reset_mutex); 3379 3380 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { 3381 dev_warn(&instance->pdev->dev, "Hardware critical error, " 3382 "returning FAILED for scsi%d.\n", 3383 instance->host->host_no); 3384 mutex_unlock(&instance->reset_mutex); 3385 return FAILED; 3386 } 3387 status_reg = instance->instancet->read_fw_status_reg(instance->reg_set); 3388 abs_state = status_reg & MFI_STATE_MASK; 3389 3390 /* IO timeout detected, forcibly put FW in FAULT state */ 3391 if (abs_state != MFI_STATE_FAULT && instance->crash_dump_buf && 3392 instance->crash_dump_app_support && reason) { 3393 dev_info(&instance->pdev->dev, "IO/DCMD timeout is detected, " 3394 "forcibly FAULT Firmware\n"); 3395 atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT); 3396 status_reg = readl(&instance->reg_set->doorbell); 3397 writel(status_reg | MFI_STATE_FORCE_OCR, 3398 &instance->reg_set->doorbell); 3399 readl(&instance->reg_set->doorbell); 3400 mutex_unlock(&instance->reset_mutex); 3401 do { 3402 ssleep(3); 3403 io_timeout_in_crash_mode++; 3404 dev_dbg(&instance->pdev->dev, "waiting for [%d] " 3405 "seconds for crash dump collection and OCR " 3406 "to be done\n", (io_timeout_in_crash_mode * 3)); 3407 } while ((atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) && 3408 (io_timeout_in_crash_mode < 80)); 3409 3410 if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL) { 3411 dev_info(&instance->pdev->dev, "OCR done for IO " 3412 "timeout case\n"); 3413 retval = SUCCESS; 3414 } else { 3415 dev_info(&instance->pdev->dev, "Controller is not " 3416 "operational after 240 seconds wait for IO " 3417 "timeout case in FW crash dump mode\n do " 3418 "OCR/kill adapter\n"); 3419 retval = megasas_reset_fusion(shost, 0); 3420 } 3421 
return retval; 3422 } 3423 3424 if (instance->requestorId && !instance->skip_heartbeat_timer_del) 3425 del_timer_sync(&instance->sriov_heartbeat_timer); 3426 set_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags); 3427 atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_POLLING); 3428 instance->instancet->disable_intr(instance); 3429 msleep(1000); 3430 3431 /* First try waiting for commands to complete */ 3432 if (megasas_wait_for_outstanding_fusion(instance, reason, 3433 &convert)) { 3434 atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT); 3435 dev_warn(&instance->pdev->dev, "resetting fusion " 3436 "adapter scsi%d.\n", instance->host->host_no); 3437 if (convert) 3438 reason = 0; 3439 3440 /* Now return commands back to the OS */ 3441 for (i = 0 ; i < instance->max_scsi_cmds; i++) { 3442 cmd_fusion = fusion->cmd_list[i]; 3443 scmd_local = cmd_fusion->scmd; 3444 if (cmd_fusion->scmd) { 3445 scmd_local->result = 3446 megasas_check_mpio_paths(instance, 3447 scmd_local); 3448 if (megasas_cmd_type(scmd_local) == READ_WRITE_LDIO) 3449 atomic_dec(&instance->ldio_outstanding); 3450 megasas_return_cmd_fusion(instance, cmd_fusion); 3451 scsi_dma_unmap(scmd_local); 3452 scmd_local->scsi_done(scmd_local); 3453 atomic_dec(&instance->fw_outstanding); 3454 } 3455 } 3456 3457 status_reg = instance->instancet->read_fw_status_reg( 3458 instance->reg_set); 3459 abs_state = status_reg & MFI_STATE_MASK; 3460 reset_adapter = status_reg & MFI_RESET_ADAPTER; 3461 if (instance->disableOnlineCtrlReset || 3462 (abs_state == MFI_STATE_FAULT && !reset_adapter)) { 3463 /* Reset not supported, kill adapter */ 3464 dev_warn(&instance->pdev->dev, "Reset not supported" 3465 ", killing adapter scsi%d.\n", 3466 instance->host->host_no); 3467 megaraid_sas_kill_hba(instance); 3468 instance->skip_heartbeat_timer_del = 1; 3469 retval = FAILED; 3470 goto out; 3471 } 3472 3473 /* Let SR-IOV VF & PF sync up if there was a HB failure */ 3474 if (instance->requestorId && !reason) { 3475 msleep(MEGASAS_OCR_SETTLE_TIME_VF); 3476 goto transition_to_ready; 3477 } 3478 3479 /* Now try to reset the chip */ 3480 for (i = 0; i < MEGASAS_FUSION_MAX_RESET_TRIES; i++) { 3481 3482 if (instance->instancet->adp_reset 3483 (instance, instance->reg_set)) 3484 continue; 3485 transition_to_ready: 3486 /* Wait for FW to become ready */ 3487 if (megasas_transition_to_ready(instance, 1)) { 3488 dev_warn(&instance->pdev->dev, 3489 "Failed to transition controller to ready for " 3490 "scsi%d.\n", instance->host->host_no); 3491 if (instance->requestorId && !reason) 3492 goto fail_kill_adapter; 3493 else 3494 continue; 3495 } 3496 megasas_reset_reply_desc(instance); 3497 megasas_fusion_update_can_queue(instance, OCR_CONTEXT); 3498 3499 if (megasas_ioc_init_fusion(instance)) { 3500 dev_warn(&instance->pdev->dev, 3501 "megasas_ioc_init_fusion() failed! 
for " 3502 "scsi%d\n", instance->host->host_no); 3503 if (instance->requestorId && !reason) 3504 goto fail_kill_adapter; 3505 else 3506 continue; 3507 } 3508 3509 megasas_refire_mgmt_cmd(instance); 3510 3511 if (megasas_get_ctrl_info(instance)) { 3512 dev_info(&instance->pdev->dev, 3513 "Failed from %s %d\n", 3514 __func__, __LINE__); 3515 megaraid_sas_kill_hba(instance); 3516 retval = FAILED; 3517 } 3518 /* Reset load balance info */ 3519 memset(fusion->load_balance_info, 0, 3520 sizeof(struct LD_LOAD_BALANCE_INFO) 3521 *MAX_LOGICAL_DRIVES_EXT); 3522 3523 if (!megasas_get_map_info(instance)) 3524 megasas_sync_map_info(instance); 3525 3526 megasas_setup_jbod_map(instance); 3527 3528 shost_for_each_device(sdev, shost) 3529 megasas_update_sdev_properties(sdev); 3530 3531 clear_bit(MEGASAS_FUSION_IN_RESET, 3532 &instance->reset_flags); 3533 instance->instancet->enable_intr(instance); 3534 atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL); 3535 3536 /* Restart SR-IOV heartbeat */ 3537 if (instance->requestorId) { 3538 if (!megasas_sriov_start_heartbeat(instance, 0)) 3539 megasas_start_timer(instance, 3540 &instance->sriov_heartbeat_timer, 3541 megasas_sriov_heartbeat_handler, 3542 MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF); 3543 else 3544 instance->skip_heartbeat_timer_del = 1; 3545 } 3546 3547 /* Adapter reset completed successfully */ 3548 dev_warn(&instance->pdev->dev, "Reset " 3549 "successful for scsi%d.\n", 3550 instance->host->host_no); 3551 3552 if (instance->crash_dump_drv_support && 3553 instance->crash_dump_app_support) 3554 megasas_set_crash_dump_params(instance, 3555 MR_CRASH_BUF_TURN_ON); 3556 else 3557 megasas_set_crash_dump_params(instance, 3558 MR_CRASH_BUF_TURN_OFF); 3559 3560 retval = SUCCESS; 3561 goto out; 3562 } 3563 fail_kill_adapter: 3564 /* Reset failed, kill the adapter */ 3565 dev_warn(&instance->pdev->dev, "Reset failed, killing " 3566 "adapter scsi%d.\n", instance->host->host_no); 3567 megaraid_sas_kill_hba(instance); 3568 instance->skip_heartbeat_timer_del = 1; 3569 retval = FAILED; 3570 } else { 3571 /* For VF: Restart HB timer if we didn't OCR */ 3572 if (instance->requestorId) { 3573 megasas_start_timer(instance, 3574 &instance->sriov_heartbeat_timer, 3575 megasas_sriov_heartbeat_handler, 3576 MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF); 3577 } 3578 clear_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags); 3579 instance->instancet->enable_intr(instance); 3580 atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL); 3581 } 3582 out: 3583 clear_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags); 3584 mutex_unlock(&instance->reset_mutex); 3585 return retval; 3586 } 3587 3588 /* Fusion Crash dump collection work queue */ 3589 void megasas_fusion_crash_dump_wq(struct work_struct *work) 3590 { 3591 struct megasas_instance *instance = 3592 container_of(work, struct megasas_instance, crash_init); 3593 u32 status_reg; 3594 u8 partial_copy = 0; 3595 3596 3597 status_reg = instance->instancet->read_fw_status_reg(instance->reg_set); 3598 3599 /* 3600 * Allocate host crash buffers to copy data from 1 MB DMA crash buffer 3601 * to host crash buffers 3602 */ 3603 if (instance->drv_buf_index == 0) { 3604 /* Buffer is already allocated for old Crash dump. 
3605 * Do OCR and do not wait for crash dump collection 3606 */ 3607 if (instance->drv_buf_alloc) { 3608 dev_info(&instance->pdev->dev, "earlier crash dump is " 3609 "not yet copied by application, ignoring this " 3610 "crash dump and initiating OCR\n"); 3611 status_reg |= MFI_STATE_CRASH_DUMP_DONE; 3612 writel(status_reg, 3613 &instance->reg_set->outbound_scratch_pad); 3614 readl(&instance->reg_set->outbound_scratch_pad); 3615 return; 3616 } 3617 megasas_alloc_host_crash_buffer(instance); 3618 dev_info(&instance->pdev->dev, "Number of host crash buffers " 3619 "allocated: %d\n", instance->drv_buf_alloc); 3620 } 3621 3622 /* 3623 * Driver has allocated max buffers, which can be allocated 3624 * and FW has more crash dump data, then driver will 3625 * ignore the data. 3626 */ 3627 if (instance->drv_buf_index >= (instance->drv_buf_alloc)) { 3628 dev_info(&instance->pdev->dev, "Driver is done copying " 3629 "the buffer: %d\n", instance->drv_buf_alloc); 3630 status_reg |= MFI_STATE_CRASH_DUMP_DONE; 3631 partial_copy = 1; 3632 } else { 3633 memcpy(instance->crash_buf[instance->drv_buf_index], 3634 instance->crash_dump_buf, CRASH_DMA_BUF_SIZE); 3635 instance->drv_buf_index++; 3636 status_reg &= ~MFI_STATE_DMADONE; 3637 } 3638 3639 if (status_reg & MFI_STATE_CRASH_DUMP_DONE) { 3640 dev_info(&instance->pdev->dev, "Crash Dump is available,number " 3641 "of copied buffers: %d\n", instance->drv_buf_index); 3642 instance->fw_crash_buffer_size = instance->drv_buf_index; 3643 instance->fw_crash_state = AVAILABLE; 3644 instance->drv_buf_index = 0; 3645 writel(status_reg, &instance->reg_set->outbound_scratch_pad); 3646 readl(&instance->reg_set->outbound_scratch_pad); 3647 if (!partial_copy) 3648 megasas_reset_fusion(instance->host, 0); 3649 } else { 3650 writel(status_reg, &instance->reg_set->outbound_scratch_pad); 3651 readl(&instance->reg_set->outbound_scratch_pad); 3652 } 3653 } 3654 3655 3656 /* Fusion OCR work queue */ 3657 void megasas_fusion_ocr_wq(struct work_struct *work) 3658 { 3659 struct megasas_instance *instance = 3660 container_of(work, struct megasas_instance, work_init); 3661 3662 megasas_reset_fusion(instance->host, 0); 3663 } 3664 3665 struct megasas_instance_template megasas_instance_template_fusion = { 3666 .enable_intr = megasas_enable_intr_fusion, 3667 .disable_intr = megasas_disable_intr_fusion, 3668 .clear_intr = megasas_clear_intr_fusion, 3669 .read_fw_status_reg = megasas_read_fw_status_reg_fusion, 3670 .adp_reset = megasas_adp_reset_fusion, 3671 .check_reset = megasas_check_reset_fusion, 3672 .service_isr = megasas_isr_fusion, 3673 .tasklet = megasas_complete_cmd_dpc_fusion, 3674 .init_adapter = megasas_init_adapter_fusion, 3675 .build_and_issue_cmd = megasas_build_and_issue_cmd_fusion, 3676 .issue_dcmd = megasas_issue_dcmd_fusion, 3677 }; 3678
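/*
 * For reference, the fusion entry points collected in
 * megasas_instance_template_fusion above are reached through the adapter's
 * instance template rather than called directly.  A minimal usage sketch,
 * assuming the probe path in megaraid_sas_base.c installs this template for
 * fusion-class controllers:
 *
 *	instance->instancet = &megasas_instance_template_fusion;
 *	...
 *	instance->instancet->enable_intr(instance);
 *	instance->instancet->disable_intr(instance);
 *	instance->instancet->adp_reset(instance, instance->reg_set);
 */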