/*
 * Linux MegaRAID driver for SAS based RAID controllers
 *
 * Copyright (c) 2003-2013 LSI Corporation
 * Copyright (c) 2013-2014 Avago Technologies
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 * Authors: Avago Technologies
 *          Sreenivas Bagalkote
 *          Sumant Patro
 *          Bo Yang
 *          Adam Radford
 *          Kashyap Desai <kashyap.desai@avagotech.com>
 *          Sumit Saxena <sumit.saxena@avagotech.com>
 *
 * Send feedback to: megaraidlinux.pdl@avagotech.com
 *
 * Mail to: Avago Technologies, 350 West Trimble Road, Building 90,
 * San Jose, California 95131
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/list.h>
#include <linux/moduleparam.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/uio.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/fs.h>
#include <linux/compat.h>
#include <linux/blkdev.h>
#include <linux/mutex.h>
#include <linux/poll.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include "megaraid_sas_fusion.h"
#include "megaraid_sas.h"

/*
 * Number of sectors per IO command
 * Will be set in megasas_init_mfi if user does not provide
 */
static unsigned int max_sectors;
module_param_named(max_sectors, max_sectors, int, 0);
MODULE_PARM_DESC(max_sectors,
	"Maximum number of sectors per IO command");

static int msix_disable;
module_param(msix_disable, int, S_IRUGO);
MODULE_PARM_DESC(msix_disable, "Disable MSI-X interrupt handling. Default: 0");

static unsigned int msix_vectors;
module_param(msix_vectors, int, S_IRUGO);
MODULE_PARM_DESC(msix_vectors, "MSI-X max vector count. Default: Set by FW");

static int allow_vf_ioctls;
module_param(allow_vf_ioctls, int, S_IRUGO);
MODULE_PARM_DESC(allow_vf_ioctls, "Allow ioctls in SR-IOV VF mode. Default: 0");

static unsigned int throttlequeuedepth = MEGASAS_THROTTLE_QUEUE_DEPTH;
module_param(throttlequeuedepth, int, S_IRUGO);
MODULE_PARM_DESC(throttlequeuedepth,
	"Adapter queue depth when throttled due to I/O timeout. Default: 16");

unsigned int resetwaittime = MEGASAS_RESET_WAIT_TIME;
module_param(resetwaittime, int, S_IRUGO);
MODULE_PARM_DESC(resetwaittime, "Wait time in seconds after I/O timeout "
	"before resetting adapter. Default: 180");
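/*
 * Usage example (illustrative only, not part of the original source): these
 * are standard module parameters, so they can be set at load time and, where
 * the permission bits allow it, read back through sysfs, e.g.
 *
 *	modprobe megaraid_sas max_sectors=128 msix_disable=0 scmd_timeout=60
 *	cat /sys/module/megaraid_sas/parameters/resetwaittime
 *
 * The specific values shown above are examples only.
 */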
int smp_affinity_enable = 1;
module_param(smp_affinity_enable, int, S_IRUGO);
MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable Default: enable(1)");

int rdpq_enable = 1;
module_param(rdpq_enable, int, S_IRUGO);
MODULE_PARM_DESC(rdpq_enable, "Allocate reply queue in chunks for large queue depth enable/disable Default: enable(1)");

unsigned int dual_qdepth_disable;
module_param(dual_qdepth_disable, int, S_IRUGO);
MODULE_PARM_DESC(dual_qdepth_disable, "Disable dual queue depth feature. Default: 0");

unsigned int scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT;
module_param(scmd_timeout, int, S_IRUGO);
MODULE_PARM_DESC(scmd_timeout, "scsi command timeout (10-90s), default 90s. See megasas_reset_timer.");

MODULE_LICENSE("GPL");
MODULE_VERSION(MEGASAS_VERSION);
MODULE_AUTHOR("megaraidlinux.pdl@avagotech.com");
MODULE_DESCRIPTION("Avago MegaRAID SAS Driver");

int megasas_transition_to_ready(struct megasas_instance *instance, int ocr);
static int megasas_get_pd_list(struct megasas_instance *instance);
static int megasas_ld_list_query(struct megasas_instance *instance,
				 u8 query_type);
static int megasas_issue_init_mfi(struct megasas_instance *instance);
static int megasas_register_aen(struct megasas_instance *instance,
				u32 seq_num, u32 class_locale_word);
static int
megasas_get_pd_info(struct megasas_instance *instance, u16 device_id);

/*
 * PCI ID table for all supported controllers
 */
static struct pci_device_id megasas_pci_table[] = {

	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1064R)},
	/* xscale IOP */
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078R)},
	/* ppc IOP */
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078DE)},
	/* ppc IOP */
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078GEN2)},
	/* gen2 */
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0079GEN2)},
	/* gen2 */
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0073SKINNY)},
	/* skinny */
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0071SKINNY)},
	/* skinny */
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VERDE_ZCR)},
	/* xscale IOP, vega */
	{PCI_DEVICE(PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_DELL_PERC5)},
	/* xscale IOP */
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FUSION)},
	/* Fusion */
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_PLASMA)},
	/* Plasma */
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INVADER)},
	/* Invader */
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FURY)},
	/* Fury */
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INTRUDER)},
	/* Intruder */
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INTRUDER_24)},
	/* Intruder 24 port */
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CUTLASS_52)},
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CUTLASS_53)},
	{}
};

MODULE_DEVICE_TABLE(pci, megasas_pci_table);

static int megasas_mgmt_majorno;
struct megasas_mgmt_info megasas_mgmt_info;
static struct fasync_struct *megasas_async_queue;
static DEFINE_MUTEX(megasas_async_queue_mutex);

static int megasas_poll_wait_aen;
static DECLARE_WAIT_QUEUE_HEAD(megasas_poll_wait);
static u32 support_poll_for_event;
u32 megasas_dbg_lvl;
static u32 support_device_change;

/* define lock for aen poll */
spinlock_t poll_aen_lock;

void
megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
		     u8 alt_status);
static u32
megasas_read_fw_status_reg_gen2(struct megasas_register_set __iomem *regs);
static int
megasas_adp_reset_gen2(struct megasas_instance *instance,
		       struct megasas_register_set __iomem *reg_set);
static irqreturn_t megasas_isr(int irq, void *devp);
static u32
megasas_init_adapter_mfi(struct megasas_instance *instance);
u32
megasas_build_and_issue_cmd(struct megasas_instance *instance,
			    struct scsi_cmnd *scmd);
static void megasas_complete_cmd_dpc(unsigned long instance_addr);
void
megasas_release_fusion(struct megasas_instance *instance);
int
megasas_ioc_init_fusion(struct megasas_instance *instance);
void
megasas_free_cmds_fusion(struct megasas_instance *instance);
u8
megasas_get_map_info(struct megasas_instance *instance);
int
megasas_sync_map_info(struct megasas_instance *instance);
int
wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
	      int seconds);
void megasas_reset_reply_desc(struct megasas_instance *instance);
void megasas_fusion_ocr_wq(struct work_struct *work);
static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
					 int initial);
int megasas_check_mpio_paths(struct megasas_instance *instance,
			     struct scsi_cmnd *scmd);

int
megasas_issue_dcmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
{
	instance->instancet->fire_cmd(instance,
		cmd->frame_phys_addr, 0, instance->reg_set);
	return 0;
}

/**
 * megasas_get_cmd - Get a command from the free pool
 * @instance: Adapter soft state
 *
 * Returns a free command from the pool
 */
struct megasas_cmd *megasas_get_cmd(struct megasas_instance
				    *instance)
{
	unsigned long flags;
	struct megasas_cmd *cmd = NULL;

	spin_lock_irqsave(&instance->mfi_pool_lock, flags);

	if (!list_empty(&instance->cmd_pool)) {
		cmd = list_entry((&instance->cmd_pool)->next,
				 struct megasas_cmd, list);
		list_del_init(&cmd->list);
	} else {
		dev_err(&instance->pdev->dev, "Command pool empty!\n");
	}

	spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
	return cmd;
}
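/*
 * Minimal usage sketch (illustrative only, not part of the original source):
 * internal DCMDs typically pair megasas_get_cmd() with megasas_return_cmd()
 * around the issue/wait sequence, e.g.
 *
 *	cmd = megasas_get_cmd(instance);
 *	if (!cmd)
 *		return -ENOMEM;
 *	... build the MFI frame in cmd->frame ...
 *	ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
 *	megasas_return_cmd(instance, cmd);
 *
 * Error-handling details vary per caller; see the DCMD helpers further down.
 */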
/**
 * megasas_return_cmd - Return a cmd to free command pool
 * @instance: Adapter soft state
 * @cmd: Command packet to be returned to free command pool
 */
inline void
megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
{
	unsigned long flags;
	u32 blk_tags;
	struct megasas_cmd_fusion *cmd_fusion;
	struct fusion_context *fusion = instance->ctrl_context;

	/* This flag is used only for fusion adapter.
	 * Wait for Interrupt for Polled mode DCMD
	 */
	if (cmd->flags & DRV_DCMD_POLLED_MODE)
		return;

	spin_lock_irqsave(&instance->mfi_pool_lock, flags);

	if (fusion) {
		blk_tags = instance->max_scsi_cmds + cmd->index;
		cmd_fusion = fusion->cmd_list[blk_tags];
		megasas_return_cmd_fusion(instance, cmd_fusion);
	}
	cmd->scmd = NULL;
	cmd->frame_count = 0;
	cmd->flags = 0;
	if (!fusion && reset_devices)
		cmd->frame->hdr.cmd = MFI_CMD_INVALID;
	list_add(&cmd->list, (&instance->cmd_pool)->next);

	spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);

}

static const char *
format_timestamp(uint32_t timestamp)
{
	static char buffer[32];

	if ((timestamp & 0xff000000) == 0xff000000)
		snprintf(buffer, sizeof(buffer), "boot + %us", timestamp &
			 0x00ffffff);
	else
		snprintf(buffer, sizeof(buffer), "%us", timestamp);
	return buffer;
}

static const char *
format_class(int8_t class)
{
	static char buffer[6];

	switch (class) {
	case MFI_EVT_CLASS_DEBUG:
		return "debug";
	case MFI_EVT_CLASS_PROGRESS:
		return "progress";
	case MFI_EVT_CLASS_INFO:
		return "info";
	case MFI_EVT_CLASS_WARNING:
		return "WARN";
	case MFI_EVT_CLASS_CRITICAL:
		return "CRIT";
	case MFI_EVT_CLASS_FATAL:
		return "FATAL";
	case MFI_EVT_CLASS_DEAD:
		return "DEAD";
	default:
		snprintf(buffer, sizeof(buffer), "%d", class);
		return buffer;
	}
}

/**
 * megasas_decode_evt: Decode FW AEN event and print critical event
 * for information.
 * @instance: Adapter soft state
 */
static void
megasas_decode_evt(struct megasas_instance *instance)
{
	struct megasas_evt_detail *evt_detail = instance->evt_detail;
	union megasas_evt_class_locale class_locale;
	class_locale.word = le32_to_cpu(evt_detail->cl.word);

	if (class_locale.members.class >= MFI_EVT_CLASS_CRITICAL)
		dev_info(&instance->pdev->dev, "%d (%s/0x%04x/%s) - %s\n",
			le32_to_cpu(evt_detail->seq_num),
			format_timestamp(le32_to_cpu(evt_detail->time_stamp)),
			(class_locale.members.locale),
			format_class(class_locale.members.class),
			evt_detail->description);
}

/**
 * The following functions are defined for xscale
 * (deviceid : 1064R, PERC5) controllers
 */

/**
 * megasas_enable_intr_xscale - Enables interrupts
 * @regs: MFI register set
 */
static inline void
megasas_enable_intr_xscale(struct megasas_instance *instance)
{
	struct megasas_register_set __iomem *regs;

	regs = instance->reg_set;
	writel(0, &(regs)->outbound_intr_mask);

	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_mask);
}

/**
 * megasas_disable_intr_xscale - Disables interrupt
 * @regs: MFI register set
 */
static inline void
megasas_disable_intr_xscale(struct megasas_instance *instance)
{
	struct megasas_register_set __iomem *regs;
	u32 mask = 0x1f;

	regs = instance->reg_set;
	writel(mask, &regs->outbound_intr_mask);
	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_mask);
}

/**
 * megasas_read_fw_status_reg_xscale - returns the current FW status value
 * @regs: MFI register set
 */
static u32
megasas_read_fw_status_reg_xscale(struct megasas_register_set __iomem *regs)
{
	return readl(&(regs)->outbound_msg_0);
}
/**
 * megasas_clear_interrupt_xscale - Check & clear interrupt
 * @regs: MFI register set
 */
static int
megasas_clear_intr_xscale(struct megasas_register_set __iomem *regs)
{
	u32 status;
	u32 mfiStatus = 0;

	/*
	 * Check if it is our interrupt
	 */
	status = readl(&regs->outbound_intr_status);

	if (status & MFI_OB_INTR_STATUS_MASK)
		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
	if (status & MFI_XSCALE_OMR0_CHANGE_INTERRUPT)
		mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;

	/*
	 * Clear the interrupt by writing back the same value
	 */
	if (mfiStatus)
		writel(status, &regs->outbound_intr_status);

	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_status);

	return mfiStatus;
}

/**
 * megasas_fire_cmd_xscale - Sends command to the FW
 * @frame_phys_addr : Physical address of cmd
 * @frame_count : Number of frames for the command
 * @regs : MFI register set
 */
static inline void
megasas_fire_cmd_xscale(struct megasas_instance *instance,
		dma_addr_t frame_phys_addr,
		u32 frame_count,
		struct megasas_register_set __iomem *regs)
{
	unsigned long flags;

	spin_lock_irqsave(&instance->hba_lock, flags);
	writel((frame_phys_addr >> 3)|(frame_count),
	       &(regs)->inbound_queue_port);
	spin_unlock_irqrestore(&instance->hba_lock, flags);
}

/**
 * megasas_adp_reset_xscale - For controller reset
 * @regs: MFI register set
 */
static int
megasas_adp_reset_xscale(struct megasas_instance *instance,
	struct megasas_register_set __iomem *regs)
{
	u32 i;
	u32 pcidata;

	writel(MFI_ADP_RESET, &regs->inbound_doorbell);

	for (i = 0; i < 3; i++)
		msleep(1000); /* sleep for 3 secs */
	pcidata = 0;
	pci_read_config_dword(instance->pdev, MFI_1068_PCSR_OFFSET, &pcidata);
	dev_notice(&instance->pdev->dev, "pcidata = %x\n", pcidata);
	if (pcidata & 0x2) {
		dev_notice(&instance->pdev->dev, "mfi 1068 offset read=%x\n", pcidata);
		pcidata &= ~0x2;
		pci_write_config_dword(instance->pdev,
				MFI_1068_PCSR_OFFSET, pcidata);

		for (i = 0; i < 2; i++)
			msleep(1000); /* need to wait 2 secs again */

		pcidata = 0;
		pci_read_config_dword(instance->pdev,
				MFI_1068_FW_HANDSHAKE_OFFSET, &pcidata);
		dev_notice(&instance->pdev->dev, "1068 offset handshake read=%x\n", pcidata);
		if ((pcidata & 0xffff0000) == MFI_1068_FW_READY) {
			dev_notice(&instance->pdev->dev, "1068 offset pcidt=%x\n", pcidata);
			pcidata = 0;
			pci_write_config_dword(instance->pdev,
					MFI_1068_FW_HANDSHAKE_OFFSET, pcidata);
		}
	}
	return 0;
}

/**
 * megasas_check_reset_xscale - For controller reset check
 * @regs: MFI register set
 */
static int
megasas_check_reset_xscale(struct megasas_instance *instance,
		struct megasas_register_set __iomem *regs)
{
	if ((atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) &&
	    (le32_to_cpu(*instance->consumer) ==
		MEGASAS_ADPRESET_INPROG_SIGN))
		return 1;
	return 0;
}

static struct megasas_instance_template megasas_instance_template_xscale = {

	.fire_cmd = megasas_fire_cmd_xscale,
	.enable_intr = megasas_enable_intr_xscale,
	.disable_intr = megasas_disable_intr_xscale,
	.clear_intr = megasas_clear_intr_xscale,
	.read_fw_status_reg = megasas_read_fw_status_reg_xscale,
	.adp_reset = megasas_adp_reset_xscale,
	.check_reset = megasas_check_reset_xscale,
	.service_isr = megasas_isr,
	.tasklet = megasas_complete_cmd_dpc,
	.init_adapter = megasas_init_adapter_mfi,
	.build_and_issue_cmd = megasas_build_and_issue_cmd,
	.issue_dcmd = megasas_issue_dcmd,
};

/**
 * This is the end of set of functions & definitions specific
 * to xscale (deviceid : 1064R, PERC5) controllers
 */
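/*
 * Illustrative note (not from the original source): the inbound queue
 * encoding differs between the IOP families above and below.  The xscale
 * path packs the frame address and frame count as
 *
 *	(frame_phys_addr >> 3) | frame_count
 *
 * (the low address bits are available because frames are aligned), while
 * the ppc/skinny/gen2 paths below use
 *
 *	frame_phys_addr | (frame_count << 1) | 1
 *
 * For example, a frame at 0x1000 with frame_count = 2 would be posted as
 * 0x202 on xscale and as 0x1005 on gen2.  The values are examples only.
 */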
/**
 * The following functions are defined for ppc (deviceid : 0x60)
 * controllers
 */

/**
 * megasas_enable_intr_ppc - Enables interrupts
 * @regs: MFI register set
 */
static inline void
megasas_enable_intr_ppc(struct megasas_instance *instance)
{
	struct megasas_register_set __iomem *regs;

	regs = instance->reg_set;
	writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);

	writel(~0x80000000, &(regs)->outbound_intr_mask);

	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_mask);
}

/**
 * megasas_disable_intr_ppc - Disable interrupt
 * @regs: MFI register set
 */
static inline void
megasas_disable_intr_ppc(struct megasas_instance *instance)
{
	struct megasas_register_set __iomem *regs;
	u32 mask = 0xFFFFFFFF;

	regs = instance->reg_set;
	writel(mask, &regs->outbound_intr_mask);
	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_mask);
}

/**
 * megasas_read_fw_status_reg_ppc - returns the current FW status value
 * @regs: MFI register set
 */
static u32
megasas_read_fw_status_reg_ppc(struct megasas_register_set __iomem *regs)
{
	return readl(&(regs)->outbound_scratch_pad);
}

/**
 * megasas_clear_interrupt_ppc - Check & clear interrupt
 * @regs: MFI register set
 */
static int
megasas_clear_intr_ppc(struct megasas_register_set __iomem *regs)
{
	u32 status, mfiStatus = 0;

	/*
	 * Check if it is our interrupt
	 */
	status = readl(&regs->outbound_intr_status);

	if (status & MFI_REPLY_1078_MESSAGE_INTERRUPT)
		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;

	if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT)
		mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;

	/*
	 * Clear the interrupt by writing back the same value
	 */
	writel(status, &regs->outbound_doorbell_clear);

	/* Dummy readl to force pci flush */
	readl(&regs->outbound_doorbell_clear);

	return mfiStatus;
}

/**
 * megasas_fire_cmd_ppc - Sends command to the FW
 * @frame_phys_addr : Physical address of cmd
 * @frame_count : Number of frames for the command
 * @regs : MFI register set
 */
static inline void
megasas_fire_cmd_ppc(struct megasas_instance *instance,
		dma_addr_t frame_phys_addr,
		u32 frame_count,
		struct megasas_register_set __iomem *regs)
{
	unsigned long flags;

	spin_lock_irqsave(&instance->hba_lock, flags);
	writel((frame_phys_addr | (frame_count<<1))|1,
			&(regs)->inbound_queue_port);
	spin_unlock_irqrestore(&instance->hba_lock, flags);
}

/**
 * megasas_check_reset_ppc - For controller reset check
 * @regs: MFI register set
 */
static int
megasas_check_reset_ppc(struct megasas_instance *instance,
			struct megasas_register_set __iomem *regs)
{
	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
		return 1;

	return 0;
}

static struct megasas_instance_template megasas_instance_template_ppc = {

	.fire_cmd = megasas_fire_cmd_ppc,
	.enable_intr = megasas_enable_intr_ppc,
	.disable_intr = megasas_disable_intr_ppc,
	.clear_intr = megasas_clear_intr_ppc,
	.read_fw_status_reg = megasas_read_fw_status_reg_ppc,
	.adp_reset = megasas_adp_reset_xscale,
	.check_reset = megasas_check_reset_ppc,
	.service_isr = megasas_isr,
	.tasklet = megasas_complete_cmd_dpc,
	.init_adapter = megasas_init_adapter_mfi,
	.build_and_issue_cmd = megasas_build_and_issue_cmd,
	.issue_dcmd = megasas_issue_dcmd,
};
/**
 * megasas_enable_intr_skinny - Enables interrupts
 * @regs: MFI register set
 */
static inline void
megasas_enable_intr_skinny(struct megasas_instance *instance)
{
	struct megasas_register_set __iomem *regs;

	regs = instance->reg_set;
	writel(0xFFFFFFFF, &(regs)->outbound_intr_mask);

	writel(~MFI_SKINNY_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);

	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_mask);
}

/**
 * megasas_disable_intr_skinny - Disables interrupt
 * @regs: MFI register set
 */
static inline void
megasas_disable_intr_skinny(struct megasas_instance *instance)
{
	struct megasas_register_set __iomem *regs;
	u32 mask = 0xFFFFFFFF;

	regs = instance->reg_set;
	writel(mask, &regs->outbound_intr_mask);
	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_mask);
}

/**
 * megasas_read_fw_status_reg_skinny - returns the current FW status value
 * @regs: MFI register set
 */
static u32
megasas_read_fw_status_reg_skinny(struct megasas_register_set __iomem *regs)
{
	return readl(&(regs)->outbound_scratch_pad);
}

/**
 * megasas_clear_interrupt_skinny - Check & clear interrupt
 * @regs: MFI register set
 */
static int
megasas_clear_intr_skinny(struct megasas_register_set __iomem *regs)
{
	u32 status;
	u32 mfiStatus = 0;

	/*
	 * Check if it is our interrupt
	 */
	status = readl(&regs->outbound_intr_status);

	if (!(status & MFI_SKINNY_ENABLE_INTERRUPT_MASK)) {
		return 0;
	}

	/*
	 * Check if it is our interrupt
	 */
	if ((megasas_read_fw_status_reg_skinny(regs) & MFI_STATE_MASK) ==
	    MFI_STATE_FAULT) {
		mfiStatus = MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
	} else
		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;

	/*
	 * Clear the interrupt by writing back the same value
	 */
	writel(status, &regs->outbound_intr_status);

	/*
	 * dummy read to flush PCI
	 */
	readl(&regs->outbound_intr_status);

	return mfiStatus;
}

/**
 * megasas_fire_cmd_skinny - Sends command to the FW
 * @frame_phys_addr : Physical address of cmd
 * @frame_count : Number of frames for the command
 * @regs : MFI register set
 */
static inline void
megasas_fire_cmd_skinny(struct megasas_instance *instance,
			dma_addr_t frame_phys_addr,
			u32 frame_count,
			struct megasas_register_set __iomem *regs)
{
	unsigned long flags;

	spin_lock_irqsave(&instance->hba_lock, flags);
	writel(upper_32_bits(frame_phys_addr),
	       &(regs)->inbound_high_queue_port);
	writel((lower_32_bits(frame_phys_addr) | (frame_count<<1))|1,
	       &(regs)->inbound_low_queue_port);
	mmiowb();
	spin_unlock_irqrestore(&instance->hba_lock, flags);
}

/**
 * megasas_check_reset_skinny - For controller reset check
 * @regs: MFI register set
 */
static int
megasas_check_reset_skinny(struct megasas_instance *instance,
				struct megasas_register_set __iomem *regs)
{
	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
		return 1;

	return 0;
}

static struct megasas_instance_template megasas_instance_template_skinny = {

	.fire_cmd = megasas_fire_cmd_skinny,
	.enable_intr = megasas_enable_intr_skinny,
	.disable_intr = megasas_disable_intr_skinny,
	.clear_intr = megasas_clear_intr_skinny,
	.read_fw_status_reg = megasas_read_fw_status_reg_skinny,
	.adp_reset = megasas_adp_reset_gen2,
	.check_reset = megasas_check_reset_skinny,
	.service_isr = megasas_isr,
	.tasklet = megasas_complete_cmd_dpc,
	.init_adapter = megasas_init_adapter_mfi,
	.build_and_issue_cmd = megasas_build_and_issue_cmd,
	.issue_dcmd = megasas_issue_dcmd,
};


/**
 * The following functions are defined for gen2 (deviceid : 0x78 0x79)
 * controllers
 */

/**
 * megasas_enable_intr_gen2 - Enables interrupts
 * @regs: MFI register set
 */
static inline void
megasas_enable_intr_gen2(struct megasas_instance *instance)
{
	struct megasas_register_set __iomem *regs;

	regs = instance->reg_set;
	writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);

	/* write ~0x00000005 (4 & 1) to the intr mask */
	writel(~MFI_GEN2_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);

	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_mask);
}

/**
 * megasas_disable_intr_gen2 - Disables interrupt
 * @regs: MFI register set
 */
static inline void
megasas_disable_intr_gen2(struct megasas_instance *instance)
{
	struct megasas_register_set __iomem *regs;
	u32 mask = 0xFFFFFFFF;

	regs = instance->reg_set;
	writel(mask, &regs->outbound_intr_mask);
	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_mask);
}

/**
 * megasas_read_fw_status_reg_gen2 - returns the current FW status value
 * @regs: MFI register set
 */
static u32
megasas_read_fw_status_reg_gen2(struct megasas_register_set __iomem *regs)
{
	return readl(&(regs)->outbound_scratch_pad);
}

/**
 * megasas_clear_interrupt_gen2 - Check & clear interrupt
 * @regs: MFI register set
 */
static int
megasas_clear_intr_gen2(struct megasas_register_set __iomem *regs)
{
	u32 status;
	u32 mfiStatus = 0;

	/*
	 * Check if it is our interrupt
	 */
	status = readl(&regs->outbound_intr_status);

	if (status & MFI_INTR_FLAG_REPLY_MESSAGE) {
		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
	}
	if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT) {
		mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
	}

	/*
	 * Clear the interrupt by writing back the same value
	 */
	if (mfiStatus)
		writel(status, &regs->outbound_doorbell_clear);

	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_status);

	return mfiStatus;
}

/**
 * megasas_fire_cmd_gen2 - Sends command to the FW
 * @frame_phys_addr : Physical address of cmd
 * @frame_count : Number of frames for the command
 * @regs : MFI register set
 */
static inline void
megasas_fire_cmd_gen2(struct megasas_instance *instance,
			dma_addr_t frame_phys_addr,
			u32 frame_count,
			struct megasas_register_set __iomem *regs)
{
	unsigned long flags;

	spin_lock_irqsave(&instance->hba_lock, flags);
	writel((frame_phys_addr | (frame_count<<1))|1,
			&(regs)->inbound_queue_port);
	spin_unlock_irqrestore(&instance->hba_lock, flags);
}

/**
 * megasas_adp_reset_gen2 - For controller reset
 * @regs: MFI register set
 */
static int
megasas_adp_reset_gen2(struct megasas_instance *instance,
			struct megasas_register_set __iomem *reg_set)
{
	u32 retry = 0 ;
	u32 HostDiag;
	u32 __iomem *seq_offset = &reg_set->seq_offset;
	u32 __iomem *hostdiag_offset = &reg_set->host_diag;

	if (instance->instancet == &megasas_instance_template_skinny) {
		seq_offset = &reg_set->fusion_seq_offset;
		hostdiag_offset = &reg_set->fusion_host_diag;
	}

	writel(0, seq_offset);
	writel(4, seq_offset);
	writel(0xb, seq_offset);
	writel(2, seq_offset);
	writel(7, seq_offset);
	writel(0xd, seq_offset);

	msleep(1000);

	HostDiag = (u32)readl(hostdiag_offset);

	while (!(HostDiag & DIAG_WRITE_ENABLE)) {
		msleep(100);
		HostDiag = (u32)readl(hostdiag_offset);
		dev_notice(&instance->pdev->dev, "RESETGEN2: retry=%x, hostdiag=%x\n",
			   retry, HostDiag);

		if (retry++ >= 100)
			return 1;

	}

	dev_notice(&instance->pdev->dev, "ADP_RESET_GEN2: HostDiag=%x\n", HostDiag);

	writel((HostDiag | DIAG_RESET_ADAPTER), hostdiag_offset);

	ssleep(10);

	HostDiag = (u32)readl(hostdiag_offset);
	while (HostDiag & DIAG_RESET_ADAPTER) {
		msleep(100);
		HostDiag = (u32)readl(hostdiag_offset);
		dev_notice(&instance->pdev->dev, "RESET_GEN2: retry=%x, hostdiag=%x\n",
			   retry, HostDiag);

		if (retry++ >= 1000)
			return 1;

	}
	return 0;
}

/**
 * megasas_check_reset_gen2 - For controller reset check
 * @regs: MFI register set
 */
static int
megasas_check_reset_gen2(struct megasas_instance *instance,
		struct megasas_register_set __iomem *regs)
{
	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
		return 1;

	return 0;
}

static struct megasas_instance_template megasas_instance_template_gen2 = {

	.fire_cmd = megasas_fire_cmd_gen2,
	.enable_intr = megasas_enable_intr_gen2,
	.disable_intr = megasas_disable_intr_gen2,
	.clear_intr = megasas_clear_intr_gen2,
	.read_fw_status_reg = megasas_read_fw_status_reg_gen2,
	.adp_reset = megasas_adp_reset_gen2,
	.check_reset = megasas_check_reset_gen2,
	.service_isr = megasas_isr,
	.tasklet = megasas_complete_cmd_dpc,
	.init_adapter = megasas_init_adapter_mfi,
	.build_and_issue_cmd = megasas_build_and_issue_cmd,
	.issue_dcmd = megasas_issue_dcmd,
};

/**
 * This is the end of set of functions & definitions
 * specific to gen2 (deviceid : 0x78, 0x79) controllers
 */

/*
 * Template added for TB (Fusion)
 */
extern struct megasas_instance_template megasas_instance_template_fusion;
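/*
 * Illustrative sketch (not part of the original source): a polled DCMD is
 * prepared by marking the frame status as invalid and asking the firmware
 * not to post a reply descriptor, roughly:
 *
 *	cmd->frame->hdr.cmd_status = MFI_STAT_INVALID_STATUS;
 *	cmd->frame->hdr.flags |= cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);
 *	instance->instancet->issue_dcmd(instance, cmd);
 *	wait_and_poll(instance, cmd, MFI_IO_TIMEOUT_SECS);
 *
 * megasas_issue_polled() below wraps this sequence together with the
 * adapter-recovery checks and the VF-specific timeout selection.
 */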
/**
 * megasas_issue_polled - Issues a polling command
 * @instance: Adapter soft state
 * @cmd: Command packet to be issued
 *
 * For polling, MFI requires the cmd_status to be set to MFI_STAT_INVALID_STATUS before posting.
 */
int
megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd)
{
	struct megasas_header *frame_hdr = &cmd->frame->hdr;

	frame_hdr->cmd_status = MFI_STAT_INVALID_STATUS;
	frame_hdr->flags |= cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);

	if ((atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) ||
		(instance->instancet->issue_dcmd(instance, cmd))) {
		dev_err(&instance->pdev->dev, "Failed from %s %d\n",
			__func__, __LINE__);
		return DCMD_NOT_FIRED;
	}

	return wait_and_poll(instance, cmd, instance->requestorId ?
			MEGASAS_ROUTINE_WAIT_TIME_VF : MFI_IO_TIMEOUT_SECS);
}

/**
 * megasas_issue_blocked_cmd - Synchronous wrapper around regular FW cmds
 * @instance: Adapter soft state
 * @cmd: Command to be issued
 * @timeout: Timeout in seconds
 *
 * This function waits on an event for the command to be returned from ISR.
 * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs
 * Used to issue ioctl commands.
 */
int
megasas_issue_blocked_cmd(struct megasas_instance *instance,
			  struct megasas_cmd *cmd, int timeout)
{
	int ret = 0;
	cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;

	if ((atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) ||
		(instance->instancet->issue_dcmd(instance, cmd))) {
		dev_err(&instance->pdev->dev, "Failed from %s %d\n",
			__func__, __LINE__);
		return DCMD_NOT_FIRED;
	}

	if (timeout) {
		ret = wait_event_timeout(instance->int_cmd_wait_q,
			cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS, timeout * HZ);
		if (!ret) {
			dev_err(&instance->pdev->dev, "Failed from %s %d DCMD Timed out\n",
				__func__, __LINE__);
			return DCMD_TIMEOUT;
		}
	} else
		wait_event(instance->int_cmd_wait_q,
			cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS);

	return (cmd->cmd_status_drv == MFI_STAT_OK) ?
		DCMD_SUCCESS : DCMD_FAILED;
}
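/*
 * Note (added for clarity, not in the original source): the blocking DCMD
 * helpers above and below report one of four driver-internal results -
 * DCMD_SUCCESS, DCMD_FAILED, DCMD_TIMEOUT and DCMD_NOT_FIRED.  Callers
 * typically treat DCMD_NOT_FIRED as "adapter unavailable" and handle
 * DCMD_TIMEOUT separately, since a timed-out command may still complete.
 */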
/**
 * megasas_issue_blocked_abort_cmd - Aborts previously issued cmd
 * @instance: Adapter soft state
 * @cmd_to_abort: Previously issued cmd to be aborted
 * @timeout: Timeout in seconds
 *
 * MFI firmware can abort previously issued AEN command (automatic event
 * notification). The megasas_issue_blocked_abort_cmd() issues such abort
 * cmd and waits for return status.
 * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs
 */
static int
megasas_issue_blocked_abort_cmd(struct megasas_instance *instance,
				struct megasas_cmd *cmd_to_abort, int timeout)
{
	struct megasas_cmd *cmd;
	struct megasas_abort_frame *abort_fr;
	int ret = 0;

	cmd = megasas_get_cmd(instance);

	if (!cmd)
		return -1;

	abort_fr = &cmd->frame->abort;

	/*
	 * Prepare and issue the abort frame
	 */
	abort_fr->cmd = MFI_CMD_ABORT;
	abort_fr->cmd_status = MFI_STAT_INVALID_STATUS;
	abort_fr->flags = cpu_to_le16(0);
	abort_fr->abort_context = cpu_to_le32(cmd_to_abort->index);
	abort_fr->abort_mfi_phys_addr_lo =
		cpu_to_le32(lower_32_bits(cmd_to_abort->frame_phys_addr));
	abort_fr->abort_mfi_phys_addr_hi =
		cpu_to_le32(upper_32_bits(cmd_to_abort->frame_phys_addr));

	cmd->sync_cmd = 1;
	cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;

	if ((atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) ||
		(instance->instancet->issue_dcmd(instance, cmd))) {
		dev_err(&instance->pdev->dev, "Failed from %s %d\n",
			__func__, __LINE__);
		return DCMD_NOT_FIRED;
	}

	if (timeout) {
		ret = wait_event_timeout(instance->abort_cmd_wait_q,
			cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS, timeout * HZ);
		if (!ret) {
			dev_err(&instance->pdev->dev, "Failed from %s %d Abort Timed out\n",
				__func__, __LINE__);
			return DCMD_TIMEOUT;
		}
	} else
		wait_event(instance->abort_cmd_wait_q,
			cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS);

	cmd->sync_cmd = 0;

	megasas_return_cmd(instance, cmd);
	return (cmd->cmd_status_drv == MFI_STAT_OK) ?
		DCMD_SUCCESS : DCMD_FAILED;
}

/**
 * megasas_make_sgl32 - Prepares 32-bit SGL
 * @instance: Adapter soft state
 * @scp: SCSI command from the mid-layer
 * @mfi_sgl: SGL to be filled in
 *
 * If successful, this function returns the number of SG elements. Otherwise,
 * it returns -1.
 */
static int
megasas_make_sgl32(struct megasas_instance *instance, struct scsi_cmnd *scp,
		   union megasas_sgl *mfi_sgl)
{
	int i;
	int sge_count;
	struct scatterlist *os_sgl;

	sge_count = scsi_dma_map(scp);
	BUG_ON(sge_count < 0);

	if (sge_count) {
		scsi_for_each_sg(scp, os_sgl, sge_count, i) {
			mfi_sgl->sge32[i].length = cpu_to_le32(sg_dma_len(os_sgl));
			mfi_sgl->sge32[i].phys_addr = cpu_to_le32(sg_dma_address(os_sgl));
		}
	}
	return sge_count;
}
/**
 * megasas_make_sgl64 - Prepares 64-bit SGL
 * @instance: Adapter soft state
 * @scp: SCSI command from the mid-layer
 * @mfi_sgl: SGL to be filled in
 *
 * If successful, this function returns the number of SG elements. Otherwise,
 * it returns -1.
 */
static int
megasas_make_sgl64(struct megasas_instance *instance, struct scsi_cmnd *scp,
		   union megasas_sgl *mfi_sgl)
{
	int i;
	int sge_count;
	struct scatterlist *os_sgl;

	sge_count = scsi_dma_map(scp);
	BUG_ON(sge_count < 0);

	if (sge_count) {
		scsi_for_each_sg(scp, os_sgl, sge_count, i) {
			mfi_sgl->sge64[i].length = cpu_to_le32(sg_dma_len(os_sgl));
			mfi_sgl->sge64[i].phys_addr = cpu_to_le64(sg_dma_address(os_sgl));
		}
	}
	return sge_count;
}

/**
 * megasas_make_sgl_skinny - Prepares IEEE SGL
 * @instance: Adapter soft state
 * @scp: SCSI command from the mid-layer
 * @mfi_sgl: SGL to be filled in
 *
 * If successful, this function returns the number of SG elements. Otherwise,
 * it returns -1.
 */
static int
megasas_make_sgl_skinny(struct megasas_instance *instance,
			struct scsi_cmnd *scp, union megasas_sgl *mfi_sgl)
{
	int i;
	int sge_count;
	struct scatterlist *os_sgl;

	sge_count = scsi_dma_map(scp);

	if (sge_count) {
		scsi_for_each_sg(scp, os_sgl, sge_count, i) {
			mfi_sgl->sge_skinny[i].length =
				cpu_to_le32(sg_dma_len(os_sgl));
			mfi_sgl->sge_skinny[i].phys_addr =
				cpu_to_le64(sg_dma_address(os_sgl));
			mfi_sgl->sge_skinny[i].flag = cpu_to_le32(0);
		}
	}
	return sge_count;
}

/**
 * megasas_get_frame_count - Computes the number of frames
 * @frame_type : type of frame- io or pthru frame
 * @sge_count : number of sg elements
 *
 * Returns the number of frames required for the number of sge's (sge_count)
 */

static u32 megasas_get_frame_count(struct megasas_instance *instance,
			u8 sge_count, u8 frame_type)
{
	int num_cnt;
	int sge_bytes;
	u32 sge_sz;
	u32 frame_count = 0;

	sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) :
	    sizeof(struct megasas_sge32);

	if (instance->flag_ieee) {
		sge_sz = sizeof(struct megasas_sge_skinny);
	}

	/*
	 * Main frame can contain 2 SGEs for 64-bit SGLs and
	 * 3 SGEs for 32-bit SGLs for ldio &
	 * 1 SGEs for 64-bit SGLs and
	 * 2 SGEs for 32-bit SGLs for pthru frame
	 */
	if (unlikely(frame_type == PTHRU_FRAME)) {
		if (instance->flag_ieee == 1) {
			num_cnt = sge_count - 1;
		} else if (IS_DMA64)
			num_cnt = sge_count - 1;
		else
			num_cnt = sge_count - 2;
	} else {
		if (instance->flag_ieee == 1) {
			num_cnt = sge_count - 1;
		} else if (IS_DMA64)
			num_cnt = sge_count - 2;
		else
			num_cnt = sge_count - 3;
	}

	if (num_cnt > 0) {
		sge_bytes = sge_sz * num_cnt;

		frame_count = (sge_bytes / MEGAMFI_FRAME_SIZE) +
		    ((sge_bytes % MEGAMFI_FRAME_SIZE) ? 1 : 0) ;
	}
	/* Main frame */
	frame_count += 1;

	if (frame_count > 7)
		frame_count = 8;
	return frame_count;
}
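/*
 * Worked example (illustrative, assuming a 64-bit non-IEEE SGL where each
 * SGE occupies sizeof(struct megasas_sge64) bytes): for an LDIO with
 * sge_count = 10, two SGEs fit in the main frame, so num_cnt = 8.  With
 * 64-byte MFI frames the remaining SGE bytes spill into two additional
 * frames, so megasas_get_frame_count() returns 3 (the result is capped at
 * 8).  The exact numbers depend on the SGE size of the chosen SGL format.
 */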
/**
 * megasas_build_dcdb - Prepares a direct cdb (DCDB) command
 * @instance: Adapter soft state
 * @scp: SCSI command
 * @cmd: Command to be prepared in
 *
 * This function prepares CDB commands. These are typically pass-through
 * commands to the devices.
 */
static int
megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
		   struct megasas_cmd *cmd)
{
	u32 is_logical;
	u32 device_id;
	u16 flags = 0;
	struct megasas_pthru_frame *pthru;

	is_logical = MEGASAS_IS_LOGICAL(scp);
	device_id = MEGASAS_DEV_INDEX(scp);
	pthru = (struct megasas_pthru_frame *)cmd->frame;

	if (scp->sc_data_direction == PCI_DMA_TODEVICE)
		flags = MFI_FRAME_DIR_WRITE;
	else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
		flags = MFI_FRAME_DIR_READ;
	else if (scp->sc_data_direction == PCI_DMA_NONE)
		flags = MFI_FRAME_DIR_NONE;

	if (instance->flag_ieee == 1) {
		flags |= MFI_FRAME_IEEE;
	}

	/*
	 * Prepare the DCDB frame
	 */
	pthru->cmd = (is_logical) ? MFI_CMD_LD_SCSI_IO : MFI_CMD_PD_SCSI_IO;
	pthru->cmd_status = 0x0;
	pthru->scsi_status = 0x0;
	pthru->target_id = device_id;
	pthru->lun = scp->device->lun;
	pthru->cdb_len = scp->cmd_len;
	pthru->timeout = 0;
	pthru->pad_0 = 0;
	pthru->flags = cpu_to_le16(flags);
	pthru->data_xfer_len = cpu_to_le32(scsi_bufflen(scp));

	memcpy(pthru->cdb, scp->cmnd, scp->cmd_len);

	/*
	 * If the command is for the tape device, set the
	 * pthru timeout to the os layer timeout value.
	 */
	if (scp->device->type == TYPE_TAPE) {
		if ((scp->request->timeout / HZ) > 0xFFFF)
			pthru->timeout = cpu_to_le16(0xFFFF);
		else
			pthru->timeout = cpu_to_le16(scp->request->timeout / HZ);
	}

	/*
	 * Construct SGL
	 */
	if (instance->flag_ieee == 1) {
		pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64);
		pthru->sge_count = megasas_make_sgl_skinny(instance, scp,
						      &pthru->sgl);
	} else if (IS_DMA64) {
		pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64);
		pthru->sge_count = megasas_make_sgl64(instance, scp,
						      &pthru->sgl);
	} else
		pthru->sge_count = megasas_make_sgl32(instance, scp,
						      &pthru->sgl);

	if (pthru->sge_count > instance->max_num_sge) {
		dev_err(&instance->pdev->dev, "DCDB too many SGE NUM=%x\n",
			pthru->sge_count);
		return 0;
	}

	/*
	 * Sense info specific
	 */
	pthru->sense_len = SCSI_SENSE_BUFFERSIZE;
	pthru->sense_buf_phys_addr_hi =
		cpu_to_le32(upper_32_bits(cmd->sense_phys_addr));
	pthru->sense_buf_phys_addr_lo =
		cpu_to_le32(lower_32_bits(cmd->sense_phys_addr));

	/*
	 * Compute the total number of frames this command consumes. FW uses
	 * this number to pull sufficient number of frames from host memory.
	 */
	cmd->frame_count = megasas_get_frame_count(instance, pthru->sge_count,
							PTHRU_FRAME);

	return cmd->frame_count;
}
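/*
 * Illustrative example (not from the original source): for a 10-byte
 * READ(10) CDB such as
 *
 *	28 00 00 00 10 00 00 00 08 00
 *
 * megasas_build_ldio() below extracts the LBA from bytes 2-5 and the
 * transfer length from bytes 7-8, so start_lba_lo = 0x1000 and
 * lba_count = 8 (start_lba_hi stays 0 for anything shorter than a
 * 16-byte CDB).  The values are examples only.
 */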
/**
 * megasas_build_ldio - Prepares IOs to logical devices
 * @instance: Adapter soft state
 * @scp: SCSI command
 * @cmd: Command to be prepared
 *
 * Frames (and accompanying SGLs) for regular SCSI IOs use this function.
 */
static int
megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp,
		   struct megasas_cmd *cmd)
{
	u32 device_id;
	u8 sc = scp->cmnd[0];
	u16 flags = 0;
	struct megasas_io_frame *ldio;

	device_id = MEGASAS_DEV_INDEX(scp);
	ldio = (struct megasas_io_frame *)cmd->frame;

	if (scp->sc_data_direction == PCI_DMA_TODEVICE)
		flags = MFI_FRAME_DIR_WRITE;
	else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
		flags = MFI_FRAME_DIR_READ;

	if (instance->flag_ieee == 1) {
		flags |= MFI_FRAME_IEEE;
	}

	/*
	 * Prepare the Logical IO frame: 2nd bit is zero for all read cmds
	 */
	ldio->cmd = (sc & 0x02) ? MFI_CMD_LD_WRITE : MFI_CMD_LD_READ;
	ldio->cmd_status = 0x0;
	ldio->scsi_status = 0x0;
	ldio->target_id = device_id;
	ldio->timeout = 0;
	ldio->reserved_0 = 0;
	ldio->pad_0 = 0;
	ldio->flags = cpu_to_le16(flags);
	ldio->start_lba_hi = 0;
	ldio->access_byte = (scp->cmd_len != 6) ? scp->cmnd[1] : 0;

	/*
	 * 6-byte READ(0x08) or WRITE(0x0A) cdb
	 */
	if (scp->cmd_len == 6) {
		ldio->lba_count = cpu_to_le32((u32) scp->cmnd[4]);
		ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[1] << 16) |
						 ((u32) scp->cmnd[2] << 8) |
						 (u32) scp->cmnd[3]);

		ldio->start_lba_lo &= cpu_to_le32(0x1FFFFF);
	}

	/*
	 * 10-byte READ(0x28) or WRITE(0x2A) cdb
	 */
	else if (scp->cmd_len == 10) {
		ldio->lba_count = cpu_to_le32((u32) scp->cmnd[8] |
					      ((u32) scp->cmnd[7] << 8));
		ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
						 ((u32) scp->cmnd[3] << 16) |
						 ((u32) scp->cmnd[4] << 8) |
						 (u32) scp->cmnd[5]);
	}

	/*
	 * 12-byte READ(0xA8) or WRITE(0xAA) cdb
	 */
	else if (scp->cmd_len == 12) {
		ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[6] << 24) |
					      ((u32) scp->cmnd[7] << 16) |
					      ((u32) scp->cmnd[8] << 8) |
					      (u32) scp->cmnd[9]);

		ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
						 ((u32) scp->cmnd[3] << 16) |
						 ((u32) scp->cmnd[4] << 8) |
						 (u32) scp->cmnd[5]);
	}

	/*
	 * 16-byte READ(0x88) or WRITE(0x8A) cdb
	 */
	else if (scp->cmd_len == 16) {
		ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[10] << 24) |
					      ((u32) scp->cmnd[11] << 16) |
					      ((u32) scp->cmnd[12] << 8) |
					      (u32) scp->cmnd[13]);

		ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[6] << 24) |
						 ((u32) scp->cmnd[7] << 16) |
						 ((u32) scp->cmnd[8] << 8) |
						 (u32) scp->cmnd[9]);

		ldio->start_lba_hi = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
						 ((u32) scp->cmnd[3] << 16) |
						 ((u32) scp->cmnd[4] << 8) |
						 (u32) scp->cmnd[5]);

	}

	/*
	 * Construct SGL
	 */
	if (instance->flag_ieee) {
		ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64);
		ldio->sge_count = megasas_make_sgl_skinny(instance, scp,
					      &ldio->sgl);
	} else if (IS_DMA64) {
		ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64);
		ldio->sge_count = megasas_make_sgl64(instance, scp, &ldio->sgl);
	} else
		ldio->sge_count = megasas_make_sgl32(instance, scp, &ldio->sgl);

	if (ldio->sge_count > instance->max_num_sge) {
		dev_err(&instance->pdev->dev, "build_ld_io: sge_count = %x\n",
			ldio->sge_count);
		return 0;
	}

	/*
	 * Sense info specific
	 */
	ldio->sense_len = SCSI_SENSE_BUFFERSIZE;
	ldio->sense_buf_phys_addr_hi = 0;
	ldio->sense_buf_phys_addr_lo = cpu_to_le32(cmd->sense_phys_addr);

	/*
	 * Compute the total number of frames this command consumes. FW uses
	 * this number to pull sufficient number of frames from host memory.
	 */
	cmd->frame_count = megasas_get_frame_count(instance,
			ldio->sge_count, IO_FRAME);

	return cmd->frame_count;
}

/**
 * megasas_cmd_type - Checks if the cmd is for logical drive/sysPD
 *			and whether it's RW or non RW
 * @scmd: SCSI command
 *
 */
inline int megasas_cmd_type(struct scsi_cmnd *cmd)
{
	int ret;

	switch (cmd->cmnd[0]) {
	case READ_10:
	case WRITE_10:
	case READ_12:
	case WRITE_12:
	case READ_6:
	case WRITE_6:
	case READ_16:
	case WRITE_16:
		ret = (MEGASAS_IS_LOGICAL(cmd)) ?
			READ_WRITE_LDIO : READ_WRITE_SYSPDIO;
		break;
	default:
		ret = (MEGASAS_IS_LOGICAL(cmd)) ?
			NON_READ_WRITE_LDIO : NON_READ_WRITE_SYSPDIO;
	}
	return ret;
}

/**
 * megasas_dump_pending_frames - Dumps the frame address of all pending cmds
 *				in FW
 * @instance: Adapter soft state
 */
static inline void
megasas_dump_pending_frames(struct megasas_instance *instance)
{
	struct megasas_cmd *cmd;
	int i, n;
	union megasas_sgl *mfi_sgl;
	struct megasas_io_frame *ldio;
	struct megasas_pthru_frame *pthru;
	u32 sgcount;
	u32 max_cmd = instance->max_fw_cmds;

	dev_err(&instance->pdev->dev, "[%d]: Dumping Frame Phys Address of all pending cmds in FW\n", instance->host->host_no);
	dev_err(&instance->pdev->dev, "[%d]: Total OS Pending cmds : %d\n", instance->host->host_no, atomic_read(&instance->fw_outstanding));
	if (IS_DMA64)
		dev_err(&instance->pdev->dev, "[%d]: 64 bit SGLs were sent to FW\n", instance->host->host_no);
	else
		dev_err(&instance->pdev->dev, "[%d]: 32 bit SGLs were sent to FW\n", instance->host->host_no);

	dev_err(&instance->pdev->dev, "[%d]: Pending OS cmds in FW : \n", instance->host->host_no);
	for (i = 0; i < max_cmd; i++) {
		cmd = instance->cmd_list[i];
		if (!cmd->scmd)
			continue;
		dev_err(&instance->pdev->dev, "[%d]: Frame addr :0x%08lx : ", instance->host->host_no, (unsigned long)cmd->frame_phys_addr);
		if (megasas_cmd_type(cmd->scmd) == READ_WRITE_LDIO) {
			ldio = (struct megasas_io_frame *)cmd->frame;
			mfi_sgl = &ldio->sgl;
			sgcount = ldio->sge_count;
			dev_err(&instance->pdev->dev, "[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x,"
			" lba lo : 0x%x, lba_hi : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",
			instance->host->host_no, cmd->frame_count, ldio->cmd, ldio->target_id,
			le32_to_cpu(ldio->start_lba_lo), le32_to_cpu(ldio->start_lba_hi),
			le32_to_cpu(ldio->sense_buf_phys_addr_lo), sgcount);
		} else {
			pthru = (struct megasas_pthru_frame *) cmd->frame;
			mfi_sgl = &pthru->sgl;
			sgcount = pthru->sge_count;
			dev_err(&instance->pdev->dev, "[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x, "
			"lun : 0x%x, cdb_len : 0x%x, data xfer len : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",
			instance->host->host_no, cmd->frame_count, pthru->cmd, pthru->target_id,
			pthru->lun, pthru->cdb_len, le32_to_cpu(pthru->data_xfer_len),
			le32_to_cpu(pthru->sense_buf_phys_addr_lo), sgcount);
		}
		if (megasas_dbg_lvl & MEGASAS_DBG_LVL) {
			for (n = 0; n < sgcount; n++) {
				if (IS_DMA64)
					dev_err(&instance->pdev->dev, "sgl len : 0x%x, sgl addr : 0x%llx\n",
						le32_to_cpu(mfi_sgl->sge64[n].length),
						le64_to_cpu(mfi_sgl->sge64[n].phys_addr));
				else
					dev_err(&instance->pdev->dev, "sgl len : 0x%x, sgl addr : 0x%x\n",
						le32_to_cpu(mfi_sgl->sge32[n].length),
						le32_to_cpu(mfi_sgl->sge32[n].phys_addr));
			}
		}
	} /*for max_cmd*/
	dev_err(&instance->pdev->dev, "[%d]: Pending Internal cmds in FW : \n", instance->host->host_no);
	for (i = 0; i < max_cmd; i++) {

		cmd = instance->cmd_list[i];

		if (cmd->sync_cmd == 1)
			dev_err(&instance->pdev->dev, "0x%08lx : ", (unsigned long)cmd->frame_phys_addr);
	}
	dev_err(&instance->pdev->dev, "[%d]: Dumping Done\n\n", instance->host->host_no);
}

u32
megasas_build_and_issue_cmd(struct megasas_instance *instance,
			    struct scsi_cmnd *scmd)
{
	struct megasas_cmd *cmd;
	u32 frame_count;

	cmd = megasas_get_cmd(instance);
	if (!cmd)
		return SCSI_MLQUEUE_HOST_BUSY;

	/*
	 * Logical drive command
	 */
	if (megasas_cmd_type(scmd) == READ_WRITE_LDIO)
		frame_count = megasas_build_ldio(instance, scmd, cmd);
	else
		frame_count = megasas_build_dcdb(instance, scmd, cmd);

	if (!frame_count)
		goto out_return_cmd;

	cmd->scmd = scmd;
	scmd->SCp.ptr = (char *)cmd;

	/*
	 * Issue the command to the FW
	 */
	atomic_inc(&instance->fw_outstanding);

	instance->instancet->fire_cmd(instance, cmd->frame_phys_addr,
				cmd->frame_count-1, instance->reg_set);

	return 0;
out_return_cmd:
	megasas_return_cmd(instance, cmd);
	return SCSI_MLQUEUE_HOST_BUSY;
}


/**
 * megasas_queue_command - Queue entry point
 * @scmd: SCSI command to be queued
 * @done: Callback entry point
 */
static int
megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
{
	struct megasas_instance *instance;
	struct MR_PRIV_DEVICE *mr_device_priv_data;

	instance = (struct megasas_instance *)
	    scmd->device->host->hostdata;

	if (instance->unload == 1) {
		scmd->result = DID_NO_CONNECT << 16;
		scmd->scsi_done(scmd);
		return 0;
	}

	if (instance->issuepend_done == 0)
		return SCSI_MLQUEUE_HOST_BUSY;


	/* Check for an mpio path and adjust behavior */
	if (atomic_read(&instance->adprecovery) == MEGASAS_ADPRESET_SM_INFAULT) {
		if (megasas_check_mpio_paths(instance, scmd) ==
		    (DID_RESET << 16)) {
			return SCSI_MLQUEUE_HOST_BUSY;
		} else {
			scmd->result = DID_NO_CONNECT << 16;
			scmd->scsi_done(scmd);
			return 0;
		}
	}

	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
		scmd->result = DID_NO_CONNECT << 16;
		scmd->scsi_done(scmd);
		return 0;
	}

	mr_device_priv_data = scmd->device->hostdata;
	if (!mr_device_priv_data) {
		scmd->result = DID_NO_CONNECT << 16;
		scmd->scsi_done(scmd);
		return 0;
	}

	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
		return SCSI_MLQUEUE_HOST_BUSY;

	if (mr_device_priv_data->tm_busy)
		return SCSI_MLQUEUE_DEVICE_BUSY;


	scmd->result = 0;

	if (MEGASAS_IS_LOGICAL(scmd) &&
	    (scmd->device->id >= instance->fw_supported_vd_count ||
		scmd->device->lun)) {
		scmd->result = DID_BAD_TARGET << 16;
		goto out_done;
	}

	switch (scmd->cmnd[0]) {
	case SYNCHRONIZE_CACHE:
		/*
		 * FW takes care of flush cache on its own
		 * No need to send it down
		 */
		scmd->result = DID_OK << 16;
		goto out_done;
	default:
		break;
	}

	return instance->instancet->build_and_issue_cmd(instance, scmd);

out_done:
	scmd->scsi_done(scmd);
	return 0;
}
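/*
 * Note (added for clarity, not in the original source): as with any SCSI
 * LLD ->queuecommand() implementation, returning 0 above means the command
 * was either handed to the firmware or completed immediately via
 * scsi_done(), while SCSI_MLQUEUE_HOST_BUSY / SCSI_MLQUEUE_DEVICE_BUSY ask
 * the midlayer to requeue the command later without treating it as an error.
 */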
static struct megasas_instance *megasas_lookup_instance(u16 host_no)
{
	int i;

	for (i = 0; i < megasas_mgmt_info.max_index; i++) {

		if ((megasas_mgmt_info.instance[i]) &&
		    (megasas_mgmt_info.instance[i]->host->host_no == host_no))
			return megasas_mgmt_info.instance[i];
	}

	return NULL;
}

/*
 * megasas_update_sdev_properties - Update sdev structure based on controller's FW capabilities
 *
 * @sdev: OS provided scsi device
 *
 * Returns void
 */
void megasas_update_sdev_properties(struct scsi_device *sdev)
{
	u16 pd_index = 0;
	u32 device_id, ld;
	struct megasas_instance *instance;
	struct fusion_context *fusion;
	struct MR_PRIV_DEVICE *mr_device_priv_data;
	struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
	struct MR_LD_RAID *raid;
	struct MR_DRV_RAID_MAP_ALL *local_map_ptr;

	instance = megasas_lookup_instance(sdev->host->host_no);
	fusion = instance->ctrl_context;
	mr_device_priv_data = sdev->hostdata;

	if (!fusion)
		return;

	if (sdev->channel < MEGASAS_MAX_PD_CHANNELS &&
		instance->use_seqnum_jbod_fp) {
		pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
			sdev->id;
		pd_sync = (void *)fusion->pd_seq_sync
				[(instance->pd_seq_map_id - 1) & 1];
		mr_device_priv_data->is_tm_capable =
			pd_sync->seq[pd_index].capability.tmCapable;
	} else {
		device_id = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL)
					+ sdev->id;
		local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
		ld = MR_TargetIdToLdGet(device_id, local_map_ptr);
		raid = MR_LdRaidGet(ld, local_map_ptr);

		if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER)
			blk_queue_update_dma_alignment(sdev->request_queue, 0x7);
		mr_device_priv_data->is_tm_capable =
			raid->capability.tmCapable;
	}
}

static void megasas_set_device_queue_depth(struct scsi_device *sdev)
{
	u16 pd_index = 0;
	int ret = DCMD_FAILED;
	struct megasas_instance *instance;

	instance = megasas_lookup_instance(sdev->host->host_no);

	if (sdev->channel < MEGASAS_MAX_PD_CHANNELS) {
		pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id;

		if (instance->pd_info) {
			mutex_lock(&instance->hba_mutex);
			ret = megasas_get_pd_info(instance, pd_index);
			mutex_unlock(&instance->hba_mutex);
		}

		if (ret != DCMD_SUCCESS)
			return;

		if (instance->pd_list[pd_index].driveState == MR_PD_STATE_SYSTEM) {

			switch (instance->pd_list[pd_index].interface) {
			case SAS_PD:
				scsi_change_queue_depth(sdev, MEGASAS_SAS_QD);
				break;

			case SATA_PD:
				scsi_change_queue_depth(sdev, MEGASAS_SATA_QD);
				break;

			default:
				scsi_change_queue_depth(sdev, MEGASAS_DEFAULT_PD_QD);
			}
		}
	}
}


static int megasas_slave_configure(struct scsi_device *sdev)
{
	u16 pd_index = 0;
	struct megasas_instance *instance;

	instance = megasas_lookup_instance(sdev->host->host_no);
	if (instance->pd_list_not_supported) {
		if (sdev->channel < MEGASAS_MAX_PD_CHANNELS &&
			sdev->type == TYPE_DISK) {
			pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
				sdev->id;
			if (instance->pd_list[pd_index].driveState !=
				MR_PD_STATE_SYSTEM)
				return -ENXIO;
		}
	}
	megasas_set_device_queue_depth(sdev);
	megasas_update_sdev_properties(sdev);

	/*
	 * The RAID firmware may require extended timeouts.
	 */
	blk_queue_rq_timeout(sdev->request_queue,
		scmd_timeout * HZ);

	return 0;
}

static int megasas_slave_alloc(struct scsi_device *sdev)
{
	u16 pd_index = 0;
	struct megasas_instance *instance ;
	struct MR_PRIV_DEVICE *mr_device_priv_data;

	instance = megasas_lookup_instance(sdev->host->host_no);
	if (sdev->channel < MEGASAS_MAX_PD_CHANNELS) {
		/*
		 * Open the OS scan to the SYSTEM PD
		 */
		pd_index =
			(sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
			sdev->id;
		if ((instance->pd_list_not_supported ||
			instance->pd_list[pd_index].driveState ==
			MR_PD_STATE_SYSTEM)) {
			goto scan_target;
		}
		return -ENXIO;
	}

scan_target:
	mr_device_priv_data = kzalloc(sizeof(*mr_device_priv_data),
					GFP_KERNEL);
	if (!mr_device_priv_data)
		return -ENOMEM;
	sdev->hostdata = mr_device_priv_data;
	return 0;
}

static void megasas_slave_destroy(struct scsi_device *sdev)
{
	kfree(sdev->hostdata);
	sdev->hostdata = NULL;
}

/*
 * megasas_complete_outstanding_ioctls - Complete outstanding ioctls after a
 *					kill adapter
 * @instance: Adapter soft state
 *
 */
static void megasas_complete_outstanding_ioctls(struct megasas_instance *instance)
{
	int i;
	struct megasas_cmd *cmd_mfi;
	struct megasas_cmd_fusion *cmd_fusion;
	struct fusion_context *fusion = instance->ctrl_context;

	/* Find all outstanding ioctls */
	if (fusion) {
		for (i = 0; i < instance->max_fw_cmds; i++) {
			cmd_fusion = fusion->cmd_list[i];
			if (cmd_fusion->sync_cmd_idx != (u32)ULONG_MAX) {
				cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx];
				if (cmd_mfi->sync_cmd &&
				    cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT)
					megasas_complete_cmd(instance,
							     cmd_mfi, DID_OK);
			}
		}
	} else {
		for (i = 0; i < instance->max_fw_cmds; i++) {
			cmd_mfi = instance->cmd_list[i];
			if (cmd_mfi->sync_cmd && cmd_mfi->frame->hdr.cmd !=
				MFI_CMD_ABORT)
				megasas_complete_cmd(instance, cmd_mfi, DID_OK);
		}
	}
}


void megaraid_sas_kill_hba(struct megasas_instance *instance)
{
	/* Set critical error to block I/O & ioctls in case caller didn't */
	atomic_set(&instance->adprecovery, MEGASAS_HW_CRITICAL_ERROR);
	/* Wait 1 second to ensure IO or ioctls in build have posted */
	msleep(1000);
	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
	    (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
	    (instance->ctrl_context)) {
		writel(MFI_STOP_ADP, &instance->reg_set->doorbell);
		/* Flush */
		readl(&instance->reg_set->doorbell);
		if (instance->requestorId && instance->peerIsPresent)
			memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
	} else {
		writel(MFI_STOP_ADP,
			&instance->reg_set->inbound_doorbell);
	}
	/* Complete outstanding ioctls when adapter is killed */
	megasas_complete_outstanding_ioctls(instance);
}
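/*
 * Note (added for clarity, not in the original source): when I/O timeouts
 * are detected the driver sets MEGASAS_FW_BUSY and lowers host->can_queue
 * toward the throttlequeuedepth module parameter (16 by default).  The
 * helper below undoes that throttling once roughly five seconds have
 * passed and the outstanding command count has dropped back below the
 * throttled depth.
 */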
Check if queue depth needs to be 1960 * restored to max value 1961 * @instance: Adapter soft state 1962 * 1963 */ 1964 void 1965 megasas_check_and_restore_queue_depth(struct megasas_instance *instance) 1966 { 1967 unsigned long flags; 1968 1969 if (instance->flag & MEGASAS_FW_BUSY 1970 && time_after(jiffies, instance->last_time + 5 * HZ) 1971 && atomic_read(&instance->fw_outstanding) < 1972 instance->throttlequeuedepth + 1) { 1973 1974 spin_lock_irqsave(instance->host->host_lock, flags); 1975 instance->flag &= ~MEGASAS_FW_BUSY; 1976 1977 instance->host->can_queue = instance->cur_can_queue; 1978 spin_unlock_irqrestore(instance->host->host_lock, flags); 1979 } 1980 } 1981 1982 /** 1983 * megasas_complete_cmd_dpc - Returns FW's controller structure 1984 * @instance_addr: Address of adapter soft state 1985 * 1986 * Tasklet to complete cmds 1987 */ 1988 static void megasas_complete_cmd_dpc(unsigned long instance_addr) 1989 { 1990 u32 producer; 1991 u32 consumer; 1992 u32 context; 1993 struct megasas_cmd *cmd; 1994 struct megasas_instance *instance = 1995 (struct megasas_instance *)instance_addr; 1996 unsigned long flags; 1997 1998 /* If we have already declared adapter dead, donot complete cmds */ 1999 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) 2000 return; 2001 2002 spin_lock_irqsave(&instance->completion_lock, flags); 2003 2004 producer = le32_to_cpu(*instance->producer); 2005 consumer = le32_to_cpu(*instance->consumer); 2006 2007 while (consumer != producer) { 2008 context = le32_to_cpu(instance->reply_queue[consumer]); 2009 if (context >= instance->max_fw_cmds) { 2010 dev_err(&instance->pdev->dev, "Unexpected context value %x\n", 2011 context); 2012 BUG(); 2013 } 2014 2015 cmd = instance->cmd_list[context]; 2016 2017 megasas_complete_cmd(instance, cmd, DID_OK); 2018 2019 consumer++; 2020 if (consumer == (instance->max_fw_cmds + 1)) { 2021 consumer = 0; 2022 } 2023 } 2024 2025 *instance->consumer = cpu_to_le32(producer); 2026 2027 spin_unlock_irqrestore(&instance->completion_lock, flags); 2028 2029 /* 2030 * Check if we can restore can_queue 2031 */ 2032 megasas_check_and_restore_queue_depth(instance); 2033 } 2034 2035 /** 2036 * megasas_start_timer - Initializes a timer object 2037 * @instance: Adapter soft state 2038 * @timer: timer object to be initialized 2039 * @fn: timer function 2040 * @interval: time interval between timer function call 2041 * 2042 */ 2043 void megasas_start_timer(struct megasas_instance *instance, 2044 struct timer_list *timer, 2045 void *fn, unsigned long interval) 2046 { 2047 init_timer(timer); 2048 timer->expires = jiffies + interval; 2049 timer->data = (unsigned long)instance; 2050 timer->function = fn; 2051 add_timer(timer); 2052 } 2053 2054 static void 2055 megasas_internal_reset_defer_cmds(struct megasas_instance *instance); 2056 2057 static void 2058 process_fw_state_change_wq(struct work_struct *work); 2059 2060 void megasas_do_ocr(struct megasas_instance *instance) 2061 { 2062 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) || 2063 (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) || 2064 (instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)) { 2065 *instance->consumer = cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN); 2066 } 2067 instance->instancet->disable_intr(instance); 2068 atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT); 2069 instance->issuepend_done = 0; 2070 2071 atomic_set(&instance->fw_outstanding, 0); 2072 megasas_internal_reset_defer_cmds(instance); 2073 
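	/*
	 * Commands were just parked on internal_reset_pending_q by
	 * megasas_internal_reset_defer_cmds(); process_fw_state_change_wq()
	 * resets the adapter, brings the FW back to READY and re-issues them
	 * via megasas_issue_pending_cmds_again().
	 */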
process_fw_state_change_wq(&instance->work_init); 2074 } 2075 2076 static int megasas_get_ld_vf_affiliation_111(struct megasas_instance *instance, 2077 int initial) 2078 { 2079 struct megasas_cmd *cmd; 2080 struct megasas_dcmd_frame *dcmd; 2081 struct MR_LD_VF_AFFILIATION_111 *new_affiliation_111 = NULL; 2082 dma_addr_t new_affiliation_111_h; 2083 int ld, retval = 0; 2084 u8 thisVf; 2085 2086 cmd = megasas_get_cmd(instance); 2087 2088 if (!cmd) { 2089 dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_vf_affiliation_111:" 2090 "Failed to get cmd for scsi%d\n", 2091 instance->host->host_no); 2092 return -ENOMEM; 2093 } 2094 2095 dcmd = &cmd->frame->dcmd; 2096 2097 if (!instance->vf_affiliation_111) { 2098 dev_warn(&instance->pdev->dev, "SR-IOV: Couldn't get LD/VF " 2099 "affiliation for scsi%d\n", instance->host->host_no); 2100 megasas_return_cmd(instance, cmd); 2101 return -ENOMEM; 2102 } 2103 2104 if (initial) 2105 memset(instance->vf_affiliation_111, 0, 2106 sizeof(struct MR_LD_VF_AFFILIATION_111)); 2107 else { 2108 new_affiliation_111 = 2109 pci_alloc_consistent(instance->pdev, 2110 sizeof(struct MR_LD_VF_AFFILIATION_111), 2111 &new_affiliation_111_h); 2112 if (!new_affiliation_111) { 2113 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate " 2114 "memory for new affiliation for scsi%d\n", 2115 instance->host->host_no); 2116 megasas_return_cmd(instance, cmd); 2117 return -ENOMEM; 2118 } 2119 memset(new_affiliation_111, 0, 2120 sizeof(struct MR_LD_VF_AFFILIATION_111)); 2121 } 2122 2123 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 2124 2125 dcmd->cmd = MFI_CMD_DCMD; 2126 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 2127 dcmd->sge_count = 1; 2128 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH); 2129 dcmd->timeout = 0; 2130 dcmd->pad_0 = 0; 2131 dcmd->data_xfer_len = 2132 cpu_to_le32(sizeof(struct MR_LD_VF_AFFILIATION_111)); 2133 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_VF_MAP_GET_ALL_LDS_111); 2134 2135 if (initial) 2136 dcmd->sgl.sge32[0].phys_addr = 2137 cpu_to_le32(instance->vf_affiliation_111_h); 2138 else 2139 dcmd->sgl.sge32[0].phys_addr = 2140 cpu_to_le32(new_affiliation_111_h); 2141 2142 dcmd->sgl.sge32[0].length = cpu_to_le32( 2143 sizeof(struct MR_LD_VF_AFFILIATION_111)); 2144 2145 dev_warn(&instance->pdev->dev, "SR-IOV: Getting LD/VF affiliation for " 2146 "scsi%d\n", instance->host->host_no); 2147 2148 if (megasas_issue_blocked_cmd(instance, cmd, 0) != DCMD_SUCCESS) { 2149 dev_warn(&instance->pdev->dev, "SR-IOV: LD/VF affiliation DCMD" 2150 " failed with status 0x%x for scsi%d\n", 2151 dcmd->cmd_status, instance->host->host_no); 2152 retval = 1; /* Do a scan if we couldn't get affiliation */ 2153 goto out; 2154 } 2155 2156 if (!initial) { 2157 thisVf = new_affiliation_111->thisVf; 2158 for (ld = 0 ; ld < new_affiliation_111->vdCount; ld++) 2159 if (instance->vf_affiliation_111->map[ld].policy[thisVf] != 2160 new_affiliation_111->map[ld].policy[thisVf]) { 2161 dev_warn(&instance->pdev->dev, "SR-IOV: " 2162 "Got new LD/VF affiliation for scsi%d\n", 2163 instance->host->host_no); 2164 memcpy(instance->vf_affiliation_111, 2165 new_affiliation_111, 2166 sizeof(struct MR_LD_VF_AFFILIATION_111)); 2167 retval = 1; 2168 goto out; 2169 } 2170 } 2171 out: 2172 if (new_affiliation_111) { 2173 pci_free_consistent(instance->pdev, 2174 sizeof(struct MR_LD_VF_AFFILIATION_111), 2175 new_affiliation_111, 2176 new_affiliation_111_h); 2177 } 2178 2179 megasas_return_cmd(instance, cmd); 2180 2181 return retval; 2182 } 2183 2184 static int megasas_get_ld_vf_affiliation_12(struct 
megasas_instance *instance, 2185 int initial) 2186 { 2187 struct megasas_cmd *cmd; 2188 struct megasas_dcmd_frame *dcmd; 2189 struct MR_LD_VF_AFFILIATION *new_affiliation = NULL; 2190 struct MR_LD_VF_MAP *newmap = NULL, *savedmap = NULL; 2191 dma_addr_t new_affiliation_h; 2192 int i, j, retval = 0, found = 0, doscan = 0; 2193 u8 thisVf; 2194 2195 cmd = megasas_get_cmd(instance); 2196 2197 if (!cmd) { 2198 dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_vf_affiliation12: " 2199 "Failed to get cmd for scsi%d\n", 2200 instance->host->host_no); 2201 return -ENOMEM; 2202 } 2203 2204 dcmd = &cmd->frame->dcmd; 2205 2206 if (!instance->vf_affiliation) { 2207 dev_warn(&instance->pdev->dev, "SR-IOV: Couldn't get LD/VF " 2208 "affiliation for scsi%d\n", instance->host->host_no); 2209 megasas_return_cmd(instance, cmd); 2210 return -ENOMEM; 2211 } 2212 2213 if (initial) 2214 memset(instance->vf_affiliation, 0, (MAX_LOGICAL_DRIVES + 1) * 2215 sizeof(struct MR_LD_VF_AFFILIATION)); 2216 else { 2217 new_affiliation = 2218 pci_alloc_consistent(instance->pdev, 2219 (MAX_LOGICAL_DRIVES + 1) * 2220 sizeof(struct MR_LD_VF_AFFILIATION), 2221 &new_affiliation_h); 2222 if (!new_affiliation) { 2223 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate " 2224 "memory for new affiliation for scsi%d\n", 2225 instance->host->host_no); 2226 megasas_return_cmd(instance, cmd); 2227 return -ENOMEM; 2228 } 2229 memset(new_affiliation, 0, (MAX_LOGICAL_DRIVES + 1) * 2230 sizeof(struct MR_LD_VF_AFFILIATION)); 2231 } 2232 2233 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 2234 2235 dcmd->cmd = MFI_CMD_DCMD; 2236 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 2237 dcmd->sge_count = 1; 2238 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH); 2239 dcmd->timeout = 0; 2240 dcmd->pad_0 = 0; 2241 dcmd->data_xfer_len = cpu_to_le32((MAX_LOGICAL_DRIVES + 1) * 2242 sizeof(struct MR_LD_VF_AFFILIATION)); 2243 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_VF_MAP_GET_ALL_LDS); 2244 2245 if (initial) 2246 dcmd->sgl.sge32[0].phys_addr = 2247 cpu_to_le32(instance->vf_affiliation_h); 2248 else 2249 dcmd->sgl.sge32[0].phys_addr = 2250 cpu_to_le32(new_affiliation_h); 2251 2252 dcmd->sgl.sge32[0].length = cpu_to_le32((MAX_LOGICAL_DRIVES + 1) * 2253 sizeof(struct MR_LD_VF_AFFILIATION)); 2254 2255 dev_warn(&instance->pdev->dev, "SR-IOV: Getting LD/VF affiliation for " 2256 "scsi%d\n", instance->host->host_no); 2257 2258 2259 if (megasas_issue_blocked_cmd(instance, cmd, 0) != DCMD_SUCCESS) { 2260 dev_warn(&instance->pdev->dev, "SR-IOV: LD/VF affiliation DCMD" 2261 " failed with status 0x%x for scsi%d\n", 2262 dcmd->cmd_status, instance->host->host_no); 2263 retval = 1; /* Do a scan if we couldn't get affiliation */ 2264 goto out; 2265 } 2266 2267 if (!initial) { 2268 if (!new_affiliation->ldCount) { 2269 dev_warn(&instance->pdev->dev, "SR-IOV: Got new LD/VF " 2270 "affiliation for passive path for scsi%d\n", 2271 instance->host->host_no); 2272 retval = 1; 2273 goto out; 2274 } 2275 newmap = new_affiliation->map; 2276 savedmap = instance->vf_affiliation->map; 2277 thisVf = new_affiliation->thisVf; 2278 for (i = 0 ; i < new_affiliation->ldCount; i++) { 2279 found = 0; 2280 for (j = 0; j < instance->vf_affiliation->ldCount; 2281 j++) { 2282 if (newmap->ref.targetId == 2283 savedmap->ref.targetId) { 2284 found = 1; 2285 if (newmap->policy[thisVf] != 2286 savedmap->policy[thisVf]) { 2287 doscan = 1; 2288 goto out; 2289 } 2290 } 2291 savedmap = (struct MR_LD_VF_MAP *) 2292 ((unsigned char *)savedmap + 2293 savedmap->size); 2294 } 2295 if (!found && 
newmap->policy[thisVf] != 2296 MR_LD_ACCESS_HIDDEN) { 2297 doscan = 1; 2298 goto out; 2299 } 2300 newmap = (struct MR_LD_VF_MAP *) 2301 ((unsigned char *)newmap + newmap->size); 2302 } 2303 2304 newmap = new_affiliation->map; 2305 savedmap = instance->vf_affiliation->map; 2306 2307 for (i = 0 ; i < instance->vf_affiliation->ldCount; i++) { 2308 found = 0; 2309 for (j = 0 ; j < new_affiliation->ldCount; j++) { 2310 if (savedmap->ref.targetId == 2311 newmap->ref.targetId) { 2312 found = 1; 2313 if (savedmap->policy[thisVf] != 2314 newmap->policy[thisVf]) { 2315 doscan = 1; 2316 goto out; 2317 } 2318 } 2319 newmap = (struct MR_LD_VF_MAP *) 2320 ((unsigned char *)newmap + 2321 newmap->size); 2322 } 2323 if (!found && savedmap->policy[thisVf] != 2324 MR_LD_ACCESS_HIDDEN) { 2325 doscan = 1; 2326 goto out; 2327 } 2328 savedmap = (struct MR_LD_VF_MAP *) 2329 ((unsigned char *)savedmap + 2330 savedmap->size); 2331 } 2332 } 2333 out: 2334 if (doscan) { 2335 dev_warn(&instance->pdev->dev, "SR-IOV: Got new LD/VF " 2336 "affiliation for scsi%d\n", instance->host->host_no); 2337 memcpy(instance->vf_affiliation, new_affiliation, 2338 new_affiliation->size); 2339 retval = 1; 2340 } 2341 2342 if (new_affiliation) 2343 pci_free_consistent(instance->pdev, 2344 (MAX_LOGICAL_DRIVES + 1) * 2345 sizeof(struct MR_LD_VF_AFFILIATION), 2346 new_affiliation, new_affiliation_h); 2347 megasas_return_cmd(instance, cmd); 2348 2349 return retval; 2350 } 2351 2352 /* This function will get the current SR-IOV LD/VF affiliation */ 2353 static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance, 2354 int initial) 2355 { 2356 int retval; 2357 2358 if (instance->PlasmaFW111) 2359 retval = megasas_get_ld_vf_affiliation_111(instance, initial); 2360 else 2361 retval = megasas_get_ld_vf_affiliation_12(instance, initial); 2362 return retval; 2363 } 2364 2365 /* This function will tell FW to start the SR-IOV heartbeat */ 2366 int megasas_sriov_start_heartbeat(struct megasas_instance *instance, 2367 int initial) 2368 { 2369 struct megasas_cmd *cmd; 2370 struct megasas_dcmd_frame *dcmd; 2371 int retval = 0; 2372 2373 cmd = megasas_get_cmd(instance); 2374 2375 if (!cmd) { 2376 dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_sriov_start_heartbeat: " 2377 "Failed to get cmd for scsi%d\n", 2378 instance->host->host_no); 2379 return -ENOMEM; 2380 } 2381 2382 dcmd = &cmd->frame->dcmd; 2383 2384 if (initial) { 2385 instance->hb_host_mem = 2386 pci_zalloc_consistent(instance->pdev, 2387 sizeof(struct MR_CTRL_HB_HOST_MEM), 2388 &instance->hb_host_mem_h); 2389 if (!instance->hb_host_mem) { 2390 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate" 2391 " memory for heartbeat host memory for scsi%d\n", 2392 instance->host->host_no); 2393 retval = -ENOMEM; 2394 goto out; 2395 } 2396 } 2397 2398 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 2399 2400 dcmd->mbox.s[0] = cpu_to_le16(sizeof(struct MR_CTRL_HB_HOST_MEM)); 2401 dcmd->cmd = MFI_CMD_DCMD; 2402 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 2403 dcmd->sge_count = 1; 2404 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH); 2405 dcmd->timeout = 0; 2406 dcmd->pad_0 = 0; 2407 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_CTRL_HB_HOST_MEM)); 2408 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SHARED_HOST_MEM_ALLOC); 2409 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->hb_host_mem_h); 2410 dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_CTRL_HB_HOST_MEM)); 2411 2412 dev_warn(&instance->pdev->dev, "SR-IOV: Starting heartbeat for scsi%d\n", 2413 
instance->host->host_no); 2414 2415 if (instance->ctrl_context && !instance->mask_interrupts) 2416 retval = megasas_issue_blocked_cmd(instance, cmd, 2417 MEGASAS_ROUTINE_WAIT_TIME_VF); 2418 else 2419 retval = megasas_issue_polled(instance, cmd); 2420 2421 if (retval) { 2422 dev_warn(&instance->pdev->dev, "SR-IOV: MR_DCMD_CTRL_SHARED_HOST" 2423 "_MEM_ALLOC DCMD %s for scsi%d\n", 2424 (dcmd->cmd_status == MFI_STAT_INVALID_STATUS) ? 2425 "timed out" : "failed", instance->host->host_no); 2426 retval = 1; 2427 } 2428 2429 out: 2430 megasas_return_cmd(instance, cmd); 2431 2432 return retval; 2433 } 2434 2435 /* Handler for SR-IOV heartbeat */ 2436 void megasas_sriov_heartbeat_handler(unsigned long instance_addr) 2437 { 2438 struct megasas_instance *instance = 2439 (struct megasas_instance *)instance_addr; 2440 2441 if (instance->hb_host_mem->HB.fwCounter != 2442 instance->hb_host_mem->HB.driverCounter) { 2443 instance->hb_host_mem->HB.driverCounter = 2444 instance->hb_host_mem->HB.fwCounter; 2445 mod_timer(&instance->sriov_heartbeat_timer, 2446 jiffies + MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF); 2447 } else { 2448 dev_warn(&instance->pdev->dev, "SR-IOV: Heartbeat never " 2449 "completed for scsi%d\n", instance->host->host_no); 2450 schedule_work(&instance->work_init); 2451 } 2452 } 2453 2454 /** 2455 * megasas_wait_for_outstanding - Wait for all outstanding cmds 2456 * @instance: Adapter soft state 2457 * 2458 * This function waits for up to MEGASAS_RESET_WAIT_TIME seconds for FW to 2459 * complete all its outstanding commands. Returns error if one or more IOs 2460 * are pending after this time period. It also marks the controller dead. 2461 */ 2462 static int megasas_wait_for_outstanding(struct megasas_instance *instance) 2463 { 2464 int i, sl, outstanding; 2465 u32 reset_index; 2466 u32 wait_time = MEGASAS_RESET_WAIT_TIME; 2467 unsigned long flags; 2468 struct list_head clist_local; 2469 struct megasas_cmd *reset_cmd; 2470 u32 fw_state; 2471 2472 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { 2473 dev_info(&instance->pdev->dev, "%s:%d HBA is killed.\n", 2474 __func__, __LINE__); 2475 return FAILED; 2476 } 2477 2478 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) { 2479 2480 INIT_LIST_HEAD(&clist_local); 2481 spin_lock_irqsave(&instance->hba_lock, flags); 2482 list_splice_init(&instance->internal_reset_pending_q, 2483 &clist_local); 2484 spin_unlock_irqrestore(&instance->hba_lock, flags); 2485 2486 dev_notice(&instance->pdev->dev, "HBA reset wait ...\n"); 2487 for (i = 0; i < wait_time; i++) { 2488 msleep(1000); 2489 if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL) 2490 break; 2491 } 2492 2493 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) { 2494 dev_notice(&instance->pdev->dev, "reset: Stopping HBA.\n"); 2495 atomic_set(&instance->adprecovery, MEGASAS_HW_CRITICAL_ERROR); 2496 return FAILED; 2497 } 2498 2499 reset_index = 0; 2500 while (!list_empty(&clist_local)) { 2501 reset_cmd = list_entry((&clist_local)->next, 2502 struct megasas_cmd, list); 2503 list_del_init(&reset_cmd->list); 2504 if (reset_cmd->scmd) { 2505 reset_cmd->scmd->result = DID_RESET << 16; 2506 dev_notice(&instance->pdev->dev, "%d:%p reset [%02x]\n", 2507 reset_index, reset_cmd, 2508 reset_cmd->scmd->cmnd[0]); 2509 2510 reset_cmd->scmd->scsi_done(reset_cmd->scmd); 2511 megasas_return_cmd(instance, reset_cmd); 2512 } else if (reset_cmd->sync_cmd) { 2513 dev_notice(&instance->pdev->dev, "%p synch cmds" 2514 "reset queue\n", 2515 reset_cmd); 2516 
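				/*
				 * Internal (sync) commands are not failed here: their
				 * driver status is re-armed to MFI_STAT_INVALID_STATUS
				 * and the frame is fired again, so the blocked waiter
				 * sees the completion of the retried command.
				 */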
2517 reset_cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS; 2518 instance->instancet->fire_cmd(instance, 2519 reset_cmd->frame_phys_addr, 2520 0, instance->reg_set); 2521 } else { 2522 dev_notice(&instance->pdev->dev, "%p unexpected " 2523 "cmd in reset list\n", 2524 reset_cmd); 2525 } 2526 reset_index++; 2527 } 2528 2529 return SUCCESS; 2530 } 2531 2532 for (i = 0; i < resetwaittime; i++) { 2533 outstanding = atomic_read(&instance->fw_outstanding); 2534 2535 if (!outstanding) 2536 break; 2537 2538 if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) { 2539 dev_notice(&instance->pdev->dev, "[%2d] waiting for %d " 2540 "commands to complete\n", i, outstanding); 2541 /* 2542 * Call the cmd completion routine. Cmds are 2543 * completed directly without depending on the ISR. 2544 */ 2545 megasas_complete_cmd_dpc((unsigned long)instance); 2546 } 2547 2548 msleep(1000); 2549 } 2550 2551 i = 0; 2552 outstanding = atomic_read(&instance->fw_outstanding); 2553 fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) & MFI_STATE_MASK; 2554 2555 if ((!outstanding && (fw_state == MFI_STATE_OPERATIONAL))) 2556 goto no_outstanding; 2557 2558 if (instance->disableOnlineCtrlReset) 2559 goto kill_hba_and_failed; 2560 do { 2561 if ((fw_state == MFI_STATE_FAULT) || atomic_read(&instance->fw_outstanding)) { 2562 dev_info(&instance->pdev->dev, 2563 "%s:%d waiting_for_outstanding: before issue OCR. FW state = 0x%x, outstanding 0x%x\n", 2564 __func__, __LINE__, fw_state, atomic_read(&instance->fw_outstanding)); 2565 if (i == 3) 2566 goto kill_hba_and_failed; 2567 megasas_do_ocr(instance); 2568 2569 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { 2570 dev_info(&instance->pdev->dev, "%s:%d OCR failed and HBA is killed.\n", 2571 __func__, __LINE__); 2572 return FAILED; 2573 } 2574 dev_info(&instance->pdev->dev, "%s:%d waiting_for_outstanding: after issue OCR.\n", 2575 __func__, __LINE__); 2576 2577 for (sl = 0; sl < 10; sl++) 2578 msleep(500); 2579 2580 outstanding = atomic_read(&instance->fw_outstanding); 2581 2582 fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) & MFI_STATE_MASK; 2583 if ((!outstanding && (fw_state == MFI_STATE_OPERATIONAL))) 2584 goto no_outstanding; 2585 } 2586 i++; 2587 } while (i <= 3); 2588 2589 no_outstanding: 2590 2591 dev_info(&instance->pdev->dev, "%s:%d no more pending commands remain after reset handling.\n", 2592 __func__, __LINE__); 2593 return SUCCESS; 2594 2595 kill_hba_and_failed: 2596 2597 /* Reset not supported, kill adapter */ 2598 dev_info(&instance->pdev->dev, "%s:%d killing adapter scsi%d" 2599 " disableOnlineCtrlReset %d fw_outstanding %d\n", 2600 __func__, __LINE__, instance->host->host_no, instance->disableOnlineCtrlReset, 2601 atomic_read(&instance->fw_outstanding)); 2602 megasas_dump_pending_frames(instance); 2603 megaraid_sas_kill_hba(instance); 2604 2605 return FAILED; 2606 } 2607 2608 /** 2609 * megasas_generic_reset - Generic reset routine 2610 * @scmd: Mid-layer SCSI command 2611 * 2612 * This routine implements a generic reset handler for device, bus and host 2613 * reset requests. Device, bus and host specific reset handlers can use this 2614 * function after they do their specific tasks.
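 *
 * Returns SUCCESS when all outstanding commands have been drained (or
 * re-queued by the internal reset path), FAILED otherwise.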
2615 */ 2616 static int megasas_generic_reset(struct scsi_cmnd *scmd) 2617 { 2618 int ret_val; 2619 struct megasas_instance *instance; 2620 2621 instance = (struct megasas_instance *)scmd->device->host->hostdata; 2622 2623 scmd_printk(KERN_NOTICE, scmd, "megasas: RESET cmd=%x retries=%x\n", 2624 scmd->cmnd[0], scmd->retries); 2625 2626 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { 2627 dev_err(&instance->pdev->dev, "cannot recover from previous reset failures\n"); 2628 return FAILED; 2629 } 2630 2631 ret_val = megasas_wait_for_outstanding(instance); 2632 if (ret_val == SUCCESS) 2633 dev_notice(&instance->pdev->dev, "reset successful\n"); 2634 else 2635 dev_err(&instance->pdev->dev, "failed to do reset\n"); 2636 2637 return ret_val; 2638 } 2639 2640 /** 2641 * megasas_reset_timer - quiesce the adapter if required 2642 * @scmd: scsi cmnd 2643 * 2644 * Sets the FW busy flag and reduces the host->can_queue if the 2645 * cmd has not been completed within the timeout period. 2646 */ 2647 static enum 2648 blk_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd) 2649 { 2650 struct megasas_instance *instance; 2651 unsigned long flags; 2652 2653 if (time_after(jiffies, scmd->jiffies_at_alloc + 2654 (scmd_timeout * 2) * HZ)) { 2655 return BLK_EH_NOT_HANDLED; 2656 } 2657 2658 instance = (struct megasas_instance *)scmd->device->host->hostdata; 2659 if (!(instance->flag & MEGASAS_FW_BUSY)) { 2660 /* FW is busy, throttle IO */ 2661 spin_lock_irqsave(instance->host->host_lock, flags); 2662 2663 instance->host->can_queue = instance->throttlequeuedepth; 2664 instance->last_time = jiffies; 2665 instance->flag |= MEGASAS_FW_BUSY; 2666 2667 spin_unlock_irqrestore(instance->host->host_lock, flags); 2668 } 2669 return BLK_EH_RESET_TIMER; 2670 } 2671 2672 /** 2673 * megasas_reset_bus_host - Bus & host reset handler entry point 2674 */ 2675 static int megasas_reset_bus_host(struct scsi_cmnd *scmd) 2676 { 2677 int ret; 2678 struct megasas_instance *instance; 2679 2680 instance = (struct megasas_instance *)scmd->device->host->hostdata; 2681 2682 /* 2683 * First wait for all commands to complete 2684 */ 2685 if (instance->ctrl_context) 2686 ret = megasas_reset_fusion(scmd->device->host, 1); 2687 else 2688 ret = megasas_generic_reset(scmd); 2689 2690 return ret; 2691 } 2692 2693 /** 2694 * megasas_task_abort - Issues task abort request to firmware 2695 * (supported only for fusion adapters) 2696 * @scmd: SCSI command pointer 2697 */ 2698 static int megasas_task_abort(struct scsi_cmnd *scmd) 2699 { 2700 int ret; 2701 struct megasas_instance *instance; 2702 2703 instance = (struct megasas_instance *)scmd->device->host->hostdata; 2704 2705 if (instance->ctrl_context) 2706 ret = megasas_task_abort_fusion(scmd); 2707 else { 2708 sdev_printk(KERN_NOTICE, scmd->device, "TASK ABORT not supported\n"); 2709 ret = FAILED; 2710 } 2711 2712 return ret; 2713 } 2714 2715 /** 2716 * megasas_reset_target: Issues target reset request to firmware 2717 * (supported only for fusion adapters) 2718 * @scmd: SCSI command pointer 2719 */ 2720 static int megasas_reset_target(struct scsi_cmnd *scmd) 2721 { 2722 int ret; 2723 struct megasas_instance *instance; 2724 2725 instance = (struct megasas_instance *)scmd->device->host->hostdata; 2726 2727 if (instance->ctrl_context) 2728 ret = megasas_reset_target_fusion(scmd); 2729 else { 2730 sdev_printk(KERN_NOTICE, scmd->device, "TARGET RESET not supported\n"); 2731 ret = FAILED; 2732 } 2733 2734 return ret; 2735 } 2736 2737 /** 2738 * megasas_bios_param - Returns 
disk geometry for a disk 2739 * @sdev: device handle 2740 * @bdev: block device 2741 * @capacity: drive capacity 2742 * @geom: geometry parameters 2743 */ 2744 static int 2745 megasas_bios_param(struct scsi_device *sdev, struct block_device *bdev, 2746 sector_t capacity, int geom[]) 2747 { 2748 int heads; 2749 int sectors; 2750 sector_t cylinders; 2751 unsigned long tmp; 2752 2753 /* Default heads (64) & sectors (32) */ 2754 heads = 64; 2755 sectors = 32; 2756 2757 tmp = heads * sectors; 2758 cylinders = capacity; 2759 2760 sector_div(cylinders, tmp); 2761 2762 /* 2763 * Handle extended translation size for logical drives > 1Gb 2764 */ 2765 2766 if (capacity >= 0x200000) { 2767 heads = 255; 2768 sectors = 63; 2769 tmp = heads*sectors; 2770 cylinders = capacity; 2771 sector_div(cylinders, tmp); 2772 } 2773 2774 geom[0] = heads; 2775 geom[1] = sectors; 2776 geom[2] = cylinders; 2777 2778 return 0; 2779 } 2780 2781 static void megasas_aen_polling(struct work_struct *work); 2782 2783 /** 2784 * megasas_service_aen - Processes an event notification 2785 * @instance: Adapter soft state 2786 * @cmd: AEN command completed by the ISR 2787 * 2788 * For AEN, driver sends a command down to FW that is held by the FW till an 2789 * event occurs. When an event of interest occurs, FW completes the command 2790 * that it was previously holding. 2791 * 2792 * This routines sends SIGIO signal to processes that have registered with the 2793 * driver for AEN. 2794 */ 2795 static void 2796 megasas_service_aen(struct megasas_instance *instance, struct megasas_cmd *cmd) 2797 { 2798 unsigned long flags; 2799 2800 /* 2801 * Don't signal app if it is just an aborted previously registered aen 2802 */ 2803 if ((!cmd->abort_aen) && (instance->unload == 0)) { 2804 spin_lock_irqsave(&poll_aen_lock, flags); 2805 megasas_poll_wait_aen = 1; 2806 spin_unlock_irqrestore(&poll_aen_lock, flags); 2807 wake_up(&megasas_poll_wait); 2808 kill_fasync(&megasas_async_queue, SIGIO, POLL_IN); 2809 } 2810 else 2811 cmd->abort_aen = 0; 2812 2813 instance->aen_cmd = NULL; 2814 2815 megasas_return_cmd(instance, cmd); 2816 2817 if ((instance->unload == 0) && 2818 ((instance->issuepend_done == 1))) { 2819 struct megasas_aen_event *ev; 2820 2821 ev = kzalloc(sizeof(*ev), GFP_ATOMIC); 2822 if (!ev) { 2823 dev_err(&instance->pdev->dev, "megasas_service_aen: out of memory\n"); 2824 } else { 2825 ev->instance = instance; 2826 instance->ev = ev; 2827 INIT_DELAYED_WORK(&ev->hotplug_work, 2828 megasas_aen_polling); 2829 schedule_delayed_work(&ev->hotplug_work, 0); 2830 } 2831 } 2832 } 2833 2834 static ssize_t 2835 megasas_fw_crash_buffer_store(struct device *cdev, 2836 struct device_attribute *attr, const char *buf, size_t count) 2837 { 2838 struct Scsi_Host *shost = class_to_shost(cdev); 2839 struct megasas_instance *instance = 2840 (struct megasas_instance *) shost->hostdata; 2841 int val = 0; 2842 unsigned long flags; 2843 2844 if (kstrtoint(buf, 0, &val) != 0) 2845 return -EINVAL; 2846 2847 spin_lock_irqsave(&instance->crashdump_lock, flags); 2848 instance->fw_crash_buffer_offset = val; 2849 spin_unlock_irqrestore(&instance->crashdump_lock, flags); 2850 return strlen(buf); 2851 } 2852 2853 static ssize_t 2854 megasas_fw_crash_buffer_show(struct device *cdev, 2855 struct device_attribute *attr, char *buf) 2856 { 2857 struct Scsi_Host *shost = class_to_shost(cdev); 2858 struct megasas_instance *instance = 2859 (struct megasas_instance *) shost->hostdata; 2860 u32 size; 2861 unsigned long buff_addr; 2862 unsigned long dmachunk = CRASH_DMA_BUF_SIZE; 
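	/*
	 * The crash dump lives in dmachunk-sized DMA buffers; each read returns
	 * at most one page from the chunk containing fw_crash_buffer_offset.
	 * As a rough example (assuming CRASH_DMA_BUF_SIZE is 1 MB), an offset
	 * of 0x180000 would be served from crash_buf[1] at intra-chunk offset
	 * 0x80000.
	 */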
2863 unsigned long src_addr; 2864 unsigned long flags; 2865 u32 buff_offset; 2866 2867 spin_lock_irqsave(&instance->crashdump_lock, flags); 2868 buff_offset = instance->fw_crash_buffer_offset; 2869 if (!instance->crash_dump_buf && 2870 !((instance->fw_crash_state == AVAILABLE) || 2871 (instance->fw_crash_state == COPYING))) { 2872 dev_err(&instance->pdev->dev, 2873 "Firmware crash dump is not available\n"); 2874 spin_unlock_irqrestore(&instance->crashdump_lock, flags); 2875 return -EINVAL; 2876 } 2877 2878 buff_addr = (unsigned long) buf; 2879 2880 if (buff_offset > (instance->fw_crash_buffer_size * dmachunk)) { 2881 dev_err(&instance->pdev->dev, 2882 "Firmware crash dump offset is out of range\n"); 2883 spin_unlock_irqrestore(&instance->crashdump_lock, flags); 2884 return 0; 2885 } 2886 2887 size = (instance->fw_crash_buffer_size * dmachunk) - buff_offset; 2888 size = (size >= PAGE_SIZE) ? (PAGE_SIZE - 1) : size; 2889 2890 src_addr = (unsigned long)instance->crash_buf[buff_offset / dmachunk] + 2891 (buff_offset % dmachunk); 2892 memcpy(buf, (void *)src_addr, size); 2893 spin_unlock_irqrestore(&instance->crashdump_lock, flags); 2894 2895 return size; 2896 } 2897 2898 static ssize_t 2899 megasas_fw_crash_buffer_size_show(struct device *cdev, 2900 struct device_attribute *attr, char *buf) 2901 { 2902 struct Scsi_Host *shost = class_to_shost(cdev); 2903 struct megasas_instance *instance = 2904 (struct megasas_instance *) shost->hostdata; 2905 2906 return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long) 2907 ((instance->fw_crash_buffer_size) * 1024 * 1024)/PAGE_SIZE); 2908 } 2909 2910 static ssize_t 2911 megasas_fw_crash_state_store(struct device *cdev, 2912 struct device_attribute *attr, const char *buf, size_t count) 2913 { 2914 struct Scsi_Host *shost = class_to_shost(cdev); 2915 struct megasas_instance *instance = 2916 (struct megasas_instance *) shost->hostdata; 2917 int val = 0; 2918 unsigned long flags; 2919 2920 if (kstrtoint(buf, 0, &val) != 0) 2921 return -EINVAL; 2922 2923 if ((val <= AVAILABLE || val > COPY_ERROR)) { 2924 dev_err(&instance->pdev->dev, "application updates invalid " 2925 "firmware crash state\n"); 2926 return -EINVAL; 2927 } 2928 2929 instance->fw_crash_state = val; 2930 2931 if ((val == COPIED) || (val == COPY_ERROR)) { 2932 spin_lock_irqsave(&instance->crashdump_lock, flags); 2933 megasas_free_host_crash_buffer(instance); 2934 spin_unlock_irqrestore(&instance->crashdump_lock, flags); 2935 if (val == COPY_ERROR) 2936 dev_info(&instance->pdev->dev, "application failed to " 2937 "copy Firmware crash dump\n"); 2938 else 2939 dev_info(&instance->pdev->dev, "Firmware crash dump " 2940 "copied successfully\n"); 2941 } 2942 return strlen(buf); 2943 } 2944 2945 static ssize_t 2946 megasas_fw_crash_state_show(struct device *cdev, 2947 struct device_attribute *attr, char *buf) 2948 { 2949 struct Scsi_Host *shost = class_to_shost(cdev); 2950 struct megasas_instance *instance = 2951 (struct megasas_instance *) shost->hostdata; 2952 2953 return snprintf(buf, PAGE_SIZE, "%d\n", instance->fw_crash_state); 2954 } 2955 2956 static ssize_t 2957 megasas_page_size_show(struct device *cdev, 2958 struct device_attribute *attr, char *buf) 2959 { 2960 return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long)PAGE_SIZE - 1); 2961 } 2962 2963 static ssize_t 2964 megasas_ldio_outstanding_show(struct device *cdev, struct device_attribute *attr, 2965 char *buf) 2966 { 2967 struct Scsi_Host *shost = class_to_shost(cdev); 2968 struct megasas_instance *instance = (struct megasas_instance 
*)shost->hostdata; 2969 2970 return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&instance->ldio_outstanding)); 2971 } 2972 2973 static DEVICE_ATTR(fw_crash_buffer, S_IRUGO | S_IWUSR, 2974 megasas_fw_crash_buffer_show, megasas_fw_crash_buffer_store); 2975 static DEVICE_ATTR(fw_crash_buffer_size, S_IRUGO, 2976 megasas_fw_crash_buffer_size_show, NULL); 2977 static DEVICE_ATTR(fw_crash_state, S_IRUGO | S_IWUSR, 2978 megasas_fw_crash_state_show, megasas_fw_crash_state_store); 2979 static DEVICE_ATTR(page_size, S_IRUGO, 2980 megasas_page_size_show, NULL); 2981 static DEVICE_ATTR(ldio_outstanding, S_IRUGO, 2982 megasas_ldio_outstanding_show, NULL); 2983 2984 struct device_attribute *megaraid_host_attrs[] = { 2985 &dev_attr_fw_crash_buffer_size, 2986 &dev_attr_fw_crash_buffer, 2987 &dev_attr_fw_crash_state, 2988 &dev_attr_page_size, 2989 &dev_attr_ldio_outstanding, 2990 NULL, 2991 }; 2992 2993 /* 2994 * Scsi host template for megaraid_sas driver 2995 */ 2996 static struct scsi_host_template megasas_template = { 2997 2998 .module = THIS_MODULE, 2999 .name = "Avago SAS based MegaRAID driver", 3000 .proc_name = "megaraid_sas", 3001 .slave_configure = megasas_slave_configure, 3002 .slave_alloc = megasas_slave_alloc, 3003 .slave_destroy = megasas_slave_destroy, 3004 .queuecommand = megasas_queue_command, 3005 .eh_target_reset_handler = megasas_reset_target, 3006 .eh_abort_handler = megasas_task_abort, 3007 .eh_host_reset_handler = megasas_reset_bus_host, 3008 .eh_timed_out = megasas_reset_timer, 3009 .shost_attrs = megaraid_host_attrs, 3010 .bios_param = megasas_bios_param, 3011 .use_clustering = ENABLE_CLUSTERING, 3012 .change_queue_depth = scsi_change_queue_depth, 3013 .no_write_same = 1, 3014 }; 3015 3016 /** 3017 * megasas_complete_int_cmd - Completes an internal command 3018 * @instance: Adapter soft state 3019 * @cmd: Command to be completed 3020 * 3021 * The megasas_issue_blocked_cmd() function waits for a command to complete 3022 * after it issues a command. This function wakes up that waiting routine by 3023 * calling wake_up() on the wait queue. 3024 */ 3025 static void 3026 megasas_complete_int_cmd(struct megasas_instance *instance, 3027 struct megasas_cmd *cmd) 3028 { 3029 cmd->cmd_status_drv = cmd->frame->io.cmd_status; 3030 wake_up(&instance->int_cmd_wait_q); 3031 } 3032 3033 /** 3034 * megasas_complete_abort - Completes aborting a command 3035 * @instance: Adapter soft state 3036 * @cmd: Cmd that was issued to abort another cmd 3037 * 3038 * The megasas_issue_blocked_abort_cmd() function waits on abort_cmd_wait_q 3039 * after it issues an abort on a previously issued command. This function 3040 * wakes up all functions waiting on the same wait queue. 3041 */ 3042 static void 3043 megasas_complete_abort(struct megasas_instance *instance, 3044 struct megasas_cmd *cmd) 3045 { 3046 if (cmd->sync_cmd) { 3047 cmd->sync_cmd = 0; 3048 cmd->cmd_status_drv = 0; 3049 wake_up(&instance->abort_cmd_wait_q); 3050 } 3051 } 3052 3053 /** 3054 * megasas_complete_cmd - Completes a command 3055 * @instance: Adapter soft state 3056 * @cmd: Command to be completed 3057 * @alt_status: If non-zero, use this value as status to 3058 * SCSI mid-layer instead of the value returned 3059 * by the FW. 
This should be used if caller wants 3060 * an alternate status (as in the case of aborted 3061 * commands) 3062 */ 3063 void 3064 megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd, 3065 u8 alt_status) 3066 { 3067 int exception = 0; 3068 struct megasas_header *hdr = &cmd->frame->hdr; 3069 unsigned long flags; 3070 struct fusion_context *fusion = instance->ctrl_context; 3071 u32 opcode, status; 3072 3073 /* flag for the retry reset */ 3074 cmd->retry_for_fw_reset = 0; 3075 3076 if (cmd->scmd) 3077 cmd->scmd->SCp.ptr = NULL; 3078 3079 switch (hdr->cmd) { 3080 case MFI_CMD_INVALID: 3081 /* Some older 1068 controller FW may keep a pended 3082 MR_DCMD_CTRL_EVENT_GET_INFO left over from the main kernel 3083 when booting the kdump kernel. Ignore this command to 3084 prevent a kernel panic on shutdown of the kdump kernel. */ 3085 dev_warn(&instance->pdev->dev, "MFI_CMD_INVALID command " 3086 "completed\n"); 3087 dev_warn(&instance->pdev->dev, "If you have a controller " 3088 "other than PERC5, please upgrade your firmware\n"); 3089 break; 3090 case MFI_CMD_PD_SCSI_IO: 3091 case MFI_CMD_LD_SCSI_IO: 3092 3093 /* 3094 * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been 3095 * issued either through an IO path or an IOCTL path. If it 3096 * was via IOCTL, we will send it to internal completion. 3097 */ 3098 if (cmd->sync_cmd) { 3099 cmd->sync_cmd = 0; 3100 megasas_complete_int_cmd(instance, cmd); 3101 break; 3102 } 3103 3104 case MFI_CMD_LD_READ: 3105 case MFI_CMD_LD_WRITE: 3106 3107 if (alt_status) { 3108 cmd->scmd->result = alt_status << 16; 3109 exception = 1; 3110 } 3111 3112 if (exception) { 3113 3114 atomic_dec(&instance->fw_outstanding); 3115 3116 scsi_dma_unmap(cmd->scmd); 3117 cmd->scmd->scsi_done(cmd->scmd); 3118 megasas_return_cmd(instance, cmd); 3119 3120 break; 3121 } 3122 3123 switch (hdr->cmd_status) { 3124 3125 case MFI_STAT_OK: 3126 cmd->scmd->result = DID_OK << 16; 3127 break; 3128 3129 case MFI_STAT_SCSI_IO_FAILED: 3130 case MFI_STAT_LD_INIT_IN_PROGRESS: 3131 cmd->scmd->result = 3132 (DID_ERROR << 16) | hdr->scsi_status; 3133 break; 3134 3135 case MFI_STAT_SCSI_DONE_WITH_ERROR: 3136 3137 cmd->scmd->result = (DID_OK << 16) | hdr->scsi_status; 3138 3139 if (hdr->scsi_status == SAM_STAT_CHECK_CONDITION) { 3140 memset(cmd->scmd->sense_buffer, 0, 3141 SCSI_SENSE_BUFFERSIZE); 3142 memcpy(cmd->scmd->sense_buffer, cmd->sense, 3143 hdr->sense_len); 3144 3145 cmd->scmd->result |= DRIVER_SENSE << 24; 3146 } 3147 3148 break; 3149 3150 case MFI_STAT_LD_OFFLINE: 3151 case MFI_STAT_DEVICE_NOT_FOUND: 3152 cmd->scmd->result = DID_BAD_TARGET << 16; 3153 break; 3154 3155 default: 3156 dev_printk(KERN_DEBUG, &instance->pdev->dev, "MFI FW status %#x\n", 3157 hdr->cmd_status); 3158 cmd->scmd->result = DID_ERROR << 16; 3159 break; 3160 } 3161 3162 atomic_dec(&instance->fw_outstanding); 3163 3164 scsi_dma_unmap(cmd->scmd); 3165 cmd->scmd->scsi_done(cmd->scmd); 3166 megasas_return_cmd(instance, cmd); 3167 3168 break; 3169 3170 case MFI_CMD_SMP: 3171 case MFI_CMD_STP: 3172 case MFI_CMD_DCMD: 3173 opcode = le32_to_cpu(cmd->frame->dcmd.opcode); 3174 /* Check for LD map update */ 3175 if ((opcode == MR_DCMD_LD_MAP_GET_INFO) 3176 && (cmd->frame->dcmd.mbox.b[1] == 1)) { 3177 fusion->fast_path_io = 0; 3178 spin_lock_irqsave(instance->host->host_lock, flags); 3179 instance->map_update_cmd = NULL; 3180 if (cmd->frame->hdr.cmd_status != 0) { 3181 if (cmd->frame->hdr.cmd_status != 3182 MFI_STAT_NOT_FOUND) 3183 dev_warn(&instance->pdev->dev, "map syncfailed, status = 0x%x\n", 3184 
cmd->frame->hdr.cmd_status); 3185 else { 3186 megasas_return_cmd(instance, cmd); 3187 spin_unlock_irqrestore( 3188 instance->host->host_lock, 3189 flags); 3190 break; 3191 } 3192 } else 3193 instance->map_id++; 3194 megasas_return_cmd(instance, cmd); 3195 3196 /* 3197 * Set fast path IO to ZERO. 3198 * Validate Map will set proper value. 3199 * Meanwhile all IOs will go as LD IO. 3200 */ 3201 if (MR_ValidateMapInfo(instance)) 3202 fusion->fast_path_io = 1; 3203 else 3204 fusion->fast_path_io = 0; 3205 megasas_sync_map_info(instance); 3206 spin_unlock_irqrestore(instance->host->host_lock, 3207 flags); 3208 break; 3209 } 3210 if (opcode == MR_DCMD_CTRL_EVENT_GET_INFO || 3211 opcode == MR_DCMD_CTRL_EVENT_GET) { 3212 spin_lock_irqsave(&poll_aen_lock, flags); 3213 megasas_poll_wait_aen = 0; 3214 spin_unlock_irqrestore(&poll_aen_lock, flags); 3215 } 3216 3217 /* FW has an updated PD sequence */ 3218 if ((opcode == MR_DCMD_SYSTEM_PD_MAP_GET_INFO) && 3219 (cmd->frame->dcmd.mbox.b[0] == 1)) { 3220 3221 spin_lock_irqsave(instance->host->host_lock, flags); 3222 status = cmd->frame->hdr.cmd_status; 3223 instance->jbod_seq_cmd = NULL; 3224 megasas_return_cmd(instance, cmd); 3225 3226 if (status == MFI_STAT_OK) { 3227 instance->pd_seq_map_id++; 3228 /* Re-register a pd sync seq num cmd */ 3229 if (megasas_sync_pd_seq_num(instance, true)) 3230 instance->use_seqnum_jbod_fp = false; 3231 } else 3232 instance->use_seqnum_jbod_fp = false; 3233 3234 spin_unlock_irqrestore(instance->host->host_lock, flags); 3235 break; 3236 } 3237 3238 /* 3239 * See if got an event notification 3240 */ 3241 if (opcode == MR_DCMD_CTRL_EVENT_WAIT) 3242 megasas_service_aen(instance, cmd); 3243 else 3244 megasas_complete_int_cmd(instance, cmd); 3245 3246 break; 3247 3248 case MFI_CMD_ABORT: 3249 /* 3250 * Cmd issued to abort another cmd returned 3251 */ 3252 megasas_complete_abort(instance, cmd); 3253 break; 3254 3255 default: 3256 dev_info(&instance->pdev->dev, "Unknown command completed! [0x%X]\n", 3257 hdr->cmd); 3258 break; 3259 } 3260 } 3261 3262 /** 3263 * megasas_issue_pending_cmds_again - issue all pending cmds 3264 * in FW again because of the fw reset 3265 * @instance: Adapter soft state 3266 */ 3267 static inline void 3268 megasas_issue_pending_cmds_again(struct megasas_instance *instance) 3269 { 3270 struct megasas_cmd *cmd; 3271 struct list_head clist_local; 3272 union megasas_evt_class_locale class_locale; 3273 unsigned long flags; 3274 u32 seq_num; 3275 3276 INIT_LIST_HEAD(&clist_local); 3277 spin_lock_irqsave(&instance->hba_lock, flags); 3278 list_splice_init(&instance->internal_reset_pending_q, &clist_local); 3279 spin_unlock_irqrestore(&instance->hba_lock, flags); 3280 3281 while (!list_empty(&clist_local)) { 3282 cmd = list_entry((&clist_local)->next, 3283 struct megasas_cmd, list); 3284 list_del_init(&cmd->list); 3285 3286 if (cmd->sync_cmd || cmd->scmd) { 3287 dev_notice(&instance->pdev->dev, "command %p, %p:%d" 3288 "detected to be pending while HBA reset\n", 3289 cmd, cmd->scmd, cmd->sync_cmd); 3290 3291 cmd->retry_for_fw_reset++; 3292 3293 if (cmd->retry_for_fw_reset == 3) { 3294 dev_notice(&instance->pdev->dev, "cmd %p, %p:%d" 3295 "was tried multiple times during reset." 
3296 "Shutting down the HBA\n", 3297 cmd, cmd->scmd, cmd->sync_cmd); 3298 instance->instancet->disable_intr(instance); 3299 atomic_set(&instance->fw_reset_no_pci_access, 1); 3300 megaraid_sas_kill_hba(instance); 3301 return; 3302 } 3303 } 3304 3305 if (cmd->sync_cmd == 1) { 3306 if (cmd->scmd) { 3307 dev_notice(&instance->pdev->dev, "unexpected" 3308 "cmd attached to internal command!\n"); 3309 } 3310 dev_notice(&instance->pdev->dev, "%p synchronous cmd" 3311 "on the internal reset queue," 3312 "issue it again.\n", cmd); 3313 cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS; 3314 instance->instancet->fire_cmd(instance, 3315 cmd->frame_phys_addr, 3316 0, instance->reg_set); 3317 } else if (cmd->scmd) { 3318 dev_notice(&instance->pdev->dev, "%p scsi cmd [%02x]" 3319 "detected on the internal queue, issue again.\n", 3320 cmd, cmd->scmd->cmnd[0]); 3321 3322 atomic_inc(&instance->fw_outstanding); 3323 instance->instancet->fire_cmd(instance, 3324 cmd->frame_phys_addr, 3325 cmd->frame_count-1, instance->reg_set); 3326 } else { 3327 dev_notice(&instance->pdev->dev, "%p unexpected cmd on the" 3328 "internal reset defer list while re-issue!!\n", 3329 cmd); 3330 } 3331 } 3332 3333 if (instance->aen_cmd) { 3334 dev_notice(&instance->pdev->dev, "aen_cmd in def process\n"); 3335 megasas_return_cmd(instance, instance->aen_cmd); 3336 3337 instance->aen_cmd = NULL; 3338 } 3339 3340 /* 3341 * Initiate AEN (Asynchronous Event Notification) 3342 */ 3343 seq_num = instance->last_seq_num; 3344 class_locale.members.reserved = 0; 3345 class_locale.members.locale = MR_EVT_LOCALE_ALL; 3346 class_locale.members.class = MR_EVT_CLASS_DEBUG; 3347 3348 megasas_register_aen(instance, seq_num, class_locale.word); 3349 } 3350 3351 /** 3352 * Move the internal reset pending commands to a deferred queue. 3353 * 3354 * We move the commands pending at internal reset time to a 3355 * pending queue. This queue would be flushed after successful 3356 * completion of the internal reset sequence. if the internal reset 3357 * did not complete in time, the kernel reset handler would flush 3358 * these commands. 
3359 */ 3360 static void 3361 megasas_internal_reset_defer_cmds(struct megasas_instance *instance) 3362 { 3363 struct megasas_cmd *cmd; 3364 int i; 3365 u32 max_cmd = instance->max_fw_cmds; 3366 u32 defer_index; 3367 unsigned long flags; 3368 3369 defer_index = 0; 3370 spin_lock_irqsave(&instance->mfi_pool_lock, flags); 3371 for (i = 0; i < max_cmd; i++) { 3372 cmd = instance->cmd_list[i]; 3373 if (cmd->sync_cmd == 1 || cmd->scmd) { 3374 dev_notice(&instance->pdev->dev, "moving cmd[%d]:%p:%d:%p " 3375 "on the defer queue as internal\n", 3376 defer_index, cmd, cmd->sync_cmd, cmd->scmd); 3377 3378 if (!list_empty(&cmd->list)) { 3379 dev_notice(&instance->pdev->dev, "ERROR while" 3380 " moving this cmd:%p, %d %p, it was " 3381 "discovered on some list?\n", 3382 cmd, cmd->sync_cmd, cmd->scmd); 3383 3384 list_del_init(&cmd->list); 3385 } 3386 defer_index++; 3387 list_add_tail(&cmd->list, 3388 &instance->internal_reset_pending_q); 3389 } 3390 } 3391 spin_unlock_irqrestore(&instance->mfi_pool_lock, flags); 3392 } 3393 3394 3395 static void 3396 process_fw_state_change_wq(struct work_struct *work) 3397 { 3398 struct megasas_instance *instance = 3399 container_of(work, struct megasas_instance, work_init); 3400 u32 wait; 3401 unsigned long flags; 3402 3403 if (atomic_read(&instance->adprecovery) != MEGASAS_ADPRESET_SM_INFAULT) { 3404 dev_notice(&instance->pdev->dev, "error, recovery st %x\n", 3405 atomic_read(&instance->adprecovery)); 3406 return; 3407 } 3408 3409 if (atomic_read(&instance->adprecovery) == MEGASAS_ADPRESET_SM_INFAULT) { 3410 dev_notice(&instance->pdev->dev, "FW detected to be in fault " 3411 "state, restarting it...\n"); 3412 3413 instance->instancet->disable_intr(instance); 3414 atomic_set(&instance->fw_outstanding, 0); 3415 3416 atomic_set(&instance->fw_reset_no_pci_access, 1); 3417 instance->instancet->adp_reset(instance, instance->reg_set); 3418 atomic_set(&instance->fw_reset_no_pci_access, 0); 3419 3420 dev_notice(&instance->pdev->dev, "FW restarted successfully, " 3421 "initiating next stage...\n"); 3422 3423 dev_notice(&instance->pdev->dev, "HBA recovery state machine, " 3424 "state 2 starting...\n"); 3425 3426 /* wait about 30 seconds before starting the second init */ 3427 for (wait = 0; wait < 30; wait++) { 3428 msleep(1000); 3429 } 3430 3431 if (megasas_transition_to_ready(instance, 1)) { 3432 dev_notice(&instance->pdev->dev, "adapter not ready\n"); 3433 3434 atomic_set(&instance->fw_reset_no_pci_access, 1); 3435 megaraid_sas_kill_hba(instance); 3436 return; 3437 } 3438 3439 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) || 3440 (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) || 3441 (instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR) 3442 ) { 3443 *instance->consumer = *instance->producer; 3444 } else { 3445 *instance->consumer = 0; 3446 *instance->producer = 0; 3447 } 3448 3449 megasas_issue_init_mfi(instance); 3450 3451 spin_lock_irqsave(&instance->hba_lock, flags); 3452 atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL); 3453 spin_unlock_irqrestore(&instance->hba_lock, flags); 3454 instance->instancet->enable_intr(instance); 3455 3456 megasas_issue_pending_cmds_again(instance); 3457 instance->issuepend_done = 1; 3458 } 3459 } 3460 3461 /** 3462 * megasas_deplete_reply_queue - Processes all completed commands 3463 * @instance: Adapter soft state 3464 * @alt_status: Alternate status to be returned to 3465 * SCSI mid-layer instead of the status 3466 * returned by the FW 3467 * Note: this must be called with hba lock held 3468 */ 3469 static
int 3470 megasas_deplete_reply_queue(struct megasas_instance *instance, 3471 u8 alt_status) 3472 { 3473 u32 mfiStatus; 3474 u32 fw_state; 3475 3476 if ((mfiStatus = instance->instancet->check_reset(instance, 3477 instance->reg_set)) == 1) { 3478 return IRQ_HANDLED; 3479 } 3480 3481 if ((mfiStatus = instance->instancet->clear_intr( 3482 instance->reg_set) 3483 ) == 0) { 3484 /* Hardware may not set outbound_intr_status in MSI-X mode */ 3485 if (!instance->msix_vectors) 3486 return IRQ_NONE; 3487 } 3488 3489 instance->mfiStatus = mfiStatus; 3490 3491 if ((mfiStatus & MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE)) { 3492 fw_state = instance->instancet->read_fw_status_reg( 3493 instance->reg_set) & MFI_STATE_MASK; 3494 3495 if (fw_state != MFI_STATE_FAULT) { 3496 dev_notice(&instance->pdev->dev, "fw state:%x\n", 3497 fw_state); 3498 } 3499 3500 if ((fw_state == MFI_STATE_FAULT) && 3501 (instance->disableOnlineCtrlReset == 0)) { 3502 dev_notice(&instance->pdev->dev, "wait adp restart\n"); 3503 3504 if ((instance->pdev->device == 3505 PCI_DEVICE_ID_LSI_SAS1064R) || 3506 (instance->pdev->device == 3507 PCI_DEVICE_ID_DELL_PERC5) || 3508 (instance->pdev->device == 3509 PCI_DEVICE_ID_LSI_VERDE_ZCR)) { 3510 3511 *instance->consumer = 3512 cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN); 3513 } 3514 3515 3516 instance->instancet->disable_intr(instance); 3517 atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT); 3518 instance->issuepend_done = 0; 3519 3520 atomic_set(&instance->fw_outstanding, 0); 3521 megasas_internal_reset_defer_cmds(instance); 3522 3523 dev_notice(&instance->pdev->dev, "fwState=%x, stage:%d\n", 3524 fw_state, atomic_read(&instance->adprecovery)); 3525 3526 schedule_work(&instance->work_init); 3527 return IRQ_HANDLED; 3528 3529 } else { 3530 dev_notice(&instance->pdev->dev, "fwstate:%x, dis_OCR=%x\n", 3531 fw_state, instance->disableOnlineCtrlReset); 3532 } 3533 } 3534 3535 tasklet_schedule(&instance->isr_tasklet); 3536 return IRQ_HANDLED; 3537 } 3538 /** 3539 * megasas_isr - isr entry point 3540 */ 3541 static irqreturn_t megasas_isr(int irq, void *devp) 3542 { 3543 struct megasas_irq_context *irq_context = devp; 3544 struct megasas_instance *instance = irq_context->instance; 3545 unsigned long flags; 3546 irqreturn_t rc; 3547 3548 if (atomic_read(&instance->fw_reset_no_pci_access)) 3549 return IRQ_HANDLED; 3550 3551 spin_lock_irqsave(&instance->hba_lock, flags); 3552 rc = megasas_deplete_reply_queue(instance, DID_OK); 3553 spin_unlock_irqrestore(&instance->hba_lock, flags); 3554 3555 return rc; 3556 } 3557 3558 /** 3559 * megasas_transition_to_ready - Move the FW to READY state 3560 * @instance: Adapter soft state 3561 * 3562 * During the initialization, FW passes can potentially be in any one of 3563 * several possible states. If the FW in operational, waiting-for-handshake 3564 * states, driver must take steps to bring it to ready state. Otherwise, it 3565 * has to wait for the ready state. 
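 *
 * Returns 0 once the FW reaches MFI_STATE_READY, or -ENODEV if the FW is
 * in FAULT (and ocr is not set), reports an unknown state, or does not
 * leave the current state within the per-state max_wait period.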
3566 */ 3567 int 3568 megasas_transition_to_ready(struct megasas_instance *instance, int ocr) 3569 { 3570 int i; 3571 u8 max_wait; 3572 u32 fw_state; 3573 u32 cur_state; 3574 u32 abs_state, curr_abs_state; 3575 3576 abs_state = instance->instancet->read_fw_status_reg(instance->reg_set); 3577 fw_state = abs_state & MFI_STATE_MASK; 3578 3579 if (fw_state != MFI_STATE_READY) 3580 dev_info(&instance->pdev->dev, "Waiting for FW to come to ready" 3581 " state\n"); 3582 3583 while (fw_state != MFI_STATE_READY) { 3584 3585 switch (fw_state) { 3586 3587 case MFI_STATE_FAULT: 3588 dev_printk(KERN_DEBUG, &instance->pdev->dev, "FW in FAULT state!!\n"); 3589 if (ocr) { 3590 max_wait = MEGASAS_RESET_WAIT_TIME; 3591 cur_state = MFI_STATE_FAULT; 3592 break; 3593 } else 3594 return -ENODEV; 3595 3596 case MFI_STATE_WAIT_HANDSHAKE: 3597 /* 3598 * Set the CLR bit in inbound doorbell 3599 */ 3600 if ((instance->pdev->device == 3601 PCI_DEVICE_ID_LSI_SAS0073SKINNY) || 3602 (instance->pdev->device == 3603 PCI_DEVICE_ID_LSI_SAS0071SKINNY) || 3604 (instance->ctrl_context)) 3605 writel( 3606 MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG, 3607 &instance->reg_set->doorbell); 3608 else 3609 writel( 3610 MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG, 3611 &instance->reg_set->inbound_doorbell); 3612 3613 max_wait = MEGASAS_RESET_WAIT_TIME; 3614 cur_state = MFI_STATE_WAIT_HANDSHAKE; 3615 break; 3616 3617 case MFI_STATE_BOOT_MESSAGE_PENDING: 3618 if ((instance->pdev->device == 3619 PCI_DEVICE_ID_LSI_SAS0073SKINNY) || 3620 (instance->pdev->device == 3621 PCI_DEVICE_ID_LSI_SAS0071SKINNY) || 3622 (instance->ctrl_context)) 3623 writel(MFI_INIT_HOTPLUG, 3624 &instance->reg_set->doorbell); 3625 else 3626 writel(MFI_INIT_HOTPLUG, 3627 &instance->reg_set->inbound_doorbell); 3628 3629 max_wait = MEGASAS_RESET_WAIT_TIME; 3630 cur_state = MFI_STATE_BOOT_MESSAGE_PENDING; 3631 break; 3632 3633 case MFI_STATE_OPERATIONAL: 3634 /* 3635 * Bring it to READY state; assuming max wait 10 secs 3636 */ 3637 instance->instancet->disable_intr(instance); 3638 if ((instance->pdev->device == 3639 PCI_DEVICE_ID_LSI_SAS0073SKINNY) || 3640 (instance->pdev->device == 3641 PCI_DEVICE_ID_LSI_SAS0071SKINNY) || 3642 (instance->ctrl_context)) { 3643 writel(MFI_RESET_FLAGS, 3644 &instance->reg_set->doorbell); 3645 3646 if (instance->ctrl_context) { 3647 for (i = 0; i < (10 * 1000); i += 20) { 3648 if (readl( 3649 &instance-> 3650 reg_set-> 3651 doorbell) & 1) 3652 msleep(20); 3653 else 3654 break; 3655 } 3656 } 3657 } else 3658 writel(MFI_RESET_FLAGS, 3659 &instance->reg_set->inbound_doorbell); 3660 3661 max_wait = MEGASAS_RESET_WAIT_TIME; 3662 cur_state = MFI_STATE_OPERATIONAL; 3663 break; 3664 3665 case MFI_STATE_UNDEFINED: 3666 /* 3667 * This state should not last for more than 2 seconds 3668 */ 3669 max_wait = MEGASAS_RESET_WAIT_TIME; 3670 cur_state = MFI_STATE_UNDEFINED; 3671 break; 3672 3673 case MFI_STATE_BB_INIT: 3674 max_wait = MEGASAS_RESET_WAIT_TIME; 3675 cur_state = MFI_STATE_BB_INIT; 3676 break; 3677 3678 case MFI_STATE_FW_INIT: 3679 max_wait = MEGASAS_RESET_WAIT_TIME; 3680 cur_state = MFI_STATE_FW_INIT; 3681 break; 3682 3683 case MFI_STATE_FW_INIT_2: 3684 max_wait = MEGASAS_RESET_WAIT_TIME; 3685 cur_state = MFI_STATE_FW_INIT_2; 3686 break; 3687 3688 case MFI_STATE_DEVICE_SCAN: 3689 max_wait = MEGASAS_RESET_WAIT_TIME; 3690 cur_state = MFI_STATE_DEVICE_SCAN; 3691 break; 3692 3693 case MFI_STATE_FLUSH_CACHE: 3694 max_wait = MEGASAS_RESET_WAIT_TIME; 3695 cur_state = MFI_STATE_FLUSH_CACHE; 3696 break; 3697 3698 default: 3699 dev_printk(KERN_DEBUG, 
&instance->pdev->dev, "Unknown state 0x%x\n", 3700 fw_state); 3701 return -ENODEV; 3702 } 3703 3704 /* 3705 * The cur_state should not last for more than max_wait secs 3706 */ 3707 for (i = 0; i < (max_wait * 1000); i++) { 3708 curr_abs_state = instance->instancet-> 3709 read_fw_status_reg(instance->reg_set); 3710 3711 if (abs_state == curr_abs_state) { 3712 msleep(1); 3713 } else 3714 break; 3715 } 3716 3717 /* 3718 * Return error if fw_state hasn't changed after max_wait 3719 */ 3720 if (curr_abs_state == abs_state) { 3721 dev_printk(KERN_DEBUG, &instance->pdev->dev, "FW state [%d] hasn't changed " 3722 "in %d secs\n", fw_state, max_wait); 3723 return -ENODEV; 3724 } 3725 3726 abs_state = curr_abs_state; 3727 fw_state = curr_abs_state & MFI_STATE_MASK; 3728 } 3729 dev_info(&instance->pdev->dev, "FW now in Ready state\n"); 3730 3731 return 0; 3732 } 3733 3734 /** 3735 * megasas_teardown_frame_pool - Destroy the cmd frame DMA pool 3736 * @instance: Adapter soft state 3737 */ 3738 static void megasas_teardown_frame_pool(struct megasas_instance *instance) 3739 { 3740 int i; 3741 u32 max_cmd = instance->max_mfi_cmds; 3742 struct megasas_cmd *cmd; 3743 3744 if (!instance->frame_dma_pool) 3745 return; 3746 3747 /* 3748 * Return all frames to pool 3749 */ 3750 for (i = 0; i < max_cmd; i++) { 3751 3752 cmd = instance->cmd_list[i]; 3753 3754 if (cmd->frame) 3755 pci_pool_free(instance->frame_dma_pool, cmd->frame, 3756 cmd->frame_phys_addr); 3757 3758 if (cmd->sense) 3759 pci_pool_free(instance->sense_dma_pool, cmd->sense, 3760 cmd->sense_phys_addr); 3761 } 3762 3763 /* 3764 * Now destroy the pool itself 3765 */ 3766 pci_pool_destroy(instance->frame_dma_pool); 3767 pci_pool_destroy(instance->sense_dma_pool); 3768 3769 instance->frame_dma_pool = NULL; 3770 instance->sense_dma_pool = NULL; 3771 } 3772 3773 /** 3774 * megasas_create_frame_pool - Creates DMA pool for cmd frames 3775 * @instance: Adapter soft state 3776 * 3777 * Each command packet has an embedded DMA memory buffer that is used for 3778 * filling MFI frame and the SG list that immediately follows the frame. This 3779 * function creates those DMA memory buffers for each command packet by using 3780 * PCI pool facility. 3781 */ 3782 static int megasas_create_frame_pool(struct megasas_instance *instance) 3783 { 3784 int i; 3785 u32 max_cmd; 3786 u32 sge_sz; 3787 u32 total_sz; 3788 u32 frame_count; 3789 struct megasas_cmd *cmd; 3790 3791 max_cmd = instance->max_mfi_cmds; 3792 3793 /* 3794 * Size of our frame is 64 bytes for MFI frame, followed by max SG 3795 * elements and finally SCSI_SENSE_BUFFERSIZE bytes for sense buffer 3796 */ 3797 sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) : 3798 sizeof(struct megasas_sge32); 3799 3800 if (instance->flag_ieee) 3801 sge_sz = sizeof(struct megasas_sge_skinny); 3802 3803 /* 3804 * For MFI controllers. 3805 * max_num_sge = 60 3806 * max_sge_sz = 16 byte (sizeof megasas_sge_skinny) 3807 * Total 960 byte (15 MFI frame of 64 byte) 3808 * 3809 * Fusion adapter require only 3 extra frame. 3810 * max_num_sge = 16 (defined as MAX_IOCTL_SGE) 3811 * max_sge_sz = 12 byte (sizeof megasas_sge64) 3812 * Total 192 byte (3 MFI frame of 64 byte) 3813 */ 3814 frame_count = instance->ctrl_context ? 
(3 + 1) : (15 + 1); 3815 total_sz = MEGAMFI_FRAME_SIZE * frame_count; 3816 /* 3817 * Use DMA pool facility provided by PCI layer 3818 */ 3819 instance->frame_dma_pool = pci_pool_create("megasas frame pool", 3820 instance->pdev, total_sz, 256, 0); 3821 3822 if (!instance->frame_dma_pool) { 3823 dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup frame pool\n"); 3824 return -ENOMEM; 3825 } 3826 3827 instance->sense_dma_pool = pci_pool_create("megasas sense pool", 3828 instance->pdev, 128, 4, 0); 3829 3830 if (!instance->sense_dma_pool) { 3831 dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup sense pool\n"); 3832 3833 pci_pool_destroy(instance->frame_dma_pool); 3834 instance->frame_dma_pool = NULL; 3835 3836 return -ENOMEM; 3837 } 3838 3839 /* 3840 * Allocate and attach a frame to each of the commands in cmd_list. 3841 * By making cmd->index as the context instead of the &cmd, we can 3842 * always use 32bit context regardless of the architecture 3843 */ 3844 for (i = 0; i < max_cmd; i++) { 3845 3846 cmd = instance->cmd_list[i]; 3847 3848 cmd->frame = pci_pool_alloc(instance->frame_dma_pool, 3849 GFP_KERNEL, &cmd->frame_phys_addr); 3850 3851 cmd->sense = pci_pool_alloc(instance->sense_dma_pool, 3852 GFP_KERNEL, &cmd->sense_phys_addr); 3853 3854 /* 3855 * megasas_teardown_frame_pool() takes care of freeing 3856 * whatever has been allocated 3857 */ 3858 if (!cmd->frame || !cmd->sense) { 3859 dev_printk(KERN_DEBUG, &instance->pdev->dev, "pci_pool_alloc failed\n"); 3860 megasas_teardown_frame_pool(instance); 3861 return -ENOMEM; 3862 } 3863 3864 memset(cmd->frame, 0, total_sz); 3865 cmd->frame->io.context = cpu_to_le32(cmd->index); 3866 cmd->frame->io.pad_0 = 0; 3867 if (!instance->ctrl_context && reset_devices) 3868 cmd->frame->hdr.cmd = MFI_CMD_INVALID; 3869 } 3870 3871 return 0; 3872 } 3873 3874 /** 3875 * megasas_free_cmds - Free all the cmds in the free cmd pool 3876 * @instance: Adapter soft state 3877 */ 3878 void megasas_free_cmds(struct megasas_instance *instance) 3879 { 3880 int i; 3881 3882 /* First free the MFI frame pool */ 3883 megasas_teardown_frame_pool(instance); 3884 3885 /* Free all the commands in the cmd_list */ 3886 for (i = 0; i < instance->max_mfi_cmds; i++) 3887 3888 kfree(instance->cmd_list[i]); 3889 3890 /* Free the cmd_list buffer itself */ 3891 kfree(instance->cmd_list); 3892 instance->cmd_list = NULL; 3893 3894 INIT_LIST_HEAD(&instance->cmd_pool); 3895 } 3896 3897 /** 3898 * megasas_alloc_cmds - Allocates the command packets 3899 * @instance: Adapter soft state 3900 * 3901 * Each command that is issued to the FW, whether IO commands from the OS or 3902 * internal commands like IOCTLs, are wrapped in local data structure called 3903 * megasas_cmd. The frame embedded in this megasas_cmd is actually issued to 3904 * the FW. 3905 * 3906 * Each frame has a 32-bit field called context (tag). This context is used 3907 * to get back the megasas_cmd from the frame when a frame gets completed in 3908 * the ISR. Typically the address of the megasas_cmd itself would be used as 3909 * the context. But we wanted to keep the differences between 32 and 64 bit 3910 * systems to the mininum. We always use 32 bit integers for the context. In 3911 * this driver, the 32 bit values are the indices into an array cmd_list. 3912 * This array is used only to look up the megasas_cmd given the context. The 3913 * free commands themselves are maintained in a linked list called cmd_pool. 
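 *
 * Illustrative sketch only (not literal driver code): with this scheme the
 * completion path maps a completed context value back to its command with a
 * plain array lookup, e.g.
 *
 *	cmd = instance->cmd_list[context];
 *
 * instead of having to trust a 64-bit pointer round-tripped through the FW.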
 */
int megasas_alloc_cmds(struct megasas_instance *instance)
{
	int i;
	int j;
	u32 max_cmd;
	struct megasas_cmd *cmd;
	struct fusion_context *fusion;

	fusion = instance->ctrl_context;
	max_cmd = instance->max_mfi_cmds;

	/*
	 * instance->cmd_list is an array of struct megasas_cmd pointers.
	 * Allocate the dynamic array first and then allocate individual
	 * commands.
	 */
	instance->cmd_list = kcalloc(max_cmd, sizeof(struct megasas_cmd *),
				     GFP_KERNEL);

	if (!instance->cmd_list) {
		dev_printk(KERN_DEBUG, &instance->pdev->dev, "out of memory\n");
		return -ENOMEM;
	}

	for (i = 0; i < max_cmd; i++) {
		instance->cmd_list[i] = kmalloc(sizeof(struct megasas_cmd),
						GFP_KERNEL);

		if (!instance->cmd_list[i]) {

			for (j = 0; j < i; j++)
				kfree(instance->cmd_list[j]);

			kfree(instance->cmd_list);
			instance->cmd_list = NULL;

			return -ENOMEM;
		}
	}

	for (i = 0; i < max_cmd; i++) {
		cmd = instance->cmd_list[i];
		memset(cmd, 0, sizeof(struct megasas_cmd));
		cmd->index = i;
		cmd->scmd = NULL;
		cmd->instance = instance;

		list_add_tail(&cmd->list, &instance->cmd_pool);
	}

	/*
	 * Create a frame pool and assign one frame to each cmd
	 */
	if (megasas_create_frame_pool(instance)) {
		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error creating frame DMA pool\n");
		megasas_free_cmds(instance);
		return -ENOMEM;
	}

	return 0;
}

/*
 * dcmd_timeout_ocr_possible -	Check if OCR is possible based on Driver/FW state.
 * @instance:			Adapter soft state
 *
 * For MFI (non-Fusion) adapters OCR is not attempted and KILL_ADAPTER is
 * returned.  For Fusion adapters the timeout is ignored (IGNORE_TIMEOUT)
 * while the driver is loading/unloading or an OCR is already in progress;
 * otherwise INITIATE_OCR is returned.
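 *
 * Callers use the returned value to pick a recovery action after a DCMD
 * timeout; a condensed sketch of that recurring pattern (mirroring the
 * existing call sites below, not new behaviour):
 *
 *	switch (dcmd_timeout_ocr_possible(instance)) {
 *	case INITIATE_OCR:
 *		cmd->flags |= DRV_DCMD_SKIP_REFIRE;
 *		megasas_reset_fusion(instance->host, MFI_IO_TIMEOUT_OCR);
 *		break;
 *	case KILL_ADAPTER:
 *		megaraid_sas_kill_hba(instance);
 *		break;
 *	case IGNORE_TIMEOUT:
 *		break;
 *	}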
3983 */ 3984 inline int 3985 dcmd_timeout_ocr_possible(struct megasas_instance *instance) { 3986 3987 if (!instance->ctrl_context) 3988 return KILL_ADAPTER; 3989 else if (instance->unload || 3990 test_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags)) 3991 return IGNORE_TIMEOUT; 3992 else 3993 return INITIATE_OCR; 3994 } 3995 3996 static int 3997 megasas_get_pd_info(struct megasas_instance *instance, u16 device_id) 3998 { 3999 int ret; 4000 struct megasas_cmd *cmd; 4001 struct megasas_dcmd_frame *dcmd; 4002 4003 cmd = megasas_get_cmd(instance); 4004 4005 if (!cmd) { 4006 dev_err(&instance->pdev->dev, "Failed to get cmd %s\n", __func__); 4007 return -ENOMEM; 4008 } 4009 4010 dcmd = &cmd->frame->dcmd; 4011 4012 memset(instance->pd_info, 0, sizeof(*instance->pd_info)); 4013 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 4014 4015 dcmd->mbox.s[0] = cpu_to_le16(device_id); 4016 dcmd->cmd = MFI_CMD_DCMD; 4017 dcmd->cmd_status = 0xFF; 4018 dcmd->sge_count = 1; 4019 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ); 4020 dcmd->timeout = 0; 4021 dcmd->pad_0 = 0; 4022 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_PD_INFO)); 4023 dcmd->opcode = cpu_to_le32(MR_DCMD_PD_GET_INFO); 4024 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->pd_info_h); 4025 dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_PD_INFO)); 4026 4027 if (instance->ctrl_context && !instance->mask_interrupts) 4028 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS); 4029 else 4030 ret = megasas_issue_polled(instance, cmd); 4031 4032 switch (ret) { 4033 case DCMD_SUCCESS: 4034 instance->pd_list[device_id].interface = 4035 instance->pd_info->state.ddf.pdType.intf; 4036 break; 4037 4038 case DCMD_TIMEOUT: 4039 4040 switch (dcmd_timeout_ocr_possible(instance)) { 4041 case INITIATE_OCR: 4042 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 4043 megasas_reset_fusion(instance->host, 4044 MFI_IO_TIMEOUT_OCR); 4045 break; 4046 case KILL_ADAPTER: 4047 megaraid_sas_kill_hba(instance); 4048 break; 4049 case IGNORE_TIMEOUT: 4050 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n", 4051 __func__, __LINE__); 4052 break; 4053 } 4054 4055 break; 4056 } 4057 4058 if (ret != DCMD_TIMEOUT) 4059 megasas_return_cmd(instance, cmd); 4060 4061 return ret; 4062 } 4063 /* 4064 * megasas_get_pd_list_info - Returns FW's pd_list structure 4065 * @instance: Adapter soft state 4066 * @pd_list: pd_list structure 4067 * 4068 * Issues an internal command (DCMD) to get the FW's controller PD 4069 * list structure. This information is mainly used to find out SYSTEM 4070 * supported by the FW. 
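 *
 * Like the other DCMD helpers in this file it follows the common skeleton
 * sketched below (simplified; "len" stands for the per-command payload size,
 * error handling and cleanup are omitted):
 *
 *	cmd = megasas_get_cmd(instance);
 *	ci = pci_alloc_consistent(instance->pdev, len, &ci_h);
 *	dcmd = &cmd->frame->dcmd;
 *	dcmd->cmd = MFI_CMD_DCMD;
 *	dcmd->opcode = cpu_to_le32(MR_DCMD_PD_LIST_QUERY);
 *	dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
 *	dcmd->sgl.sge32[0].length = cpu_to_le32(len);
 *	ret = (instance->ctrl_context && !instance->mask_interrupts) ?
 *		megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS) :
 *		megasas_issue_polled(instance, cmd);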
4071 */ 4072 static int 4073 megasas_get_pd_list(struct megasas_instance *instance) 4074 { 4075 int ret = 0, pd_index = 0; 4076 struct megasas_cmd *cmd; 4077 struct megasas_dcmd_frame *dcmd; 4078 struct MR_PD_LIST *ci; 4079 struct MR_PD_ADDRESS *pd_addr; 4080 dma_addr_t ci_h = 0; 4081 4082 if (instance->pd_list_not_supported) { 4083 dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY " 4084 "not supported by firmware\n"); 4085 return ret; 4086 } 4087 4088 cmd = megasas_get_cmd(instance); 4089 4090 if (!cmd) { 4091 dev_printk(KERN_DEBUG, &instance->pdev->dev, "(get_pd_list): Failed to get cmd\n"); 4092 return -ENOMEM; 4093 } 4094 4095 dcmd = &cmd->frame->dcmd; 4096 4097 ci = pci_alloc_consistent(instance->pdev, 4098 MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST), &ci_h); 4099 4100 if (!ci) { 4101 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc mem for pd_list\n"); 4102 megasas_return_cmd(instance, cmd); 4103 return -ENOMEM; 4104 } 4105 4106 memset(ci, 0, sizeof(*ci)); 4107 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 4108 4109 dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST; 4110 dcmd->mbox.b[1] = 0; 4111 dcmd->cmd = MFI_CMD_DCMD; 4112 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 4113 dcmd->sge_count = 1; 4114 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ); 4115 dcmd->timeout = 0; 4116 dcmd->pad_0 = 0; 4117 dcmd->data_xfer_len = cpu_to_le32(MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST)); 4118 dcmd->opcode = cpu_to_le32(MR_DCMD_PD_LIST_QUERY); 4119 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h); 4120 dcmd->sgl.sge32[0].length = cpu_to_le32(MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST)); 4121 4122 if (instance->ctrl_context && !instance->mask_interrupts) 4123 ret = megasas_issue_blocked_cmd(instance, cmd, 4124 MFI_IO_TIMEOUT_SECS); 4125 else 4126 ret = megasas_issue_polled(instance, cmd); 4127 4128 switch (ret) { 4129 case DCMD_FAILED: 4130 dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY " 4131 "failed/not supported by firmware\n"); 4132 4133 if (instance->ctrl_context) 4134 megaraid_sas_kill_hba(instance); 4135 else 4136 instance->pd_list_not_supported = 1; 4137 break; 4138 case DCMD_TIMEOUT: 4139 4140 switch (dcmd_timeout_ocr_possible(instance)) { 4141 case INITIATE_OCR: 4142 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 4143 /* 4144 * DCMD failed from AEN path. 4145 * AEN path already hold reset_mutex to avoid PCI access 4146 * while OCR is in progress. 
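 * The mutex is therefore dropped for the duration of the reset and
 * re-acquired afterwards, exactly as the code below does:
 *
 *	mutex_unlock(&instance->reset_mutex);
 *	megasas_reset_fusion(instance->host, MFI_IO_TIMEOUT_OCR);
 *	mutex_lock(&instance->reset_mutex);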
4147 */ 4148 mutex_unlock(&instance->reset_mutex); 4149 megasas_reset_fusion(instance->host, 4150 MFI_IO_TIMEOUT_OCR); 4151 mutex_lock(&instance->reset_mutex); 4152 break; 4153 case KILL_ADAPTER: 4154 megaraid_sas_kill_hba(instance); 4155 break; 4156 case IGNORE_TIMEOUT: 4157 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d \n", 4158 __func__, __LINE__); 4159 break; 4160 } 4161 4162 break; 4163 4164 case DCMD_SUCCESS: 4165 pd_addr = ci->addr; 4166 4167 if ((le32_to_cpu(ci->count) > 4168 (MEGASAS_MAX_PD_CHANNELS * MEGASAS_MAX_DEV_PER_CHANNEL))) 4169 break; 4170 4171 memset(instance->local_pd_list, 0, 4172 MEGASAS_MAX_PD * sizeof(struct megasas_pd_list)); 4173 4174 for (pd_index = 0; pd_index < le32_to_cpu(ci->count); pd_index++) { 4175 instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].tid = 4176 le16_to_cpu(pd_addr->deviceId); 4177 instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveType = 4178 pd_addr->scsiDevType; 4179 instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveState = 4180 MR_PD_STATE_SYSTEM; 4181 pd_addr++; 4182 } 4183 4184 memcpy(instance->pd_list, instance->local_pd_list, 4185 sizeof(instance->pd_list)); 4186 break; 4187 4188 } 4189 4190 pci_free_consistent(instance->pdev, 4191 MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST), 4192 ci, ci_h); 4193 4194 if (ret != DCMD_TIMEOUT) 4195 megasas_return_cmd(instance, cmd); 4196 4197 return ret; 4198 } 4199 4200 /* 4201 * megasas_get_ld_list_info - Returns FW's ld_list structure 4202 * @instance: Adapter soft state 4203 * @ld_list: ld_list structure 4204 * 4205 * Issues an internal command (DCMD) to get the FW's controller PD 4206 * list structure. This information is mainly used to find out SYSTEM 4207 * supported by the FW. 4208 */ 4209 static int 4210 megasas_get_ld_list(struct megasas_instance *instance) 4211 { 4212 int ret = 0, ld_index = 0, ids = 0; 4213 struct megasas_cmd *cmd; 4214 struct megasas_dcmd_frame *dcmd; 4215 struct MR_LD_LIST *ci; 4216 dma_addr_t ci_h = 0; 4217 u32 ld_count; 4218 4219 cmd = megasas_get_cmd(instance); 4220 4221 if (!cmd) { 4222 dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_list: Failed to get cmd\n"); 4223 return -ENOMEM; 4224 } 4225 4226 dcmd = &cmd->frame->dcmd; 4227 4228 ci = pci_alloc_consistent(instance->pdev, 4229 sizeof(struct MR_LD_LIST), 4230 &ci_h); 4231 4232 if (!ci) { 4233 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc mem in get_ld_list\n"); 4234 megasas_return_cmd(instance, cmd); 4235 return -ENOMEM; 4236 } 4237 4238 memset(ci, 0, sizeof(*ci)); 4239 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 4240 4241 if (instance->supportmax256vd) 4242 dcmd->mbox.b[0] = 1; 4243 dcmd->cmd = MFI_CMD_DCMD; 4244 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 4245 dcmd->sge_count = 1; 4246 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ); 4247 dcmd->timeout = 0; 4248 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_LIST)); 4249 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_GET_LIST); 4250 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h); 4251 dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_LD_LIST)); 4252 dcmd->pad_0 = 0; 4253 4254 if (instance->ctrl_context && !instance->mask_interrupts) 4255 ret = megasas_issue_blocked_cmd(instance, cmd, 4256 MFI_IO_TIMEOUT_SECS); 4257 else 4258 ret = megasas_issue_polled(instance, cmd); 4259 4260 ld_count = le32_to_cpu(ci->ldCount); 4261 4262 switch (ret) { 4263 case DCMD_FAILED: 4264 megaraid_sas_kill_hba(instance); 4265 break; 4266 case DCMD_TIMEOUT: 4267 4268 switch (dcmd_timeout_ocr_possible(instance)) { 4269 
case INITIATE_OCR: 4270 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 4271 /* 4272 * DCMD failed from AEN path. 4273 * AEN path already hold reset_mutex to avoid PCI access 4274 * while OCR is in progress. 4275 */ 4276 mutex_unlock(&instance->reset_mutex); 4277 megasas_reset_fusion(instance->host, 4278 MFI_IO_TIMEOUT_OCR); 4279 mutex_lock(&instance->reset_mutex); 4280 break; 4281 case KILL_ADAPTER: 4282 megaraid_sas_kill_hba(instance); 4283 break; 4284 case IGNORE_TIMEOUT: 4285 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n", 4286 __func__, __LINE__); 4287 break; 4288 } 4289 4290 break; 4291 4292 case DCMD_SUCCESS: 4293 if (ld_count > instance->fw_supported_vd_count) 4294 break; 4295 4296 memset(instance->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT); 4297 4298 for (ld_index = 0; ld_index < ld_count; ld_index++) { 4299 if (ci->ldList[ld_index].state != 0) { 4300 ids = ci->ldList[ld_index].ref.targetId; 4301 instance->ld_ids[ids] = ci->ldList[ld_index].ref.targetId; 4302 } 4303 } 4304 4305 break; 4306 } 4307 4308 pci_free_consistent(instance->pdev, sizeof(struct MR_LD_LIST), ci, ci_h); 4309 4310 if (ret != DCMD_TIMEOUT) 4311 megasas_return_cmd(instance, cmd); 4312 4313 return ret; 4314 } 4315 4316 /** 4317 * megasas_ld_list_query - Returns FW's ld_list structure 4318 * @instance: Adapter soft state 4319 * @ld_list: ld_list structure 4320 * 4321 * Issues an internal command (DCMD) to get the FW's controller PD 4322 * list structure. This information is mainly used to find out SYSTEM 4323 * supported by the FW. 4324 */ 4325 static int 4326 megasas_ld_list_query(struct megasas_instance *instance, u8 query_type) 4327 { 4328 int ret = 0, ld_index = 0, ids = 0; 4329 struct megasas_cmd *cmd; 4330 struct megasas_dcmd_frame *dcmd; 4331 struct MR_LD_TARGETID_LIST *ci; 4332 dma_addr_t ci_h = 0; 4333 u32 tgtid_count; 4334 4335 cmd = megasas_get_cmd(instance); 4336 4337 if (!cmd) { 4338 dev_warn(&instance->pdev->dev, 4339 "megasas_ld_list_query: Failed to get cmd\n"); 4340 return -ENOMEM; 4341 } 4342 4343 dcmd = &cmd->frame->dcmd; 4344 4345 ci = pci_alloc_consistent(instance->pdev, 4346 sizeof(struct MR_LD_TARGETID_LIST), &ci_h); 4347 4348 if (!ci) { 4349 dev_warn(&instance->pdev->dev, 4350 "Failed to alloc mem for ld_list_query\n"); 4351 megasas_return_cmd(instance, cmd); 4352 return -ENOMEM; 4353 } 4354 4355 memset(ci, 0, sizeof(*ci)); 4356 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 4357 4358 dcmd->mbox.b[0] = query_type; 4359 if (instance->supportmax256vd) 4360 dcmd->mbox.b[2] = 1; 4361 4362 dcmd->cmd = MFI_CMD_DCMD; 4363 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 4364 dcmd->sge_count = 1; 4365 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ); 4366 dcmd->timeout = 0; 4367 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_TARGETID_LIST)); 4368 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_LIST_QUERY); 4369 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h); 4370 dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_LD_TARGETID_LIST)); 4371 dcmd->pad_0 = 0; 4372 4373 if (instance->ctrl_context && !instance->mask_interrupts) 4374 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS); 4375 else 4376 ret = megasas_issue_polled(instance, cmd); 4377 4378 switch (ret) { 4379 case DCMD_FAILED: 4380 dev_info(&instance->pdev->dev, 4381 "DCMD not supported by firmware - %s %d\n", 4382 __func__, __LINE__); 4383 ret = megasas_get_ld_list(instance); 4384 break; 4385 case DCMD_TIMEOUT: 4386 switch (dcmd_timeout_ocr_possible(instance)) { 4387 case INITIATE_OCR: 4388 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 4389 /* 4390 
* DCMD failed from AEN path. 4391 * AEN path already hold reset_mutex to avoid PCI access 4392 * while OCR is in progress. 4393 */ 4394 mutex_unlock(&instance->reset_mutex); 4395 megasas_reset_fusion(instance->host, 4396 MFI_IO_TIMEOUT_OCR); 4397 mutex_lock(&instance->reset_mutex); 4398 break; 4399 case KILL_ADAPTER: 4400 megaraid_sas_kill_hba(instance); 4401 break; 4402 case IGNORE_TIMEOUT: 4403 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n", 4404 __func__, __LINE__); 4405 break; 4406 } 4407 4408 break; 4409 case DCMD_SUCCESS: 4410 tgtid_count = le32_to_cpu(ci->count); 4411 4412 if ((tgtid_count > (instance->fw_supported_vd_count))) 4413 break; 4414 4415 memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS); 4416 for (ld_index = 0; ld_index < tgtid_count; ld_index++) { 4417 ids = ci->targetId[ld_index]; 4418 instance->ld_ids[ids] = ci->targetId[ld_index]; 4419 } 4420 4421 break; 4422 } 4423 4424 pci_free_consistent(instance->pdev, sizeof(struct MR_LD_TARGETID_LIST), 4425 ci, ci_h); 4426 4427 if (ret != DCMD_TIMEOUT) 4428 megasas_return_cmd(instance, cmd); 4429 4430 return ret; 4431 } 4432 4433 /* 4434 * megasas_update_ext_vd_details : Update details w.r.t Extended VD 4435 * instance : Controller's instance 4436 */ 4437 static void megasas_update_ext_vd_details(struct megasas_instance *instance) 4438 { 4439 struct fusion_context *fusion; 4440 u32 old_map_sz; 4441 u32 new_map_sz; 4442 4443 fusion = instance->ctrl_context; 4444 /* For MFI based controllers return dummy success */ 4445 if (!fusion) 4446 return; 4447 4448 instance->supportmax256vd = 4449 instance->ctrl_info->adapterOperations3.supportMaxExtLDs; 4450 /* Below is additional check to address future FW enhancement */ 4451 if (instance->ctrl_info->max_lds > 64) 4452 instance->supportmax256vd = 1; 4453 4454 instance->drv_supported_vd_count = MEGASAS_MAX_LD_CHANNELS 4455 * MEGASAS_MAX_DEV_PER_CHANNEL; 4456 instance->drv_supported_pd_count = MEGASAS_MAX_PD_CHANNELS 4457 * MEGASAS_MAX_DEV_PER_CHANNEL; 4458 if (instance->supportmax256vd) { 4459 instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES_EXT; 4460 instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES; 4461 } else { 4462 instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES; 4463 instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES; 4464 } 4465 4466 dev_info(&instance->pdev->dev, 4467 "firmware type\t: %s\n", 4468 instance->supportmax256vd ? "Extended VD(240 VD)firmware" : 4469 "Legacy(64 VD) firmware"); 4470 4471 old_map_sz = sizeof(struct MR_FW_RAID_MAP) + 4472 (sizeof(struct MR_LD_SPAN_MAP) * 4473 (instance->fw_supported_vd_count - 1)); 4474 new_map_sz = sizeof(struct MR_FW_RAID_MAP_EXT); 4475 fusion->drv_map_sz = sizeof(struct MR_DRV_RAID_MAP) + 4476 (sizeof(struct MR_LD_SPAN_MAP) * 4477 (instance->drv_supported_vd_count - 1)); 4478 4479 fusion->max_map_sz = max(old_map_sz, new_map_sz); 4480 4481 4482 if (instance->supportmax256vd) 4483 fusion->current_map_sz = new_map_sz; 4484 else 4485 fusion->current_map_sz = old_map_sz; 4486 } 4487 4488 /** 4489 * megasas_get_controller_info - Returns FW's controller structure 4490 * @instance: Adapter soft state 4491 * 4492 * Issues an internal command (DCMD) to get the FW's controller structure. 4493 * This information is mainly used to find out the maximum IO transfer per 4494 * command supported by the FW. 
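 *
 * For reference, megasas_init_fw() later derives that per-command limit
 * from two fields of the returned structure (a sketch of the existing
 * computation, not new logic):
 *
 *	max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) *
 *			le16_to_cpu(ctrl_info->max_strips_per_io);
 *	max_sectors_2 = le32_to_cpu(ctrl_info->max_request_size);
 *	tmp_sectors = min_t(u32, max_sectors_1, max_sectors_2);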
4495 */ 4496 int 4497 megasas_get_ctrl_info(struct megasas_instance *instance) 4498 { 4499 int ret = 0; 4500 struct megasas_cmd *cmd; 4501 struct megasas_dcmd_frame *dcmd; 4502 struct megasas_ctrl_info *ci; 4503 struct megasas_ctrl_info *ctrl_info; 4504 dma_addr_t ci_h = 0; 4505 4506 ctrl_info = instance->ctrl_info; 4507 4508 cmd = megasas_get_cmd(instance); 4509 4510 if (!cmd) { 4511 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get a free cmd\n"); 4512 return -ENOMEM; 4513 } 4514 4515 dcmd = &cmd->frame->dcmd; 4516 4517 ci = pci_alloc_consistent(instance->pdev, 4518 sizeof(struct megasas_ctrl_info), &ci_h); 4519 4520 if (!ci) { 4521 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc mem for ctrl info\n"); 4522 megasas_return_cmd(instance, cmd); 4523 return -ENOMEM; 4524 } 4525 4526 memset(ci, 0, sizeof(*ci)); 4527 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 4528 4529 dcmd->cmd = MFI_CMD_DCMD; 4530 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 4531 dcmd->sge_count = 1; 4532 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ); 4533 dcmd->timeout = 0; 4534 dcmd->pad_0 = 0; 4535 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_ctrl_info)); 4536 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_GET_INFO); 4537 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h); 4538 dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_ctrl_info)); 4539 dcmd->mbox.b[0] = 1; 4540 4541 if (instance->ctrl_context && !instance->mask_interrupts) 4542 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS); 4543 else 4544 ret = megasas_issue_polled(instance, cmd); 4545 4546 switch (ret) { 4547 case DCMD_SUCCESS: 4548 memcpy(ctrl_info, ci, sizeof(struct megasas_ctrl_info)); 4549 /* Save required controller information in 4550 * CPU endianness format. 4551 */ 4552 le32_to_cpus((u32 *)&ctrl_info->properties.OnOffProperties); 4553 le32_to_cpus((u32 *)&ctrl_info->adapterOperations2); 4554 le32_to_cpus((u32 *)&ctrl_info->adapterOperations3); 4555 4556 /* Update the latest Ext VD info. 4557 * From Init path, store current firmware details. 4558 * From OCR path, detect any firmware properties changes. 4559 * in case of Firmware upgrade without system reboot. 4560 */ 4561 megasas_update_ext_vd_details(instance); 4562 instance->use_seqnum_jbod_fp = 4563 ctrl_info->adapterOperations3.useSeqNumJbodFP; 4564 4565 /*Check whether controller is iMR or MR */ 4566 instance->is_imr = (ctrl_info->memory_size ? 0 : 1); 4567 dev_info(&instance->pdev->dev, 4568 "controller type\t: %s(%dMB)\n", 4569 instance->is_imr ? "iMR" : "MR", 4570 le16_to_cpu(ctrl_info->memory_size)); 4571 4572 instance->disableOnlineCtrlReset = 4573 ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset; 4574 instance->secure_jbod_support = 4575 ctrl_info->adapterOperations3.supportSecurityonJBOD; 4576 dev_info(&instance->pdev->dev, "Online Controller Reset(OCR)\t: %s\n", 4577 instance->disableOnlineCtrlReset ? "Disabled" : "Enabled"); 4578 dev_info(&instance->pdev->dev, "Secure JBOD support\t: %s\n", 4579 instance->secure_jbod_support ? 
"Yes" : "No"); 4580 break; 4581 4582 case DCMD_TIMEOUT: 4583 switch (dcmd_timeout_ocr_possible(instance)) { 4584 case INITIATE_OCR: 4585 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 4586 megasas_reset_fusion(instance->host, 4587 MFI_IO_TIMEOUT_OCR); 4588 break; 4589 case KILL_ADAPTER: 4590 megaraid_sas_kill_hba(instance); 4591 break; 4592 case IGNORE_TIMEOUT: 4593 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n", 4594 __func__, __LINE__); 4595 break; 4596 } 4597 case DCMD_FAILED: 4598 megaraid_sas_kill_hba(instance); 4599 break; 4600 4601 } 4602 4603 pci_free_consistent(instance->pdev, sizeof(struct megasas_ctrl_info), 4604 ci, ci_h); 4605 4606 megasas_return_cmd(instance, cmd); 4607 4608 4609 return ret; 4610 } 4611 4612 /* 4613 * megasas_set_crash_dump_params - Sends address of crash dump DMA buffer 4614 * to firmware 4615 * 4616 * @instance: Adapter soft state 4617 * @crash_buf_state - tell FW to turn ON/OFF crash dump feature 4618 MR_CRASH_BUF_TURN_OFF = 0 4619 MR_CRASH_BUF_TURN_ON = 1 4620 * @return 0 on success non-zero on failure. 4621 * Issues an internal command (DCMD) to set parameters for crash dump feature. 4622 * Driver will send address of crash dump DMA buffer and set mbox to tell FW 4623 * that driver supports crash dump feature. This DCMD will be sent only if 4624 * crash dump feature is supported by the FW. 4625 * 4626 */ 4627 int megasas_set_crash_dump_params(struct megasas_instance *instance, 4628 u8 crash_buf_state) 4629 { 4630 int ret = 0; 4631 struct megasas_cmd *cmd; 4632 struct megasas_dcmd_frame *dcmd; 4633 4634 cmd = megasas_get_cmd(instance); 4635 4636 if (!cmd) { 4637 dev_err(&instance->pdev->dev, "Failed to get a free cmd\n"); 4638 return -ENOMEM; 4639 } 4640 4641 4642 dcmd = &cmd->frame->dcmd; 4643 4644 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 4645 dcmd->mbox.b[0] = crash_buf_state; 4646 dcmd->cmd = MFI_CMD_DCMD; 4647 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 4648 dcmd->sge_count = 1; 4649 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE); 4650 dcmd->timeout = 0; 4651 dcmd->pad_0 = 0; 4652 dcmd->data_xfer_len = cpu_to_le32(CRASH_DMA_BUF_SIZE); 4653 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SET_CRASH_DUMP_PARAMS); 4654 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->crash_dump_h); 4655 dcmd->sgl.sge32[0].length = cpu_to_le32(CRASH_DMA_BUF_SIZE); 4656 4657 if (instance->ctrl_context && !instance->mask_interrupts) 4658 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS); 4659 else 4660 ret = megasas_issue_polled(instance, cmd); 4661 4662 if (ret == DCMD_TIMEOUT) { 4663 switch (dcmd_timeout_ocr_possible(instance)) { 4664 case INITIATE_OCR: 4665 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 4666 megasas_reset_fusion(instance->host, 4667 MFI_IO_TIMEOUT_OCR); 4668 break; 4669 case KILL_ADAPTER: 4670 megaraid_sas_kill_hba(instance); 4671 break; 4672 case IGNORE_TIMEOUT: 4673 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n", 4674 __func__, __LINE__); 4675 break; 4676 } 4677 } else 4678 megasas_return_cmd(instance, cmd); 4679 4680 return ret; 4681 } 4682 4683 /** 4684 * megasas_issue_init_mfi - Initializes the FW 4685 * @instance: Adapter soft state 4686 * 4687 * Issues the INIT MFI cmd 4688 */ 4689 static int 4690 megasas_issue_init_mfi(struct megasas_instance *instance) 4691 { 4692 __le32 context; 4693 struct megasas_cmd *cmd; 4694 struct megasas_init_frame *init_frame; 4695 struct megasas_init_queue_info *initq_info; 4696 dma_addr_t init_frame_h; 4697 dma_addr_t initq_info_h; 4698 4699 /* 4700 * Prepare a init frame. 
Note the init frame points to queue info 4701 * structure. Each frame has SGL allocated after first 64 bytes. For 4702 * this frame - since we don't need any SGL - we use SGL's space as 4703 * queue info structure 4704 * 4705 * We will not get a NULL command below. We just created the pool. 4706 */ 4707 cmd = megasas_get_cmd(instance); 4708 4709 init_frame = (struct megasas_init_frame *)cmd->frame; 4710 initq_info = (struct megasas_init_queue_info *) 4711 ((unsigned long)init_frame + 64); 4712 4713 init_frame_h = cmd->frame_phys_addr; 4714 initq_info_h = init_frame_h + 64; 4715 4716 context = init_frame->context; 4717 memset(init_frame, 0, MEGAMFI_FRAME_SIZE); 4718 memset(initq_info, 0, sizeof(struct megasas_init_queue_info)); 4719 init_frame->context = context; 4720 4721 initq_info->reply_queue_entries = cpu_to_le32(instance->max_fw_cmds + 1); 4722 initq_info->reply_queue_start_phys_addr_lo = cpu_to_le32(instance->reply_queue_h); 4723 4724 initq_info->producer_index_phys_addr_lo = cpu_to_le32(instance->producer_h); 4725 initq_info->consumer_index_phys_addr_lo = cpu_to_le32(instance->consumer_h); 4726 4727 init_frame->cmd = MFI_CMD_INIT; 4728 init_frame->cmd_status = MFI_STAT_INVALID_STATUS; 4729 init_frame->queue_info_new_phys_addr_lo = 4730 cpu_to_le32(lower_32_bits(initq_info_h)); 4731 init_frame->queue_info_new_phys_addr_hi = 4732 cpu_to_le32(upper_32_bits(initq_info_h)); 4733 4734 init_frame->data_xfer_len = cpu_to_le32(sizeof(struct megasas_init_queue_info)); 4735 4736 /* 4737 * disable the intr before firing the init frame to FW 4738 */ 4739 instance->instancet->disable_intr(instance); 4740 4741 /* 4742 * Issue the init frame in polled mode 4743 */ 4744 4745 if (megasas_issue_polled(instance, cmd)) { 4746 dev_err(&instance->pdev->dev, "Failed to init firmware\n"); 4747 megasas_return_cmd(instance, cmd); 4748 goto fail_fw_init; 4749 } 4750 4751 megasas_return_cmd(instance, cmd); 4752 4753 return 0; 4754 4755 fail_fw_init: 4756 return -EINVAL; 4757 } 4758 4759 static u32 4760 megasas_init_adapter_mfi(struct megasas_instance *instance) 4761 { 4762 struct megasas_register_set __iomem *reg_set; 4763 u32 context_sz; 4764 u32 reply_q_sz; 4765 4766 reg_set = instance->reg_set; 4767 4768 /* 4769 * Get various operational parameters from status register 4770 */ 4771 instance->max_fw_cmds = instance->instancet->read_fw_status_reg(reg_set) & 0x00FFFF; 4772 /* 4773 * Reduce the max supported cmds by 1. This is to ensure that the 4774 * reply_q_sz (1 more than the max cmd that driver may send) 4775 * does not exceed max cmds that the FW can support 4776 */ 4777 instance->max_fw_cmds = instance->max_fw_cmds-1; 4778 instance->max_mfi_cmds = instance->max_fw_cmds; 4779 instance->max_num_sge = (instance->instancet->read_fw_status_reg(reg_set) & 0xFF0000) >> 4780 0x10; 4781 /* 4782 * For MFI skinny adapters, MEGASAS_SKINNY_INT_CMDS commands 4783 * are reserved for IOCTL + driver's internal DCMDs. 
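 *
 * Purely hypothetical example (the real count comes from the FW status
 * register): if the FW reported 256 supported commands, max_fw_cmds would
 * become 255 and a skinny adapter would expose
 *
 *	max_scsi_cmds = 255 - MEGASAS_SKINNY_INT_CMDS
 *
 * to the SCSI mid-layer, with the reserved slots gating ioctls through
 * ioctl_sem.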
4784 */ 4785 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) || 4786 (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) { 4787 instance->max_scsi_cmds = (instance->max_fw_cmds - 4788 MEGASAS_SKINNY_INT_CMDS); 4789 sema_init(&instance->ioctl_sem, MEGASAS_SKINNY_INT_CMDS); 4790 } else { 4791 instance->max_scsi_cmds = (instance->max_fw_cmds - 4792 MEGASAS_INT_CMDS); 4793 sema_init(&instance->ioctl_sem, (MEGASAS_MFI_IOCTL_CMDS)); 4794 } 4795 4796 instance->cur_can_queue = instance->max_scsi_cmds; 4797 /* 4798 * Create a pool of commands 4799 */ 4800 if (megasas_alloc_cmds(instance)) 4801 goto fail_alloc_cmds; 4802 4803 /* 4804 * Allocate memory for reply queue. Length of reply queue should 4805 * be _one_ more than the maximum commands handled by the firmware. 4806 * 4807 * Note: When FW completes commands, it places corresponding contex 4808 * values in this circular reply queue. This circular queue is a fairly 4809 * typical producer-consumer queue. FW is the producer (of completed 4810 * commands) and the driver is the consumer. 4811 */ 4812 context_sz = sizeof(u32); 4813 reply_q_sz = context_sz * (instance->max_fw_cmds + 1); 4814 4815 instance->reply_queue = pci_alloc_consistent(instance->pdev, 4816 reply_q_sz, 4817 &instance->reply_queue_h); 4818 4819 if (!instance->reply_queue) { 4820 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Out of DMA mem for reply queue\n"); 4821 goto fail_reply_queue; 4822 } 4823 4824 if (megasas_issue_init_mfi(instance)) 4825 goto fail_fw_init; 4826 4827 if (megasas_get_ctrl_info(instance)) { 4828 dev_err(&instance->pdev->dev, "(%d): Could get controller info " 4829 "Fail from %s %d\n", instance->unique_id, 4830 __func__, __LINE__); 4831 goto fail_fw_init; 4832 } 4833 4834 instance->fw_support_ieee = 0; 4835 instance->fw_support_ieee = 4836 (instance->instancet->read_fw_status_reg(reg_set) & 4837 0x04000000); 4838 4839 dev_notice(&instance->pdev->dev, "megasas_init_mfi: fw_support_ieee=%d", 4840 instance->fw_support_ieee); 4841 4842 if (instance->fw_support_ieee) 4843 instance->flag_ieee = 1; 4844 4845 return 0; 4846 4847 fail_fw_init: 4848 4849 pci_free_consistent(instance->pdev, reply_q_sz, 4850 instance->reply_queue, instance->reply_queue_h); 4851 fail_reply_queue: 4852 megasas_free_cmds(instance); 4853 4854 fail_alloc_cmds: 4855 return 1; 4856 } 4857 4858 /* 4859 * megasas_setup_irqs_msix - register legacy interrupts. 4860 * @instance: Adapter soft state 4861 * 4862 * Do not enable interrupt, only setup ISRs. 4863 * 4864 * Return 0 on success. 4865 */ 4866 static int 4867 megasas_setup_irqs_ioapic(struct megasas_instance *instance) 4868 { 4869 struct pci_dev *pdev; 4870 4871 pdev = instance->pdev; 4872 instance->irq_context[0].instance = instance; 4873 instance->irq_context[0].MSIxIndex = 0; 4874 if (request_irq(pdev->irq, instance->instancet->service_isr, 4875 IRQF_SHARED, "megasas", &instance->irq_context[0])) { 4876 dev_err(&instance->pdev->dev, 4877 "Failed to register IRQ from %s %d\n", 4878 __func__, __LINE__); 4879 return -1; 4880 } 4881 return 0; 4882 } 4883 4884 /** 4885 * megasas_setup_irqs_msix - register MSI-x interrupts. 4886 * @instance: Adapter soft state 4887 * @is_probe: Driver probe check 4888 * 4889 * Do not enable interrupt, only setup ISRs. 4890 * 4891 * Return 0 on success. 
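 *
 * Condensed sketch of the per-vector work done below (mirrors the existing
 * code; unwinding on failure is omitted):
 *
 *	for (i = 0; i < instance->msix_vectors; i++) {
 *		request_irq(instance->msixentry[i].vector,
 *			    instance->instancet->service_isr, 0, "megasas",
 *			    &instance->irq_context[i]);
 *		if (smp_affinity_enable)
 *			irq_set_affinity_hint(instance->msixentry[i].vector,
 *					      get_cpu_mask(cpu));
 *	}
 *
 * If registration fails during probe, the function falls back to a single
 * IO-APIC vector via megasas_setup_irqs_ioapic().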
 */
static int
megasas_setup_irqs_msix(struct megasas_instance *instance, u8 is_probe)
{
	int i, j, cpu;
	struct pci_dev *pdev;

	pdev = instance->pdev;

	/* Try MSI-x */
	cpu = cpumask_first(cpu_online_mask);
	for (i = 0; i < instance->msix_vectors; i++) {
		instance->irq_context[i].instance = instance;
		instance->irq_context[i].MSIxIndex = i;
		if (request_irq(instance->msixentry[i].vector,
			instance->instancet->service_isr, 0, "megasas",
			&instance->irq_context[i])) {
			dev_err(&instance->pdev->dev,
				"Failed to register IRQ for vector %d.\n", i);
			for (j = 0; j < i; j++) {
				if (smp_affinity_enable)
					irq_set_affinity_hint(
						instance->msixentry[j].vector, NULL);
				free_irq(instance->msixentry[j].vector,
					 &instance->irq_context[j]);
			}
			/* Retry irq register for IO_APIC */
			instance->msix_vectors = 0;
			if (is_probe)
				return megasas_setup_irqs_ioapic(instance);
			else
				return -1;
		}
		if (smp_affinity_enable) {
			if (irq_set_affinity_hint(instance->msixentry[i].vector,
				get_cpu_mask(cpu)))
				dev_err(&instance->pdev->dev,
					"Failed to set affinity hint"
					" for cpu %d\n", cpu);
			cpu = cpumask_next(cpu, cpu_online_mask);
		}
	}
	return 0;
}

/*
 * megasas_destroy_irqs -	unregister interrupts.
 * @instance:			Adapter soft state
 * return:			void
 */
static void
megasas_destroy_irqs(struct megasas_instance *instance)
{
	int i;

	if (instance->msix_vectors)
		for (i = 0; i < instance->msix_vectors; i++) {
			if (smp_affinity_enable)
				irq_set_affinity_hint(
					instance->msixentry[i].vector, NULL);
			free_irq(instance->msixentry[i].vector,
				 &instance->irq_context[i]);
		}
	else
		free_irq(instance->pdev->irq, &instance->irq_context[0]);
}

/**
 * megasas_setup_jbod_map -	setup jbod map for FP seq_number.
 * @instance:			Adapter soft state
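 *
 * Each of the JBOD_MAPS_COUNT DMA-coherent maps allocated below has the
 * size computed at the top of the function:
 *
 *	pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
 *		(sizeof(struct MR_PD_CFG_SEQ) * (MAX_PHYSICAL_DEVICES - 1));
 *
 * use_seqnum_jbod_fp is enabled only after both megasas_sync_pd_seq_num()
 * calls succeed.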
4965 */ 4966 void 4967 megasas_setup_jbod_map(struct megasas_instance *instance) 4968 { 4969 int i; 4970 struct fusion_context *fusion = instance->ctrl_context; 4971 u32 pd_seq_map_sz; 4972 4973 pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) + 4974 (sizeof(struct MR_PD_CFG_SEQ) * (MAX_PHYSICAL_DEVICES - 1)); 4975 4976 if (reset_devices || !fusion || 4977 !instance->ctrl_info->adapterOperations3.useSeqNumJbodFP) { 4978 dev_info(&instance->pdev->dev, 4979 "Jbod map is not supported %s %d\n", 4980 __func__, __LINE__); 4981 instance->use_seqnum_jbod_fp = false; 4982 return; 4983 } 4984 4985 if (fusion->pd_seq_sync[0]) 4986 goto skip_alloc; 4987 4988 for (i = 0; i < JBOD_MAPS_COUNT; i++) { 4989 fusion->pd_seq_sync[i] = dma_alloc_coherent 4990 (&instance->pdev->dev, pd_seq_map_sz, 4991 &fusion->pd_seq_phys[i], GFP_KERNEL); 4992 if (!fusion->pd_seq_sync[i]) { 4993 dev_err(&instance->pdev->dev, 4994 "Failed to allocate memory from %s %d\n", 4995 __func__, __LINE__); 4996 if (i == 1) { 4997 dma_free_coherent(&instance->pdev->dev, 4998 pd_seq_map_sz, fusion->pd_seq_sync[0], 4999 fusion->pd_seq_phys[0]); 5000 fusion->pd_seq_sync[0] = NULL; 5001 } 5002 instance->use_seqnum_jbod_fp = false; 5003 return; 5004 } 5005 } 5006 5007 skip_alloc: 5008 if (!megasas_sync_pd_seq_num(instance, false) && 5009 !megasas_sync_pd_seq_num(instance, true)) 5010 instance->use_seqnum_jbod_fp = true; 5011 else 5012 instance->use_seqnum_jbod_fp = false; 5013 } 5014 5015 /** 5016 * megasas_init_fw - Initializes the FW 5017 * @instance: Adapter soft state 5018 * 5019 * This is the main function for initializing firmware 5020 */ 5021 5022 static int megasas_init_fw(struct megasas_instance *instance) 5023 { 5024 u32 max_sectors_1; 5025 u32 max_sectors_2; 5026 u32 tmp_sectors, msix_enable, scratch_pad_2; 5027 resource_size_t base_addr; 5028 struct megasas_register_set __iomem *reg_set; 5029 struct megasas_ctrl_info *ctrl_info = NULL; 5030 unsigned long bar_list; 5031 int i, loop, fw_msix_count = 0; 5032 struct IOV_111 *iovPtr; 5033 struct fusion_context *fusion; 5034 5035 fusion = instance->ctrl_context; 5036 5037 /* Find first memory bar */ 5038 bar_list = pci_select_bars(instance->pdev, IORESOURCE_MEM); 5039 instance->bar = find_first_bit(&bar_list, sizeof(unsigned long)); 5040 if (pci_request_selected_regions(instance->pdev, instance->bar, 5041 "megasas: LSI")) { 5042 dev_printk(KERN_DEBUG, &instance->pdev->dev, "IO memory region busy!\n"); 5043 return -EBUSY; 5044 } 5045 5046 base_addr = pci_resource_start(instance->pdev, instance->bar); 5047 instance->reg_set = ioremap_nocache(base_addr, 8192); 5048 5049 if (!instance->reg_set) { 5050 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to map IO mem\n"); 5051 goto fail_ioremap; 5052 } 5053 5054 reg_set = instance->reg_set; 5055 5056 switch (instance->pdev->device) { 5057 case PCI_DEVICE_ID_LSI_FUSION: 5058 case PCI_DEVICE_ID_LSI_PLASMA: 5059 case PCI_DEVICE_ID_LSI_INVADER: 5060 case PCI_DEVICE_ID_LSI_FURY: 5061 case PCI_DEVICE_ID_LSI_INTRUDER: 5062 case PCI_DEVICE_ID_LSI_INTRUDER_24: 5063 case PCI_DEVICE_ID_LSI_CUTLASS_52: 5064 case PCI_DEVICE_ID_LSI_CUTLASS_53: 5065 instance->instancet = &megasas_instance_template_fusion; 5066 break; 5067 case PCI_DEVICE_ID_LSI_SAS1078R: 5068 case PCI_DEVICE_ID_LSI_SAS1078DE: 5069 instance->instancet = &megasas_instance_template_ppc; 5070 break; 5071 case PCI_DEVICE_ID_LSI_SAS1078GEN2: 5072 case PCI_DEVICE_ID_LSI_SAS0079GEN2: 5073 instance->instancet = &megasas_instance_template_gen2; 5074 break; 5075 case 
PCI_DEVICE_ID_LSI_SAS0073SKINNY: 5076 case PCI_DEVICE_ID_LSI_SAS0071SKINNY: 5077 instance->instancet = &megasas_instance_template_skinny; 5078 break; 5079 case PCI_DEVICE_ID_LSI_SAS1064R: 5080 case PCI_DEVICE_ID_DELL_PERC5: 5081 default: 5082 instance->instancet = &megasas_instance_template_xscale; 5083 break; 5084 } 5085 5086 if (megasas_transition_to_ready(instance, 0)) { 5087 atomic_set(&instance->fw_reset_no_pci_access, 1); 5088 instance->instancet->adp_reset 5089 (instance, instance->reg_set); 5090 atomic_set(&instance->fw_reset_no_pci_access, 0); 5091 dev_info(&instance->pdev->dev, 5092 "FW restarted successfully from %s!\n", 5093 __func__); 5094 5095 /*waitting for about 30 second before retry*/ 5096 ssleep(30); 5097 5098 if (megasas_transition_to_ready(instance, 0)) 5099 goto fail_ready_state; 5100 } 5101 5102 /* 5103 * MSI-X host index 0 is common for all adapter. 5104 * It is used for all MPT based Adapters. 5105 */ 5106 instance->reply_post_host_index_addr[0] = 5107 (u32 __iomem *)((u8 __iomem *)instance->reg_set + 5108 MPI2_REPLY_POST_HOST_INDEX_OFFSET); 5109 5110 /* Check if MSI-X is supported while in ready state */ 5111 msix_enable = (instance->instancet->read_fw_status_reg(reg_set) & 5112 0x4000000) >> 0x1a; 5113 if (msix_enable && !msix_disable) { 5114 scratch_pad_2 = readl 5115 (&instance->reg_set->outbound_scratch_pad_2); 5116 /* Check max MSI-X vectors */ 5117 if (fusion) { 5118 if (fusion->adapter_type == THUNDERBOLT_SERIES) { /* Thunderbolt Series*/ 5119 instance->msix_vectors = (scratch_pad_2 5120 & MR_MAX_REPLY_QUEUES_OFFSET) + 1; 5121 fw_msix_count = instance->msix_vectors; 5122 } else { /* Invader series supports more than 8 MSI-x vectors*/ 5123 instance->msix_vectors = ((scratch_pad_2 5124 & MR_MAX_REPLY_QUEUES_EXT_OFFSET) 5125 >> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1; 5126 if (rdpq_enable) 5127 instance->is_rdpq = (scratch_pad_2 & MR_RDPQ_MODE_OFFSET) ? 5128 1 : 0; 5129 fw_msix_count = instance->msix_vectors; 5130 /* Save 1-15 reply post index address to local memory 5131 * Index 0 is already saved from reg offset 5132 * MPI2_REPLY_POST_HOST_INDEX_OFFSET 5133 */ 5134 for (loop = 1; loop < MR_MAX_MSIX_REG_ARRAY; loop++) { 5135 instance->reply_post_host_index_addr[loop] = 5136 (u32 __iomem *) 5137 ((u8 __iomem *)instance->reg_set + 5138 MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET 5139 + (loop * 0x10)); 5140 } 5141 } 5142 if (msix_vectors) 5143 instance->msix_vectors = min(msix_vectors, 5144 instance->msix_vectors); 5145 } else /* MFI adapters */ 5146 instance->msix_vectors = 1; 5147 /* Don't bother allocating more MSI-X vectors than cpus */ 5148 instance->msix_vectors = min(instance->msix_vectors, 5149 (unsigned int)num_online_cpus()); 5150 for (i = 0; i < instance->msix_vectors; i++) 5151 instance->msixentry[i].entry = i; 5152 i = pci_enable_msix_range(instance->pdev, instance->msixentry, 5153 1, instance->msix_vectors); 5154 if (i > 0) 5155 instance->msix_vectors = i; 5156 else 5157 instance->msix_vectors = 0; 5158 } 5159 5160 dev_info(&instance->pdev->dev, 5161 "firmware supports msix\t: (%d)", fw_msix_count); 5162 dev_info(&instance->pdev->dev, 5163 "current msix/online cpus\t: (%d/%d)\n", 5164 instance->msix_vectors, (unsigned int)num_online_cpus()); 5165 dev_info(&instance->pdev->dev, 5166 "RDPQ mode\t: (%s)\n", instance->is_rdpq ? "enabled" : "disabled"); 5167 5168 tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet, 5169 (unsigned long)instance); 5170 5171 if (instance->msix_vectors ? 
5172 megasas_setup_irqs_msix(instance, 1) : 5173 megasas_setup_irqs_ioapic(instance)) 5174 goto fail_setup_irqs; 5175 5176 instance->ctrl_info = kzalloc(sizeof(struct megasas_ctrl_info), 5177 GFP_KERNEL); 5178 if (instance->ctrl_info == NULL) 5179 goto fail_init_adapter; 5180 5181 /* 5182 * Below are default value for legacy Firmware. 5183 * non-fusion based controllers 5184 */ 5185 instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES; 5186 instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES; 5187 /* Get operational params, sge flags, send init cmd to controller */ 5188 if (instance->instancet->init_adapter(instance)) 5189 goto fail_init_adapter; 5190 5191 5192 instance->instancet->enable_intr(instance); 5193 5194 dev_info(&instance->pdev->dev, "INIT adapter done\n"); 5195 5196 megasas_setup_jbod_map(instance); 5197 5198 /** for passthrough 5199 * the following function will get the PD LIST. 5200 */ 5201 memset(instance->pd_list, 0, 5202 (MEGASAS_MAX_PD * sizeof(struct megasas_pd_list))); 5203 if (megasas_get_pd_list(instance) < 0) { 5204 dev_err(&instance->pdev->dev, "failed to get PD list\n"); 5205 goto fail_get_pd_list; 5206 } 5207 5208 memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS); 5209 if (megasas_ld_list_query(instance, 5210 MR_LD_QUERY_TYPE_EXPOSED_TO_HOST)) 5211 megasas_get_ld_list(instance); 5212 5213 /* 5214 * Compute the max allowed sectors per IO: The controller info has two 5215 * limits on max sectors. Driver should use the minimum of these two. 5216 * 5217 * 1 << stripe_sz_ops.min = max sectors per strip 5218 * 5219 * Note that older firmwares ( < FW ver 30) didn't report information 5220 * to calculate max_sectors_1. So the number ended up as zero always. 5221 */ 5222 tmp_sectors = 0; 5223 ctrl_info = instance->ctrl_info; 5224 5225 max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) * 5226 le16_to_cpu(ctrl_info->max_strips_per_io); 5227 max_sectors_2 = le32_to_cpu(ctrl_info->max_request_size); 5228 5229 tmp_sectors = min_t(u32, max_sectors_1, max_sectors_2); 5230 5231 instance->peerIsPresent = ctrl_info->cluster.peerIsPresent; 5232 instance->passive = ctrl_info->cluster.passive; 5233 memcpy(instance->clusterId, ctrl_info->clusterId, sizeof(instance->clusterId)); 5234 instance->UnevenSpanSupport = 5235 ctrl_info->adapterOperations2.supportUnevenSpans; 5236 if (instance->UnevenSpanSupport) { 5237 struct fusion_context *fusion = instance->ctrl_context; 5238 if (MR_ValidateMapInfo(instance)) 5239 fusion->fast_path_io = 1; 5240 else 5241 fusion->fast_path_io = 0; 5242 5243 } 5244 if (ctrl_info->host_interface.SRIOV) { 5245 instance->requestorId = ctrl_info->iov.requestorId; 5246 if (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) { 5247 if (!ctrl_info->adapterOperations2.activePassive) 5248 instance->PlasmaFW111 = 1; 5249 5250 dev_info(&instance->pdev->dev, "SR-IOV: firmware type: %s\n", 5251 instance->PlasmaFW111 ? 
"1.11" : "new"); 5252 5253 if (instance->PlasmaFW111) { 5254 iovPtr = (struct IOV_111 *) 5255 ((unsigned char *)ctrl_info + IOV_111_OFFSET); 5256 instance->requestorId = iovPtr->requestorId; 5257 } 5258 } 5259 dev_info(&instance->pdev->dev, "SRIOV: VF requestorId %d\n", 5260 instance->requestorId); 5261 } 5262 5263 instance->crash_dump_fw_support = 5264 ctrl_info->adapterOperations3.supportCrashDump; 5265 instance->crash_dump_drv_support = 5266 (instance->crash_dump_fw_support && 5267 instance->crash_dump_buf); 5268 if (instance->crash_dump_drv_support) 5269 megasas_set_crash_dump_params(instance, 5270 MR_CRASH_BUF_TURN_OFF); 5271 5272 else { 5273 if (instance->crash_dump_buf) 5274 pci_free_consistent(instance->pdev, 5275 CRASH_DMA_BUF_SIZE, 5276 instance->crash_dump_buf, 5277 instance->crash_dump_h); 5278 instance->crash_dump_buf = NULL; 5279 } 5280 5281 5282 dev_info(&instance->pdev->dev, 5283 "pci id\t\t: (0x%04x)/(0x%04x)/(0x%04x)/(0x%04x)\n", 5284 le16_to_cpu(ctrl_info->pci.vendor_id), 5285 le16_to_cpu(ctrl_info->pci.device_id), 5286 le16_to_cpu(ctrl_info->pci.sub_vendor_id), 5287 le16_to_cpu(ctrl_info->pci.sub_device_id)); 5288 dev_info(&instance->pdev->dev, "unevenspan support : %s\n", 5289 instance->UnevenSpanSupport ? "yes" : "no"); 5290 dev_info(&instance->pdev->dev, "firmware crash dump : %s\n", 5291 instance->crash_dump_drv_support ? "yes" : "no"); 5292 dev_info(&instance->pdev->dev, "jbod sync map : %s\n", 5293 instance->use_seqnum_jbod_fp ? "yes" : "no"); 5294 5295 5296 instance->max_sectors_per_req = instance->max_num_sge * 5297 SGE_BUFFER_SIZE / 512; 5298 if (tmp_sectors && (instance->max_sectors_per_req > tmp_sectors)) 5299 instance->max_sectors_per_req = tmp_sectors; 5300 5301 /* Check for valid throttlequeuedepth module parameter */ 5302 if (throttlequeuedepth && 5303 throttlequeuedepth <= instance->max_scsi_cmds) 5304 instance->throttlequeuedepth = throttlequeuedepth; 5305 else 5306 instance->throttlequeuedepth = 5307 MEGASAS_THROTTLE_QUEUE_DEPTH; 5308 5309 if (resetwaittime > MEGASAS_RESET_WAIT_TIME) 5310 resetwaittime = MEGASAS_RESET_WAIT_TIME; 5311 5312 if ((scmd_timeout < 10) || (scmd_timeout > MEGASAS_DEFAULT_CMD_TIMEOUT)) 5313 scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT; 5314 5315 /* Launch SR-IOV heartbeat timer */ 5316 if (instance->requestorId) { 5317 if (!megasas_sriov_start_heartbeat(instance, 1)) 5318 megasas_start_timer(instance, 5319 &instance->sriov_heartbeat_timer, 5320 megasas_sriov_heartbeat_handler, 5321 MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF); 5322 else 5323 instance->skip_heartbeat_timer_del = 1; 5324 } 5325 5326 return 0; 5327 5328 fail_get_pd_list: 5329 instance->instancet->disable_intr(instance); 5330 fail_init_adapter: 5331 megasas_destroy_irqs(instance); 5332 fail_setup_irqs: 5333 if (instance->msix_vectors) 5334 pci_disable_msix(instance->pdev); 5335 instance->msix_vectors = 0; 5336 fail_ready_state: 5337 kfree(instance->ctrl_info); 5338 instance->ctrl_info = NULL; 5339 iounmap(instance->reg_set); 5340 5341 fail_ioremap: 5342 pci_release_selected_regions(instance->pdev, instance->bar); 5343 5344 return -EINVAL; 5345 } 5346 5347 /** 5348 * megasas_release_mfi - Reverses the FW initialization 5349 * @instance: Adapter soft state 5350 */ 5351 static void megasas_release_mfi(struct megasas_instance *instance) 5352 { 5353 u32 reply_q_sz = sizeof(u32) *(instance->max_mfi_cmds + 1); 5354 5355 if (instance->reply_queue) 5356 pci_free_consistent(instance->pdev, reply_q_sz, 5357 instance->reply_queue, instance->reply_queue_h); 5358 5359 
megasas_free_cmds(instance); 5360 5361 iounmap(instance->reg_set); 5362 5363 pci_release_selected_regions(instance->pdev, instance->bar); 5364 } 5365 5366 /** 5367 * megasas_get_seq_num - Gets latest event sequence numbers 5368 * @instance: Adapter soft state 5369 * @eli: FW event log sequence numbers information 5370 * 5371 * FW maintains a log of all events in a non-volatile area. Upper layers would 5372 * usually find out the latest sequence number of the events, the seq number at 5373 * the boot etc. They would "read" all the events below the latest seq number 5374 * by issuing a direct fw cmd (DCMD). For the future events (beyond latest seq 5375 * number), they would subsribe to AEN (asynchronous event notification) and 5376 * wait for the events to happen. 5377 */ 5378 static int 5379 megasas_get_seq_num(struct megasas_instance *instance, 5380 struct megasas_evt_log_info *eli) 5381 { 5382 struct megasas_cmd *cmd; 5383 struct megasas_dcmd_frame *dcmd; 5384 struct megasas_evt_log_info *el_info; 5385 dma_addr_t el_info_h = 0; 5386 5387 cmd = megasas_get_cmd(instance); 5388 5389 if (!cmd) { 5390 return -ENOMEM; 5391 } 5392 5393 dcmd = &cmd->frame->dcmd; 5394 el_info = pci_alloc_consistent(instance->pdev, 5395 sizeof(struct megasas_evt_log_info), 5396 &el_info_h); 5397 5398 if (!el_info) { 5399 megasas_return_cmd(instance, cmd); 5400 return -ENOMEM; 5401 } 5402 5403 memset(el_info, 0, sizeof(*el_info)); 5404 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 5405 5406 dcmd->cmd = MFI_CMD_DCMD; 5407 dcmd->cmd_status = 0x0; 5408 dcmd->sge_count = 1; 5409 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ); 5410 dcmd->timeout = 0; 5411 dcmd->pad_0 = 0; 5412 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_log_info)); 5413 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_GET_INFO); 5414 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(el_info_h); 5415 dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_evt_log_info)); 5416 5417 if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS) == 5418 DCMD_SUCCESS) { 5419 /* 5420 * Copy the data back into callers buffer 5421 */ 5422 eli->newest_seq_num = el_info->newest_seq_num; 5423 eli->oldest_seq_num = el_info->oldest_seq_num; 5424 eli->clear_seq_num = el_info->clear_seq_num; 5425 eli->shutdown_seq_num = el_info->shutdown_seq_num; 5426 eli->boot_seq_num = el_info->boot_seq_num; 5427 } else 5428 dev_err(&instance->pdev->dev, "DCMD failed " 5429 "from %s\n", __func__); 5430 5431 pci_free_consistent(instance->pdev, sizeof(struct megasas_evt_log_info), 5432 el_info, el_info_h); 5433 5434 megasas_return_cmd(instance, cmd); 5435 5436 return 0; 5437 } 5438 5439 /** 5440 * megasas_register_aen - Registers for asynchronous event notification 5441 * @instance: Adapter soft state 5442 * @seq_num: The starting sequence number 5443 * @class_locale: Class of the event 5444 * 5445 * This function subscribes for AEN for events beyond the @seq_num. It requests 5446 * to be notified if and only if the event is of type @class_locale 5447 */ 5448 static int 5449 megasas_register_aen(struct megasas_instance *instance, u32 seq_num, 5450 u32 class_locale_word) 5451 { 5452 int ret_val; 5453 struct megasas_cmd *cmd; 5454 struct megasas_dcmd_frame *dcmd; 5455 union megasas_evt_class_locale curr_aen; 5456 union megasas_evt_class_locale prev_aen; 5457 5458 /* 5459 * If there an AEN pending already (aen_cmd), check if the 5460 * class_locale of that pending AEN is inclusive of the new 5461 * AEN request we currently have. If it is, then we don't have 5462 * to do anything. 
In other words, whichever events the current 5463 * AEN request is subscribing to, have already been subscribed 5464 * to. 5465 * 5466 * If the old_cmd is _not_ inclusive, then we have to abort 5467 * that command, form a class_locale that is superset of both 5468 * old and current and re-issue to the FW 5469 */ 5470 5471 curr_aen.word = class_locale_word; 5472 5473 if (instance->aen_cmd) { 5474 5475 prev_aen.word = 5476 le32_to_cpu(instance->aen_cmd->frame->dcmd.mbox.w[1]); 5477 5478 /* 5479 * A class whose enum value is smaller is inclusive of all 5480 * higher values. If a PROGRESS (= -1) was previously 5481 * registered, then a new registration requests for higher 5482 * classes need not be sent to FW. They are automatically 5483 * included. 5484 * 5485 * Locale numbers don't have such hierarchy. They are bitmap 5486 * values 5487 */ 5488 if ((prev_aen.members.class <= curr_aen.members.class) && 5489 !((prev_aen.members.locale & curr_aen.members.locale) ^ 5490 curr_aen.members.locale)) { 5491 /* 5492 * Previously issued event registration includes 5493 * current request. Nothing to do. 5494 */ 5495 return 0; 5496 } else { 5497 curr_aen.members.locale |= prev_aen.members.locale; 5498 5499 if (prev_aen.members.class < curr_aen.members.class) 5500 curr_aen.members.class = prev_aen.members.class; 5501 5502 instance->aen_cmd->abort_aen = 1; 5503 ret_val = megasas_issue_blocked_abort_cmd(instance, 5504 instance-> 5505 aen_cmd, 30); 5506 5507 if (ret_val) { 5508 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to abort " 5509 "previous AEN command\n"); 5510 return ret_val; 5511 } 5512 } 5513 } 5514 5515 cmd = megasas_get_cmd(instance); 5516 5517 if (!cmd) 5518 return -ENOMEM; 5519 5520 dcmd = &cmd->frame->dcmd; 5521 5522 memset(instance->evt_detail, 0, sizeof(struct megasas_evt_detail)); 5523 5524 /* 5525 * Prepare DCMD for aen registration 5526 */ 5527 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 5528 5529 dcmd->cmd = MFI_CMD_DCMD; 5530 dcmd->cmd_status = 0x0; 5531 dcmd->sge_count = 1; 5532 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ); 5533 dcmd->timeout = 0; 5534 dcmd->pad_0 = 0; 5535 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_detail)); 5536 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_WAIT); 5537 dcmd->mbox.w[0] = cpu_to_le32(seq_num); 5538 instance->last_seq_num = seq_num; 5539 dcmd->mbox.w[1] = cpu_to_le32(curr_aen.word); 5540 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->evt_detail_h); 5541 dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_evt_detail)); 5542 5543 if (instance->aen_cmd != NULL) { 5544 megasas_return_cmd(instance, cmd); 5545 return 0; 5546 } 5547 5548 /* 5549 * Store reference to the cmd used to register for AEN. 
When an 5550 * application wants us to register for AEN, we have to abort this 5551 * cmd and re-register with a new EVENT LOCALE supplied by that app 5552 */ 5553 instance->aen_cmd = cmd; 5554 5555 /* 5556 * Issue the aen registration frame 5557 */ 5558 instance->instancet->issue_dcmd(instance, cmd); 5559 5560 return 0; 5561 } 5562 5563 /** 5564 * megasas_start_aen - Subscribes to AEN during driver load time 5565 * @instance: Adapter soft state 5566 */ 5567 static int megasas_start_aen(struct megasas_instance *instance) 5568 { 5569 struct megasas_evt_log_info eli; 5570 union megasas_evt_class_locale class_locale; 5571 5572 /* 5573 * Get the latest sequence number from FW 5574 */ 5575 memset(&eli, 0, sizeof(eli)); 5576 5577 if (megasas_get_seq_num(instance, &eli)) 5578 return -1; 5579 5580 /* 5581 * Register AEN with FW for latest sequence number plus 1 5582 */ 5583 class_locale.members.reserved = 0; 5584 class_locale.members.locale = MR_EVT_LOCALE_ALL; 5585 class_locale.members.class = MR_EVT_CLASS_DEBUG; 5586 5587 return megasas_register_aen(instance, 5588 le32_to_cpu(eli.newest_seq_num) + 1, 5589 class_locale.word); 5590 } 5591 5592 /** 5593 * megasas_io_attach - Attaches this driver to SCSI mid-layer 5594 * @instance: Adapter soft state 5595 */ 5596 static int megasas_io_attach(struct megasas_instance *instance) 5597 { 5598 struct Scsi_Host *host = instance->host; 5599 5600 /* 5601 * Export parameters required by SCSI mid-layer 5602 */ 5603 host->irq = instance->pdev->irq; 5604 host->unique_id = instance->unique_id; 5605 host->can_queue = instance->max_scsi_cmds; 5606 host->this_id = instance->init_id; 5607 host->sg_tablesize = instance->max_num_sge; 5608 5609 if (instance->fw_support_ieee) 5610 instance->max_sectors_per_req = MEGASAS_MAX_SECTORS_IEEE; 5611 5612 /* 5613 * Check if the module parameter value for max_sectors can be used 5614 */ 5615 if (max_sectors && max_sectors < instance->max_sectors_per_req) 5616 instance->max_sectors_per_req = max_sectors; 5617 else { 5618 if (max_sectors) { 5619 if (((instance->pdev->device == 5620 PCI_DEVICE_ID_LSI_SAS1078GEN2) || 5621 (instance->pdev->device == 5622 PCI_DEVICE_ID_LSI_SAS0079GEN2)) && 5623 (max_sectors <= MEGASAS_MAX_SECTORS)) { 5624 instance->max_sectors_per_req = max_sectors; 5625 } else { 5626 dev_info(&instance->pdev->dev, "max_sectors should be > 0" 5627 "and <= %d (or < 1MB for GEN2 controller)\n", 5628 instance->max_sectors_per_req); 5629 } 5630 } 5631 } 5632 5633 host->max_sectors = instance->max_sectors_per_req; 5634 host->cmd_per_lun = MEGASAS_DEFAULT_CMD_PER_LUN; 5635 host->max_channel = MEGASAS_MAX_CHANNELS - 1; 5636 host->max_id = MEGASAS_MAX_DEV_PER_CHANNEL; 5637 host->max_lun = MEGASAS_MAX_LUN; 5638 host->max_cmd_len = 16; 5639 5640 /* 5641 * Notify the mid-layer about the new controller 5642 */ 5643 if (scsi_add_host(host, &instance->pdev->dev)) { 5644 dev_err(&instance->pdev->dev, 5645 "Failed to add host from %s %d\n", 5646 __func__, __LINE__); 5647 return -ENODEV; 5648 } 5649 5650 return 0; 5651 } 5652 5653 static int 5654 megasas_set_dma_mask(struct pci_dev *pdev) 5655 { 5656 /* 5657 * All our controllers are capable of performing 64-bit DMA 5658 */ 5659 if (IS_DMA64) { 5660 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) { 5661 5662 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) 5663 goto fail_set_dma_mask; 5664 } 5665 } else { 5666 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) 5667 goto fail_set_dma_mask; 5668 } 5669 /* 5670 * Ensure that all data structures are allocated in 32-bit 5671 * memory. 
5672 */ 5673 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) { 5674 /* Try 32bit DMA mask and 32 bit Consistent dma mask */ 5675 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) 5676 && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) 5677 dev_info(&pdev->dev, "set 32bit DMA mask" 5678 "and 32 bit consistent mask\n"); 5679 else 5680 goto fail_set_dma_mask; 5681 } 5682 5683 return 0; 5684 5685 fail_set_dma_mask: 5686 return 1; 5687 } 5688 5689 /** 5690 * megasas_probe_one - PCI hotplug entry point 5691 * @pdev: PCI device structure 5692 * @id: PCI ids of supported hotplugged adapter 5693 */ 5694 static int megasas_probe_one(struct pci_dev *pdev, 5695 const struct pci_device_id *id) 5696 { 5697 int rval, pos; 5698 struct Scsi_Host *host; 5699 struct megasas_instance *instance; 5700 u16 control = 0; 5701 struct fusion_context *fusion = NULL; 5702 5703 /* Reset MSI-X in the kdump kernel */ 5704 if (reset_devices) { 5705 pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX); 5706 if (pos) { 5707 pci_read_config_word(pdev, pos + PCI_MSIX_FLAGS, 5708 &control); 5709 if (control & PCI_MSIX_FLAGS_ENABLE) { 5710 dev_info(&pdev->dev, "resetting MSI-X\n"); 5711 pci_write_config_word(pdev, 5712 pos + PCI_MSIX_FLAGS, 5713 control & 5714 ~PCI_MSIX_FLAGS_ENABLE); 5715 } 5716 } 5717 } 5718 5719 /* 5720 * PCI prepping: enable device set bus mastering and dma mask 5721 */ 5722 rval = pci_enable_device_mem(pdev); 5723 5724 if (rval) { 5725 return rval; 5726 } 5727 5728 pci_set_master(pdev); 5729 5730 if (megasas_set_dma_mask(pdev)) 5731 goto fail_set_dma_mask; 5732 5733 host = scsi_host_alloc(&megasas_template, 5734 sizeof(struct megasas_instance)); 5735 5736 if (!host) { 5737 dev_printk(KERN_DEBUG, &pdev->dev, "scsi_host_alloc failed\n"); 5738 goto fail_alloc_instance; 5739 } 5740 5741 instance = (struct megasas_instance *)host->hostdata; 5742 memset(instance, 0, sizeof(*instance)); 5743 atomic_set(&instance->fw_reset_no_pci_access, 0); 5744 instance->pdev = pdev; 5745 5746 switch (instance->pdev->device) { 5747 case PCI_DEVICE_ID_LSI_FUSION: 5748 case PCI_DEVICE_ID_LSI_PLASMA: 5749 case PCI_DEVICE_ID_LSI_INVADER: 5750 case PCI_DEVICE_ID_LSI_FURY: 5751 case PCI_DEVICE_ID_LSI_INTRUDER: 5752 case PCI_DEVICE_ID_LSI_INTRUDER_24: 5753 case PCI_DEVICE_ID_LSI_CUTLASS_52: 5754 case PCI_DEVICE_ID_LSI_CUTLASS_53: 5755 { 5756 instance->ctrl_context_pages = 5757 get_order(sizeof(struct fusion_context)); 5758 instance->ctrl_context = (void *)__get_free_pages(GFP_KERNEL, 5759 instance->ctrl_context_pages); 5760 if (!instance->ctrl_context) { 5761 dev_printk(KERN_DEBUG, &pdev->dev, "Failed to allocate " 5762 "memory for Fusion context info\n"); 5763 goto fail_alloc_dma_buf; 5764 } 5765 fusion = instance->ctrl_context; 5766 memset(fusion, 0, 5767 ((1 << PAGE_SHIFT) << instance->ctrl_context_pages)); 5768 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) || 5769 (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA)) 5770 fusion->adapter_type = THUNDERBOLT_SERIES; 5771 else 5772 fusion->adapter_type = INVADER_SERIES; 5773 } 5774 break; 5775 default: /* For all other supported controllers */ 5776 5777 instance->producer = 5778 pci_alloc_consistent(pdev, sizeof(u32), 5779 &instance->producer_h); 5780 instance->consumer = 5781 pci_alloc_consistent(pdev, sizeof(u32), 5782 &instance->consumer_h); 5783 5784 if (!instance->producer || !instance->consumer) { 5785 dev_printk(KERN_DEBUG, &pdev->dev, "Failed to allocate" 5786 "memory for producer, consumer\n"); 5787 goto fail_alloc_dma_buf; 5788 } 5789 5790 
*instance->producer = 0; 5791 *instance->consumer = 0; 5792 break; 5793 } 5794 5795 /* Crash dump feature related initialisation*/ 5796 instance->drv_buf_index = 0; 5797 instance->drv_buf_alloc = 0; 5798 instance->crash_dump_fw_support = 0; 5799 instance->crash_dump_app_support = 0; 5800 instance->fw_crash_state = UNAVAILABLE; 5801 spin_lock_init(&instance->crashdump_lock); 5802 instance->crash_dump_buf = NULL; 5803 5804 megasas_poll_wait_aen = 0; 5805 instance->flag_ieee = 0; 5806 instance->ev = NULL; 5807 instance->issuepend_done = 1; 5808 atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL); 5809 instance->is_imr = 0; 5810 5811 instance->evt_detail = pci_alloc_consistent(pdev, 5812 sizeof(struct 5813 megasas_evt_detail), 5814 &instance->evt_detail_h); 5815 5816 if (!instance->evt_detail) { 5817 dev_printk(KERN_DEBUG, &pdev->dev, "Failed to allocate memory for " 5818 "event detail structure\n"); 5819 goto fail_alloc_dma_buf; 5820 } 5821 5822 if (!reset_devices) { 5823 instance->system_info_buf = pci_zalloc_consistent(pdev, 5824 sizeof(struct MR_DRV_SYSTEM_INFO), 5825 &instance->system_info_h); 5826 if (!instance->system_info_buf) 5827 dev_info(&instance->pdev->dev, "Can't allocate system info buffer\n"); 5828 5829 instance->pd_info = pci_alloc_consistent(pdev, 5830 sizeof(struct MR_PD_INFO), &instance->pd_info_h); 5831 5832 if (!instance->pd_info) 5833 dev_err(&instance->pdev->dev, "Failed to alloc mem for pd_info\n"); 5834 5835 instance->crash_dump_buf = pci_alloc_consistent(pdev, 5836 CRASH_DMA_BUF_SIZE, 5837 &instance->crash_dump_h); 5838 if (!instance->crash_dump_buf) 5839 dev_err(&pdev->dev, "Can't allocate Firmware " 5840 "crash dump DMA buffer\n"); 5841 } 5842 5843 /* 5844 * Initialize locks and queues 5845 */ 5846 INIT_LIST_HEAD(&instance->cmd_pool); 5847 INIT_LIST_HEAD(&instance->internal_reset_pending_q); 5848 5849 atomic_set(&instance->fw_outstanding,0); 5850 5851 init_waitqueue_head(&instance->int_cmd_wait_q); 5852 init_waitqueue_head(&instance->abort_cmd_wait_q); 5853 5854 spin_lock_init(&instance->mfi_pool_lock); 5855 spin_lock_init(&instance->hba_lock); 5856 spin_lock_init(&instance->completion_lock); 5857 5858 mutex_init(&instance->reset_mutex); 5859 mutex_init(&instance->hba_mutex); 5860 5861 /* 5862 * Initialize PCI related and misc parameters 5863 */ 5864 instance->host = host; 5865 instance->unique_id = pdev->bus->number << 8 | pdev->devfn; 5866 instance->init_id = MEGASAS_DEFAULT_INIT_ID; 5867 instance->ctrl_info = NULL; 5868 5869 5870 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) || 5871 (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) 5872 instance->flag_ieee = 1; 5873 5874 megasas_dbg_lvl = 0; 5875 instance->flag = 0; 5876 instance->unload = 1; 5877 instance->last_time = 0; 5878 instance->disableOnlineCtrlReset = 1; 5879 instance->UnevenSpanSupport = 0; 5880 5881 if (instance->ctrl_context) { 5882 INIT_WORK(&instance->work_init, megasas_fusion_ocr_wq); 5883 INIT_WORK(&instance->crash_init, megasas_fusion_crash_dump_wq); 5884 } else 5885 INIT_WORK(&instance->work_init, process_fw_state_change_wq); 5886 5887 /* 5888 * Initialize MFI Firmware 5889 */ 5890 if (megasas_init_fw(instance)) 5891 goto fail_init_mfi; 5892 5893 if (instance->requestorId) { 5894 if (instance->PlasmaFW111) { 5895 instance->vf_affiliation_111 = 5896 pci_alloc_consistent(pdev, sizeof(struct MR_LD_VF_AFFILIATION_111), 5897 &instance->vf_affiliation_111_h); 5898 if (!instance->vf_affiliation_111) 5899 dev_warn(&pdev->dev, "Can't allocate " 5900 "memory for VF 
affiliation buffer\n"); 5901 } else { 5902 instance->vf_affiliation = 5903 pci_alloc_consistent(pdev, 5904 (MAX_LOGICAL_DRIVES + 1) * 5905 sizeof(struct MR_LD_VF_AFFILIATION), 5906 &instance->vf_affiliation_h); 5907 if (!instance->vf_affiliation) 5908 dev_warn(&pdev->dev, "Can't allocate " 5909 "memory for VF affiliation buffer\n"); 5910 } 5911 } 5912 5913 /* 5914 * Store instance in PCI softstate 5915 */ 5916 pci_set_drvdata(pdev, instance); 5917 5918 /* 5919 * Add this controller to megasas_mgmt_info structure so that it 5920 * can be exported to management applications 5921 */ 5922 megasas_mgmt_info.count++; 5923 megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = instance; 5924 megasas_mgmt_info.max_index++; 5925 5926 /* 5927 * Register with SCSI mid-layer 5928 */ 5929 if (megasas_io_attach(instance)) 5930 goto fail_io_attach; 5931 5932 instance->unload = 0; 5933 /* 5934 * Trigger SCSI to scan our drives 5935 */ 5936 scsi_scan_host(host); 5937 5938 /* 5939 * Initiate AEN (Asynchronous Event Notification) 5940 */ 5941 if (megasas_start_aen(instance)) { 5942 dev_printk(KERN_DEBUG, &pdev->dev, "start aen failed\n"); 5943 goto fail_start_aen; 5944 } 5945 5946 /* Get current SR-IOV LD/VF affiliation */ 5947 if (instance->requestorId) 5948 megasas_get_ld_vf_affiliation(instance, 1); 5949 5950 return 0; 5951 5952 fail_start_aen: 5953 fail_io_attach: 5954 megasas_mgmt_info.count--; 5955 megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = NULL; 5956 megasas_mgmt_info.max_index--; 5957 5958 instance->instancet->disable_intr(instance); 5959 megasas_destroy_irqs(instance); 5960 5961 if (instance->ctrl_context) 5962 megasas_release_fusion(instance); 5963 else 5964 megasas_release_mfi(instance); 5965 if (instance->msix_vectors) 5966 pci_disable_msix(instance->pdev); 5967 fail_init_mfi: 5968 fail_alloc_dma_buf: 5969 if (instance->evt_detail) 5970 pci_free_consistent(pdev, sizeof(struct megasas_evt_detail), 5971 instance->evt_detail, 5972 instance->evt_detail_h); 5973 5974 if (instance->pd_info) 5975 pci_free_consistent(pdev, sizeof(struct MR_PD_INFO), 5976 instance->pd_info, 5977 instance->pd_info_h); 5978 if (instance->producer) 5979 pci_free_consistent(pdev, sizeof(u32), instance->producer, 5980 instance->producer_h); 5981 if (instance->consumer) 5982 pci_free_consistent(pdev, sizeof(u32), instance->consumer, 5983 instance->consumer_h); 5984 scsi_host_put(host); 5985 5986 fail_alloc_instance: 5987 fail_set_dma_mask: 5988 pci_disable_device(pdev); 5989 5990 return -ENODEV; 5991 } 5992 5993 /** 5994 * megasas_flush_cache - Requests FW to flush all its caches 5995 * @instance: Adapter soft state 5996 */ 5997 static void megasas_flush_cache(struct megasas_instance *instance) 5998 { 5999 struct megasas_cmd *cmd; 6000 struct megasas_dcmd_frame *dcmd; 6001 6002 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) 6003 return; 6004 6005 cmd = megasas_get_cmd(instance); 6006 6007 if (!cmd) 6008 return; 6009 6010 dcmd = &cmd->frame->dcmd; 6011 6012 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 6013 6014 dcmd->cmd = MFI_CMD_DCMD; 6015 dcmd->cmd_status = 0x0; 6016 dcmd->sge_count = 0; 6017 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE); 6018 dcmd->timeout = 0; 6019 dcmd->pad_0 = 0; 6020 dcmd->data_xfer_len = 0; 6021 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_CACHE_FLUSH); 6022 dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE; 6023 6024 if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS) 6025 != DCMD_SUCCESS) { 6026 dev_err(&instance->pdev->dev, 6027 "return from %s 
%d\n", __func__, __LINE__); 6028 return; 6029 } 6030 6031 megasas_return_cmd(instance, cmd); 6032 } 6033 6034 /** 6035 * megasas_shutdown_controller - Instructs FW to shutdown the controller 6036 * @instance: Adapter soft state 6037 * @opcode: Shutdown/Hibernate 6038 */ 6039 static void megasas_shutdown_controller(struct megasas_instance *instance, 6040 u32 opcode) 6041 { 6042 struct megasas_cmd *cmd; 6043 struct megasas_dcmd_frame *dcmd; 6044 6045 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) 6046 return; 6047 6048 cmd = megasas_get_cmd(instance); 6049 6050 if (!cmd) 6051 return; 6052 6053 if (instance->aen_cmd) 6054 megasas_issue_blocked_abort_cmd(instance, 6055 instance->aen_cmd, MFI_IO_TIMEOUT_SECS); 6056 if (instance->map_update_cmd) 6057 megasas_issue_blocked_abort_cmd(instance, 6058 instance->map_update_cmd, MFI_IO_TIMEOUT_SECS); 6059 if (instance->jbod_seq_cmd) 6060 megasas_issue_blocked_abort_cmd(instance, 6061 instance->jbod_seq_cmd, MFI_IO_TIMEOUT_SECS); 6062 6063 dcmd = &cmd->frame->dcmd; 6064 6065 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 6066 6067 dcmd->cmd = MFI_CMD_DCMD; 6068 dcmd->cmd_status = 0x0; 6069 dcmd->sge_count = 0; 6070 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE); 6071 dcmd->timeout = 0; 6072 dcmd->pad_0 = 0; 6073 dcmd->data_xfer_len = 0; 6074 dcmd->opcode = cpu_to_le32(opcode); 6075 6076 if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS) 6077 != DCMD_SUCCESS) { 6078 dev_err(&instance->pdev->dev, 6079 "return from %s %d\n", __func__, __LINE__); 6080 return; 6081 } 6082 6083 megasas_return_cmd(instance, cmd); 6084 } 6085 6086 #ifdef CONFIG_PM 6087 /** 6088 * megasas_suspend - driver suspend entry point 6089 * @pdev: PCI device structure 6090 * @state: PCI power state to suspend routine 6091 */ 6092 static int 6093 megasas_suspend(struct pci_dev *pdev, pm_message_t state) 6094 { 6095 struct Scsi_Host *host; 6096 struct megasas_instance *instance; 6097 6098 instance = pci_get_drvdata(pdev); 6099 host = instance->host; 6100 instance->unload = 1; 6101 6102 /* Shutdown SR-IOV heartbeat timer */ 6103 if (instance->requestorId && !instance->skip_heartbeat_timer_del) 6104 del_timer_sync(&instance->sriov_heartbeat_timer); 6105 6106 megasas_flush_cache(instance); 6107 megasas_shutdown_controller(instance, MR_DCMD_HIBERNATE_SHUTDOWN); 6108 6109 /* cancel the delayed work if this work still in queue */ 6110 if (instance->ev != NULL) { 6111 struct megasas_aen_event *ev = instance->ev; 6112 cancel_delayed_work_sync(&ev->hotplug_work); 6113 instance->ev = NULL; 6114 } 6115 6116 tasklet_kill(&instance->isr_tasklet); 6117 6118 pci_set_drvdata(instance->pdev, instance); 6119 instance->instancet->disable_intr(instance); 6120 6121 megasas_destroy_irqs(instance); 6122 6123 if (instance->msix_vectors) 6124 pci_disable_msix(instance->pdev); 6125 6126 pci_save_state(pdev); 6127 pci_disable_device(pdev); 6128 6129 pci_set_power_state(pdev, pci_choose_state(pdev, state)); 6130 6131 return 0; 6132 } 6133 6134 /** 6135 * megasas_resume- driver resume entry point 6136 * @pdev: PCI device structure 6137 */ 6138 static int 6139 megasas_resume(struct pci_dev *pdev) 6140 { 6141 int rval; 6142 struct Scsi_Host *host; 6143 struct megasas_instance *instance; 6144 6145 instance = pci_get_drvdata(pdev); 6146 host = instance->host; 6147 pci_set_power_state(pdev, PCI_D0); 6148 pci_enable_wake(pdev, PCI_D0, 0); 6149 pci_restore_state(pdev); 6150 6151 /* 6152 * PCI prepping: enable device set bus mastering and dma mask 6153 */ 6154 rval = pci_enable_device_mem(pdev); 
6155 6156 if (rval) { 6157 dev_err(&pdev->dev, "Enable device failed\n"); 6158 return rval; 6159 } 6160 6161 pci_set_master(pdev); 6162 6163 if (megasas_set_dma_mask(pdev)) 6164 goto fail_set_dma_mask; 6165 6166 /* 6167 * Initialize MFI Firmware 6168 */ 6169 6170 atomic_set(&instance->fw_outstanding, 0); 6171 6172 /* 6173 * We expect the FW state to be READY 6174 */ 6175 if (megasas_transition_to_ready(instance, 0)) 6176 goto fail_ready_state; 6177 6178 /* Now re-enable MSI-X */ 6179 if (instance->msix_vectors && 6180 pci_enable_msix_exact(instance->pdev, instance->msixentry, 6181 instance->msix_vectors)) 6182 goto fail_reenable_msix; 6183 6184 if (instance->ctrl_context) { 6185 megasas_reset_reply_desc(instance); 6186 if (megasas_ioc_init_fusion(instance)) { 6187 megasas_free_cmds(instance); 6188 megasas_free_cmds_fusion(instance); 6189 goto fail_init_mfi; 6190 } 6191 if (!megasas_get_map_info(instance)) 6192 megasas_sync_map_info(instance); 6193 } else { 6194 *instance->producer = 0; 6195 *instance->consumer = 0; 6196 if (megasas_issue_init_mfi(instance)) 6197 goto fail_init_mfi; 6198 } 6199 6200 tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet, 6201 (unsigned long)instance); 6202 6203 if (instance->msix_vectors ? 6204 megasas_setup_irqs_msix(instance, 0) : 6205 megasas_setup_irqs_ioapic(instance)) 6206 goto fail_init_mfi; 6207 6208 /* Re-launch SR-IOV heartbeat timer */ 6209 if (instance->requestorId) { 6210 if (!megasas_sriov_start_heartbeat(instance, 0)) 6211 megasas_start_timer(instance, 6212 &instance->sriov_heartbeat_timer, 6213 megasas_sriov_heartbeat_handler, 6214 MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF); 6215 else { 6216 instance->skip_heartbeat_timer_del = 1; 6217 goto fail_init_mfi; 6218 } 6219 } 6220 6221 instance->instancet->enable_intr(instance); 6222 megasas_setup_jbod_map(instance); 6223 instance->unload = 0; 6224 6225 /* 6226 * Initiate AEN (Asynchronous Event Notification) 6227 */ 6228 if (megasas_start_aen(instance)) 6229 dev_err(&instance->pdev->dev, "Start AEN failed\n"); 6230 6231 return 0; 6232 6233 fail_init_mfi: 6234 if (instance->evt_detail) 6235 pci_free_consistent(pdev, sizeof(struct megasas_evt_detail), 6236 instance->evt_detail, 6237 instance->evt_detail_h); 6238 6239 if (instance->pd_info) 6240 pci_free_consistent(pdev, sizeof(struct MR_PD_INFO), 6241 instance->pd_info, 6242 instance->pd_info_h); 6243 if (instance->producer) 6244 pci_free_consistent(pdev, sizeof(u32), instance->producer, 6245 instance->producer_h); 6246 if (instance->consumer) 6247 pci_free_consistent(pdev, sizeof(u32), instance->consumer, 6248 instance->consumer_h); 6249 scsi_host_put(host); 6250 6251 fail_set_dma_mask: 6252 fail_ready_state: 6253 fail_reenable_msix: 6254 6255 pci_disable_device(pdev); 6256 6257 return -ENODEV; 6258 } 6259 #else 6260 #define megasas_suspend NULL 6261 #define megasas_resume NULL 6262 #endif 6263 6264 /** 6265 * megasas_detach_one - PCI hot"un"plug entry point 6266 * @pdev: PCI device structure 6267 */ 6268 static void megasas_detach_one(struct pci_dev *pdev) 6269 { 6270 int i; 6271 struct Scsi_Host *host; 6272 struct megasas_instance *instance; 6273 struct fusion_context *fusion; 6274 u32 pd_seq_map_sz; 6275 6276 instance = pci_get_drvdata(pdev); 6277 instance->unload = 1; 6278 host = instance->host; 6279 fusion = instance->ctrl_context; 6280 6281 /* Shutdown SR-IOV heartbeat timer */ 6282 if (instance->requestorId && !instance->skip_heartbeat_timer_del) 6283 del_timer_sync(&instance->sriov_heartbeat_timer); 6284 6285 if 
(instance->fw_crash_state != UNAVAILABLE) 6286 megasas_free_host_crash_buffer(instance); 6287 scsi_remove_host(instance->host); 6288 megasas_flush_cache(instance); 6289 megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN); 6290 6291 /* cancel the delayed work if this work still in queue*/ 6292 if (instance->ev != NULL) { 6293 struct megasas_aen_event *ev = instance->ev; 6294 cancel_delayed_work_sync(&ev->hotplug_work); 6295 instance->ev = NULL; 6296 } 6297 6298 /* cancel all wait events */ 6299 wake_up_all(&instance->int_cmd_wait_q); 6300 6301 tasklet_kill(&instance->isr_tasklet); 6302 6303 /* 6304 * Take the instance off the instance array. Note that we will not 6305 * decrement the max_index. We let this array be sparse array 6306 */ 6307 for (i = 0; i < megasas_mgmt_info.max_index; i++) { 6308 if (megasas_mgmt_info.instance[i] == instance) { 6309 megasas_mgmt_info.count--; 6310 megasas_mgmt_info.instance[i] = NULL; 6311 6312 break; 6313 } 6314 } 6315 6316 instance->instancet->disable_intr(instance); 6317 6318 megasas_destroy_irqs(instance); 6319 6320 if (instance->msix_vectors) 6321 pci_disable_msix(instance->pdev); 6322 6323 if (instance->ctrl_context) { 6324 megasas_release_fusion(instance); 6325 pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) + 6326 (sizeof(struct MR_PD_CFG_SEQ) * 6327 (MAX_PHYSICAL_DEVICES - 1)); 6328 for (i = 0; i < 2 ; i++) { 6329 if (fusion->ld_map[i]) 6330 dma_free_coherent(&instance->pdev->dev, 6331 fusion->max_map_sz, 6332 fusion->ld_map[i], 6333 fusion->ld_map_phys[i]); 6334 if (fusion->ld_drv_map[i]) 6335 free_pages((ulong)fusion->ld_drv_map[i], 6336 fusion->drv_map_pages); 6337 if (fusion->pd_seq_sync[i]) 6338 dma_free_coherent(&instance->pdev->dev, 6339 pd_seq_map_sz, 6340 fusion->pd_seq_sync[i], 6341 fusion->pd_seq_phys[i]); 6342 } 6343 free_pages((ulong)instance->ctrl_context, 6344 instance->ctrl_context_pages); 6345 } else { 6346 megasas_release_mfi(instance); 6347 pci_free_consistent(pdev, sizeof(u32), 6348 instance->producer, 6349 instance->producer_h); 6350 pci_free_consistent(pdev, sizeof(u32), 6351 instance->consumer, 6352 instance->consumer_h); 6353 } 6354 6355 kfree(instance->ctrl_info); 6356 6357 if (instance->evt_detail) 6358 pci_free_consistent(pdev, sizeof(struct megasas_evt_detail), 6359 instance->evt_detail, instance->evt_detail_h); 6360 6361 if (instance->pd_info) 6362 pci_free_consistent(pdev, sizeof(struct MR_PD_INFO), 6363 instance->pd_info, 6364 instance->pd_info_h); 6365 if (instance->vf_affiliation) 6366 pci_free_consistent(pdev, (MAX_LOGICAL_DRIVES + 1) * 6367 sizeof(struct MR_LD_VF_AFFILIATION), 6368 instance->vf_affiliation, 6369 instance->vf_affiliation_h); 6370 6371 if (instance->vf_affiliation_111) 6372 pci_free_consistent(pdev, 6373 sizeof(struct MR_LD_VF_AFFILIATION_111), 6374 instance->vf_affiliation_111, 6375 instance->vf_affiliation_111_h); 6376 6377 if (instance->hb_host_mem) 6378 pci_free_consistent(pdev, sizeof(struct MR_CTRL_HB_HOST_MEM), 6379 instance->hb_host_mem, 6380 instance->hb_host_mem_h); 6381 6382 if (instance->crash_dump_buf) 6383 pci_free_consistent(pdev, CRASH_DMA_BUF_SIZE, 6384 instance->crash_dump_buf, instance->crash_dump_h); 6385 6386 if (instance->system_info_buf) 6387 pci_free_consistent(pdev, sizeof(struct MR_DRV_SYSTEM_INFO), 6388 instance->system_info_buf, instance->system_info_h); 6389 6390 scsi_host_put(host); 6391 6392 pci_disable_device(pdev); 6393 } 6394 6395 /** 6396 * megasas_shutdown - Shutdown entry point 6397 * @device: Generic device structure 6398 */ 6399 static void 
megasas_shutdown(struct pci_dev *pdev) 6400 { 6401 struct megasas_instance *instance = pci_get_drvdata(pdev); 6402 6403 instance->unload = 1; 6404 megasas_flush_cache(instance); 6405 megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN); 6406 instance->instancet->disable_intr(instance); 6407 megasas_destroy_irqs(instance); 6408 6409 if (instance->msix_vectors) 6410 pci_disable_msix(instance->pdev); 6411 } 6412 6413 /** 6414 * megasas_mgmt_open - char node "open" entry point 6415 */ 6416 static int megasas_mgmt_open(struct inode *inode, struct file *filep) 6417 { 6418 /* 6419 * Allow only those users with admin rights 6420 */ 6421 if (!capable(CAP_SYS_ADMIN)) 6422 return -EACCES; 6423 6424 return 0; 6425 } 6426 6427 /** 6428 * megasas_mgmt_fasync - Async notifier registration from applications 6429 * 6430 * This function adds the calling process to a driver global queue. When an 6431 * event occurs, SIGIO will be sent to all processes in this queue. 6432 */ 6433 static int megasas_mgmt_fasync(int fd, struct file *filep, int mode) 6434 { 6435 int rc; 6436 6437 mutex_lock(&megasas_async_queue_mutex); 6438 6439 rc = fasync_helper(fd, filep, mode, &megasas_async_queue); 6440 6441 mutex_unlock(&megasas_async_queue_mutex); 6442 6443 if (rc >= 0) { 6444 /* For sanity check when we get ioctl */ 6445 filep->private_data = filep; 6446 return 0; 6447 } 6448 6449 printk(KERN_DEBUG "megasas: fasync_helper failed [%d]\n", rc); 6450 6451 return rc; 6452 } 6453 6454 /** 6455 * megasas_mgmt_poll - char node "poll" entry point 6456 * */ 6457 static unsigned int megasas_mgmt_poll(struct file *file, poll_table *wait) 6458 { 6459 unsigned int mask; 6460 unsigned long flags; 6461 6462 poll_wait(file, &megasas_poll_wait, wait); 6463 spin_lock_irqsave(&poll_aen_lock, flags); 6464 if (megasas_poll_wait_aen) 6465 mask = (POLLIN | POLLRDNORM); 6466 else 6467 mask = 0; 6468 megasas_poll_wait_aen = 0; 6469 spin_unlock_irqrestore(&poll_aen_lock, flags); 6470 return mask; 6471 } 6472 6473 /* 6474 * megasas_set_crash_dump_params_ioctl: 6475 * Send CRASH_DUMP_MODE DCMD to all controllers 6476 * @cmd: MFI command frame 6477 */ 6478 6479 static int megasas_set_crash_dump_params_ioctl(struct megasas_cmd *cmd) 6480 { 6481 struct megasas_instance *local_instance; 6482 int i, error = 0; 6483 int crash_support; 6484 6485 crash_support = cmd->frame->dcmd.mbox.w[0]; 6486 6487 for (i = 0; i < megasas_mgmt_info.max_index; i++) { 6488 local_instance = megasas_mgmt_info.instance[i]; 6489 if (local_instance && local_instance->crash_dump_drv_support) { 6490 if ((atomic_read(&local_instance->adprecovery) == 6491 MEGASAS_HBA_OPERATIONAL) && 6492 !megasas_set_crash_dump_params(local_instance, 6493 crash_support)) { 6494 local_instance->crash_dump_app_support = 6495 crash_support; 6496 dev_info(&local_instance->pdev->dev, 6497 "Application firmware crash " 6498 "dump mode set success\n"); 6499 error = 0; 6500 } else { 6501 dev_info(&local_instance->pdev->dev, 6502 "Application firmware crash " 6503 "dump mode set failed\n"); 6504 error = -1; 6505 } 6506 } 6507 } 6508 return error; 6509 } 6510 6511 /** 6512 * megasas_mgmt_fw_ioctl - Issues management ioctls to FW 6513 * @instance: Adapter soft state 6514 * @argp: User's ioctl packet 6515 */ 6516 static int 6517 megasas_mgmt_fw_ioctl(struct megasas_instance *instance, 6518 struct megasas_iocpacket __user * user_ioc, 6519 struct megasas_iocpacket *ioc) 6520 { 6521 struct megasas_sge32 *kern_sge32; 6522 struct megasas_cmd *cmd; 6523 void *kbuff_arr[MAX_IOCTL_SGE]; 6524 dma_addr_t 
buf_handle = 0; 6525 int error = 0, i; 6526 void *sense = NULL; 6527 dma_addr_t sense_handle; 6528 unsigned long *sense_ptr; 6529 6530 memset(kbuff_arr, 0, sizeof(kbuff_arr)); 6531 6532 if (ioc->sge_count > MAX_IOCTL_SGE) { 6533 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SGE count [%d] > max limit [%d]\n", 6534 ioc->sge_count, MAX_IOCTL_SGE); 6535 return -EINVAL; 6536 } 6537 6538 cmd = megasas_get_cmd(instance); 6539 if (!cmd) { 6540 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get a cmd packet\n"); 6541 return -ENOMEM; 6542 } 6543 6544 /* 6545 * User's IOCTL packet has 2 frames (maximum). Copy those two 6546 * frames into our cmd's frames. cmd->frame's context will get 6547 * overwritten when we copy from user's frames. So set that value 6548 * alone separately 6549 */ 6550 memcpy(cmd->frame, ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE); 6551 cmd->frame->hdr.context = cpu_to_le32(cmd->index); 6552 cmd->frame->hdr.pad_0 = 0; 6553 cmd->frame->hdr.flags &= cpu_to_le16(~(MFI_FRAME_IEEE | 6554 MFI_FRAME_SGL64 | 6555 MFI_FRAME_SENSE64)); 6556 6557 if (cmd->frame->dcmd.opcode == MR_DRIVER_SET_APP_CRASHDUMP_MODE) { 6558 error = megasas_set_crash_dump_params_ioctl(cmd); 6559 megasas_return_cmd(instance, cmd); 6560 return error; 6561 } 6562 6563 /* 6564 * The management interface between applications and the fw uses 6565 * MFI frames. E.g, RAID configuration changes, LD property changes 6566 * etc are accomplishes through different kinds of MFI frames. The 6567 * driver needs to care only about substituting user buffers with 6568 * kernel buffers in SGLs. The location of SGL is embedded in the 6569 * struct iocpacket itself. 6570 */ 6571 kern_sge32 = (struct megasas_sge32 *) 6572 ((unsigned long)cmd->frame + ioc->sgl_off); 6573 6574 /* 6575 * For each user buffer, create a mirror buffer and copy in 6576 */ 6577 for (i = 0; i < ioc->sge_count; i++) { 6578 if (!ioc->sgl[i].iov_len) 6579 continue; 6580 6581 kbuff_arr[i] = dma_alloc_coherent(&instance->pdev->dev, 6582 ioc->sgl[i].iov_len, 6583 &buf_handle, GFP_KERNEL); 6584 if (!kbuff_arr[i]) { 6585 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc " 6586 "kernel SGL buffer for IOCTL\n"); 6587 error = -ENOMEM; 6588 goto out; 6589 } 6590 6591 /* 6592 * We don't change the dma_coherent_mask, so 6593 * pci_alloc_consistent only returns 32bit addresses 6594 */ 6595 kern_sge32[i].phys_addr = cpu_to_le32(buf_handle); 6596 kern_sge32[i].length = cpu_to_le32(ioc->sgl[i].iov_len); 6597 6598 /* 6599 * We created a kernel buffer corresponding to the 6600 * user buffer. 
Now copy in from the user buffer 6601 */ 6602 if (copy_from_user(kbuff_arr[i], ioc->sgl[i].iov_base, 6603 (u32) (ioc->sgl[i].iov_len))) { 6604 error = -EFAULT; 6605 goto out; 6606 } 6607 } 6608 6609 if (ioc->sense_len) { 6610 sense = dma_alloc_coherent(&instance->pdev->dev, ioc->sense_len, 6611 &sense_handle, GFP_KERNEL); 6612 if (!sense) { 6613 error = -ENOMEM; 6614 goto out; 6615 } 6616 6617 sense_ptr = 6618 (unsigned long *) ((unsigned long)cmd->frame + ioc->sense_off); 6619 *sense_ptr = cpu_to_le32(sense_handle); 6620 } 6621 6622 /* 6623 * Set the sync_cmd flag so that the ISR knows not to complete this 6624 * cmd to the SCSI mid-layer 6625 */ 6626 cmd->sync_cmd = 1; 6627 if (megasas_issue_blocked_cmd(instance, cmd, 0) == DCMD_NOT_FIRED) { 6628 cmd->sync_cmd = 0; 6629 dev_err(&instance->pdev->dev, 6630 "return -EBUSY from %s %d opcode 0x%x cmd->cmd_status_drv 0x%x\n", 6631 __func__, __LINE__, cmd->frame->dcmd.opcode, 6632 cmd->cmd_status_drv); 6633 return -EBUSY; 6634 } 6635 6636 cmd->sync_cmd = 0; 6637 6638 if (instance->unload == 1) { 6639 dev_info(&instance->pdev->dev, "Driver unload is in progress " 6640 "don't submit data to application\n"); 6641 goto out; 6642 } 6643 /* 6644 * copy out the kernel buffers to user buffers 6645 */ 6646 for (i = 0; i < ioc->sge_count; i++) { 6647 if (copy_to_user(ioc->sgl[i].iov_base, kbuff_arr[i], 6648 ioc->sgl[i].iov_len)) { 6649 error = -EFAULT; 6650 goto out; 6651 } 6652 } 6653 6654 /* 6655 * copy out the sense 6656 */ 6657 if (ioc->sense_len) { 6658 /* 6659 * sense_ptr points to the location that has the user 6660 * sense buffer address 6661 */ 6662 sense_ptr = (unsigned long *) ((unsigned long)ioc->frame.raw + 6663 ioc->sense_off); 6664 6665 if (copy_to_user((void __user *)((unsigned long)(*sense_ptr)), 6666 sense, ioc->sense_len)) { 6667 dev_err(&instance->pdev->dev, "Failed to copy out to user " 6668 "sense data\n"); 6669 error = -EFAULT; 6670 goto out; 6671 } 6672 } 6673 6674 /* 6675 * copy the status codes returned by the fw 6676 */ 6677 if (copy_to_user(&user_ioc->frame.hdr.cmd_status, 6678 &cmd->frame->hdr.cmd_status, sizeof(u8))) { 6679 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error copying out cmd_status\n"); 6680 error = -EFAULT; 6681 } 6682 6683 out: 6684 if (sense) { 6685 dma_free_coherent(&instance->pdev->dev, ioc->sense_len, 6686 sense, sense_handle); 6687 } 6688 6689 for (i = 0; i < ioc->sge_count; i++) { 6690 if (kbuff_arr[i]) { 6691 dma_free_coherent(&instance->pdev->dev, 6692 le32_to_cpu(kern_sge32[i].length), 6693 kbuff_arr[i], 6694 le32_to_cpu(kern_sge32[i].phys_addr)); 6695 kbuff_arr[i] = NULL; 6696 } 6697 } 6698 6699 megasas_return_cmd(instance, cmd); 6700 return error; 6701 } 6702 6703 static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg) 6704 { 6705 struct megasas_iocpacket __user *user_ioc = 6706 (struct megasas_iocpacket __user *)arg; 6707 struct megasas_iocpacket *ioc; 6708 struct megasas_instance *instance; 6709 int error; 6710 int i; 6711 unsigned long flags; 6712 u32 wait_time = MEGASAS_RESET_WAIT_TIME; 6713 6714 ioc = kmalloc(sizeof(*ioc), GFP_KERNEL); 6715 if (!ioc) 6716 return -ENOMEM; 6717 6718 if (copy_from_user(ioc, user_ioc, sizeof(*ioc))) { 6719 error = -EFAULT; 6720 goto out_kfree_ioc; 6721 } 6722 6723 instance = megasas_lookup_instance(ioc->host_no); 6724 if (!instance) { 6725 error = -ENODEV; 6726 goto out_kfree_ioc; 6727 } 6728 6729 /* Adjust ioctl wait time for VF mode */ 6730 if (instance->requestorId) 6731 wait_time = MEGASAS_ROUTINE_WAIT_TIME_VF; 6732 6733 /* Block ioctls in VF 
mode */ 6734 if (instance->requestorId && !allow_vf_ioctls) { 6735 error = -ENODEV; 6736 goto out_kfree_ioc; 6737 } 6738 6739 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { 6740 dev_err(&instance->pdev->dev, "Controller in crit error\n"); 6741 error = -ENODEV; 6742 goto out_kfree_ioc; 6743 } 6744 6745 if (instance->unload == 1) { 6746 error = -ENODEV; 6747 goto out_kfree_ioc; 6748 } 6749 6750 if (down_interruptible(&instance->ioctl_sem)) { 6751 error = -ERESTARTSYS; 6752 goto out_kfree_ioc; 6753 } 6754 6755 for (i = 0; i < wait_time; i++) { 6756 6757 spin_lock_irqsave(&instance->hba_lock, flags); 6758 if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL) { 6759 spin_unlock_irqrestore(&instance->hba_lock, flags); 6760 break; 6761 } 6762 spin_unlock_irqrestore(&instance->hba_lock, flags); 6763 6764 if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) { 6765 dev_notice(&instance->pdev->dev, "waiting" 6766 "for controller reset to finish\n"); 6767 } 6768 6769 msleep(1000); 6770 } 6771 6772 spin_lock_irqsave(&instance->hba_lock, flags); 6773 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) { 6774 spin_unlock_irqrestore(&instance->hba_lock, flags); 6775 6776 dev_err(&instance->pdev->dev, "timed out while" 6777 "waiting for HBA to recover\n"); 6778 error = -ENODEV; 6779 goto out_up; 6780 } 6781 spin_unlock_irqrestore(&instance->hba_lock, flags); 6782 6783 error = megasas_mgmt_fw_ioctl(instance, user_ioc, ioc); 6784 out_up: 6785 up(&instance->ioctl_sem); 6786 6787 out_kfree_ioc: 6788 kfree(ioc); 6789 return error; 6790 } 6791 6792 static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg) 6793 { 6794 struct megasas_instance *instance; 6795 struct megasas_aen aen; 6796 int error; 6797 int i; 6798 unsigned long flags; 6799 u32 wait_time = MEGASAS_RESET_WAIT_TIME; 6800 6801 if (file->private_data != file) { 6802 printk(KERN_DEBUG "megasas: fasync_helper was not " 6803 "called first\n"); 6804 return -EINVAL; 6805 } 6806 6807 if (copy_from_user(&aen, (void __user *)arg, sizeof(aen))) 6808 return -EFAULT; 6809 6810 instance = megasas_lookup_instance(aen.host_no); 6811 6812 if (!instance) 6813 return -ENODEV; 6814 6815 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { 6816 return -ENODEV; 6817 } 6818 6819 if (instance->unload == 1) { 6820 return -ENODEV; 6821 } 6822 6823 for (i = 0; i < wait_time; i++) { 6824 6825 spin_lock_irqsave(&instance->hba_lock, flags); 6826 if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL) { 6827 spin_unlock_irqrestore(&instance->hba_lock, 6828 flags); 6829 break; 6830 } 6831 6832 spin_unlock_irqrestore(&instance->hba_lock, flags); 6833 6834 if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) { 6835 dev_notice(&instance->pdev->dev, "waiting for" 6836 "controller reset to finish\n"); 6837 } 6838 6839 msleep(1000); 6840 } 6841 6842 spin_lock_irqsave(&instance->hba_lock, flags); 6843 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) { 6844 spin_unlock_irqrestore(&instance->hba_lock, flags); 6845 dev_err(&instance->pdev->dev, "timed out while waiting" 6846 "for HBA to recover\n"); 6847 return -ENODEV; 6848 } 6849 spin_unlock_irqrestore(&instance->hba_lock, flags); 6850 6851 mutex_lock(&instance->reset_mutex); 6852 error = megasas_register_aen(instance, aen.seq_num, 6853 aen.class_locale_word); 6854 mutex_unlock(&instance->reset_mutex); 6855 return error; 6856 } 6857 6858 /** 6859 * megasas_mgmt_ioctl - char node ioctl entry point 6860 */ 6861 static long 6862 
megasas_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 6863 { 6864 switch (cmd) { 6865 case MEGASAS_IOC_FIRMWARE: 6866 return megasas_mgmt_ioctl_fw(file, arg); 6867 6868 case MEGASAS_IOC_GET_AEN: 6869 return megasas_mgmt_ioctl_aen(file, arg); 6870 } 6871 6872 return -ENOTTY; 6873 } 6874 6875 #ifdef CONFIG_COMPAT 6876 static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg) 6877 { 6878 struct compat_megasas_iocpacket __user *cioc = 6879 (struct compat_megasas_iocpacket __user *)arg; 6880 struct megasas_iocpacket __user *ioc = 6881 compat_alloc_user_space(sizeof(struct megasas_iocpacket)); 6882 int i; 6883 int error = 0; 6884 compat_uptr_t ptr; 6885 u32 local_sense_off; 6886 u32 local_sense_len; 6887 u32 user_sense_off; 6888 6889 if (clear_user(ioc, sizeof(*ioc))) 6890 return -EFAULT; 6891 6892 if (copy_in_user(&ioc->host_no, &cioc->host_no, sizeof(u16)) || 6893 copy_in_user(&ioc->sgl_off, &cioc->sgl_off, sizeof(u32)) || 6894 copy_in_user(&ioc->sense_off, &cioc->sense_off, sizeof(u32)) || 6895 copy_in_user(&ioc->sense_len, &cioc->sense_len, sizeof(u32)) || 6896 copy_in_user(ioc->frame.raw, cioc->frame.raw, 128) || 6897 copy_in_user(&ioc->sge_count, &cioc->sge_count, sizeof(u32))) 6898 return -EFAULT; 6899 6900 /* 6901 * The sense_ptr is used in megasas_mgmt_fw_ioctl only when 6902 * sense_len is not null, so prepare the 64bit value under 6903 * the same condition. 6904 */ 6905 if (get_user(local_sense_off, &ioc->sense_off) || 6906 get_user(local_sense_len, &ioc->sense_len) || 6907 get_user(user_sense_off, &cioc->sense_off)) 6908 return -EFAULT; 6909 6910 if (local_sense_len) { 6911 void __user **sense_ioc_ptr = 6912 (void __user **)((u8 *)((unsigned long)&ioc->frame.raw) + local_sense_off); 6913 compat_uptr_t *sense_cioc_ptr = 6914 (compat_uptr_t *)(((unsigned long)&cioc->frame.raw) + user_sense_off); 6915 if (get_user(ptr, sense_cioc_ptr) || 6916 put_user(compat_ptr(ptr), sense_ioc_ptr)) 6917 return -EFAULT; 6918 } 6919 6920 for (i = 0; i < MAX_IOCTL_SGE; i++) { 6921 if (get_user(ptr, &cioc->sgl[i].iov_base) || 6922 put_user(compat_ptr(ptr), &ioc->sgl[i].iov_base) || 6923 copy_in_user(&ioc->sgl[i].iov_len, 6924 &cioc->sgl[i].iov_len, sizeof(compat_size_t))) 6925 return -EFAULT; 6926 } 6927 6928 error = megasas_mgmt_ioctl_fw(file, (unsigned long)ioc); 6929 6930 if (copy_in_user(&cioc->frame.hdr.cmd_status, 6931 &ioc->frame.hdr.cmd_status, sizeof(u8))) { 6932 printk(KERN_DEBUG "megasas: error copy_in_user cmd_status\n"); 6933 return -EFAULT; 6934 } 6935 return error; 6936 } 6937 6938 static long 6939 megasas_mgmt_compat_ioctl(struct file *file, unsigned int cmd, 6940 unsigned long arg) 6941 { 6942 switch (cmd) { 6943 case MEGASAS_IOC_FIRMWARE32: 6944 return megasas_mgmt_compat_ioctl_fw(file, arg); 6945 case MEGASAS_IOC_GET_AEN: 6946 return megasas_mgmt_ioctl_aen(file, arg); 6947 } 6948 6949 return -ENOTTY; 6950 } 6951 #endif 6952 6953 /* 6954 * File operations structure for management interface 6955 */ 6956 static const struct file_operations megasas_mgmt_fops = { 6957 .owner = THIS_MODULE, 6958 .open = megasas_mgmt_open, 6959 .fasync = megasas_mgmt_fasync, 6960 .unlocked_ioctl = megasas_mgmt_ioctl, 6961 .poll = megasas_mgmt_poll, 6962 #ifdef CONFIG_COMPAT 6963 .compat_ioctl = megasas_mgmt_compat_ioctl, 6964 #endif 6965 .llseek = noop_llseek, 6966 }; 6967 6968 /* 6969 * PCI hotplug support registration structure 6970 */ 6971 static struct pci_driver megasas_pci_driver = { 6972 6973 .name = "megaraid_sas", 6974 .id_table = megasas_pci_table, 6975 .probe 
= megasas_probe_one, 6976 .remove = megasas_detach_one, 6977 .suspend = megasas_suspend, 6978 .resume = megasas_resume, 6979 .shutdown = megasas_shutdown, 6980 }; 6981 6982 /* 6983 * Sysfs driver attributes 6984 */ 6985 static ssize_t megasas_sysfs_show_version(struct device_driver *dd, char *buf) 6986 { 6987 return snprintf(buf, strlen(MEGASAS_VERSION) + 2, "%s\n", 6988 MEGASAS_VERSION); 6989 } 6990 6991 static DRIVER_ATTR(version, S_IRUGO, megasas_sysfs_show_version, NULL); 6992 6993 static ssize_t 6994 megasas_sysfs_show_release_date(struct device_driver *dd, char *buf) 6995 { 6996 return snprintf(buf, strlen(MEGASAS_RELDATE) + 2, "%s\n", 6997 MEGASAS_RELDATE); 6998 } 6999 7000 static DRIVER_ATTR(release_date, S_IRUGO, megasas_sysfs_show_release_date, NULL); 7001 7002 static ssize_t 7003 megasas_sysfs_show_support_poll_for_event(struct device_driver *dd, char *buf) 7004 { 7005 return sprintf(buf, "%u\n", support_poll_for_event); 7006 } 7007 7008 static DRIVER_ATTR(support_poll_for_event, S_IRUGO, 7009 megasas_sysfs_show_support_poll_for_event, NULL); 7010 7011 static ssize_t 7012 megasas_sysfs_show_support_device_change(struct device_driver *dd, char *buf) 7013 { 7014 return sprintf(buf, "%u\n", support_device_change); 7015 } 7016 7017 static DRIVER_ATTR(support_device_change, S_IRUGO, 7018 megasas_sysfs_show_support_device_change, NULL); 7019 7020 static ssize_t 7021 megasas_sysfs_show_dbg_lvl(struct device_driver *dd, char *buf) 7022 { 7023 return sprintf(buf, "%u\n", megasas_dbg_lvl); 7024 } 7025 7026 static ssize_t 7027 megasas_sysfs_set_dbg_lvl(struct device_driver *dd, const char *buf, size_t count) 7028 { 7029 int retval = count; 7030 7031 if (sscanf(buf, "%u", &megasas_dbg_lvl) < 1) { 7032 printk(KERN_ERR "megasas: could not set dbg_lvl\n"); 7033 retval = -EINVAL; 7034 } 7035 return retval; 7036 } 7037 7038 static DRIVER_ATTR(dbg_lvl, S_IRUGO|S_IWUSR, megasas_sysfs_show_dbg_lvl, 7039 megasas_sysfs_set_dbg_lvl); 7040 7041 static void 7042 megasas_aen_polling(struct work_struct *work) 7043 { 7044 struct megasas_aen_event *ev = 7045 container_of(work, struct megasas_aen_event, hotplug_work.work); 7046 struct megasas_instance *instance = ev->instance; 7047 union megasas_evt_class_locale class_locale; 7048 struct Scsi_Host *host; 7049 struct scsi_device *sdev1; 7050 u16 pd_index = 0; 7051 u16 ld_index = 0; 7052 int i, j, doscan = 0; 7053 u32 seq_num, wait_time = MEGASAS_RESET_WAIT_TIME; 7054 int error; 7055 u8 dcmd_ret = DCMD_SUCCESS; 7056 7057 if (!instance) { 7058 printk(KERN_ERR "invalid instance!\n"); 7059 kfree(ev); 7060 return; 7061 } 7062 7063 /* Adjust event workqueue thread wait time for VF mode */ 7064 if (instance->requestorId) 7065 wait_time = MEGASAS_ROUTINE_WAIT_TIME_VF; 7066 7067 /* Don't run the event workqueue thread if OCR is running */ 7068 mutex_lock(&instance->reset_mutex); 7069 7070 instance->ev = NULL; 7071 host = instance->host; 7072 if (instance->evt_detail) { 7073 megasas_decode_evt(instance); 7074 7075 switch (le32_to_cpu(instance->evt_detail->code)) { 7076 7077 case MR_EVT_PD_INSERTED: 7078 case MR_EVT_PD_REMOVED: 7079 dcmd_ret = megasas_get_pd_list(instance); 7080 if (dcmd_ret == DCMD_SUCCESS) 7081 doscan = SCAN_PD_CHANNEL; 7082 break; 7083 7084 case MR_EVT_LD_OFFLINE: 7085 case MR_EVT_CFG_CLEARED: 7086 case MR_EVT_LD_DELETED: 7087 case MR_EVT_LD_CREATED: 7088 if (!instance->requestorId || 7089 (instance->requestorId && megasas_get_ld_vf_affiliation(instance, 0))) 7090 dcmd_ret = megasas_ld_list_query(instance, MR_LD_QUERY_TYPE_EXPOSED_TO_HOST); 7091 7092 
if (dcmd_ret == DCMD_SUCCESS) 7093 doscan = SCAN_VD_CHANNEL; 7094 7095 break; 7096 7097 case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED: 7098 case MR_EVT_FOREIGN_CFG_IMPORTED: 7099 case MR_EVT_LD_STATE_CHANGE: 7100 dcmd_ret = megasas_get_pd_list(instance); 7101 7102 if (dcmd_ret != DCMD_SUCCESS) 7103 break; 7104 7105 if (!instance->requestorId || 7106 (instance->requestorId && megasas_get_ld_vf_affiliation(instance, 0))) 7107 dcmd_ret = megasas_ld_list_query(instance, MR_LD_QUERY_TYPE_EXPOSED_TO_HOST); 7108 7109 if (dcmd_ret != DCMD_SUCCESS) 7110 break; 7111 7112 doscan = SCAN_VD_CHANNEL | SCAN_PD_CHANNEL; 7113 dev_info(&instance->pdev->dev, "scanning for scsi%d...\n", 7114 instance->host->host_no); 7115 break; 7116 7117 case MR_EVT_CTRL_PROP_CHANGED: 7118 dcmd_ret = megasas_get_ctrl_info(instance); 7119 break; 7120 default: 7121 doscan = 0; 7122 break; 7123 } 7124 } else { 7125 dev_err(&instance->pdev->dev, "invalid evt_detail!\n"); 7126 mutex_unlock(&instance->reset_mutex); 7127 kfree(ev); 7128 return; 7129 } 7130 7131 mutex_unlock(&instance->reset_mutex); 7132 7133 if (doscan & SCAN_PD_CHANNEL) { 7134 for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) { 7135 for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) { 7136 pd_index = i*MEGASAS_MAX_DEV_PER_CHANNEL + j; 7137 sdev1 = scsi_device_lookup(host, i, j, 0); 7138 if (instance->pd_list[pd_index].driveState == 7139 MR_PD_STATE_SYSTEM) { 7140 if (!sdev1) 7141 scsi_add_device(host, i, j, 0); 7142 else 7143 scsi_device_put(sdev1); 7144 } else { 7145 if (sdev1) { 7146 scsi_remove_device(sdev1); 7147 scsi_device_put(sdev1); 7148 } 7149 } 7150 } 7151 } 7152 } 7153 7154 if (doscan & SCAN_VD_CHANNEL) { 7155 for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) { 7156 for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) { 7157 ld_index = (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j; 7158 sdev1 = scsi_device_lookup(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0); 7159 if (instance->ld_ids[ld_index] != 0xff) { 7160 if (!sdev1) 7161 scsi_add_device(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0); 7162 else 7163 scsi_device_put(sdev1); 7164 } else { 7165 if (sdev1) { 7166 scsi_remove_device(sdev1); 7167 scsi_device_put(sdev1); 7168 } 7169 } 7170 } 7171 } 7172 } 7173 7174 if (dcmd_ret == DCMD_SUCCESS) 7175 seq_num = le32_to_cpu(instance->evt_detail->seq_num) + 1; 7176 else 7177 seq_num = instance->last_seq_num; 7178 7179 /* Register AEN with FW for latest sequence number plus 1 */ 7180 class_locale.members.reserved = 0; 7181 class_locale.members.locale = MR_EVT_LOCALE_ALL; 7182 class_locale.members.class = MR_EVT_CLASS_DEBUG; 7183 7184 if (instance->aen_cmd != NULL) { 7185 kfree(ev); 7186 return; 7187 } 7188 7189 mutex_lock(&instance->reset_mutex); 7190 error = megasas_register_aen(instance, seq_num, 7191 class_locale.word); 7192 if (error) 7193 dev_err(&instance->pdev->dev, 7194 "register aen failed error %x\n", error); 7195 7196 mutex_unlock(&instance->reset_mutex); 7197 kfree(ev); 7198 } 7199 7200 /** 7201 * megasas_init - Driver load entry point 7202 */ 7203 static int __init megasas_init(void) 7204 { 7205 int rval; 7206 7207 /* 7208 * Booted in kdump kernel, minimize memory footprints by 7209 * disabling few features 7210 */ 7211 if (reset_devices) { 7212 msix_vectors = 1; 7213 rdpq_enable = 0; 7214 dual_qdepth_disable = 1; 7215 } 7216 7217 /* 7218 * Announce driver version and other information 7219 */ 7220 pr_info("megasas: %s\n", MEGASAS_VERSION); 7221 7222 spin_lock_init(&poll_aen_lock); 7223 7224 support_poll_for_event = 2; 7225 support_device_change = 1; 7226 7227 
memset(&megasas_mgmt_info, 0, sizeof(megasas_mgmt_info)); 7228 7229 /* 7230 * Register character device node 7231 */ 7232 rval = register_chrdev(0, "megaraid_sas_ioctl", &megasas_mgmt_fops); 7233 7234 if (rval < 0) { 7235 printk(KERN_DEBUG "megasas: failed to open device node\n"); 7236 return rval; 7237 } 7238 7239 megasas_mgmt_majorno = rval; 7240 7241 /* 7242 * Register ourselves as PCI hotplug module 7243 */ 7244 rval = pci_register_driver(&megasas_pci_driver); 7245 7246 if (rval) { 7247 printk(KERN_DEBUG "megasas: PCI hotplug registration failed \n"); 7248 goto err_pcidrv; 7249 } 7250 7251 rval = driver_create_file(&megasas_pci_driver.driver, 7252 &driver_attr_version); 7253 if (rval) 7254 goto err_dcf_attr_ver; 7255 7256 rval = driver_create_file(&megasas_pci_driver.driver, 7257 &driver_attr_release_date); 7258 if (rval) 7259 goto err_dcf_rel_date; 7260 7261 rval = driver_create_file(&megasas_pci_driver.driver, 7262 &driver_attr_support_poll_for_event); 7263 if (rval) 7264 goto err_dcf_support_poll_for_event; 7265 7266 rval = driver_create_file(&megasas_pci_driver.driver, 7267 &driver_attr_dbg_lvl); 7268 if (rval) 7269 goto err_dcf_dbg_lvl; 7270 rval = driver_create_file(&megasas_pci_driver.driver, 7271 &driver_attr_support_device_change); 7272 if (rval) 7273 goto err_dcf_support_device_change; 7274 7275 return rval; 7276 7277 err_dcf_support_device_change: 7278 driver_remove_file(&megasas_pci_driver.driver, 7279 &driver_attr_dbg_lvl); 7280 err_dcf_dbg_lvl: 7281 driver_remove_file(&megasas_pci_driver.driver, 7282 &driver_attr_support_poll_for_event); 7283 err_dcf_support_poll_for_event: 7284 driver_remove_file(&megasas_pci_driver.driver, 7285 &driver_attr_release_date); 7286 err_dcf_rel_date: 7287 driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version); 7288 err_dcf_attr_ver: 7289 pci_unregister_driver(&megasas_pci_driver); 7290 err_pcidrv: 7291 unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl"); 7292 return rval; 7293 } 7294 7295 /** 7296 * megasas_exit - Driver unload entry point 7297 */ 7298 static void __exit megasas_exit(void) 7299 { 7300 driver_remove_file(&megasas_pci_driver.driver, 7301 &driver_attr_dbg_lvl); 7302 driver_remove_file(&megasas_pci_driver.driver, 7303 &driver_attr_support_poll_for_event); 7304 driver_remove_file(&megasas_pci_driver.driver, 7305 &driver_attr_support_device_change); 7306 driver_remove_file(&megasas_pci_driver.driver, 7307 &driver_attr_release_date); 7308 driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version); 7309 7310 pci_unregister_driver(&megasas_pci_driver); 7311 unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl"); 7312 } 7313 7314 module_init(megasas_init); 7315 module_exit(megasas_exit); 7316
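/*
 * Illustrative userspace sketch (not part of the driver): one way a
 * management application could subscribe to AEN delivery through the
 * "megaraid_sas_ioctl" character device registered in megasas_init().
 * The device node path, the 0xffffffff class/locale mask and the place
 * userspace obtains struct megasas_aen and MEGASAS_IOC_GET_AEN from
 * (normally the driver's megaraid_sas.h definitions) are assumptions;
 * the fasync prerequisite, the poll() semantics and the ioctl argument
 * follow megasas_mgmt_fasync(), megasas_mgmt_poll() and
 * megasas_mgmt_ioctl_aen() above.  MFI passthrough requests would be
 * issued the same way with MEGASAS_IOC_FIRMWARE and a populated
 * struct megasas_iocpacket.  Needed userspace headers: <fcntl.h>,
 * <unistd.h>, <poll.h>, <sys/ioctl.h>, plus the megaraid_sas ioctl
 * definitions.
 *
 *	int fd = open("/dev/megaraid_sas_ioctl_node", O_RDWR); // node path assumed
 *	fcntl(fd, F_SETOWN, getpid());
 *	fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | FASYNC); // must precede GET_AEN
 *
 *	struct megasas_aen aen = {
 *		.host_no           = 0,          // SCSI host number of the adapter
 *		.seq_num           = 0,          // first event sequence number wanted
 *		.class_locale_word = 0xffffffff, // class/locale mask, value assumed
 *	};
 *	if (ioctl(fd, MEGASAS_IOC_GET_AEN, &aen) < 0)
 *		perror("MEGASAS_IOC_GET_AEN");
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	poll(&pfd, 1, -1); // returns POLLIN | POLLRDNORM when an AEN is signalled
 */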