// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Linux MegaRAID driver for SAS based RAID controllers
 *
 * Copyright (c) 2003-2013 LSI Corporation
 * Copyright (c) 2013-2016 Avago Technologies
 * Copyright (c) 2016-2018 Broadcom Inc.
 *
 * Authors: Broadcom Inc.
 *          Sreenivas Bagalkote
 *          Sumant Patro
 *          Bo Yang
 *          Adam Radford
 *          Kashyap Desai <kashyap.desai@broadcom.com>
 *          Sumit Saxena <sumit.saxena@broadcom.com>
 *
 * Send feedback to: megaraidlinux.pdl@broadcom.com
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/list.h>
#include <linux/moduleparam.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/uio.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>
#include <linux/fs.h>
#include <linux/compat.h>
#include <linux/blkdev.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/vmalloc.h>
#include <linux/irq_poll.h>
#include <linux/blk-mq-pci.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_dbg.h>
#include "megaraid_sas_fusion.h"
#include "megaraid_sas.h"

/*
 * Number of sectors per IO command
 * Will be set in megasas_init_mfi if user does not provide
 */
static unsigned int max_sectors;
module_param_named(max_sectors, max_sectors, int, 0444);
MODULE_PARM_DESC(max_sectors,
        "Maximum number of sectors per IO command");

static int msix_disable;
module_param(msix_disable, int, 0444);
MODULE_PARM_DESC(msix_disable, "Disable MSI-X interrupt handling. Default: 0");

static unsigned int msix_vectors;
module_param(msix_vectors, int, 0444);
MODULE_PARM_DESC(msix_vectors, "MSI-X max vector count. Default: Set by FW");

static int allow_vf_ioctls;
module_param(allow_vf_ioctls, int, 0444);
MODULE_PARM_DESC(allow_vf_ioctls, "Allow ioctls in SR-IOV VF mode. Default: 0");

static unsigned int throttlequeuedepth = MEGASAS_THROTTLE_QUEUE_DEPTH;
module_param(throttlequeuedepth, int, 0444);
MODULE_PARM_DESC(throttlequeuedepth,
        "Adapter queue depth when throttled due to I/O timeout. Default: 16");

unsigned int resetwaittime = MEGASAS_RESET_WAIT_TIME;
module_param(resetwaittime, int, 0444);
MODULE_PARM_DESC(resetwaittime, "Wait time in (1-180s) after I/O timeout before resetting adapter. Default: 180s");

static int smp_affinity_enable = 1;
module_param(smp_affinity_enable, int, 0444);
MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable Default: enable(1)");

static int rdpq_enable = 1;
module_param(rdpq_enable, int, 0444);
MODULE_PARM_DESC(rdpq_enable, "Allocate reply queue in chunks for large queue depth enable/disable Default: enable(1)");

unsigned int dual_qdepth_disable;
module_param(dual_qdepth_disable, int, 0444);
MODULE_PARM_DESC(dual_qdepth_disable, "Disable dual queue depth feature. Default: 0");

static unsigned int scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT;
module_param(scmd_timeout, int, 0444);
MODULE_PARM_DESC(scmd_timeout, "scsi command timeout (10-90s), default 90s. See megasas_reset_timer.");
int perf_mode = -1;
module_param(perf_mode, int, 0444);
MODULE_PARM_DESC(perf_mode, "Performance mode (only for Aero adapters), options:\n\t\t"
        "0 - balanced: High iops and low latency queues are allocated &\n\t\t"
        "interrupt coalescing is enabled only on high iops queues\n\t\t"
        "1 - iops: High iops queues are not allocated &\n\t\t"
        "interrupt coalescing is enabled on all queues\n\t\t"
        "2 - latency: High iops queues are not allocated &\n\t\t"
        "interrupt coalescing is disabled on all queues\n\t\t"
        "default mode is 'balanced'"
        );

int event_log_level = MFI_EVT_CLASS_CRITICAL;
module_param(event_log_level, int, 0644);
MODULE_PARM_DESC(event_log_level, "Asynchronous event logging level- range is: -2(CLASS_DEBUG) to 4(CLASS_DEAD), Default: 2(CLASS_CRITICAL)");

unsigned int enable_sdev_max_qd;
module_param(enable_sdev_max_qd, int, 0444);
MODULE_PARM_DESC(enable_sdev_max_qd, "Enable sdev max qd as can_queue. Default: 0");

int poll_queues;
module_param(poll_queues, int, 0444);
MODULE_PARM_DESC(poll_queues, "Number of queues to be used for io_uring poll mode.\n\t\t"
        "This parameter is effective only if host_tagset_enable=1 &\n\t\t"
        "It is not applicable for MFI_SERIES. &\n\t\t"
        "Driver will work in latency mode. &\n\t\t"
        "High iops queues are not allocated &\n\t\t"
        );

int host_tagset_enable = 1;
module_param(host_tagset_enable, int, 0444);
MODULE_PARM_DESC(host_tagset_enable, "Shared host tagset enable/disable Default: enable(1)");
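/*
 * Illustrative note: the parameters above can be supplied at load time, for
 * example "modprobe megaraid_sas msix_disable=1 scmd_timeout=60", and show up
 * under /sys/module/megaraid_sas/parameters/. Of the parameters above, only
 * event_log_level (permissions 0644) is writable at runtime.
 */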
MODULE_LICENSE("GPL");
MODULE_VERSION(MEGASAS_VERSION);
MODULE_AUTHOR("megaraidlinux.pdl@broadcom.com");
MODULE_DESCRIPTION("Broadcom MegaRAID SAS Driver");

int megasas_transition_to_ready(struct megasas_instance *instance, int ocr);
static int megasas_get_pd_list(struct megasas_instance *instance);
static int megasas_ld_list_query(struct megasas_instance *instance,
                                 u8 query_type);
static int megasas_issue_init_mfi(struct megasas_instance *instance);
static int megasas_register_aen(struct megasas_instance *instance,
                                u32 seq_num, u32 class_locale_word);
static void megasas_get_pd_info(struct megasas_instance *instance,
                                struct scsi_device *sdev);
static void
megasas_set_ld_removed_by_fw(struct megasas_instance *instance);

/*
 * PCI ID table for all supported controllers
 */
static struct pci_device_id megasas_pci_table[] = {

        {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1064R)},
        /* xscale IOP */
        {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078R)},
        /* ppc IOP */
        {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078DE)},
        /* ppc IOP */
        {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078GEN2)},
        /* gen2*/
        {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0079GEN2)},
        /* gen2*/
        {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0073SKINNY)},
        /* skinny*/
        {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0071SKINNY)},
        /* skinny*/
        {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VERDE_ZCR)},
        /* xscale IOP, vega */
        {PCI_DEVICE(PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_DELL_PERC5)},
        /* xscale IOP */
        {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FUSION)},
        /* Fusion */
        {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_PLASMA)},
        /* Plasma */
        {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INVADER)},
        /* Invader */
        {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FURY)},
        /* Fury */
        {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INTRUDER)},
        /* Intruder */
        {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INTRUDER_24)},
        /* Intruder 24 port*/
        {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CUTLASS_52)},
        {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CUTLASS_53)},
        /* VENTURA */
        {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VENTURA)},
        {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CRUSADER)},
        {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_HARPOON)},
        {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_TOMCAT)},
        {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VENTURA_4PORT)},
        {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CRUSADER_4PORT)},
        {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E1)},
        {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E2)},
        {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E5)},
        {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E6)},
        {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E0)},
        {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E3)},
        {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E4)},
        {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E7)},
        {}
};

MODULE_DEVICE_TABLE(pci, megasas_pci_table);

static int megasas_mgmt_majorno;
struct megasas_mgmt_info megasas_mgmt_info;
static struct fasync_struct *megasas_async_queue;
static DEFINE_MUTEX(megasas_async_queue_mutex);

static int megasas_poll_wait_aen;
static DECLARE_WAIT_QUEUE_HEAD(megasas_poll_wait);
static u32 support_poll_for_event;
u32 megasas_dbg_lvl;
static u32 support_device_change;
static bool support_nvme_encapsulation;
static bool support_pci_lane_margining;

/* define lock for aen poll */
static DEFINE_SPINLOCK(poll_aen_lock);

extern struct dentry *megasas_debugfs_root;
extern int megasas_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num);

void
megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
                     u8 alt_status);
static u32
megasas_read_fw_status_reg_gen2(struct megasas_instance *instance);
static int
megasas_adp_reset_gen2(struct megasas_instance *instance,
                       struct megasas_register_set __iomem *reg_set);
static irqreturn_t megasas_isr(int irq, void *devp);
static u32
megasas_init_adapter_mfi(struct megasas_instance *instance);
u32
megasas_build_and_issue_cmd(struct megasas_instance *instance,
                            struct scsi_cmnd *scmd);
static void megasas_complete_cmd_dpc(unsigned long instance_addr);
int
wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
              int seconds);
void megasas_fusion_ocr_wq(struct work_struct *work);
static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
                                         int initial);
static int
megasas_set_dma_mask(struct megasas_instance *instance);
static int
megasas_alloc_ctrl_mem(struct megasas_instance *instance);
static inline void
megasas_free_ctrl_mem(struct megasas_instance *instance);
static inline int
megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance);
static inline void
megasas_free_ctrl_dma_buffers(struct megasas_instance *instance);
static inline void
megasas_init_ctrl_params(struct megasas_instance *instance);

u32 megasas_readl(struct megasas_instance *instance,
                  const volatile void __iomem *addr)
{
        u32 i = 0, ret_val;
        /*
         * Due to a HW errata in Aero controllers, reads to certain
         * Fusion registers could intermittently return all zeroes.
         * This behavior is transient in nature and subsequent reads will
         * return valid value. As a workaround in driver, retry readl for
         * up to thirty times until a non-zero value is read.
         */
        if (instance->adapter_type == AERO_SERIES) {
                do {
                        ret_val = readl(addr);
                        i++;
                } while (ret_val == 0 && i < 30);
                return ret_val;
        } else {
                return readl(addr);
        }
}

/**
 * megasas_set_dma_settings -	Populate DMA address, length and flags for DCMDs
 * @instance:			Adapter soft state
 * @dcmd:			DCMD frame inside MFI command
 * @dma_addr:			DMA address of buffer to be passed to FW
 * @dma_len:			Length of DMA buffer to be passed to FW
 * @return:			void
 */
void megasas_set_dma_settings(struct megasas_instance *instance,
                              struct megasas_dcmd_frame *dcmd,
                              dma_addr_t dma_addr, u32 dma_len)
{
        if (instance->consistent_mask_64bit) {
                dcmd->sgl.sge64[0].phys_addr = cpu_to_le64(dma_addr);
                dcmd->sgl.sge64[0].length = cpu_to_le32(dma_len);
                dcmd->flags = cpu_to_le16(dcmd->flags | MFI_FRAME_SGL64);

        } else {
                dcmd->sgl.sge32[0].phys_addr =
                        cpu_to_le32(lower_32_bits(dma_addr));
                dcmd->sgl.sge32[0].length = cpu_to_le32(dma_len);
                dcmd->flags = cpu_to_le16(dcmd->flags);
        }
}

static void
megasas_issue_dcmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
{
        instance->instancet->fire_cmd(instance,
                cmd->frame_phys_addr, 0, instance->reg_set);
        return;
}

/**
 * megasas_get_cmd -	Get a command from the free pool
 * @instance:		Adapter soft state
 *
 * Returns a free command from the pool
 */
struct megasas_cmd *megasas_get_cmd(struct megasas_instance *instance)
{
        unsigned long flags;
        struct megasas_cmd *cmd = NULL;

        spin_lock_irqsave(&instance->mfi_pool_lock, flags);

        if (!list_empty(&instance->cmd_pool)) {
                cmd = list_entry((&instance->cmd_pool)->next,
                                 struct megasas_cmd, list);
                list_del_init(&cmd->list);
        } else {
                dev_err(&instance->pdev->dev, "Command pool empty!\n");
        }

        spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
        return cmd;
}

/**
 * megasas_return_cmd -	Return a cmd to free command pool
 * @instance:		Adapter soft state
 * @cmd:		Command packet to be returned to free command pool
 */
void
megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
{
        unsigned long flags;
        u32 blk_tags;
        struct megasas_cmd_fusion *cmd_fusion;
        struct fusion_context *fusion = instance->ctrl_context;

        /* This flag is used only for fusion adapter.
         * Wait for Interrupt for Polled mode DCMD
         */
        if (cmd->flags & DRV_DCMD_POLLED_MODE)
                return;

        spin_lock_irqsave(&instance->mfi_pool_lock, flags);

        if (fusion) {
                blk_tags = instance->max_scsi_cmds + cmd->index;
                cmd_fusion = fusion->cmd_list[blk_tags];
                megasas_return_cmd_fusion(instance, cmd_fusion);
        }
        cmd->scmd = NULL;
        cmd->frame_count = 0;
        cmd->flags = 0;
        memset(cmd->frame, 0, instance->mfi_frame_size);
        cmd->frame->io.context = cpu_to_le32(cmd->index);
        if (!fusion && reset_devices)
                cmd->frame->hdr.cmd = MFI_CMD_INVALID;
        list_add(&cmd->list, (&instance->cmd_pool)->next);

        spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);

}

static const char *
format_timestamp(uint32_t timestamp)
{
        static char buffer[32];

        if ((timestamp & 0xff000000) == 0xff000000)
                snprintf(buffer, sizeof(buffer), "boot + %us", timestamp &
                         0x00ffffff);
        else
                snprintf(buffer, sizeof(buffer), "%us", timestamp);
        return buffer;
}

static const char *
format_class(int8_t class)
{
        static char buffer[6];

        switch (class) {
        case MFI_EVT_CLASS_DEBUG:
                return "debug";
        case MFI_EVT_CLASS_PROGRESS:
                return "progress";
        case MFI_EVT_CLASS_INFO:
                return "info";
        case MFI_EVT_CLASS_WARNING:
                return "WARN";
        case MFI_EVT_CLASS_CRITICAL:
                return "CRIT";
        case MFI_EVT_CLASS_FATAL:
                return "FATAL";
        case MFI_EVT_CLASS_DEAD:
                return "DEAD";
        default:
                snprintf(buffer, sizeof(buffer), "%d", class);
                return buffer;
        }
}

/**
 * megasas_decode_evt: Decode FW AEN event and print critical event
 *		       for information.
 * @instance:		Adapter soft state
 */
static void
megasas_decode_evt(struct megasas_instance *instance)
{
        struct megasas_evt_detail *evt_detail = instance->evt_detail;
        union megasas_evt_class_locale class_locale;
        class_locale.word = le32_to_cpu(evt_detail->cl.word);

        if ((event_log_level < MFI_EVT_CLASS_DEBUG) ||
            (event_log_level > MFI_EVT_CLASS_DEAD)) {
                printk(KERN_WARNING "megaraid_sas: provided event log level is out of range, setting it to default 2(CLASS_CRITICAL), permissible range is: -2 to 4\n");
                event_log_level = MFI_EVT_CLASS_CRITICAL;
        }

        if (class_locale.members.class >= event_log_level)
                dev_info(&instance->pdev->dev, "%d (%s/0x%04x/%s) - %s\n",
                         le32_to_cpu(evt_detail->seq_num),
                         format_timestamp(le32_to_cpu(evt_detail->time_stamp)),
                         (class_locale.members.locale),
                         format_class(class_locale.members.class),
                         evt_detail->description);

        if (megasas_dbg_lvl & LD_PD_DEBUG)
                dev_info(&instance->pdev->dev,
                         "evt_detail.args.ld.target_id/index %d/%d\n",
                         evt_detail->args.ld.target_id, evt_detail->args.ld.ld_index);

}

/*
 * The following functions are defined for xscale
 * (deviceid : 1064R, PERC5) controllers
 */

/**
 * megasas_enable_intr_xscale -	Enables interrupts
 * @instance:	Adapter soft state
 */
static inline void
megasas_enable_intr_xscale(struct megasas_instance *instance)
{
        struct megasas_register_set __iomem *regs;

        regs = instance->reg_set;
        writel(0, &(regs)->outbound_intr_mask);

        /* Dummy readl to force pci flush */
        readl(&regs->outbound_intr_mask);
}

/**
 * megasas_disable_intr_xscale -Disables interrupt
 * @instance:	Adapter soft state
 */
static inline void
megasas_disable_intr_xscale(struct megasas_instance *instance)
{
        struct megasas_register_set __iomem *regs;
        u32 mask = 0x1f;

        regs = instance->reg_set;
        writel(mask, &regs->outbound_intr_mask);
        /* Dummy readl to force pci flush */
        readl(&regs->outbound_intr_mask);
}

/**
 * megasas_read_fw_status_reg_xscale - returns the current FW status value
 * @instance:	Adapter soft state
 */
static u32
megasas_read_fw_status_reg_xscale(struct megasas_instance *instance)
{
        return readl(&instance->reg_set->outbound_msg_0);
}

/**
 * megasas_clear_intr_xscale -	Check & clear interrupt
 * @instance:	Adapter soft state
 */
static int
megasas_clear_intr_xscale(struct megasas_instance *instance)
{
        u32 status;
        u32 mfiStatus = 0;
        struct megasas_register_set __iomem *regs;
        regs = instance->reg_set;

        /*
         * Check if it is our interrupt
         */
        status = readl(&regs->outbound_intr_status);

        if (status & MFI_OB_INTR_STATUS_MASK)
                mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
        if (status & MFI_XSCALE_OMR0_CHANGE_INTERRUPT)
                mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;

        /*
         * Clear the interrupt by writing back the same value
         */
        if (mfiStatus)
                writel(status, &regs->outbound_intr_status);

        /* Dummy readl to force pci flush */
        readl(&regs->outbound_intr_status);

        return mfiStatus;
}

/**
 * megasas_fire_cmd_xscale -	Sends command to the FW
 * @instance:		Adapter soft state
 * @frame_phys_addr :	Physical address of cmd
 * @frame_count :	Number of frames for the command
 * @regs :		MFI register set
 */
static inline void
megasas_fire_cmd_xscale(struct megasas_instance *instance,
                        dma_addr_t frame_phys_addr,
                        u32 frame_count,
                        struct megasas_register_set __iomem *regs)
{
        unsigned long flags;

        spin_lock_irqsave(&instance->hba_lock, flags);
        writel((frame_phys_addr >> 3)|(frame_count),
               &(regs)->inbound_queue_port);
        spin_unlock_irqrestore(&instance->hba_lock, flags);
}

/**
 * megasas_adp_reset_xscale -  For controller reset
 * @instance:	Adapter soft state
 * @regs:	MFI register set
 */
static int
megasas_adp_reset_xscale(struct megasas_instance *instance,
                         struct megasas_register_set __iomem *regs)
{
        u32 i;
        u32 pcidata;

        writel(MFI_ADP_RESET, &regs->inbound_doorbell);

        for (i = 0; i < 3; i++)
                msleep(1000); /* sleep for 3 secs */
        pcidata = 0;
        pci_read_config_dword(instance->pdev, MFI_1068_PCSR_OFFSET, &pcidata);
        dev_notice(&instance->pdev->dev, "pcidata = %x\n", pcidata);
        if (pcidata & 0x2) {
                dev_notice(&instance->pdev->dev, "mfi 1068 offset read=%x\n", pcidata);
                pcidata &= ~0x2;
                pci_write_config_dword(instance->pdev,
                                       MFI_1068_PCSR_OFFSET, pcidata);

                for (i = 0; i < 2; i++)
                        msleep(1000); /* need to wait 2 secs again */

                pcidata = 0;
                pci_read_config_dword(instance->pdev,
                                      MFI_1068_FW_HANDSHAKE_OFFSET, &pcidata);
                dev_notice(&instance->pdev->dev, "1068 offset handshake read=%x\n", pcidata);
                if ((pcidata & 0xffff0000) == MFI_1068_FW_READY) {
                        dev_notice(&instance->pdev->dev, "1068 offset pcidt=%x\n", pcidata);
                        pcidata = 0;
                        pci_write_config_dword(instance->pdev,
                                               MFI_1068_FW_HANDSHAKE_OFFSET, pcidata);
                }
        }
        return 0;
}

/**
 * megasas_check_reset_xscale -	For controller reset check
 * @instance:	Adapter soft state
 * @regs:	MFI register set
 */
static int
megasas_check_reset_xscale(struct megasas_instance *instance,
                           struct megasas_register_set __iomem *regs)
{
        if ((atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) &&
            (le32_to_cpu(*instance->consumer) ==
             MEGASAS_ADPRESET_INPROG_SIGN))
                return 1;
        return 0;
}

static struct megasas_instance_template megasas_instance_template_xscale = {

        .fire_cmd = megasas_fire_cmd_xscale,
        .enable_intr = megasas_enable_intr_xscale,
        .disable_intr = megasas_disable_intr_xscale,
        .clear_intr = megasas_clear_intr_xscale,
        .read_fw_status_reg = megasas_read_fw_status_reg_xscale,
        .adp_reset = megasas_adp_reset_xscale,
        .check_reset = megasas_check_reset_xscale,
        .service_isr = megasas_isr,
        .tasklet = megasas_complete_cmd_dpc,
        .init_adapter = megasas_init_adapter_mfi,
        .build_and_issue_cmd = megasas_build_and_issue_cmd,
        .issue_dcmd = megasas_issue_dcmd,
};

/*
 * This is the end of set of functions & definitions specific
 * to xscale (deviceid : 1064R, PERC5) controllers
 */

/*
 * The following functions are defined for ppc (deviceid : 0x60)
 * controllers
 */

/**
 * megasas_enable_intr_ppc -	Enables interrupts
 * @instance:	Adapter soft state
 */
static inline void
megasas_enable_intr_ppc(struct megasas_instance *instance)
{
        struct megasas_register_set __iomem *regs;

        regs = instance->reg_set;
        writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);

        writel(~0x80000000, &(regs)->outbound_intr_mask);

        /* Dummy readl to force pci flush */
        readl(&regs->outbound_intr_mask);
}

/**
 * megasas_disable_intr_ppc -	Disable interrupt
 * @instance:	Adapter soft state
 */
static inline void
megasas_disable_intr_ppc(struct megasas_instance *instance)
{
        struct megasas_register_set __iomem *regs;
        u32 mask = 0xFFFFFFFF;

        regs = instance->reg_set;
        writel(mask, &regs->outbound_intr_mask);
        /* Dummy readl to force pci flush */
        readl(&regs->outbound_intr_mask);
}

/**
 * megasas_read_fw_status_reg_ppc - returns the current FW status value
 * @instance:	Adapter soft state
 */
static u32
megasas_read_fw_status_reg_ppc(struct megasas_instance *instance)
{
        return readl(&instance->reg_set->outbound_scratch_pad_0);
}

/**
 * megasas_clear_intr_ppc -	Check & clear interrupt
 * @instance:	Adapter soft state
 */
static int
megasas_clear_intr_ppc(struct megasas_instance *instance)
{
        u32 status, mfiStatus = 0;
        struct megasas_register_set __iomem *regs;
        regs = instance->reg_set;

        /*
         * Check if it is our interrupt
         */
        status = readl(&regs->outbound_intr_status);

        if (status & MFI_REPLY_1078_MESSAGE_INTERRUPT)
                mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;

        if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT)
                mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;

        /*
         * Clear the interrupt by writing back the same value
         */
        writel(status, &regs->outbound_doorbell_clear);

        /* Dummy readl to force pci flush */
        readl(&regs->outbound_doorbell_clear);

        return mfiStatus;
}

/**
 * megasas_fire_cmd_ppc -	Sends command to the FW
 * @instance:		Adapter soft state
 * @frame_phys_addr:	Physical address of cmd
 * @frame_count:	Number of frames for the command
 * @regs:		MFI register set
 */
static inline void
megasas_fire_cmd_ppc(struct megasas_instance *instance,
                     dma_addr_t frame_phys_addr,
                     u32 frame_count,
                     struct megasas_register_set __iomem *regs)
{
        unsigned long flags;

        spin_lock_irqsave(&instance->hba_lock, flags);
        writel((frame_phys_addr | (frame_count<<1))|1,
               &(regs)->inbound_queue_port);
        spin_unlock_irqrestore(&instance->hba_lock, flags);
}

/**
 * megasas_check_reset_ppc -	For controller reset check
 * @instance:	Adapter soft state
 * @regs:	MFI register set
 */
static int
megasas_check_reset_ppc(struct megasas_instance *instance,
                        struct megasas_register_set __iomem *regs)
{
        if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
                return 1;

        return 0;
}

static struct megasas_instance_template megasas_instance_template_ppc = {

        .fire_cmd = megasas_fire_cmd_ppc,
        .enable_intr = megasas_enable_intr_ppc,
        .disable_intr = megasas_disable_intr_ppc,
        .clear_intr = megasas_clear_intr_ppc,
        .read_fw_status_reg = megasas_read_fw_status_reg_ppc,
        .adp_reset = megasas_adp_reset_xscale,
        .check_reset = megasas_check_reset_ppc,
        .service_isr = megasas_isr,
        .tasklet = megasas_complete_cmd_dpc,
        .init_adapter = megasas_init_adapter_mfi,
        .build_and_issue_cmd = megasas_build_and_issue_cmd,
        .issue_dcmd = megasas_issue_dcmd,
};

/**
 * megasas_enable_intr_skinny -	Enables interrupts
 * @instance:	Adapter soft state
 */
static inline void
megasas_enable_intr_skinny(struct megasas_instance *instance)
{
        struct megasas_register_set __iomem *regs;

        regs = instance->reg_set;
        writel(0xFFFFFFFF, &(regs)->outbound_intr_mask);

        writel(~MFI_SKINNY_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);

        /* Dummy readl to force pci flush */
        readl(&regs->outbound_intr_mask);
}

/**
 * megasas_disable_intr_skinny -	Disables interrupt
 * @instance:	Adapter soft state
 */
static inline void
megasas_disable_intr_skinny(struct megasas_instance *instance)
{
        struct megasas_register_set __iomem *regs;
        u32 mask = 0xFFFFFFFF;

        regs = instance->reg_set;
        writel(mask, &regs->outbound_intr_mask);
        /* Dummy readl to force pci flush */
        readl(&regs->outbound_intr_mask);
}

/**
 * megasas_read_fw_status_reg_skinny - returns the current FW status value
 * @instance:	Adapter soft state
 */
static u32
megasas_read_fw_status_reg_skinny(struct megasas_instance *instance)
{
        return readl(&instance->reg_set->outbound_scratch_pad_0);
}

/**
 * megasas_clear_intr_skinny -	Check & clear interrupt
 * @instance:	Adapter soft state
 */
static int
megasas_clear_intr_skinny(struct megasas_instance *instance)
{
        u32 status;
        u32 mfiStatus = 0;
        struct megasas_register_set __iomem *regs;
        regs = instance->reg_set;

        /*
         * Check if it is our interrupt
         */
        status = readl(&regs->outbound_intr_status);

        if (!(status & MFI_SKINNY_ENABLE_INTERRUPT_MASK)) {
                return 0;
        }

        /*
         * Check if it is our interrupt
         */
        if ((megasas_read_fw_status_reg_skinny(instance) & MFI_STATE_MASK) ==
            MFI_STATE_FAULT) {
                mfiStatus = MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
        } else
                mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;

        /*
         * Clear the interrupt by writing back the same value
         */
        writel(status, &regs->outbound_intr_status);

        /*
         * dummy read to flush PCI
         */
        readl(&regs->outbound_intr_status);

        return mfiStatus;
}

/**
 * megasas_fire_cmd_skinny -	Sends command to the FW
 * @instance:		Adapter soft state
 * @frame_phys_addr:	Physical address of cmd
 * @frame_count:	Number of frames for the command
 * @regs:		MFI register set
 */
static inline void
megasas_fire_cmd_skinny(struct megasas_instance *instance,
                        dma_addr_t frame_phys_addr,
                        u32 frame_count,
                        struct megasas_register_set __iomem *regs)
{
        unsigned long flags;

        spin_lock_irqsave(&instance->hba_lock, flags);
        writel(upper_32_bits(frame_phys_addr),
               &(regs)->inbound_high_queue_port);
        writel((lower_32_bits(frame_phys_addr) | (frame_count<<1))|1,
               &(regs)->inbound_low_queue_port);
        spin_unlock_irqrestore(&instance->hba_lock, flags);
}

/**
 * megasas_check_reset_skinny -	For controller reset check
 * @instance:	Adapter soft state
 * @regs:	MFI register set
 */
static int
megasas_check_reset_skinny(struct megasas_instance *instance,
                           struct megasas_register_set __iomem *regs)
{
        if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
                return 1;

        return 0;
}

static struct megasas_instance_template megasas_instance_template_skinny = {

        .fire_cmd = megasas_fire_cmd_skinny,
        .enable_intr = megasas_enable_intr_skinny,
        .disable_intr = megasas_disable_intr_skinny,
        .clear_intr = megasas_clear_intr_skinny,
        .read_fw_status_reg = megasas_read_fw_status_reg_skinny,
        .adp_reset = megasas_adp_reset_gen2,
        .check_reset = megasas_check_reset_skinny,
        .service_isr = megasas_isr,
        .tasklet = megasas_complete_cmd_dpc,
        .init_adapter = megasas_init_adapter_mfi,
        .build_and_issue_cmd = megasas_build_and_issue_cmd,
        .issue_dcmd = megasas_issue_dcmd,
};


/*
 * The following functions are defined for gen2 (deviceid : 0x78 0x79)
 * controllers
 */

/**
 * megasas_enable_intr_gen2 -  Enables interrupts
 * @instance:	Adapter soft state
 */
static inline void
megasas_enable_intr_gen2(struct megasas_instance *instance)
{
        struct megasas_register_set __iomem *regs;

        regs = instance->reg_set;
        writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);

        /* write ~0x00000005 (4 & 1) to the intr mask*/
        writel(~MFI_GEN2_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);

        /* Dummy readl to force pci flush */
        readl(&regs->outbound_intr_mask);
}

/**
 * megasas_disable_intr_gen2 - Disables interrupt
 * @instance:	Adapter soft state
 */
static inline void
megasas_disable_intr_gen2(struct megasas_instance *instance)
{
        struct megasas_register_set __iomem *regs;
        u32 mask = 0xFFFFFFFF;

        regs = instance->reg_set;
        writel(mask, &regs->outbound_intr_mask);
        /* Dummy readl to force pci flush */
        readl(&regs->outbound_intr_mask);
}

/**
 * megasas_read_fw_status_reg_gen2 - returns the current FW status value
 * @instance:	Adapter soft state
 */
static u32
megasas_read_fw_status_reg_gen2(struct megasas_instance *instance)
{
        return readl(&instance->reg_set->outbound_scratch_pad_0);
}

/**
 * megasas_clear_intr_gen2 -      Check & clear interrupt
 * @instance:	Adapter soft state
 */
static int
megasas_clear_intr_gen2(struct megasas_instance *instance)
{
        u32 status;
        u32 mfiStatus = 0;
        struct megasas_register_set __iomem *regs;
        regs = instance->reg_set;

        /*
         * Check if it is our interrupt
         */
        status = readl(&regs->outbound_intr_status);

        if (status & MFI_INTR_FLAG_REPLY_MESSAGE) {
                mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
        }
        if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT) {
                mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
        }

        /*
         * Clear the interrupt by writing back the same value
         */
        if (mfiStatus)
                writel(status, &regs->outbound_doorbell_clear);

        /* Dummy readl to force pci flush */
        readl(&regs->outbound_intr_status);

        return mfiStatus;
}

/**
 * megasas_fire_cmd_gen2 -     Sends command to the FW
 * @instance:		Adapter soft state
 * @frame_phys_addr:	Physical address of cmd
 * @frame_count:	Number of frames for the command
 * @regs:		MFI register set
 */
static inline void
megasas_fire_cmd_gen2(struct megasas_instance *instance,
                      dma_addr_t frame_phys_addr,
                      u32 frame_count,
                      struct megasas_register_set __iomem *regs)
{
        unsigned long flags;

        spin_lock_irqsave(&instance->hba_lock, flags);
        writel((frame_phys_addr | (frame_count<<1))|1,
               &(regs)->inbound_queue_port);
        spin_unlock_irqrestore(&instance->hba_lock, flags);
}

/**
 * megasas_adp_reset_gen2 -	For controller reset
 * @instance:	Adapter soft state
 * @reg_set:	MFI register set
 */
static int
megasas_adp_reset_gen2(struct megasas_instance *instance,
                       struct megasas_register_set __iomem *reg_set)
{
        u32 retry = 0;
        u32 HostDiag;
        u32 __iomem *seq_offset = &reg_set->seq_offset;
        u32 __iomem *hostdiag_offset = &reg_set->host_diag;

        if (instance->instancet == &megasas_instance_template_skinny) {
                seq_offset = &reg_set->fusion_seq_offset;
                hostdiag_offset = &reg_set->fusion_host_diag;
        }

        writel(0, seq_offset);
        writel(4, seq_offset);
        writel(0xb, seq_offset);
        writel(2, seq_offset);
        writel(7, seq_offset);
        writel(0xd, seq_offset);

        msleep(1000);

        HostDiag = (u32)readl(hostdiag_offset);

        while (!(HostDiag & DIAG_WRITE_ENABLE)) {
                msleep(100);
                HostDiag = (u32)readl(hostdiag_offset);
                dev_notice(&instance->pdev->dev, "RESETGEN2: retry=%x, hostdiag=%x\n",
                           retry, HostDiag);

                if (retry++ >= 100)
                        return 1;

        }

        dev_notice(&instance->pdev->dev, "ADP_RESET_GEN2: HostDiag=%x\n", HostDiag);

        writel((HostDiag | DIAG_RESET_ADAPTER), hostdiag_offset);

        ssleep(10);

        HostDiag = (u32)readl(hostdiag_offset);
        while (HostDiag & DIAG_RESET_ADAPTER) {
                msleep(100);
                HostDiag = (u32)readl(hostdiag_offset);
                dev_notice(&instance->pdev->dev, "RESET_GEN2: retry=%x, hostdiag=%x\n",
                           retry, HostDiag);

                if (retry++ >= 1000)
                        return 1;

        }
        return 0;
}

/**
 * megasas_check_reset_gen2 -	For controller reset check
 * @instance:	Adapter soft state
 * @regs:	MFI register set
 */
static int
megasas_check_reset_gen2(struct megasas_instance *instance,
                         struct megasas_register_set __iomem *regs)
{
        if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
                return 1;

        return 0;
}

static struct megasas_instance_template megasas_instance_template_gen2 = {

        .fire_cmd = megasas_fire_cmd_gen2,
        .enable_intr = megasas_enable_intr_gen2,
        .disable_intr = megasas_disable_intr_gen2,
        .clear_intr = megasas_clear_intr_gen2,
        .read_fw_status_reg = megasas_read_fw_status_reg_gen2,
        .adp_reset = megasas_adp_reset_gen2,
        .check_reset = megasas_check_reset_gen2,
        .service_isr = megasas_isr,
        .tasklet = megasas_complete_cmd_dpc,
        .init_adapter = megasas_init_adapter_mfi,
        .build_and_issue_cmd = megasas_build_and_issue_cmd,
        .issue_dcmd = megasas_issue_dcmd,
};

/*
 * This is the end of set of functions & definitions
 * specific to gen2 (deviceid : 0x78, 0x79) controllers
 */

/*
 * Template added for TB (Fusion)
 */
extern struct megasas_instance_template megasas_instance_template_fusion;

/**
 * megasas_issue_polled -	Issues a polling command
 * @instance:			Adapter soft state
 * @cmd:			Command packet to be issued
 *
 * For polling, MFI requires the cmd_status to be set to MFI_STAT_INVALID_STATUS before posting.
 */
int
megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd)
{
        struct megasas_header *frame_hdr = &cmd->frame->hdr;

        frame_hdr->cmd_status = MFI_STAT_INVALID_STATUS;
        frame_hdr->flags |= cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);

        if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
                dev_err(&instance->pdev->dev, "Failed from %s %d\n",
                        __func__, __LINE__);
                return DCMD_INIT;
        }

        instance->instancet->issue_dcmd(instance, cmd);

        return wait_and_poll(instance, cmd, instance->requestorId ?
                             MEGASAS_ROUTINE_WAIT_TIME_VF : MFI_IO_TIMEOUT_SECS);
}

/**
 * megasas_issue_blocked_cmd -	Synchronous wrapper around regular FW cmds
 * @instance:			Adapter soft state
 * @cmd:			Command to be issued
 * @timeout:			Timeout in seconds
 *
 * This function waits on an event for the command to be returned from ISR.
 * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs
 * Used to issue ioctl commands.
 */
int
megasas_issue_blocked_cmd(struct megasas_instance *instance,
                          struct megasas_cmd *cmd, int timeout)
{
        int ret = 0;
        cmd->cmd_status_drv = DCMD_INIT;

        if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
                dev_err(&instance->pdev->dev, "Failed from %s %d\n",
                        __func__, __LINE__);
                return DCMD_INIT;
        }

        instance->instancet->issue_dcmd(instance, cmd);

        if (timeout) {
                ret = wait_event_timeout(instance->int_cmd_wait_q,
                                cmd->cmd_status_drv != DCMD_INIT, timeout * HZ);
                if (!ret) {
                        dev_err(&instance->pdev->dev,
                                "DCMD(opcode: 0x%x) is timed out, func:%s\n",
                                cmd->frame->dcmd.opcode, __func__);
                        return DCMD_TIMEOUT;
                }
        } else
                wait_event(instance->int_cmd_wait_q,
                           cmd->cmd_status_drv != DCMD_INIT);

        return cmd->cmd_status_drv;
}

/**
 * megasas_issue_blocked_abort_cmd -	Aborts previously issued cmd
 * @instance:				Adapter soft state
 * @cmd_to_abort:			Previously issued cmd to be aborted
 * @timeout:				Timeout in seconds
 *
 * MFI firmware can abort previously issued AEN command (automatic event
 * notification). The megasas_issue_blocked_abort_cmd() issues such abort
 * cmd and waits for return status.
 * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs
 */
static int
megasas_issue_blocked_abort_cmd(struct megasas_instance *instance,
                                struct megasas_cmd *cmd_to_abort, int timeout)
{
        struct megasas_cmd *cmd;
        struct megasas_abort_frame *abort_fr;
        int ret = 0;
        u32 opcode;

        cmd = megasas_get_cmd(instance);

        if (!cmd)
                return -1;

        abort_fr = &cmd->frame->abort;

        /*
         * Prepare and issue the abort frame
         */
        abort_fr->cmd = MFI_CMD_ABORT;
        abort_fr->cmd_status = MFI_STAT_INVALID_STATUS;
        abort_fr->flags = cpu_to_le16(0);
        abort_fr->abort_context = cpu_to_le32(cmd_to_abort->index);
        abort_fr->abort_mfi_phys_addr_lo =
                cpu_to_le32(lower_32_bits(cmd_to_abort->frame_phys_addr));
        abort_fr->abort_mfi_phys_addr_hi =
                cpu_to_le32(upper_32_bits(cmd_to_abort->frame_phys_addr));

        cmd->sync_cmd = 1;
        cmd->cmd_status_drv = DCMD_INIT;

        if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
                dev_err(&instance->pdev->dev, "Failed from %s %d\n",
                        __func__, __LINE__);
                return DCMD_INIT;
        }

        instance->instancet->issue_dcmd(instance, cmd);

        if (timeout) {
                ret = wait_event_timeout(instance->abort_cmd_wait_q,
                                cmd->cmd_status_drv != DCMD_INIT, timeout * HZ);
                if (!ret) {
                        opcode = cmd_to_abort->frame->dcmd.opcode;
                        dev_err(&instance->pdev->dev,
                                "Abort(to be aborted DCMD opcode: 0x%x) is timed out func:%s\n",
                                opcode, __func__);
                        return DCMD_TIMEOUT;
                }
        } else
                wait_event(instance->abort_cmd_wait_q,
                           cmd->cmd_status_drv != DCMD_INIT);

        cmd->sync_cmd = 0;

        megasas_return_cmd(instance, cmd);
        return cmd->cmd_status_drv;
}

/**
 * megasas_make_sgl32 -	Prepares 32-bit SGL
 * @instance:		Adapter soft state
 * @scp:		SCSI command from the mid-layer
 * @mfi_sgl:		SGL to be filled in
 *
 * If successful, this function returns the number of SG elements. Otherwise,
 * it returns -1.
 */
static int
megasas_make_sgl32(struct megasas_instance *instance, struct scsi_cmnd *scp,
                   union megasas_sgl *mfi_sgl)
{
        int i;
        int sge_count;
        struct scatterlist *os_sgl;

        sge_count = scsi_dma_map(scp);
        BUG_ON(sge_count < 0);

        if (sge_count) {
                scsi_for_each_sg(scp, os_sgl, sge_count, i) {
                        mfi_sgl->sge32[i].length = cpu_to_le32(sg_dma_len(os_sgl));
                        mfi_sgl->sge32[i].phys_addr = cpu_to_le32(sg_dma_address(os_sgl));
                }
        }
        return sge_count;
}
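/*
 * Which of the three SGL builders is used is decided later in
 * megasas_build_dcdb() and megasas_build_ldio(): megasas_make_sgl_skinny()
 * when instance->flag_ieee is set, megasas_make_sgl64() when IS_DMA64,
 * and megasas_make_sgl32() otherwise.
 */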
/**
 * megasas_make_sgl64 -	Prepares 64-bit SGL
 * @instance:		Adapter soft state
 * @scp:		SCSI command from the mid-layer
 * @mfi_sgl:		SGL to be filled in
 *
 * If successful, this function returns the number of SG elements. Otherwise,
 * it returns -1.
 */
static int
megasas_make_sgl64(struct megasas_instance *instance, struct scsi_cmnd *scp,
                   union megasas_sgl *mfi_sgl)
{
        int i;
        int sge_count;
        struct scatterlist *os_sgl;

        sge_count = scsi_dma_map(scp);
        BUG_ON(sge_count < 0);

        if (sge_count) {
                scsi_for_each_sg(scp, os_sgl, sge_count, i) {
                        mfi_sgl->sge64[i].length = cpu_to_le32(sg_dma_len(os_sgl));
                        mfi_sgl->sge64[i].phys_addr = cpu_to_le64(sg_dma_address(os_sgl));
                }
        }
        return sge_count;
}

/**
 * megasas_make_sgl_skinny - Prepares IEEE SGL
 * @instance:		Adapter soft state
 * @scp:		SCSI command from the mid-layer
 * @mfi_sgl:		SGL to be filled in
 *
 * If successful, this function returns the number of SG elements. Otherwise,
 * it returns -1.
 */
static int
megasas_make_sgl_skinny(struct megasas_instance *instance,
                        struct scsi_cmnd *scp, union megasas_sgl *mfi_sgl)
{
        int i;
        int sge_count;
        struct scatterlist *os_sgl;

        sge_count = scsi_dma_map(scp);

        if (sge_count) {
                scsi_for_each_sg(scp, os_sgl, sge_count, i) {
                        mfi_sgl->sge_skinny[i].length =
                                cpu_to_le32(sg_dma_len(os_sgl));
                        mfi_sgl->sge_skinny[i].phys_addr =
                                cpu_to_le64(sg_dma_address(os_sgl));
                        mfi_sgl->sge_skinny[i].flag = cpu_to_le32(0);
                }
        }
        return sge_count;
}

/**
 * megasas_get_frame_count - Computes the number of frames
 * @frame_type		: type of frame- io or pthru frame
 * @sge_count		: number of sg elements
 *
 * Returns the number of frames required for the number of sge's (sge_count)
 */

static u32 megasas_get_frame_count(struct megasas_instance *instance,
                                   u8 sge_count, u8 frame_type)
{
        int num_cnt;
        int sge_bytes;
        u32 sge_sz;
        u32 frame_count = 0;

        sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) :
                 sizeof(struct megasas_sge32);

        if (instance->flag_ieee) {
                sge_sz = sizeof(struct megasas_sge_skinny);
        }

        /*
         * Main frame can contain 2 SGEs for 64-bit SGLs and
         * 3 SGEs for 32-bit SGLs for ldio &
         * 1 SGEs for 64-bit SGLs and
         * 2 SGEs for 32-bit SGLs for pthru frame
         */
        if (unlikely(frame_type == PTHRU_FRAME)) {
                if (instance->flag_ieee == 1) {
                        num_cnt = sge_count - 1;
                } else if (IS_DMA64)
                        num_cnt = sge_count - 1;
                else
                        num_cnt = sge_count - 2;
        } else {
                if (instance->flag_ieee == 1) {
                        num_cnt = sge_count - 1;
                } else if (IS_DMA64)
                        num_cnt = sge_count - 2;
                else
                        num_cnt = sge_count - 3;
        }

        if (num_cnt > 0) {
                sge_bytes = sge_sz * num_cnt;

                frame_count = (sge_bytes / MEGAMFI_FRAME_SIZE) +
                        ((sge_bytes % MEGAMFI_FRAME_SIZE) ? 1 : 0);
        }
        /* Main frame */
        frame_count += 1;

        if (frame_count > 7)
                frame_count = 8;
        return frame_count;
}
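/*
 * Illustrative example of the computation above (assuming MEGAMFI_FRAME_SIZE
 * is 64 bytes and an IEEE "skinny" SGE is 16 bytes): an IO_FRAME with
 * flag_ieee set and sge_count = 10 keeps one SGE in the main frame, leaving
 * 9 * 16 = 144 bytes of SGL, i.e. two full extra frames plus a partial third,
 * so megasas_get_frame_count() returns 3 + 1 (main frame) = 4.
 */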
/**
 * megasas_build_dcdb -	Prepares a direct cdb (DCDB) command
 * @instance:		Adapter soft state
 * @scp:		SCSI command
 * @cmd:		Command to be prepared in
 *
 * This function prepares CDB commands. These are typically pass-through
 * commands to the devices.
 */
static int
megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
                   struct megasas_cmd *cmd)
{
        u32 is_logical;
        u32 device_id;
        u16 flags = 0;
        struct megasas_pthru_frame *pthru;

        is_logical = MEGASAS_IS_LOGICAL(scp->device);
        device_id = MEGASAS_DEV_INDEX(scp);
        pthru = (struct megasas_pthru_frame *)cmd->frame;

        if (scp->sc_data_direction == DMA_TO_DEVICE)
                flags = MFI_FRAME_DIR_WRITE;
        else if (scp->sc_data_direction == DMA_FROM_DEVICE)
                flags = MFI_FRAME_DIR_READ;
        else if (scp->sc_data_direction == DMA_NONE)
                flags = MFI_FRAME_DIR_NONE;

        if (instance->flag_ieee == 1) {
                flags |= MFI_FRAME_IEEE;
        }

        /*
         * Prepare the DCDB frame
         */
        pthru->cmd = (is_logical) ? MFI_CMD_LD_SCSI_IO : MFI_CMD_PD_SCSI_IO;
        pthru->cmd_status = 0x0;
        pthru->scsi_status = 0x0;
        pthru->target_id = device_id;
        pthru->lun = scp->device->lun;
        pthru->cdb_len = scp->cmd_len;
        pthru->timeout = 0;
        pthru->pad_0 = 0;
        pthru->flags = cpu_to_le16(flags);
        pthru->data_xfer_len = cpu_to_le32(scsi_bufflen(scp));

        memcpy(pthru->cdb, scp->cmnd, scp->cmd_len);

        /*
         * If the command is for the tape device, set the
         * pthru timeout to the os layer timeout value.
         */
        if (scp->device->type == TYPE_TAPE) {
                if (scsi_cmd_to_rq(scp)->timeout / HZ > 0xFFFF)
                        pthru->timeout = cpu_to_le16(0xFFFF);
                else
                        pthru->timeout = cpu_to_le16(scsi_cmd_to_rq(scp)->timeout / HZ);
        }

        /*
         * Construct SGL
         */
        if (instance->flag_ieee == 1) {
                pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64);
                pthru->sge_count = megasas_make_sgl_skinny(instance, scp,
                                                           &pthru->sgl);
        } else if (IS_DMA64) {
                pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64);
                pthru->sge_count = megasas_make_sgl64(instance, scp,
                                                      &pthru->sgl);
        } else
                pthru->sge_count = megasas_make_sgl32(instance, scp,
                                                      &pthru->sgl);

        if (pthru->sge_count > instance->max_num_sge) {
                dev_err(&instance->pdev->dev, "DCDB too many SGE NUM=%x\n",
                        pthru->sge_count);
                return 0;
        }

        /*
         * Sense info specific
         */
        pthru->sense_len = SCSI_SENSE_BUFFERSIZE;
        pthru->sense_buf_phys_addr_hi =
                cpu_to_le32(upper_32_bits(cmd->sense_phys_addr));
        pthru->sense_buf_phys_addr_lo =
                cpu_to_le32(lower_32_bits(cmd->sense_phys_addr));

        /*
         * Compute the total number of frames this command consumes. FW uses
         * this number to pull sufficient number of frames from host memory.
         */
        cmd->frame_count = megasas_get_frame_count(instance, pthru->sge_count,
                                                   PTHRU_FRAME);

        return cmd->frame_count;
}

/**
 * megasas_build_ldio -	Prepares IOs to logical devices
 * @instance:		Adapter soft state
 * @scp:		SCSI command
 * @cmd:		Command to be prepared
 *
 * Frames (and accompanying SGLs) for regular SCSI IOs use this function.
 */
static int
megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp,
                   struct megasas_cmd *cmd)
{
        u32 device_id;
        u8 sc = scp->cmnd[0];
        u16 flags = 0;
        struct megasas_io_frame *ldio;

        device_id = MEGASAS_DEV_INDEX(scp);
        ldio = (struct megasas_io_frame *)cmd->frame;

        if (scp->sc_data_direction == DMA_TO_DEVICE)
                flags = MFI_FRAME_DIR_WRITE;
        else if (scp->sc_data_direction == DMA_FROM_DEVICE)
                flags = MFI_FRAME_DIR_READ;

        if (instance->flag_ieee == 1) {
                flags |= MFI_FRAME_IEEE;
        }

        /*
         * Prepare the Logical IO frame: 2nd bit is zero for all read cmds
         */
        ldio->cmd = (sc & 0x02) ? MFI_CMD_LD_WRITE : MFI_CMD_LD_READ;
        ldio->cmd_status = 0x0;
        ldio->scsi_status = 0x0;
        ldio->target_id = device_id;
        ldio->timeout = 0;
        ldio->reserved_0 = 0;
        ldio->pad_0 = 0;
        ldio->flags = cpu_to_le16(flags);
        ldio->start_lba_hi = 0;
        ldio->access_byte = (scp->cmd_len != 6) ? scp->cmnd[1] : 0;

        /*
         * 6-byte READ(0x08) or WRITE(0x0A) cdb
         */
        if (scp->cmd_len == 6) {
                ldio->lba_count = cpu_to_le32((u32) scp->cmnd[4]);
                ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[1] << 16) |
                                                 ((u32) scp->cmnd[2] << 8) |
                                                 (u32) scp->cmnd[3]);

                ldio->start_lba_lo &= cpu_to_le32(0x1FFFFF);
        }

        /*
         * 10-byte READ(0x28) or WRITE(0x2A) cdb
         */
        else if (scp->cmd_len == 10) {
                ldio->lba_count = cpu_to_le32((u32) scp->cmnd[8] |
                                              ((u32) scp->cmnd[7] << 8));
                ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
                                                 ((u32) scp->cmnd[3] << 16) |
                                                 ((u32) scp->cmnd[4] << 8) |
                                                 (u32) scp->cmnd[5]);
        }

        /*
         * 12-byte READ(0xA8) or WRITE(0xAA) cdb
         */
        else if (scp->cmd_len == 12) {
                ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[6] << 24) |
                                              ((u32) scp->cmnd[7] << 16) |
                                              ((u32) scp->cmnd[8] << 8) |
                                              (u32) scp->cmnd[9]);

                ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
                                                 ((u32) scp->cmnd[3] << 16) |
                                                 ((u32) scp->cmnd[4] << 8) |
                                                 (u32) scp->cmnd[5]);
        }

        /*
         * 16-byte READ(0x88) or WRITE(0x8A) cdb
         */
        else if (scp->cmd_len == 16) {
                ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[10] << 24) |
                                              ((u32) scp->cmnd[11] << 16) |
                                              ((u32) scp->cmnd[12] << 8) |
                                              (u32) scp->cmnd[13]);

                ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[6] << 24) |
                                                 ((u32) scp->cmnd[7] << 16) |
                                                 ((u32) scp->cmnd[8] << 8) |
                                                 (u32) scp->cmnd[9]);

                ldio->start_lba_hi = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
                                                 ((u32) scp->cmnd[3] << 16) |
                                                 ((u32) scp->cmnd[4] << 8) |
                                                 (u32) scp->cmnd[5]);

        }
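        /*
         * Example of the decode above: a READ(10) with CDB bytes
         * 28 00 00 12 34 56 00 00 08 00 yields start_lba_lo = 0x00123456 and
         * lba_count = 8; start_lba_hi stays 0 because only the 16-byte CDBs
         * carry the upper 32 LBA bits.
         */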

        /*
         * Construct SGL
         */
        if (instance->flag_ieee) {
                ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64);
                ldio->sge_count = megasas_make_sgl_skinny(instance, scp,
                                                          &ldio->sgl);
        } else if (IS_DMA64) {
                ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64);
                ldio->sge_count = megasas_make_sgl64(instance, scp, &ldio->sgl);
        } else
                ldio->sge_count = megasas_make_sgl32(instance, scp, &ldio->sgl);

        if (ldio->sge_count > instance->max_num_sge) {
                dev_err(&instance->pdev->dev, "build_ld_io: sge_count = %x\n",
                        ldio->sge_count);
                return 0;
        }

        /*
         * Sense info specific
         */
        ldio->sense_len = SCSI_SENSE_BUFFERSIZE;
        ldio->sense_buf_phys_addr_hi = 0;
        ldio->sense_buf_phys_addr_lo = cpu_to_le32(cmd->sense_phys_addr);

        /*
         * Compute the total number of frames this command consumes. FW uses
         * this number to pull sufficient number of frames from host memory.
         */
        cmd->frame_count = megasas_get_frame_count(instance,
                                                   ldio->sge_count, IO_FRAME);

        return cmd->frame_count;
}

/**
 * megasas_cmd_type -	Checks if the cmd is for logical drive/sysPD
 *			and whether it's RW or non RW
 * @cmd:		SCSI command
 *
 */
inline int megasas_cmd_type(struct scsi_cmnd *cmd)
{
        int ret;

        switch (cmd->cmnd[0]) {
        case READ_10:
        case WRITE_10:
        case READ_12:
        case WRITE_12:
        case READ_6:
        case WRITE_6:
        case READ_16:
        case WRITE_16:
                ret = (MEGASAS_IS_LOGICAL(cmd->device)) ?
                        READ_WRITE_LDIO : READ_WRITE_SYSPDIO;
                break;
        default:
                ret = (MEGASAS_IS_LOGICAL(cmd->device)) ?
                        NON_READ_WRITE_LDIO : NON_READ_WRITE_SYSPDIO;
        }
        return ret;
}

/**
 * megasas_dump_pending_frames -	Dumps the frame address of all pending cmds
 *					in FW
 * @instance:				Adapter soft state
 */
static inline void
megasas_dump_pending_frames(struct megasas_instance *instance)
{
        struct megasas_cmd *cmd;
        int i, n;
        union megasas_sgl *mfi_sgl;
        struct megasas_io_frame *ldio;
        struct megasas_pthru_frame *pthru;
        u32 sgcount;
        u16 max_cmd = instance->max_fw_cmds;

        dev_err(&instance->pdev->dev, "[%d]: Dumping Frame Phys Address of all pending cmds in FW\n", instance->host->host_no);
        dev_err(&instance->pdev->dev, "[%d]: Total OS Pending cmds : %d\n", instance->host->host_no, atomic_read(&instance->fw_outstanding));
        if (IS_DMA64)
                dev_err(&instance->pdev->dev, "[%d]: 64 bit SGLs were sent to FW\n", instance->host->host_no);
        else
                dev_err(&instance->pdev->dev, "[%d]: 32 bit SGLs were sent to FW\n", instance->host->host_no);

        dev_err(&instance->pdev->dev, "[%d]: Pending OS cmds in FW : \n", instance->host->host_no);
        for (i = 0; i < max_cmd; i++) {
                cmd = instance->cmd_list[i];
                if (!cmd->scmd)
                        continue;
                dev_err(&instance->pdev->dev, "[%d]: Frame addr :0x%08lx : ", instance->host->host_no, (unsigned long)cmd->frame_phys_addr);
                if (megasas_cmd_type(cmd->scmd) == READ_WRITE_LDIO) {
                        ldio = (struct megasas_io_frame *)cmd->frame;
                        mfi_sgl = &ldio->sgl;
                        sgcount = ldio->sge_count;
                        dev_err(&instance->pdev->dev, "[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x,"
                                " lba lo : 0x%x, lba_hi : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",
                                instance->host->host_no, cmd->frame_count, ldio->cmd, ldio->target_id,
                                le32_to_cpu(ldio->start_lba_lo), le32_to_cpu(ldio->start_lba_hi),
                                le32_to_cpu(ldio->sense_buf_phys_addr_lo), sgcount);
                } else {
                        pthru = (struct megasas_pthru_frame *) cmd->frame;
                        mfi_sgl = &pthru->sgl;
                        sgcount = pthru->sge_count;
                        dev_err(&instance->pdev->dev, "[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x, "
                                "lun : 0x%x, cdb_len : 0x%x, data xfer len : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",
                                instance->host->host_no, cmd->frame_count, pthru->cmd, pthru->target_id,
                                pthru->lun, pthru->cdb_len, le32_to_cpu(pthru->data_xfer_len),
                                le32_to_cpu(pthru->sense_buf_phys_addr_lo), sgcount);
                }
                if (megasas_dbg_lvl & MEGASAS_DBG_LVL) {
                        for (n = 0; n < sgcount; n++) {
                                if (IS_DMA64)
                                        dev_err(&instance->pdev->dev, "sgl len : 0x%x, sgl addr : 0x%llx\n",
                                                le32_to_cpu(mfi_sgl->sge64[n].length),
                                                le64_to_cpu(mfi_sgl->sge64[n].phys_addr));
                                else
                                        dev_err(&instance->pdev->dev, "sgl len : 0x%x, sgl addr : 0x%x\n",
                                                le32_to_cpu(mfi_sgl->sge32[n].length),
                                                le32_to_cpu(mfi_sgl->sge32[n].phys_addr));
                        }
                }
        } /*for max_cmd*/
        dev_err(&instance->pdev->dev, "[%d]: Pending Internal cmds in FW : \n", instance->host->host_no);
        for (i = 0; i < max_cmd; i++) {

                cmd = instance->cmd_list[i];

                if (cmd->sync_cmd == 1)
                        dev_err(&instance->pdev->dev, "0x%08lx : ", (unsigned long)cmd->frame_phys_addr);
        }
        dev_err(&instance->pdev->dev, "[%d]: Dumping Done\n\n", instance->host->host_no);
}

u32
megasas_build_and_issue_cmd(struct megasas_instance *instance,
                            struct scsi_cmnd *scmd)
{
        struct megasas_cmd *cmd;
        u32 frame_count;

        cmd = megasas_get_cmd(instance);
        if (!cmd)
                return SCSI_MLQUEUE_HOST_BUSY;

        /*
         * Logical drive command
         */
        if (megasas_cmd_type(scmd) == READ_WRITE_LDIO)
                frame_count = megasas_build_ldio(instance, scmd, cmd);
        else
                frame_count = megasas_build_dcdb(instance, scmd, cmd);

        if (!frame_count)
                goto out_return_cmd;

        cmd->scmd = scmd;
        megasas_priv(scmd)->cmd_priv = cmd;

        /*
         * Issue the command to the FW
         */
        atomic_inc(&instance->fw_outstanding);

        instance->instancet->fire_cmd(instance, cmd->frame_phys_addr,
                                      cmd->frame_count-1, instance->reg_set);

        return 0;
out_return_cmd:
        megasas_return_cmd(instance, cmd);
        return SCSI_MLQUEUE_HOST_BUSY;
}


/**
 * megasas_queue_command -	Queue entry point
 * @shost:			adapter SCSI host
 * @scmd:			SCSI command to be queued
 */
static int
megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
{
        struct megasas_instance *instance;
        struct MR_PRIV_DEVICE *mr_device_priv_data;
        u32 ld_tgt_id;

        instance = (struct megasas_instance *)
                scmd->device->host->hostdata;

        if (instance->unload == 1) {
                scmd->result = DID_NO_CONNECT << 16;
                scsi_done(scmd);
                return 0;
        }

        if (instance->issuepend_done == 0)
                return SCSI_MLQUEUE_HOST_BUSY;


        /* Check for an mpio path and adjust behavior */
        if (atomic_read(&instance->adprecovery) == MEGASAS_ADPRESET_SM_INFAULT) {
                if (megasas_check_mpio_paths(instance, scmd) ==
                    (DID_REQUEUE << 16)) {
                        return SCSI_MLQUEUE_HOST_BUSY;
                } else {
                        scmd->result = DID_NO_CONNECT << 16;
                        scsi_done(scmd);
                        return 0;
                }
        }

        mr_device_priv_data = scmd->device->hostdata;
        if (!mr_device_priv_data ||
            (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)) {
                scmd->result = DID_NO_CONNECT << 16;
                scsi_done(scmd);
                return 0;
        }

        if (MEGASAS_IS_LOGICAL(scmd->device)) {
                ld_tgt_id = MEGASAS_TARGET_ID(scmd->device);
                if (instance->ld_tgtid_status[ld_tgt_id] == LD_TARGET_ID_DELETED) {
                        scmd->result = DID_NO_CONNECT << 16;
                        scsi_done(scmd);
                        return 0;
                }
        }

        if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
                return SCSI_MLQUEUE_HOST_BUSY;

        if (mr_device_priv_data->tm_busy)
                return SCSI_MLQUEUE_DEVICE_BUSY;


        scmd->result = 0;

        if (MEGASAS_IS_LOGICAL(scmd->device) &&
            (scmd->device->id >= instance->fw_supported_vd_count ||
instance->fw_supported_vd_count || 1845 scmd->device->lun)) { 1846 scmd->result = DID_BAD_TARGET << 16; 1847 goto out_done; 1848 } 1849 1850 if ((scmd->cmnd[0] == SYNCHRONIZE_CACHE) && 1851 MEGASAS_IS_LOGICAL(scmd->device) && 1852 (!instance->fw_sync_cache_support)) { 1853 scmd->result = DID_OK << 16; 1854 goto out_done; 1855 } 1856 1857 return instance->instancet->build_and_issue_cmd(instance, scmd); 1858 1859 out_done: 1860 scsi_done(scmd); 1861 return 0; 1862 } 1863 1864 static struct megasas_instance *megasas_lookup_instance(u16 host_no) 1865 { 1866 int i; 1867 1868 for (i = 0; i < megasas_mgmt_info.max_index; i++) { 1869 1870 if ((megasas_mgmt_info.instance[i]) && 1871 (megasas_mgmt_info.instance[i]->host->host_no == host_no)) 1872 return megasas_mgmt_info.instance[i]; 1873 } 1874 1875 return NULL; 1876 } 1877 1878 /* 1879 * megasas_set_dynamic_target_properties - 1880 * Device property set by driver may not be static and it is required to be 1881 * updated after OCR 1882 * 1883 * set tm_capable. 1884 * set dma alignment (only for eedp protection enable vd). 1885 * 1886 * @sdev: OS provided scsi device 1887 * 1888 * Returns void 1889 */ 1890 void megasas_set_dynamic_target_properties(struct scsi_device *sdev, 1891 struct queue_limits *lim, bool is_target_prop) 1892 { 1893 u16 pd_index = 0, ld; 1894 u32 device_id; 1895 struct megasas_instance *instance; 1896 struct fusion_context *fusion; 1897 struct MR_PRIV_DEVICE *mr_device_priv_data; 1898 struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync; 1899 struct MR_LD_RAID *raid; 1900 struct MR_DRV_RAID_MAP_ALL *local_map_ptr; 1901 1902 instance = megasas_lookup_instance(sdev->host->host_no); 1903 fusion = instance->ctrl_context; 1904 mr_device_priv_data = sdev->hostdata; 1905 1906 if (!fusion || !mr_device_priv_data) 1907 return; 1908 1909 if (MEGASAS_IS_LOGICAL(sdev)) { 1910 device_id = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) 1911 + sdev->id; 1912 local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)]; 1913 ld = MR_TargetIdToLdGet(device_id, local_map_ptr); 1914 if (ld >= instance->fw_supported_vd_count) 1915 return; 1916 raid = MR_LdRaidGet(ld, local_map_ptr); 1917 1918 if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER) { 1919 if (lim) 1920 lim->dma_alignment = 0x7; 1921 } 1922 1923 mr_device_priv_data->is_tm_capable = 1924 raid->capability.tmCapable; 1925 1926 if (!raid->flags.isEPD) 1927 sdev->no_write_same = 1; 1928 1929 } else if (instance->use_seqnum_jbod_fp) { 1930 pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + 1931 sdev->id; 1932 pd_sync = (void *)fusion->pd_seq_sync 1933 [(instance->pd_seq_map_id - 1) & 1]; 1934 mr_device_priv_data->is_tm_capable = 1935 pd_sync->seq[pd_index].capability.tmCapable; 1936 } 1937 1938 if (is_target_prop && instance->tgt_prop->reset_tmo) { 1939 /* 1940 * If FW provides a target reset timeout value, driver will use 1941 * it. If not set, fallback to default values. 1942 */ 1943 mr_device_priv_data->target_reset_tmo = 1944 min_t(u8, instance->max_reset_tmo, 1945 instance->tgt_prop->reset_tmo); 1946 mr_device_priv_data->task_abort_tmo = instance->task_abort_tmo; 1947 } else { 1948 mr_device_priv_data->target_reset_tmo = 1949 MEGASAS_DEFAULT_TM_TIMEOUT; 1950 mr_device_priv_data->task_abort_tmo = 1951 MEGASAS_DEFAULT_TM_TIMEOUT; 1952 } 1953 } 1954 1955 /* 1956 * megasas_set_nvme_device_properties - 1957 * set nomerges=2 1958 * set virtual page boundary = 4K (current mr_nvme_pg_size is 4K). 1959 * set maximum io transfer = MDTS of NVME device provided by MR firmware. 
1960 * 1961 * MR firmware provides value in KB. Caller of this function converts 1962 * kb into bytes. 1963 * 1964 * e.a MDTS=5 means 2^5 * nvme page size. (In case of 4K page size, 1965 * MR firmware provides value 128 as (32 * 4K) = 128K. 1966 * 1967 * @sdev: scsi device 1968 * @max_io_size: maximum io transfer size 1969 * 1970 */ 1971 static inline void 1972 megasas_set_nvme_device_properties(struct scsi_device *sdev, 1973 struct queue_limits *lim, u32 max_io_size) 1974 { 1975 struct megasas_instance *instance; 1976 u32 mr_nvme_pg_size; 1977 1978 instance = (struct megasas_instance *)sdev->host->hostdata; 1979 mr_nvme_pg_size = max_t(u32, instance->nvme_page_size, 1980 MR_DEFAULT_NVME_PAGE_SIZE); 1981 1982 lim->max_hw_sectors = max_io_size / 512; 1983 lim->virt_boundary_mask = mr_nvme_pg_size - 1; 1984 1985 blk_queue_flag_set(QUEUE_FLAG_NOMERGES, sdev->request_queue); 1986 } 1987 1988 /* 1989 * megasas_set_fw_assisted_qd - 1990 * set device queue depth to can_queue 1991 * set device queue depth to fw assisted qd 1992 * 1993 * @sdev: scsi device 1994 * @is_target_prop true, if fw provided target properties. 1995 */ 1996 static void megasas_set_fw_assisted_qd(struct scsi_device *sdev, 1997 bool is_target_prop) 1998 { 1999 u8 interface_type; 2000 u32 device_qd = MEGASAS_DEFAULT_CMD_PER_LUN; 2001 u32 tgt_device_qd; 2002 struct megasas_instance *instance; 2003 struct MR_PRIV_DEVICE *mr_device_priv_data; 2004 2005 instance = megasas_lookup_instance(sdev->host->host_no); 2006 mr_device_priv_data = sdev->hostdata; 2007 interface_type = mr_device_priv_data->interface_type; 2008 2009 switch (interface_type) { 2010 case SAS_PD: 2011 device_qd = MEGASAS_SAS_QD; 2012 break; 2013 case SATA_PD: 2014 device_qd = MEGASAS_SATA_QD; 2015 break; 2016 case NVME_PD: 2017 device_qd = MEGASAS_NVME_QD; 2018 break; 2019 } 2020 2021 if (is_target_prop) { 2022 tgt_device_qd = le32_to_cpu(instance->tgt_prop->device_qdepth); 2023 if (tgt_device_qd) 2024 device_qd = min(instance->host->can_queue, 2025 (int)tgt_device_qd); 2026 } 2027 2028 if (instance->enable_sdev_max_qd && interface_type != UNKNOWN_DRIVE) 2029 device_qd = instance->host->can_queue; 2030 2031 scsi_change_queue_depth(sdev, device_qd); 2032 } 2033 2034 /* 2035 * megasas_set_static_target_properties - 2036 * Device property set by driver are static and it is not required to be 2037 * updated after OCR. 2038 * 2039 * set io timeout 2040 * set device queue depth 2041 * set nvme device properties. see - megasas_set_nvme_device_properties 2042 * 2043 * @sdev: scsi device 2044 * @is_target_prop true, if fw provided target properties. 2045 */ 2046 static void megasas_set_static_target_properties(struct scsi_device *sdev, 2047 struct queue_limits *lim, bool is_target_prop) 2048 { 2049 u32 max_io_size_kb = MR_DEFAULT_NVME_MDTS_KB; 2050 struct megasas_instance *instance; 2051 2052 instance = megasas_lookup_instance(sdev->host->host_no); 2053 2054 /* 2055 * The RAID firmware may require extended timeouts. 2056 */ 2057 blk_queue_rq_timeout(sdev->request_queue, scmd_timeout * HZ); 2058 2059 /* max_io_size_kb will be set to non zero for 2060 * nvme based vd and syspd. 
2061 */ 2062 if (is_target_prop) 2063 max_io_size_kb = le32_to_cpu(instance->tgt_prop->max_io_size_kb); 2064 2065 if (instance->nvme_page_size && max_io_size_kb) 2066 megasas_set_nvme_device_properties(sdev, lim, 2067 max_io_size_kb << 10); 2068 2069 megasas_set_fw_assisted_qd(sdev, is_target_prop); 2070 } 2071 2072 2073 static int megasas_device_configure(struct scsi_device *sdev, 2074 struct queue_limits *lim) 2075 { 2076 u16 pd_index = 0; 2077 struct megasas_instance *instance; 2078 int ret_target_prop = DCMD_FAILED; 2079 bool is_target_prop = false; 2080 2081 instance = megasas_lookup_instance(sdev->host->host_no); 2082 if (instance->pd_list_not_supported) { 2083 if (!MEGASAS_IS_LOGICAL(sdev) && sdev->type == TYPE_DISK) { 2084 pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + 2085 sdev->id; 2086 if (instance->pd_list[pd_index].driveState != 2087 MR_PD_STATE_SYSTEM) 2088 return -ENXIO; 2089 } 2090 } 2091 2092 mutex_lock(&instance->reset_mutex); 2093 /* Send DCMD to Firmware and cache the information */ 2094 if ((instance->pd_info) && !MEGASAS_IS_LOGICAL(sdev)) 2095 megasas_get_pd_info(instance, sdev); 2096 2097 /* Some ventura firmware may not have instance->nvme_page_size set. 2098 * Do not send MR_DCMD_DRV_GET_TARGET_PROP 2099 */ 2100 if ((instance->tgt_prop) && (instance->nvme_page_size)) 2101 ret_target_prop = megasas_get_target_prop(instance, sdev); 2102 2103 is_target_prop = (ret_target_prop == DCMD_SUCCESS) ? true : false; 2104 megasas_set_static_target_properties(sdev, lim, is_target_prop); 2105 2106 /* This sdev property may change post OCR */ 2107 megasas_set_dynamic_target_properties(sdev, lim, is_target_prop); 2108 2109 mutex_unlock(&instance->reset_mutex); 2110 2111 return 0; 2112 } 2113 2114 static int megasas_slave_alloc(struct scsi_device *sdev) 2115 { 2116 u16 pd_index = 0, ld_tgt_id; 2117 struct megasas_instance *instance ; 2118 struct MR_PRIV_DEVICE *mr_device_priv_data; 2119 2120 instance = megasas_lookup_instance(sdev->host->host_no); 2121 if (!MEGASAS_IS_LOGICAL(sdev)) { 2122 /* 2123 * Open the OS scan to the SYSTEM PD 2124 */ 2125 pd_index = 2126 (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + 2127 sdev->id; 2128 if ((instance->pd_list_not_supported || 2129 instance->pd_list[pd_index].driveState == 2130 MR_PD_STATE_SYSTEM)) { 2131 goto scan_target; 2132 } 2133 return -ENXIO; 2134 } else if (!MEGASAS_IS_LUN_VALID(sdev)) { 2135 sdev_printk(KERN_INFO, sdev, "%s: invalid LUN\n", __func__); 2136 return -ENXIO; 2137 } 2138 2139 scan_target: 2140 mr_device_priv_data = kzalloc(sizeof(*mr_device_priv_data), 2141 GFP_KERNEL); 2142 if (!mr_device_priv_data) 2143 return -ENOMEM; 2144 2145 if (MEGASAS_IS_LOGICAL(sdev)) { 2146 ld_tgt_id = MEGASAS_TARGET_ID(sdev); 2147 instance->ld_tgtid_status[ld_tgt_id] = LD_TARGET_ID_ACTIVE; 2148 if (megasas_dbg_lvl & LD_PD_DEBUG) 2149 sdev_printk(KERN_INFO, sdev, "LD target ID %d created.\n", ld_tgt_id); 2150 } 2151 2152 sdev->hostdata = mr_device_priv_data; 2153 2154 atomic_set(&mr_device_priv_data->r1_ldio_hint, 2155 instance->r1_ldio_hint_default); 2156 return 0; 2157 } 2158 2159 static void megasas_slave_destroy(struct scsi_device *sdev) 2160 { 2161 u16 ld_tgt_id; 2162 struct megasas_instance *instance; 2163 2164 instance = megasas_lookup_instance(sdev->host->host_no); 2165 2166 if (MEGASAS_IS_LOGICAL(sdev)) { 2167 if (!MEGASAS_IS_LUN_VALID(sdev)) { 2168 sdev_printk(KERN_INFO, sdev, "%s: invalid LUN\n", __func__); 2169 return; 2170 } 2171 ld_tgt_id = MEGASAS_TARGET_ID(sdev); 2172 instance->ld_tgtid_status[ld_tgt_id] = 
LD_TARGET_ID_DELETED; 2173 if (megasas_dbg_lvl & LD_PD_DEBUG) 2174 sdev_printk(KERN_INFO, sdev, 2175 "LD target ID %d removed from OS stack\n", ld_tgt_id); 2176 } 2177 2178 kfree(sdev->hostdata); 2179 sdev->hostdata = NULL; 2180 } 2181 2182 /* 2183 * megasas_complete_outstanding_ioctls - Complete outstanding ioctls after a 2184 * kill adapter 2185 * @instance: Adapter soft state 2186 * 2187 */ 2188 static void megasas_complete_outstanding_ioctls(struct megasas_instance *instance) 2189 { 2190 int i; 2191 struct megasas_cmd *cmd_mfi; 2192 struct megasas_cmd_fusion *cmd_fusion; 2193 struct fusion_context *fusion = instance->ctrl_context; 2194 2195 /* Find all outstanding ioctls */ 2196 if (fusion) { 2197 for (i = 0; i < instance->max_fw_cmds; i++) { 2198 cmd_fusion = fusion->cmd_list[i]; 2199 if (cmd_fusion->sync_cmd_idx != (u32)ULONG_MAX) { 2200 cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx]; 2201 if (cmd_mfi->sync_cmd && 2202 (cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT)) { 2203 cmd_mfi->frame->hdr.cmd_status = 2204 MFI_STAT_WRONG_STATE; 2205 megasas_complete_cmd(instance, 2206 cmd_mfi, DID_OK); 2207 } 2208 } 2209 } 2210 } else { 2211 for (i = 0; i < instance->max_fw_cmds; i++) { 2212 cmd_mfi = instance->cmd_list[i]; 2213 if (cmd_mfi->sync_cmd && cmd_mfi->frame->hdr.cmd != 2214 MFI_CMD_ABORT) 2215 megasas_complete_cmd(instance, cmd_mfi, DID_OK); 2216 } 2217 } 2218 } 2219 2220 2221 void megaraid_sas_kill_hba(struct megasas_instance *instance) 2222 { 2223 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { 2224 dev_warn(&instance->pdev->dev, 2225 "Adapter already dead, skipping kill HBA\n"); 2226 return; 2227 } 2228 2229 /* Set critical error to block I/O & ioctls in case caller didn't */ 2230 atomic_set(&instance->adprecovery, MEGASAS_HW_CRITICAL_ERROR); 2231 /* Wait 1 second to ensure IO or ioctls in build have posted */ 2232 msleep(1000); 2233 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) || 2234 (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) || 2235 (instance->adapter_type != MFI_SERIES)) { 2236 if (!instance->requestorId) { 2237 writel(MFI_STOP_ADP, &instance->reg_set->doorbell); 2238 /* Flush */ 2239 readl(&instance->reg_set->doorbell); 2240 } 2241 if (instance->requestorId && instance->peerIsPresent) 2242 memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS); 2243 } else { 2244 writel(MFI_STOP_ADP, 2245 &instance->reg_set->inbound_doorbell); 2246 } 2247 /* Complete outstanding ioctls when adapter is killed */ 2248 megasas_complete_outstanding_ioctls(instance); 2249 } 2250 2251 /** 2252 * megasas_check_and_restore_queue_depth - Check if queue depth needs to be 2253 * restored to max value 2254 * @instance: Adapter soft state 2255 * 2256 */ 2257 void 2258 megasas_check_and_restore_queue_depth(struct megasas_instance *instance) 2259 { 2260 unsigned long flags; 2261 2262 if (instance->flag & MEGASAS_FW_BUSY 2263 && time_after(jiffies, instance->last_time + 5 * HZ) 2264 && atomic_read(&instance->fw_outstanding) < 2265 instance->throttlequeuedepth + 1) { 2266 2267 spin_lock_irqsave(instance->host->host_lock, flags); 2268 instance->flag &= ~MEGASAS_FW_BUSY; 2269 2270 instance->host->can_queue = instance->cur_can_queue; 2271 spin_unlock_irqrestore(instance->host->host_lock, flags); 2272 } 2273 } 2274 2275 /** 2276 * megasas_complete_cmd_dpc - Returns FW's controller structure 2277 * @instance_addr: Address of adapter soft state 2278 * 2279 * Tasklet to complete cmds 2280 */ 2281 static void megasas_complete_cmd_dpc(unsigned long 
instance_addr) 2282 { 2283 u32 producer; 2284 u32 consumer; 2285 u32 context; 2286 struct megasas_cmd *cmd; 2287 struct megasas_instance *instance = 2288 (struct megasas_instance *)instance_addr; 2289 unsigned long flags; 2290 2291 /* If we have already declared adapter dead, donot complete cmds */ 2292 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) 2293 return; 2294 2295 spin_lock_irqsave(&instance->completion_lock, flags); 2296 2297 producer = le32_to_cpu(*instance->producer); 2298 consumer = le32_to_cpu(*instance->consumer); 2299 2300 while (consumer != producer) { 2301 context = le32_to_cpu(instance->reply_queue[consumer]); 2302 if (context >= instance->max_fw_cmds) { 2303 dev_err(&instance->pdev->dev, "Unexpected context value %x\n", 2304 context); 2305 BUG(); 2306 } 2307 2308 cmd = instance->cmd_list[context]; 2309 2310 megasas_complete_cmd(instance, cmd, DID_OK); 2311 2312 consumer++; 2313 if (consumer == (instance->max_fw_cmds + 1)) { 2314 consumer = 0; 2315 } 2316 } 2317 2318 *instance->consumer = cpu_to_le32(producer); 2319 2320 spin_unlock_irqrestore(&instance->completion_lock, flags); 2321 2322 /* 2323 * Check if we can restore can_queue 2324 */ 2325 megasas_check_and_restore_queue_depth(instance); 2326 } 2327 2328 static void megasas_sriov_heartbeat_handler(struct timer_list *t); 2329 2330 /** 2331 * megasas_start_timer - Initializes sriov heartbeat timer object 2332 * @instance: Adapter soft state 2333 * 2334 */ 2335 void megasas_start_timer(struct megasas_instance *instance) 2336 { 2337 struct timer_list *timer = &instance->sriov_heartbeat_timer; 2338 2339 timer_setup(timer, megasas_sriov_heartbeat_handler, 0); 2340 timer->expires = jiffies + MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF; 2341 add_timer(timer); 2342 } 2343 2344 static void 2345 megasas_internal_reset_defer_cmds(struct megasas_instance *instance); 2346 2347 static void 2348 process_fw_state_change_wq(struct work_struct *work); 2349 2350 static void megasas_do_ocr(struct megasas_instance *instance) 2351 { 2352 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) || 2353 (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) || 2354 (instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)) { 2355 *instance->consumer = cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN); 2356 } 2357 instance->instancet->disable_intr(instance); 2358 atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT); 2359 instance->issuepend_done = 0; 2360 2361 atomic_set(&instance->fw_outstanding, 0); 2362 megasas_internal_reset_defer_cmds(instance); 2363 process_fw_state_change_wq(&instance->work_init); 2364 } 2365 2366 static int megasas_get_ld_vf_affiliation_111(struct megasas_instance *instance, 2367 int initial) 2368 { 2369 struct megasas_cmd *cmd; 2370 struct megasas_dcmd_frame *dcmd; 2371 struct MR_LD_VF_AFFILIATION_111 *new_affiliation_111 = NULL; 2372 dma_addr_t new_affiliation_111_h; 2373 int ld, retval = 0; 2374 u8 thisVf; 2375 2376 cmd = megasas_get_cmd(instance); 2377 2378 if (!cmd) { 2379 dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_vf_affiliation_111:" 2380 "Failed to get cmd for scsi%d\n", 2381 instance->host->host_no); 2382 return -ENOMEM; 2383 } 2384 2385 dcmd = &cmd->frame->dcmd; 2386 2387 if (!instance->vf_affiliation_111) { 2388 dev_warn(&instance->pdev->dev, "SR-IOV: Couldn't get LD/VF " 2389 "affiliation for scsi%d\n", instance->host->host_no); 2390 megasas_return_cmd(instance, cmd); 2391 return -ENOMEM; 2392 } 2393 2394 if (initial) 2395 memset(instance->vf_affiliation_111, 0, 2396 
sizeof(struct MR_LD_VF_AFFILIATION_111)); 2397 else { 2398 new_affiliation_111 = 2399 dma_alloc_coherent(&instance->pdev->dev, 2400 sizeof(struct MR_LD_VF_AFFILIATION_111), 2401 &new_affiliation_111_h, GFP_KERNEL); 2402 if (!new_affiliation_111) { 2403 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate " 2404 "memory for new affiliation for scsi%d\n", 2405 instance->host->host_no); 2406 megasas_return_cmd(instance, cmd); 2407 return -ENOMEM; 2408 } 2409 } 2410 2411 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 2412 2413 dcmd->cmd = MFI_CMD_DCMD; 2414 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 2415 dcmd->sge_count = 1; 2416 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH); 2417 dcmd->timeout = 0; 2418 dcmd->pad_0 = 0; 2419 dcmd->data_xfer_len = 2420 cpu_to_le32(sizeof(struct MR_LD_VF_AFFILIATION_111)); 2421 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_VF_MAP_GET_ALL_LDS_111); 2422 2423 if (initial) 2424 dcmd->sgl.sge32[0].phys_addr = 2425 cpu_to_le32(instance->vf_affiliation_111_h); 2426 else 2427 dcmd->sgl.sge32[0].phys_addr = 2428 cpu_to_le32(new_affiliation_111_h); 2429 2430 dcmd->sgl.sge32[0].length = cpu_to_le32( 2431 sizeof(struct MR_LD_VF_AFFILIATION_111)); 2432 2433 dev_warn(&instance->pdev->dev, "SR-IOV: Getting LD/VF affiliation for " 2434 "scsi%d\n", instance->host->host_no); 2435 2436 if (megasas_issue_blocked_cmd(instance, cmd, 0) != DCMD_SUCCESS) { 2437 dev_warn(&instance->pdev->dev, "SR-IOV: LD/VF affiliation DCMD" 2438 " failed with status 0x%x for scsi%d\n", 2439 dcmd->cmd_status, instance->host->host_no); 2440 retval = 1; /* Do a scan if we couldn't get affiliation */ 2441 goto out; 2442 } 2443 2444 if (!initial) { 2445 thisVf = new_affiliation_111->thisVf; 2446 for (ld = 0 ; ld < new_affiliation_111->vdCount; ld++) 2447 if (instance->vf_affiliation_111->map[ld].policy[thisVf] != 2448 new_affiliation_111->map[ld].policy[thisVf]) { 2449 dev_warn(&instance->pdev->dev, "SR-IOV: " 2450 "Got new LD/VF affiliation for scsi%d\n", 2451 instance->host->host_no); 2452 memcpy(instance->vf_affiliation_111, 2453 new_affiliation_111, 2454 sizeof(struct MR_LD_VF_AFFILIATION_111)); 2455 retval = 1; 2456 goto out; 2457 } 2458 } 2459 out: 2460 if (new_affiliation_111) { 2461 dma_free_coherent(&instance->pdev->dev, 2462 sizeof(struct MR_LD_VF_AFFILIATION_111), 2463 new_affiliation_111, 2464 new_affiliation_111_h); 2465 } 2466 2467 megasas_return_cmd(instance, cmd); 2468 2469 return retval; 2470 } 2471 2472 static int megasas_get_ld_vf_affiliation_12(struct megasas_instance *instance, 2473 int initial) 2474 { 2475 struct megasas_cmd *cmd; 2476 struct megasas_dcmd_frame *dcmd; 2477 struct MR_LD_VF_AFFILIATION *new_affiliation = NULL; 2478 struct MR_LD_VF_MAP *newmap = NULL, *savedmap = NULL; 2479 dma_addr_t new_affiliation_h; 2480 int i, j, retval = 0, found = 0, doscan = 0; 2481 u8 thisVf; 2482 2483 cmd = megasas_get_cmd(instance); 2484 2485 if (!cmd) { 2486 dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_vf_affiliation12: " 2487 "Failed to get cmd for scsi%d\n", 2488 instance->host->host_no); 2489 return -ENOMEM; 2490 } 2491 2492 dcmd = &cmd->frame->dcmd; 2493 2494 if (!instance->vf_affiliation) { 2495 dev_warn(&instance->pdev->dev, "SR-IOV: Couldn't get LD/VF " 2496 "affiliation for scsi%d\n", instance->host->host_no); 2497 megasas_return_cmd(instance, cmd); 2498 return -ENOMEM; 2499 } 2500 2501 if (initial) 2502 memset(instance->vf_affiliation, 0, (MAX_LOGICAL_DRIVES + 1) * 2503 sizeof(struct MR_LD_VF_AFFILIATION)); 2504 else { 2505 new_affiliation = 2506 
dma_alloc_coherent(&instance->pdev->dev, 2507 (MAX_LOGICAL_DRIVES + 1) * sizeof(struct MR_LD_VF_AFFILIATION), 2508 &new_affiliation_h, GFP_KERNEL); 2509 if (!new_affiliation) { 2510 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate " 2511 "memory for new affiliation for scsi%d\n", 2512 instance->host->host_no); 2513 megasas_return_cmd(instance, cmd); 2514 return -ENOMEM; 2515 } 2516 } 2517 2518 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 2519 2520 dcmd->cmd = MFI_CMD_DCMD; 2521 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 2522 dcmd->sge_count = 1; 2523 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH); 2524 dcmd->timeout = 0; 2525 dcmd->pad_0 = 0; 2526 dcmd->data_xfer_len = cpu_to_le32((MAX_LOGICAL_DRIVES + 1) * 2527 sizeof(struct MR_LD_VF_AFFILIATION)); 2528 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_VF_MAP_GET_ALL_LDS); 2529 2530 if (initial) 2531 dcmd->sgl.sge32[0].phys_addr = 2532 cpu_to_le32(instance->vf_affiliation_h); 2533 else 2534 dcmd->sgl.sge32[0].phys_addr = 2535 cpu_to_le32(new_affiliation_h); 2536 2537 dcmd->sgl.sge32[0].length = cpu_to_le32((MAX_LOGICAL_DRIVES + 1) * 2538 sizeof(struct MR_LD_VF_AFFILIATION)); 2539 2540 dev_warn(&instance->pdev->dev, "SR-IOV: Getting LD/VF affiliation for " 2541 "scsi%d\n", instance->host->host_no); 2542 2543 2544 if (megasas_issue_blocked_cmd(instance, cmd, 0) != DCMD_SUCCESS) { 2545 dev_warn(&instance->pdev->dev, "SR-IOV: LD/VF affiliation DCMD" 2546 " failed with status 0x%x for scsi%d\n", 2547 dcmd->cmd_status, instance->host->host_no); 2548 retval = 1; /* Do a scan if we couldn't get affiliation */ 2549 goto out; 2550 } 2551 2552 if (!initial) { 2553 if (!new_affiliation->ldCount) { 2554 dev_warn(&instance->pdev->dev, "SR-IOV: Got new LD/VF " 2555 "affiliation for passive path for scsi%d\n", 2556 instance->host->host_no); 2557 retval = 1; 2558 goto out; 2559 } 2560 newmap = new_affiliation->map; 2561 savedmap = instance->vf_affiliation->map; 2562 thisVf = new_affiliation->thisVf; 2563 for (i = 0 ; i < new_affiliation->ldCount; i++) { 2564 found = 0; 2565 for (j = 0; j < instance->vf_affiliation->ldCount; 2566 j++) { 2567 if (newmap->ref.targetId == 2568 savedmap->ref.targetId) { 2569 found = 1; 2570 if (newmap->policy[thisVf] != 2571 savedmap->policy[thisVf]) { 2572 doscan = 1; 2573 goto out; 2574 } 2575 } 2576 savedmap = (struct MR_LD_VF_MAP *) 2577 ((unsigned char *)savedmap + 2578 savedmap->size); 2579 } 2580 if (!found && newmap->policy[thisVf] != 2581 MR_LD_ACCESS_HIDDEN) { 2582 doscan = 1; 2583 goto out; 2584 } 2585 newmap = (struct MR_LD_VF_MAP *) 2586 ((unsigned char *)newmap + newmap->size); 2587 } 2588 2589 newmap = new_affiliation->map; 2590 savedmap = instance->vf_affiliation->map; 2591 2592 for (i = 0 ; i < instance->vf_affiliation->ldCount; i++) { 2593 found = 0; 2594 for (j = 0 ; j < new_affiliation->ldCount; j++) { 2595 if (savedmap->ref.targetId == 2596 newmap->ref.targetId) { 2597 found = 1; 2598 if (savedmap->policy[thisVf] != 2599 newmap->policy[thisVf]) { 2600 doscan = 1; 2601 goto out; 2602 } 2603 } 2604 newmap = (struct MR_LD_VF_MAP *) 2605 ((unsigned char *)newmap + 2606 newmap->size); 2607 } 2608 if (!found && savedmap->policy[thisVf] != 2609 MR_LD_ACCESS_HIDDEN) { 2610 doscan = 1; 2611 goto out; 2612 } 2613 savedmap = (struct MR_LD_VF_MAP *) 2614 ((unsigned char *)savedmap + 2615 savedmap->size); 2616 } 2617 } 2618 out: 2619 if (doscan) { 2620 dev_warn(&instance->pdev->dev, "SR-IOV: Got new LD/VF " 2621 "affiliation for scsi%d\n", instance->host->host_no); 2622 memcpy(instance->vf_affiliation, 
new_affiliation, 2623 new_affiliation->size); 2624 retval = 1; 2625 } 2626 2627 if (new_affiliation) 2628 dma_free_coherent(&instance->pdev->dev, 2629 (MAX_LOGICAL_DRIVES + 1) * 2630 sizeof(struct MR_LD_VF_AFFILIATION), 2631 new_affiliation, new_affiliation_h); 2632 megasas_return_cmd(instance, cmd); 2633 2634 return retval; 2635 } 2636 2637 /* This function will get the current SR-IOV LD/VF affiliation */ 2638 static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance, 2639 int initial) 2640 { 2641 int retval; 2642 2643 if (instance->PlasmaFW111) 2644 retval = megasas_get_ld_vf_affiliation_111(instance, initial); 2645 else 2646 retval = megasas_get_ld_vf_affiliation_12(instance, initial); 2647 return retval; 2648 } 2649 2650 /* This function will tell FW to start the SR-IOV heartbeat */ 2651 int megasas_sriov_start_heartbeat(struct megasas_instance *instance, 2652 int initial) 2653 { 2654 struct megasas_cmd *cmd; 2655 struct megasas_dcmd_frame *dcmd; 2656 int retval = 0; 2657 2658 cmd = megasas_get_cmd(instance); 2659 2660 if (!cmd) { 2661 dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_sriov_start_heartbeat: " 2662 "Failed to get cmd for scsi%d\n", 2663 instance->host->host_no); 2664 return -ENOMEM; 2665 } 2666 2667 dcmd = &cmd->frame->dcmd; 2668 2669 if (initial) { 2670 instance->hb_host_mem = 2671 dma_alloc_coherent(&instance->pdev->dev, 2672 sizeof(struct MR_CTRL_HB_HOST_MEM), 2673 &instance->hb_host_mem_h, 2674 GFP_KERNEL); 2675 if (!instance->hb_host_mem) { 2676 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate" 2677 " memory for heartbeat host memory for scsi%d\n", 2678 instance->host->host_no); 2679 retval = -ENOMEM; 2680 goto out; 2681 } 2682 } 2683 2684 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 2685 2686 dcmd->mbox.s[0] = cpu_to_le16(sizeof(struct MR_CTRL_HB_HOST_MEM)); 2687 dcmd->cmd = MFI_CMD_DCMD; 2688 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 2689 dcmd->sge_count = 1; 2690 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH); 2691 dcmd->timeout = 0; 2692 dcmd->pad_0 = 0; 2693 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_CTRL_HB_HOST_MEM)); 2694 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SHARED_HOST_MEM_ALLOC); 2695 2696 megasas_set_dma_settings(instance, dcmd, instance->hb_host_mem_h, 2697 sizeof(struct MR_CTRL_HB_HOST_MEM)); 2698 2699 dev_warn(&instance->pdev->dev, "SR-IOV: Starting heartbeat for scsi%d\n", 2700 instance->host->host_no); 2701 2702 if ((instance->adapter_type != MFI_SERIES) && 2703 !instance->mask_interrupts) 2704 retval = megasas_issue_blocked_cmd(instance, cmd, 2705 MEGASAS_ROUTINE_WAIT_TIME_VF); 2706 else 2707 retval = megasas_issue_polled(instance, cmd); 2708 2709 if (retval) { 2710 dev_warn(&instance->pdev->dev, "SR-IOV: MR_DCMD_CTRL_SHARED_HOST" 2711 "_MEM_ALLOC DCMD %s for scsi%d\n", 2712 (dcmd->cmd_status == MFI_STAT_INVALID_STATUS) ? 
2713 "timed out" : "failed", instance->host->host_no); 2714 retval = 1; 2715 } 2716 2717 out: 2718 megasas_return_cmd(instance, cmd); 2719 2720 return retval; 2721 } 2722 2723 /* Handler for SR-IOV heartbeat */ 2724 static void megasas_sriov_heartbeat_handler(struct timer_list *t) 2725 { 2726 struct megasas_instance *instance = 2727 from_timer(instance, t, sriov_heartbeat_timer); 2728 2729 if (instance->hb_host_mem->HB.fwCounter != 2730 instance->hb_host_mem->HB.driverCounter) { 2731 instance->hb_host_mem->HB.driverCounter = 2732 instance->hb_host_mem->HB.fwCounter; 2733 mod_timer(&instance->sriov_heartbeat_timer, 2734 jiffies + MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF); 2735 } else { 2736 dev_warn(&instance->pdev->dev, "SR-IOV: Heartbeat never " 2737 "completed for scsi%d\n", instance->host->host_no); 2738 schedule_work(&instance->work_init); 2739 } 2740 } 2741 2742 /** 2743 * megasas_wait_for_outstanding - Wait for all outstanding cmds 2744 * @instance: Adapter soft state 2745 * 2746 * This function waits for up to MEGASAS_RESET_WAIT_TIME seconds for FW to 2747 * complete all its outstanding commands. Returns error if one or more IOs 2748 * are pending after this time period. It also marks the controller dead. 2749 */ 2750 static int megasas_wait_for_outstanding(struct megasas_instance *instance) 2751 { 2752 int i, sl, outstanding; 2753 u32 reset_index; 2754 u32 wait_time = MEGASAS_RESET_WAIT_TIME; 2755 unsigned long flags; 2756 struct list_head clist_local; 2757 struct megasas_cmd *reset_cmd; 2758 u32 fw_state; 2759 2760 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { 2761 dev_info(&instance->pdev->dev, "%s:%d HBA is killed.\n", 2762 __func__, __LINE__); 2763 return FAILED; 2764 } 2765 2766 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) { 2767 2768 INIT_LIST_HEAD(&clist_local); 2769 spin_lock_irqsave(&instance->hba_lock, flags); 2770 list_splice_init(&instance->internal_reset_pending_q, 2771 &clist_local); 2772 spin_unlock_irqrestore(&instance->hba_lock, flags); 2773 2774 dev_notice(&instance->pdev->dev, "HBA reset wait ...\n"); 2775 for (i = 0; i < wait_time; i++) { 2776 msleep(1000); 2777 if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL) 2778 break; 2779 } 2780 2781 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) { 2782 dev_notice(&instance->pdev->dev, "reset: Stopping HBA.\n"); 2783 atomic_set(&instance->adprecovery, MEGASAS_HW_CRITICAL_ERROR); 2784 return FAILED; 2785 } 2786 2787 reset_index = 0; 2788 while (!list_empty(&clist_local)) { 2789 reset_cmd = list_entry((&clist_local)->next, 2790 struct megasas_cmd, list); 2791 list_del_init(&reset_cmd->list); 2792 if (reset_cmd->scmd) { 2793 reset_cmd->scmd->result = DID_REQUEUE << 16; 2794 dev_notice(&instance->pdev->dev, "%d:%p reset [%02x]\n", 2795 reset_index, reset_cmd, 2796 reset_cmd->scmd->cmnd[0]); 2797 2798 scsi_done(reset_cmd->scmd); 2799 megasas_return_cmd(instance, reset_cmd); 2800 } else if (reset_cmd->sync_cmd) { 2801 dev_notice(&instance->pdev->dev, "%p synch cmds" 2802 "reset queue\n", 2803 reset_cmd); 2804 2805 reset_cmd->cmd_status_drv = DCMD_INIT; 2806 instance->instancet->fire_cmd(instance, 2807 reset_cmd->frame_phys_addr, 2808 0, instance->reg_set); 2809 } else { 2810 dev_notice(&instance->pdev->dev, "%p unexpected" 2811 "cmds lst\n", 2812 reset_cmd); 2813 } 2814 reset_index++; 2815 } 2816 2817 return SUCCESS; 2818 } 2819 2820 for (i = 0; i < resetwaittime; i++) { 2821 outstanding = atomic_read(&instance->fw_outstanding); 2822 2823 if 
(!outstanding) 2824 break; 2825 2826 if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) { 2827 dev_notice(&instance->pdev->dev, "[%2d]waiting for %d " 2828 "commands to complete\n",i,outstanding); 2829 /* 2830 * Call cmd completion routine. Cmd to be 2831 * be completed directly without depending on isr. 2832 */ 2833 megasas_complete_cmd_dpc((unsigned long)instance); 2834 } 2835 2836 msleep(1000); 2837 } 2838 2839 i = 0; 2840 outstanding = atomic_read(&instance->fw_outstanding); 2841 fw_state = instance->instancet->read_fw_status_reg(instance) & MFI_STATE_MASK; 2842 2843 if ((!outstanding && (fw_state == MFI_STATE_OPERATIONAL))) 2844 goto no_outstanding; 2845 2846 if (instance->disableOnlineCtrlReset) 2847 goto kill_hba_and_failed; 2848 do { 2849 if ((fw_state == MFI_STATE_FAULT) || atomic_read(&instance->fw_outstanding)) { 2850 dev_info(&instance->pdev->dev, 2851 "%s:%d waiting_for_outstanding: before issue OCR. FW state = 0x%x, outstanding 0x%x\n", 2852 __func__, __LINE__, fw_state, atomic_read(&instance->fw_outstanding)); 2853 if (i == 3) 2854 goto kill_hba_and_failed; 2855 megasas_do_ocr(instance); 2856 2857 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { 2858 dev_info(&instance->pdev->dev, "%s:%d OCR failed and HBA is killed.\n", 2859 __func__, __LINE__); 2860 return FAILED; 2861 } 2862 dev_info(&instance->pdev->dev, "%s:%d waiting_for_outstanding: after issue OCR.\n", 2863 __func__, __LINE__); 2864 2865 for (sl = 0; sl < 10; sl++) 2866 msleep(500); 2867 2868 outstanding = atomic_read(&instance->fw_outstanding); 2869 2870 fw_state = instance->instancet->read_fw_status_reg(instance) & MFI_STATE_MASK; 2871 if ((!outstanding && (fw_state == MFI_STATE_OPERATIONAL))) 2872 goto no_outstanding; 2873 } 2874 i++; 2875 } while (i <= 3); 2876 2877 no_outstanding: 2878 2879 dev_info(&instance->pdev->dev, "%s:%d no more pending commands remain after reset handling.\n", 2880 __func__, __LINE__); 2881 return SUCCESS; 2882 2883 kill_hba_and_failed: 2884 2885 /* Reset not supported, kill adapter */ 2886 dev_info(&instance->pdev->dev, "%s:%d killing adapter scsi%d" 2887 " disableOnlineCtrlReset %d fw_outstanding %d \n", 2888 __func__, __LINE__, instance->host->host_no, instance->disableOnlineCtrlReset, 2889 atomic_read(&instance->fw_outstanding)); 2890 megasas_dump_pending_frames(instance); 2891 megaraid_sas_kill_hba(instance); 2892 2893 return FAILED; 2894 } 2895 2896 /** 2897 * megasas_generic_reset - Generic reset routine 2898 * @scmd: Mid-layer SCSI command 2899 * 2900 * This routine implements a generic reset handler for device, bus and host 2901 * reset requests. Device, bus and host specific reset handlers can use this 2902 * function after they do their specific tasks. 
2903 */ 2904 static int megasas_generic_reset(struct scsi_cmnd *scmd) 2905 { 2906 int ret_val; 2907 struct megasas_instance *instance; 2908 2909 instance = (struct megasas_instance *)scmd->device->host->hostdata; 2910 2911 scmd_printk(KERN_NOTICE, scmd, "megasas: RESET cmd=%x retries=%x\n", 2912 scmd->cmnd[0], scmd->retries); 2913 2914 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { 2915 dev_err(&instance->pdev->dev, "cannot recover from previous reset failures\n"); 2916 return FAILED; 2917 } 2918 2919 ret_val = megasas_wait_for_outstanding(instance); 2920 if (ret_val == SUCCESS) 2921 dev_notice(&instance->pdev->dev, "reset successful\n"); 2922 else 2923 dev_err(&instance->pdev->dev, "failed to do reset\n"); 2924 2925 return ret_val; 2926 } 2927 2928 /** 2929 * megasas_reset_timer - quiesce the adapter if required 2930 * @scmd: scsi cmnd 2931 * 2932 * Sets the FW busy flag and reduces the host->can_queue if the 2933 * cmd has not been completed within the timeout period. 2934 */ 2935 static enum scsi_timeout_action megasas_reset_timer(struct scsi_cmnd *scmd) 2936 { 2937 struct megasas_instance *instance; 2938 unsigned long flags; 2939 2940 if (time_after(jiffies, scmd->jiffies_at_alloc + 2941 (scmd_timeout * 2) * HZ)) { 2942 return SCSI_EH_NOT_HANDLED; 2943 } 2944 2945 instance = (struct megasas_instance *)scmd->device->host->hostdata; 2946 if (!(instance->flag & MEGASAS_FW_BUSY)) { 2947 /* FW is busy, throttle IO */ 2948 spin_lock_irqsave(instance->host->host_lock, flags); 2949 2950 instance->host->can_queue = instance->throttlequeuedepth; 2951 instance->last_time = jiffies; 2952 instance->flag |= MEGASAS_FW_BUSY; 2953 2954 spin_unlock_irqrestore(instance->host->host_lock, flags); 2955 } 2956 return SCSI_EH_RESET_TIMER; 2957 } 2958 2959 /** 2960 * megasas_dump - This function will print hexdump of provided buffer. 2961 * @buf: Buffer to be dumped 2962 * @sz: Size in bytes 2963 * @format: Different formats of dumping e.g. format=n will 2964 * cause only 'n' 32 bit words to be dumped in a single 2965 * line. 
2966 */
2967 inline void
2968 megasas_dump(void *buf, int sz, int format)
2969 {
2970 int i;
2971 __le32 *buf_loc = (__le32 *)buf;
2972
2973 for (i = 0; i < (sz / sizeof(__le32)); i++) {
2974 if ((i % format) == 0) {
2975 if (i != 0)
2976 printk(KERN_CONT "\n");
2977 printk(KERN_CONT "%08x: ", (i * 4));
2978 }
2979 printk(KERN_CONT "%08x ", le32_to_cpu(buf_loc[i]));
2980 }
2981 printk(KERN_CONT "\n");
2982 }
2983
2984 /**
2985 * megasas_dump_reg_set - This function will print hexdump of register set
2986 * @reg_set: Register set to be dumped
2987 */
2988 inline void
2989 megasas_dump_reg_set(void __iomem *reg_set)
2990 {
2991 unsigned int i, sz = 256;
2992 u32 __iomem *reg = (u32 __iomem *)reg_set;
2993
2994 for (i = 0; i < (sz / sizeof(u32)); i++)
2995 printk("%08x: %08x\n", (i * 4), readl(&reg[i]));
2996 }
2997
2998 /**
2999 * megasas_dump_fusion_io - This function will print key details
3000 * of SCSI IO
3001 * @scmd: SCSI command pointer of SCSI IO
3002 */
3003 void
3004 megasas_dump_fusion_io(struct scsi_cmnd *scmd)
3005 {
3006 struct megasas_cmd_fusion *cmd = megasas_priv(scmd)->cmd_priv;
3007 union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
3008 struct megasas_instance *instance;
3009
3010 instance = (struct megasas_instance *)scmd->device->host->hostdata;
3011
3012 scmd_printk(KERN_INFO, scmd,
3013 "scmd: (0x%p) retries: 0x%x allowed: 0x%x\n",
3014 scmd, scmd->retries, scmd->allowed);
3015 scsi_print_command(scmd);
3016
3017 if (cmd) {
3018 req_desc = (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)cmd->request_desc;
3019 scmd_printk(KERN_INFO, scmd, "Request descriptor details:\n");
3020 scmd_printk(KERN_INFO, scmd,
3021 "RequestFlags:0x%x MSIxIndex:0x%x SMID:0x%x LMID:0x%x DevHandle:0x%x\n",
3022 req_desc->SCSIIO.RequestFlags,
3023 req_desc->SCSIIO.MSIxIndex, req_desc->SCSIIO.SMID,
3024 req_desc->SCSIIO.LMID, req_desc->SCSIIO.DevHandle);
3025
3026 printk(KERN_INFO "IO request frame:\n");
3027 megasas_dump(cmd->io_request,
3028 MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE, 8);
3029 printk(KERN_INFO "Chain frame:\n");
3030 megasas_dump(cmd->sg_frame,
3031 instance->max_chain_frame_sz, 8);
3032 }
3033
3034 }
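/*
 * Example of the dump layout produced by megasas_dump(): the buffer is
 * printed as little-endian 32-bit words, 'format' words per line, and each
 * line is prefixed with the byte offset of its first word. A call such as
 * megasas_dump(buf, 64, 8) therefore prints two lines of eight words:
 *
 *   00000000: <word0> <word1> ... <word7>
 *   00000020: <word8> <word9> ... <word15>
 *
 * megasas_dump_fusion_io() above relies on this layout when it dumps the IO
 * request frame and the chain frame of a timed-out command, eight words per
 * line.
 */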
3035
3036 /*
3037 * megasas_dump_sys_regs - This function will dump system registers through
3038 * sysfs.
3039 * @reg_set: Pointer to System register set.
3040 * @buf: Buffer to which output is to be written.
3041 * @return: Number of bytes written to buffer.
3042 */
3043 static inline ssize_t
3044 megasas_dump_sys_regs(void __iomem *reg_set, char *buf)
3045 {
3046 unsigned int i, sz = 256;
3047 int bytes_wrote = 0;
3048 char *loc = (char *)buf;
3049 u32 __iomem *reg = (u32 __iomem *)reg_set;
3050
3051 for (i = 0; i < sz / sizeof(u32); i++) {
3052 bytes_wrote += scnprintf(loc + bytes_wrote,
3053 PAGE_SIZE - bytes_wrote,
3054 "%08x: %08x\n", (i * 4),
3055 readl(&reg[i]));
3056 }
3057 return bytes_wrote;
3058 }
3059
3060 /**
3061 * megasas_reset_bus_host - Bus & host reset handler entry point
3062 * @scmd: Mid-layer SCSI command
3063 */
3064 static int megasas_reset_bus_host(struct scsi_cmnd *scmd)
3065 {
3066 int ret;
3067 struct megasas_instance *instance;
3068
3069 instance = (struct megasas_instance *)scmd->device->host->hostdata;
3070
3071 scmd_printk(KERN_INFO, scmd,
3072 "OCR is requested due to IO timeout!!\n");
3073
3074 scmd_printk(KERN_INFO, scmd,
3075 "SCSI host state: %d SCSI host busy: %d FW outstanding: %d\n",
3076 scmd->device->host->shost_state,
3077 scsi_host_busy(scmd->device->host),
3078 atomic_read(&instance->fw_outstanding));
3079 /*
3080 * First wait for all commands to complete
3081 */
3082 if (instance->adapter_type == MFI_SERIES) {
3083 ret = megasas_generic_reset(scmd);
3084 } else {
3085 megasas_dump_fusion_io(scmd);
3086 ret = megasas_reset_fusion(scmd->device->host,
3087 SCSIIO_TIMEOUT_OCR);
3088 }
3089
3090 return ret;
3091 }
3092
3093 /**
3094 * megasas_task_abort - Issues task abort request to firmware
3095 * (supported only for fusion adapters)
3096 * @scmd: SCSI command pointer
3097 */
3098 static int megasas_task_abort(struct scsi_cmnd *scmd)
3099 {
3100 int ret;
3101 struct megasas_instance *instance;
3102
3103 instance = (struct megasas_instance *)scmd->device->host->hostdata;
3104
3105 if (instance->adapter_type != MFI_SERIES)
3106 ret = megasas_task_abort_fusion(scmd);
3107 else {
3108 sdev_printk(KERN_NOTICE, scmd->device, "TASK ABORT not supported\n");
3109 ret = FAILED;
3110 }
3111
3112 return ret;
3113 }
3114
3115 /**
3116 * megasas_reset_target: Issues target reset request to firmware
3117 * (supported only for fusion adapters)
3118 * @scmd: SCSI command pointer
3119 */
3120 static int megasas_reset_target(struct scsi_cmnd *scmd)
3121 {
3122 int ret;
3123 struct megasas_instance *instance;
3124
3125 instance = (struct megasas_instance *)scmd->device->host->hostdata;
3126
3127 if (instance->adapter_type != MFI_SERIES)
3128 ret = megasas_reset_target_fusion(scmd);
3129 else {
3130 sdev_printk(KERN_NOTICE, scmd->device, "TARGET RESET not supported\n");
3131 ret = FAILED;
3132 }
3133
3134 return ret;
3135 }
3136
3137 /**
3138 * megasas_bios_param - Returns disk geometry for a disk
3139 * @sdev: device handle
3140 * @bdev: block device
3141 * @capacity: drive capacity
3142 * @geom: geometry parameters
3143 */
3144 static int
3145 megasas_bios_param(struct scsi_device *sdev, struct block_device *bdev,
3146 sector_t capacity, int geom[])
3147 {
3148 int heads;
3149 int sectors;
3150 sector_t cylinders;
3151 unsigned long tmp;
3152
3153 /* Default heads (64) & sectors (32) */
3154 heads = 64;
3155 sectors = 32;
3156
3157 tmp = heads * sectors;
3158 cylinders = capacity;
3159
3160 sector_div(cylinders, tmp);
3161
3162 /*
3163 * Handle extended translation size for logical drives > 1Gb
3164 */
3165
3166 if (capacity >= 0x200000) {
3167 heads = 255;
3168 sectors = 63;
3169 tmp = heads*sectors;
3170 cylinders = capacity;
3171 sector_div(cylinders, tmp);
3172 }
3173
3174 geom[0] = heads;
3175 geom[1] = sectors; 3176
geom[2] = cylinders; 3177 3178 return 0; 3179 } 3180 3181 static void megasas_map_queues(struct Scsi_Host *shost) 3182 { 3183 struct megasas_instance *instance; 3184 int qoff = 0, offset; 3185 struct blk_mq_queue_map *map; 3186 3187 instance = (struct megasas_instance *)shost->hostdata; 3188 3189 if (shost->nr_hw_queues == 1) 3190 return; 3191 3192 offset = instance->low_latency_index_start; 3193 3194 /* Setup Default hctx */ 3195 map = &shost->tag_set.map[HCTX_TYPE_DEFAULT]; 3196 map->nr_queues = instance->msix_vectors - offset; 3197 map->queue_offset = 0; 3198 blk_mq_pci_map_queues(map, instance->pdev, offset); 3199 qoff += map->nr_queues; 3200 offset += map->nr_queues; 3201 3202 /* we never use READ queue, so can't cheat blk-mq */ 3203 shost->tag_set.map[HCTX_TYPE_READ].nr_queues = 0; 3204 3205 /* Setup Poll hctx */ 3206 map = &shost->tag_set.map[HCTX_TYPE_POLL]; 3207 map->nr_queues = instance->iopoll_q_count; 3208 if (map->nr_queues) { 3209 /* 3210 * The poll queue(s) doesn't have an IRQ (and hence IRQ 3211 * affinity), so use the regular blk-mq cpu mapping 3212 */ 3213 map->queue_offset = qoff; 3214 blk_mq_map_queues(map); 3215 } 3216 } 3217 3218 static void megasas_aen_polling(struct work_struct *work); 3219 3220 /** 3221 * megasas_service_aen - Processes an event notification 3222 * @instance: Adapter soft state 3223 * @cmd: AEN command completed by the ISR 3224 * 3225 * For AEN, driver sends a command down to FW that is held by the FW till an 3226 * event occurs. When an event of interest occurs, FW completes the command 3227 * that it was previously holding. 3228 * 3229 * This routines sends SIGIO signal to processes that have registered with the 3230 * driver for AEN. 3231 */ 3232 static void 3233 megasas_service_aen(struct megasas_instance *instance, struct megasas_cmd *cmd) 3234 { 3235 unsigned long flags; 3236 3237 /* 3238 * Don't signal app if it is just an aborted previously registered aen 3239 */ 3240 if ((!cmd->abort_aen) && (instance->unload == 0)) { 3241 spin_lock_irqsave(&poll_aen_lock, flags); 3242 megasas_poll_wait_aen = 1; 3243 spin_unlock_irqrestore(&poll_aen_lock, flags); 3244 wake_up(&megasas_poll_wait); 3245 kill_fasync(&megasas_async_queue, SIGIO, POLL_IN); 3246 } 3247 else 3248 cmd->abort_aen = 0; 3249 3250 instance->aen_cmd = NULL; 3251 3252 megasas_return_cmd(instance, cmd); 3253 3254 if ((instance->unload == 0) && 3255 ((instance->issuepend_done == 1))) { 3256 struct megasas_aen_event *ev; 3257 3258 ev = kzalloc(sizeof(*ev), GFP_ATOMIC); 3259 if (!ev) { 3260 dev_err(&instance->pdev->dev, "megasas_service_aen: out of memory\n"); 3261 } else { 3262 ev->instance = instance; 3263 instance->ev = ev; 3264 INIT_DELAYED_WORK(&ev->hotplug_work, 3265 megasas_aen_polling); 3266 schedule_delayed_work(&ev->hotplug_work, 0); 3267 } 3268 } 3269 } 3270 3271 static ssize_t 3272 fw_crash_buffer_store(struct device *cdev, 3273 struct device_attribute *attr, const char *buf, size_t count) 3274 { 3275 struct Scsi_Host *shost = class_to_shost(cdev); 3276 struct megasas_instance *instance = 3277 (struct megasas_instance *) shost->hostdata; 3278 int val = 0; 3279 3280 if (kstrtoint(buf, 0, &val) != 0) 3281 return -EINVAL; 3282 3283 mutex_lock(&instance->crashdump_lock); 3284 instance->fw_crash_buffer_offset = val; 3285 mutex_unlock(&instance->crashdump_lock); 3286 return strlen(buf); 3287 } 3288 3289 static ssize_t 3290 fw_crash_buffer_show(struct device *cdev, 3291 struct device_attribute *attr, char *buf) 3292 { 3293 struct Scsi_Host *shost = class_to_shost(cdev); 3294 struct 
megasas_instance *instance = 3295 (struct megasas_instance *) shost->hostdata; 3296 u32 size; 3297 unsigned long dmachunk = CRASH_DMA_BUF_SIZE; 3298 unsigned long chunk_left_bytes; 3299 unsigned long src_addr; 3300 u32 buff_offset; 3301 3302 mutex_lock(&instance->crashdump_lock); 3303 buff_offset = instance->fw_crash_buffer_offset; 3304 if (!instance->crash_dump_buf || 3305 !((instance->fw_crash_state == AVAILABLE) || 3306 (instance->fw_crash_state == COPYING))) { 3307 dev_err(&instance->pdev->dev, 3308 "Firmware crash dump is not available\n"); 3309 mutex_unlock(&instance->crashdump_lock); 3310 return -EINVAL; 3311 } 3312 3313 if (buff_offset > (instance->fw_crash_buffer_size * dmachunk)) { 3314 dev_err(&instance->pdev->dev, 3315 "Firmware crash dump offset is out of range\n"); 3316 mutex_unlock(&instance->crashdump_lock); 3317 return 0; 3318 } 3319 3320 size = (instance->fw_crash_buffer_size * dmachunk) - buff_offset; 3321 chunk_left_bytes = dmachunk - (buff_offset % dmachunk); 3322 size = (size > chunk_left_bytes) ? chunk_left_bytes : size; 3323 size = (size >= PAGE_SIZE) ? (PAGE_SIZE - 1) : size; 3324 3325 src_addr = (unsigned long)instance->crash_buf[buff_offset / dmachunk] + 3326 (buff_offset % dmachunk); 3327 memcpy(buf, (void *)src_addr, size); 3328 mutex_unlock(&instance->crashdump_lock); 3329 3330 return size; 3331 } 3332 3333 static ssize_t 3334 fw_crash_buffer_size_show(struct device *cdev, 3335 struct device_attribute *attr, char *buf) 3336 { 3337 struct Scsi_Host *shost = class_to_shost(cdev); 3338 struct megasas_instance *instance = 3339 (struct megasas_instance *) shost->hostdata; 3340 3341 return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long) 3342 ((instance->fw_crash_buffer_size) * 1024 * 1024)/PAGE_SIZE); 3343 } 3344 3345 static ssize_t 3346 fw_crash_state_store(struct device *cdev, 3347 struct device_attribute *attr, const char *buf, size_t count) 3348 { 3349 struct Scsi_Host *shost = class_to_shost(cdev); 3350 struct megasas_instance *instance = 3351 (struct megasas_instance *) shost->hostdata; 3352 int val = 0; 3353 3354 if (kstrtoint(buf, 0, &val) != 0) 3355 return -EINVAL; 3356 3357 if ((val <= AVAILABLE || val > COPY_ERROR)) { 3358 dev_err(&instance->pdev->dev, "application updates invalid " 3359 "firmware crash state\n"); 3360 return -EINVAL; 3361 } 3362 3363 instance->fw_crash_state = val; 3364 3365 if ((val == COPIED) || (val == COPY_ERROR)) { 3366 mutex_lock(&instance->crashdump_lock); 3367 megasas_free_host_crash_buffer(instance); 3368 mutex_unlock(&instance->crashdump_lock); 3369 if (val == COPY_ERROR) 3370 dev_info(&instance->pdev->dev, "application failed to " 3371 "copy Firmware crash dump\n"); 3372 else 3373 dev_info(&instance->pdev->dev, "Firmware crash dump " 3374 "copied successfully\n"); 3375 } 3376 return strlen(buf); 3377 } 3378 3379 static ssize_t 3380 fw_crash_state_show(struct device *cdev, 3381 struct device_attribute *attr, char *buf) 3382 { 3383 struct Scsi_Host *shost = class_to_shost(cdev); 3384 struct megasas_instance *instance = 3385 (struct megasas_instance *) shost->hostdata; 3386 3387 return snprintf(buf, PAGE_SIZE, "%d\n", instance->fw_crash_state); 3388 } 3389 3390 static ssize_t 3391 page_size_show(struct device *cdev, 3392 struct device_attribute *attr, char *buf) 3393 { 3394 return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long)PAGE_SIZE - 1); 3395 } 3396 3397 static ssize_t 3398 ldio_outstanding_show(struct device *cdev, struct device_attribute *attr, 3399 char *buf) 3400 { 3401 struct Scsi_Host *shost = class_to_shost(cdev); 
3402 struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata; 3403 3404 return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&instance->ldio_outstanding)); 3405 } 3406 3407 static ssize_t 3408 fw_cmds_outstanding_show(struct device *cdev, 3409 struct device_attribute *attr, char *buf) 3410 { 3411 struct Scsi_Host *shost = class_to_shost(cdev); 3412 struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata; 3413 3414 return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&instance->fw_outstanding)); 3415 } 3416 3417 static ssize_t 3418 enable_sdev_max_qd_show(struct device *cdev, 3419 struct device_attribute *attr, char *buf) 3420 { 3421 struct Scsi_Host *shost = class_to_shost(cdev); 3422 struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata; 3423 3424 return snprintf(buf, PAGE_SIZE, "%d\n", instance->enable_sdev_max_qd); 3425 } 3426 3427 static ssize_t 3428 enable_sdev_max_qd_store(struct device *cdev, 3429 struct device_attribute *attr, const char *buf, size_t count) 3430 { 3431 struct Scsi_Host *shost = class_to_shost(cdev); 3432 struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata; 3433 u32 val = 0; 3434 bool is_target_prop; 3435 int ret_target_prop = DCMD_FAILED; 3436 struct scsi_device *sdev; 3437 3438 if (kstrtou32(buf, 0, &val) != 0) { 3439 pr_err("megasas: could not set enable_sdev_max_qd\n"); 3440 return -EINVAL; 3441 } 3442 3443 mutex_lock(&instance->reset_mutex); 3444 if (val) 3445 instance->enable_sdev_max_qd = true; 3446 else 3447 instance->enable_sdev_max_qd = false; 3448 3449 shost_for_each_device(sdev, shost) { 3450 ret_target_prop = megasas_get_target_prop(instance, sdev); 3451 is_target_prop = (ret_target_prop == DCMD_SUCCESS) ? true : false; 3452 megasas_set_fw_assisted_qd(sdev, is_target_prop); 3453 } 3454 mutex_unlock(&instance->reset_mutex); 3455 3456 return strlen(buf); 3457 } 3458 3459 static ssize_t 3460 dump_system_regs_show(struct device *cdev, 3461 struct device_attribute *attr, char *buf) 3462 { 3463 struct Scsi_Host *shost = class_to_shost(cdev); 3464 struct megasas_instance *instance = 3465 (struct megasas_instance *)shost->hostdata; 3466 3467 return megasas_dump_sys_regs(instance->reg_set, buf); 3468 } 3469 3470 static ssize_t 3471 raid_map_id_show(struct device *cdev, struct device_attribute *attr, 3472 char *buf) 3473 { 3474 struct Scsi_Host *shost = class_to_shost(cdev); 3475 struct megasas_instance *instance = 3476 (struct megasas_instance *)shost->hostdata; 3477 3478 return snprintf(buf, PAGE_SIZE, "%ld\n", 3479 (unsigned long)instance->map_id); 3480 } 3481 3482 static DEVICE_ATTR_RW(fw_crash_buffer); 3483 static DEVICE_ATTR_RO(fw_crash_buffer_size); 3484 static DEVICE_ATTR_RW(fw_crash_state); 3485 static DEVICE_ATTR_RO(page_size); 3486 static DEVICE_ATTR_RO(ldio_outstanding); 3487 static DEVICE_ATTR_RO(fw_cmds_outstanding); 3488 static DEVICE_ATTR_RW(enable_sdev_max_qd); 3489 static DEVICE_ATTR_RO(dump_system_regs); 3490 static DEVICE_ATTR_RO(raid_map_id); 3491 3492 static struct attribute *megaraid_host_attrs[] = { 3493 &dev_attr_fw_crash_buffer_size.attr, 3494 &dev_attr_fw_crash_buffer.attr, 3495 &dev_attr_fw_crash_state.attr, 3496 &dev_attr_page_size.attr, 3497 &dev_attr_ldio_outstanding.attr, 3498 &dev_attr_fw_cmds_outstanding.attr, 3499 &dev_attr_enable_sdev_max_qd.attr, 3500 &dev_attr_dump_system_regs.attr, 3501 &dev_attr_raid_map_id.attr, 3502 NULL, 3503 }; 3504 3505 ATTRIBUTE_GROUPS(megaraid_host); 3506 3507 /* 3508 * Scsi host template for megaraid_sas 
driver 3509 */ 3510 static const struct scsi_host_template megasas_template = { 3511 3512 .module = THIS_MODULE, 3513 .name = "Avago SAS based MegaRAID driver", 3514 .proc_name = "megaraid_sas", 3515 .device_configure = megasas_device_configure, 3516 .slave_alloc = megasas_slave_alloc, 3517 .slave_destroy = megasas_slave_destroy, 3518 .queuecommand = megasas_queue_command, 3519 .eh_target_reset_handler = megasas_reset_target, 3520 .eh_abort_handler = megasas_task_abort, 3521 .eh_host_reset_handler = megasas_reset_bus_host, 3522 .eh_timed_out = megasas_reset_timer, 3523 .shost_groups = megaraid_host_groups, 3524 .bios_param = megasas_bios_param, 3525 .map_queues = megasas_map_queues, 3526 .mq_poll = megasas_blk_mq_poll, 3527 .change_queue_depth = scsi_change_queue_depth, 3528 .max_segment_size = 0xffffffff, 3529 .cmd_size = sizeof(struct megasas_cmd_priv), 3530 }; 3531 3532 /** 3533 * megasas_complete_int_cmd - Completes an internal command 3534 * @instance: Adapter soft state 3535 * @cmd: Command to be completed 3536 * 3537 * The megasas_issue_blocked_cmd() function waits for a command to complete 3538 * after it issues a command. This function wakes up that waiting routine by 3539 * calling wake_up() on the wait queue. 3540 */ 3541 static void 3542 megasas_complete_int_cmd(struct megasas_instance *instance, 3543 struct megasas_cmd *cmd) 3544 { 3545 if (cmd->cmd_status_drv == DCMD_INIT) 3546 cmd->cmd_status_drv = 3547 (cmd->frame->io.cmd_status == MFI_STAT_OK) ? 3548 DCMD_SUCCESS : DCMD_FAILED; 3549 3550 wake_up(&instance->int_cmd_wait_q); 3551 } 3552 3553 /** 3554 * megasas_complete_abort - Completes aborting a command 3555 * @instance: Adapter soft state 3556 * @cmd: Cmd that was issued to abort another cmd 3557 * 3558 * The megasas_issue_blocked_abort_cmd() function waits on abort_cmd_wait_q 3559 * after it issues an abort on a previously issued command. This function 3560 * wakes up all functions waiting on the same wait queue. 3561 */ 3562 static void 3563 megasas_complete_abort(struct megasas_instance *instance, 3564 struct megasas_cmd *cmd) 3565 { 3566 if (cmd->sync_cmd) { 3567 cmd->sync_cmd = 0; 3568 cmd->cmd_status_drv = DCMD_SUCCESS; 3569 wake_up(&instance->abort_cmd_wait_q); 3570 } 3571 } 3572 3573 static void 3574 megasas_set_ld_removed_by_fw(struct megasas_instance *instance) 3575 { 3576 uint i; 3577 3578 for (i = 0; (i < MEGASAS_MAX_LD_IDS); i++) { 3579 if (instance->ld_ids_prev[i] != 0xff && 3580 instance->ld_ids_from_raidmap[i] == 0xff) { 3581 if (megasas_dbg_lvl & LD_PD_DEBUG) 3582 dev_info(&instance->pdev->dev, 3583 "LD target ID %d removed from RAID map\n", i); 3584 instance->ld_tgtid_status[i] = LD_TARGET_ID_DELETED; 3585 } 3586 } 3587 } 3588 3589 /** 3590 * megasas_complete_cmd - Completes a command 3591 * @instance: Adapter soft state 3592 * @cmd: Command to be completed 3593 * @alt_status: If non-zero, use this value as status to 3594 * SCSI mid-layer instead of the value returned 3595 * by the FW. 
This should be used if caller wants 3596 * an alternate status (as in the case of aborted 3597 * commands) 3598 */ 3599 void 3600 megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd, 3601 u8 alt_status) 3602 { 3603 int exception = 0; 3604 struct megasas_header *hdr = &cmd->frame->hdr; 3605 unsigned long flags; 3606 struct fusion_context *fusion = instance->ctrl_context; 3607 u32 opcode, status; 3608 3609 /* flag for the retry reset */ 3610 cmd->retry_for_fw_reset = 0; 3611 3612 if (cmd->scmd) 3613 megasas_priv(cmd->scmd)->cmd_priv = NULL; 3614 3615 switch (hdr->cmd) { 3616 case MFI_CMD_INVALID: 3617 /* Some older 1068 controller FW may keep a pended 3618 MR_DCMD_CTRL_EVENT_GET_INFO left over from the main kernel 3619 when booting the kdump kernel. Ignore this command to 3620 prevent a kernel panic on shutdown of the kdump kernel. */ 3621 dev_warn(&instance->pdev->dev, "MFI_CMD_INVALID command " 3622 "completed\n"); 3623 dev_warn(&instance->pdev->dev, "If you have a controller " 3624 "other than PERC5, please upgrade your firmware\n"); 3625 break; 3626 case MFI_CMD_PD_SCSI_IO: 3627 case MFI_CMD_LD_SCSI_IO: 3628 3629 /* 3630 * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been 3631 * issued either through an IO path or an IOCTL path. If it 3632 * was via IOCTL, we will send it to internal completion. 3633 */ 3634 if (cmd->sync_cmd) { 3635 cmd->sync_cmd = 0; 3636 megasas_complete_int_cmd(instance, cmd); 3637 break; 3638 } 3639 fallthrough; 3640 3641 case MFI_CMD_LD_READ: 3642 case MFI_CMD_LD_WRITE: 3643 3644 if (alt_status) { 3645 cmd->scmd->result = alt_status << 16; 3646 exception = 1; 3647 } 3648 3649 if (exception) { 3650 3651 atomic_dec(&instance->fw_outstanding); 3652 3653 scsi_dma_unmap(cmd->scmd); 3654 scsi_done(cmd->scmd); 3655 megasas_return_cmd(instance, cmd); 3656 3657 break; 3658 } 3659 3660 switch (hdr->cmd_status) { 3661 3662 case MFI_STAT_OK: 3663 cmd->scmd->result = DID_OK << 16; 3664 break; 3665 3666 case MFI_STAT_SCSI_IO_FAILED: 3667 case MFI_STAT_LD_INIT_IN_PROGRESS: 3668 cmd->scmd->result = 3669 (DID_ERROR << 16) | hdr->scsi_status; 3670 break; 3671 3672 case MFI_STAT_SCSI_DONE_WITH_ERROR: 3673 3674 cmd->scmd->result = (DID_OK << 16) | hdr->scsi_status; 3675 3676 if (hdr->scsi_status == SAM_STAT_CHECK_CONDITION) { 3677 memset(cmd->scmd->sense_buffer, 0, 3678 SCSI_SENSE_BUFFERSIZE); 3679 memcpy(cmd->scmd->sense_buffer, cmd->sense, 3680 hdr->sense_len); 3681 } 3682 3683 break; 3684 3685 case MFI_STAT_LD_OFFLINE: 3686 case MFI_STAT_DEVICE_NOT_FOUND: 3687 cmd->scmd->result = DID_BAD_TARGET << 16; 3688 break; 3689 3690 default: 3691 dev_printk(KERN_DEBUG, &instance->pdev->dev, "MFI FW status %#x\n", 3692 hdr->cmd_status); 3693 cmd->scmd->result = DID_ERROR << 16; 3694 break; 3695 } 3696 3697 atomic_dec(&instance->fw_outstanding); 3698 3699 scsi_dma_unmap(cmd->scmd); 3700 scsi_done(cmd->scmd); 3701 megasas_return_cmd(instance, cmd); 3702 3703 break; 3704 3705 case MFI_CMD_SMP: 3706 case MFI_CMD_STP: 3707 case MFI_CMD_NVME: 3708 case MFI_CMD_TOOLBOX: 3709 megasas_complete_int_cmd(instance, cmd); 3710 break; 3711 3712 case MFI_CMD_DCMD: 3713 opcode = le32_to_cpu(cmd->frame->dcmd.opcode); 3714 /* Check for LD map update */ 3715 if ((opcode == MR_DCMD_LD_MAP_GET_INFO) 3716 && (cmd->frame->dcmd.mbox.b[1] == 1)) { 3717 fusion->fast_path_io = 0; 3718 spin_lock_irqsave(instance->host->host_lock, flags); 3719 status = cmd->frame->hdr.cmd_status; 3720 instance->map_update_cmd = NULL; 3721 if (status != MFI_STAT_OK) { 3722 if (status != 
MFI_STAT_NOT_FOUND) 3723 dev_warn(&instance->pdev->dev, "map syncfailed, status = 0x%x\n", 3724 cmd->frame->hdr.cmd_status); 3725 else { 3726 megasas_return_cmd(instance, cmd); 3727 spin_unlock_irqrestore( 3728 instance->host->host_lock, 3729 flags); 3730 break; 3731 } 3732 } 3733 3734 megasas_return_cmd(instance, cmd); 3735 3736 /* 3737 * Set fast path IO to ZERO. 3738 * Validate Map will set proper value. 3739 * Meanwhile all IOs will go as LD IO. 3740 */ 3741 if (status == MFI_STAT_OK && 3742 (MR_ValidateMapInfo(instance, (instance->map_id + 1)))) { 3743 instance->map_id++; 3744 fusion->fast_path_io = 1; 3745 } else { 3746 fusion->fast_path_io = 0; 3747 } 3748 3749 if (instance->adapter_type >= INVADER_SERIES) 3750 megasas_set_ld_removed_by_fw(instance); 3751 3752 megasas_sync_map_info(instance); 3753 spin_unlock_irqrestore(instance->host->host_lock, 3754 flags); 3755 3756 break; 3757 } 3758 if (opcode == MR_DCMD_CTRL_EVENT_GET_INFO || 3759 opcode == MR_DCMD_CTRL_EVENT_GET) { 3760 spin_lock_irqsave(&poll_aen_lock, flags); 3761 megasas_poll_wait_aen = 0; 3762 spin_unlock_irqrestore(&poll_aen_lock, flags); 3763 } 3764 3765 /* FW has an updated PD sequence */ 3766 if ((opcode == MR_DCMD_SYSTEM_PD_MAP_GET_INFO) && 3767 (cmd->frame->dcmd.mbox.b[0] == 1)) { 3768 3769 spin_lock_irqsave(instance->host->host_lock, flags); 3770 status = cmd->frame->hdr.cmd_status; 3771 instance->jbod_seq_cmd = NULL; 3772 megasas_return_cmd(instance, cmd); 3773 3774 if (status == MFI_STAT_OK) { 3775 instance->pd_seq_map_id++; 3776 /* Re-register a pd sync seq num cmd */ 3777 if (megasas_sync_pd_seq_num(instance, true)) 3778 instance->use_seqnum_jbod_fp = false; 3779 } else 3780 instance->use_seqnum_jbod_fp = false; 3781 3782 spin_unlock_irqrestore(instance->host->host_lock, flags); 3783 break; 3784 } 3785 3786 /* 3787 * See if got an event notification 3788 */ 3789 if (opcode == MR_DCMD_CTRL_EVENT_WAIT) 3790 megasas_service_aen(instance, cmd); 3791 else 3792 megasas_complete_int_cmd(instance, cmd); 3793 3794 break; 3795 3796 case MFI_CMD_ABORT: 3797 /* 3798 * Cmd issued to abort another cmd returned 3799 */ 3800 megasas_complete_abort(instance, cmd); 3801 break; 3802 3803 default: 3804 dev_info(&instance->pdev->dev, "Unknown command completed! [0x%X]\n", 3805 hdr->cmd); 3806 megasas_complete_int_cmd(instance, cmd); 3807 break; 3808 } 3809 } 3810 3811 /** 3812 * megasas_issue_pending_cmds_again - issue all pending cmds 3813 * in FW again because of the fw reset 3814 * @instance: Adapter soft state 3815 */ 3816 static inline void 3817 megasas_issue_pending_cmds_again(struct megasas_instance *instance) 3818 { 3819 struct megasas_cmd *cmd; 3820 struct list_head clist_local; 3821 union megasas_evt_class_locale class_locale; 3822 unsigned long flags; 3823 u32 seq_num; 3824 3825 INIT_LIST_HEAD(&clist_local); 3826 spin_lock_irqsave(&instance->hba_lock, flags); 3827 list_splice_init(&instance->internal_reset_pending_q, &clist_local); 3828 spin_unlock_irqrestore(&instance->hba_lock, flags); 3829 3830 while (!list_empty(&clist_local)) { 3831 cmd = list_entry((&clist_local)->next, 3832 struct megasas_cmd, list); 3833 list_del_init(&cmd->list); 3834 3835 if (cmd->sync_cmd || cmd->scmd) { 3836 dev_notice(&instance->pdev->dev, "command %p, %p:%d" 3837 "detected to be pending while HBA reset\n", 3838 cmd, cmd->scmd, cmd->sync_cmd); 3839 3840 cmd->retry_for_fw_reset++; 3841 3842 if (cmd->retry_for_fw_reset == 3) { 3843 dev_notice(&instance->pdev->dev, "cmd %p, %p:%d" 3844 "was tried multiple times during reset." 
3845 "Shutting down the HBA\n", 3846 cmd, cmd->scmd, cmd->sync_cmd); 3847 instance->instancet->disable_intr(instance); 3848 atomic_set(&instance->fw_reset_no_pci_access, 1); 3849 megaraid_sas_kill_hba(instance); 3850 return; 3851 } 3852 } 3853 3854 if (cmd->sync_cmd == 1) { 3855 if (cmd->scmd) { 3856 dev_notice(&instance->pdev->dev, "unexpected" 3857 "cmd attached to internal command!\n"); 3858 } 3859 dev_notice(&instance->pdev->dev, "%p synchronous cmd" 3860 "on the internal reset queue," 3861 "issue it again.\n", cmd); 3862 cmd->cmd_status_drv = DCMD_INIT; 3863 instance->instancet->fire_cmd(instance, 3864 cmd->frame_phys_addr, 3865 0, instance->reg_set); 3866 } else if (cmd->scmd) { 3867 dev_notice(&instance->pdev->dev, "%p scsi cmd [%02x]" 3868 "detected on the internal queue, issue again.\n", 3869 cmd, cmd->scmd->cmnd[0]); 3870 3871 atomic_inc(&instance->fw_outstanding); 3872 instance->instancet->fire_cmd(instance, 3873 cmd->frame_phys_addr, 3874 cmd->frame_count-1, instance->reg_set); 3875 } else { 3876 dev_notice(&instance->pdev->dev, "%p unexpected cmd on the" 3877 "internal reset defer list while re-issue!!\n", 3878 cmd); 3879 } 3880 } 3881 3882 if (instance->aen_cmd) { 3883 dev_notice(&instance->pdev->dev, "aen_cmd in def process\n"); 3884 megasas_return_cmd(instance, instance->aen_cmd); 3885 3886 instance->aen_cmd = NULL; 3887 } 3888 3889 /* 3890 * Initiate AEN (Asynchronous Event Notification) 3891 */ 3892 seq_num = instance->last_seq_num; 3893 class_locale.members.reserved = 0; 3894 class_locale.members.locale = MR_EVT_LOCALE_ALL; 3895 class_locale.members.class = MR_EVT_CLASS_DEBUG; 3896 3897 megasas_register_aen(instance, seq_num, class_locale.word); 3898 } 3899 3900 /* 3901 * Move the internal reset pending commands to a deferred queue. 3902 * 3903 * We move the commands pending at internal reset time to a 3904 * pending queue. This queue would be flushed after successful 3905 * completion of the internal reset sequence. if the internal reset 3906 * did not complete in time, the kernel reset handler would flush 3907 * these commands. 
3908 */ 3909 static void 3910 megasas_internal_reset_defer_cmds(struct megasas_instance *instance) 3911 { 3912 struct megasas_cmd *cmd; 3913 int i; 3914 u16 max_cmd = instance->max_fw_cmds; 3915 u32 defer_index; 3916 unsigned long flags; 3917 3918 defer_index = 0; 3919 spin_lock_irqsave(&instance->mfi_pool_lock, flags); 3920 for (i = 0; i < max_cmd; i++) { 3921 cmd = instance->cmd_list[i]; 3922 if (cmd->sync_cmd == 1 || cmd->scmd) { 3923 dev_notice(&instance->pdev->dev, "moving cmd[%d]:%p:%d:%p" 3924 "on the defer queue as internal\n", 3925 defer_index, cmd, cmd->sync_cmd, cmd->scmd); 3926 3927 if (!list_empty(&cmd->list)) { 3928 dev_notice(&instance->pdev->dev, "ERROR while" 3929 " moving this cmd:%p, %d %p, it was" 3930 "discovered on some list?\n", 3931 cmd, cmd->sync_cmd, cmd->scmd); 3932 3933 list_del_init(&cmd->list); 3934 } 3935 defer_index++; 3936 list_add_tail(&cmd->list, 3937 &instance->internal_reset_pending_q); 3938 } 3939 } 3940 spin_unlock_irqrestore(&instance->mfi_pool_lock, flags); 3941 } 3942 3943 3944 static void 3945 process_fw_state_change_wq(struct work_struct *work) 3946 { 3947 struct megasas_instance *instance = 3948 container_of(work, struct megasas_instance, work_init); 3949 u32 wait; 3950 unsigned long flags; 3951 3952 if (atomic_read(&instance->adprecovery) != MEGASAS_ADPRESET_SM_INFAULT) { 3953 dev_notice(&instance->pdev->dev, "error, recovery st %x\n", 3954 atomic_read(&instance->adprecovery)); 3955 return ; 3956 } 3957 3958 if (atomic_read(&instance->adprecovery) == MEGASAS_ADPRESET_SM_INFAULT) { 3959 dev_notice(&instance->pdev->dev, "FW detected to be in fault" 3960 "state, restarting it...\n"); 3961 3962 instance->instancet->disable_intr(instance); 3963 atomic_set(&instance->fw_outstanding, 0); 3964 3965 atomic_set(&instance->fw_reset_no_pci_access, 1); 3966 instance->instancet->adp_reset(instance, instance->reg_set); 3967 atomic_set(&instance->fw_reset_no_pci_access, 0); 3968 3969 dev_notice(&instance->pdev->dev, "FW restarted successfully," 3970 "initiating next stage...\n"); 3971 3972 dev_notice(&instance->pdev->dev, "HBA recovery state machine," 3973 "state 2 starting...\n"); 3974 3975 /* waiting for about 20 second before start the second init */ 3976 for (wait = 0; wait < 30; wait++) { 3977 msleep(1000); 3978 } 3979 3980 if (megasas_transition_to_ready(instance, 1)) { 3981 dev_notice(&instance->pdev->dev, "adapter not ready\n"); 3982 3983 atomic_set(&instance->fw_reset_no_pci_access, 1); 3984 megaraid_sas_kill_hba(instance); 3985 return ; 3986 } 3987 3988 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) || 3989 (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) || 3990 (instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR) 3991 ) { 3992 *instance->consumer = *instance->producer; 3993 } else { 3994 *instance->consumer = 0; 3995 *instance->producer = 0; 3996 } 3997 3998 megasas_issue_init_mfi(instance); 3999 4000 spin_lock_irqsave(&instance->hba_lock, flags); 4001 atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL); 4002 spin_unlock_irqrestore(&instance->hba_lock, flags); 4003 instance->instancet->enable_intr(instance); 4004 4005 megasas_issue_pending_cmds_again(instance); 4006 instance->issuepend_done = 1; 4007 } 4008 } 4009 4010 /** 4011 * megasas_deplete_reply_queue - Processes all completed commands 4012 * @instance: Adapter soft state 4013 * @alt_status: Alternate status to be returned to 4014 * SCSI mid-layer instead of the status 4015 * returned by the FW 4016 * Note: this must be called with hba lock held 4017 */ 4018 static 
int 4019 megasas_deplete_reply_queue(struct megasas_instance *instance, 4020 u8 alt_status) 4021 { 4022 u32 mfiStatus; 4023 u32 fw_state; 4024 4025 if (instance->instancet->check_reset(instance, instance->reg_set) == 1) 4026 return IRQ_HANDLED; 4027 4028 mfiStatus = instance->instancet->clear_intr(instance); 4029 if (mfiStatus == 0) { 4030 /* Hardware may not set outbound_intr_status in MSI-X mode */ 4031 if (!instance->msix_vectors) 4032 return IRQ_NONE; 4033 } 4034 4035 instance->mfiStatus = mfiStatus; 4036 4037 if ((mfiStatus & MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE)) { 4038 fw_state = instance->instancet->read_fw_status_reg( 4039 instance) & MFI_STATE_MASK; 4040 4041 if (fw_state != MFI_STATE_FAULT) { 4042 dev_notice(&instance->pdev->dev, "fw state:%x\n", 4043 fw_state); 4044 } 4045 4046 if ((fw_state == MFI_STATE_FAULT) && 4047 (instance->disableOnlineCtrlReset == 0)) { 4048 dev_notice(&instance->pdev->dev, "wait adp restart\n"); 4049 4050 if ((instance->pdev->device == 4051 PCI_DEVICE_ID_LSI_SAS1064R) || 4052 (instance->pdev->device == 4053 PCI_DEVICE_ID_DELL_PERC5) || 4054 (instance->pdev->device == 4055 PCI_DEVICE_ID_LSI_VERDE_ZCR)) { 4056 4057 *instance->consumer = 4058 cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN); 4059 } 4060 4061 4062 instance->instancet->disable_intr(instance); 4063 atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT); 4064 instance->issuepend_done = 0; 4065 4066 atomic_set(&instance->fw_outstanding, 0); 4067 megasas_internal_reset_defer_cmds(instance); 4068 4069 dev_notice(&instance->pdev->dev, "fwState=%x, stage:%d\n", 4070 fw_state, atomic_read(&instance->adprecovery)); 4071 4072 schedule_work(&instance->work_init); 4073 return IRQ_HANDLED; 4074 4075 } else { 4076 dev_notice(&instance->pdev->dev, "fwstate:%x, dis_OCR=%x\n", 4077 fw_state, instance->disableOnlineCtrlReset); 4078 } 4079 } 4080 4081 tasklet_schedule(&instance->isr_tasklet); 4082 return IRQ_HANDLED; 4083 } 4084 4085 /** 4086 * megasas_isr - isr entry point 4087 * @irq: IRQ number 4088 * @devp: IRQ context address 4089 */ 4090 static irqreturn_t megasas_isr(int irq, void *devp) 4091 { 4092 struct megasas_irq_context *irq_context = devp; 4093 struct megasas_instance *instance = irq_context->instance; 4094 unsigned long flags; 4095 irqreturn_t rc; 4096 4097 if (atomic_read(&instance->fw_reset_no_pci_access)) 4098 return IRQ_HANDLED; 4099 4100 spin_lock_irqsave(&instance->hba_lock, flags); 4101 rc = megasas_deplete_reply_queue(instance, DID_OK); 4102 spin_unlock_irqrestore(&instance->hba_lock, flags); 4103 4104 return rc; 4105 } 4106 4107 /** 4108 * megasas_transition_to_ready - Move the FW to READY state 4109 * @instance: Adapter soft state 4110 * @ocr: Adapter reset state 4111 * 4112 * During the initialization, FW passes can potentially be in any one of 4113 * several possible states. If the FW in operational, waiting-for-handshake 4114 * states, driver must take steps to bring it to ready state. Otherwise, it 4115 * has to wait for the ready state. 
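 *
 * The wait loop below amounts to the following (sketch only):
 *
 *	while (fw_state != MFI_STATE_READY) {
 *		write the doorbell/handshake value required by fw_state;
 *		poll read_fw_status_reg() for up to max_wait seconds;
 *		if the state did not change, dump the register set and
 *		return -ENODEV;
 *	}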
4116 */ 4117 int 4118 megasas_transition_to_ready(struct megasas_instance *instance, int ocr) 4119 { 4120 int i; 4121 u8 max_wait; 4122 u32 fw_state; 4123 u32 abs_state, curr_abs_state; 4124 4125 abs_state = instance->instancet->read_fw_status_reg(instance); 4126 fw_state = abs_state & MFI_STATE_MASK; 4127 4128 if (fw_state != MFI_STATE_READY) 4129 dev_info(&instance->pdev->dev, "Waiting for FW to come to ready" 4130 " state\n"); 4131 4132 while (fw_state != MFI_STATE_READY) { 4133 4134 switch (fw_state) { 4135 4136 case MFI_STATE_FAULT: 4137 dev_printk(KERN_ERR, &instance->pdev->dev, 4138 "FW in FAULT state, Fault code:0x%x subcode:0x%x func:%s\n", 4139 abs_state & MFI_STATE_FAULT_CODE, 4140 abs_state & MFI_STATE_FAULT_SUBCODE, __func__); 4141 if (ocr) { 4142 max_wait = MEGASAS_RESET_WAIT_TIME; 4143 break; 4144 } else { 4145 dev_printk(KERN_DEBUG, &instance->pdev->dev, "System Register set:\n"); 4146 megasas_dump_reg_set(instance->reg_set); 4147 return -ENODEV; 4148 } 4149 4150 case MFI_STATE_WAIT_HANDSHAKE: 4151 /* 4152 * Set the CLR bit in inbound doorbell 4153 */ 4154 if ((instance->pdev->device == 4155 PCI_DEVICE_ID_LSI_SAS0073SKINNY) || 4156 (instance->pdev->device == 4157 PCI_DEVICE_ID_LSI_SAS0071SKINNY) || 4158 (instance->adapter_type != MFI_SERIES)) 4159 writel( 4160 MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG, 4161 &instance->reg_set->doorbell); 4162 else 4163 writel( 4164 MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG, 4165 &instance->reg_set->inbound_doorbell); 4166 4167 max_wait = MEGASAS_RESET_WAIT_TIME; 4168 break; 4169 4170 case MFI_STATE_BOOT_MESSAGE_PENDING: 4171 if ((instance->pdev->device == 4172 PCI_DEVICE_ID_LSI_SAS0073SKINNY) || 4173 (instance->pdev->device == 4174 PCI_DEVICE_ID_LSI_SAS0071SKINNY) || 4175 (instance->adapter_type != MFI_SERIES)) 4176 writel(MFI_INIT_HOTPLUG, 4177 &instance->reg_set->doorbell); 4178 else 4179 writel(MFI_INIT_HOTPLUG, 4180 &instance->reg_set->inbound_doorbell); 4181 4182 max_wait = MEGASAS_RESET_WAIT_TIME; 4183 break; 4184 4185 case MFI_STATE_OPERATIONAL: 4186 /* 4187 * Bring it to READY state; assuming max wait 10 secs 4188 */ 4189 instance->instancet->disable_intr(instance); 4190 if ((instance->pdev->device == 4191 PCI_DEVICE_ID_LSI_SAS0073SKINNY) || 4192 (instance->pdev->device == 4193 PCI_DEVICE_ID_LSI_SAS0071SKINNY) || 4194 (instance->adapter_type != MFI_SERIES)) { 4195 writel(MFI_RESET_FLAGS, 4196 &instance->reg_set->doorbell); 4197 4198 if (instance->adapter_type != MFI_SERIES) { 4199 for (i = 0; i < (10 * 1000); i += 20) { 4200 if (megasas_readl( 4201 instance, 4202 &instance-> 4203 reg_set-> 4204 doorbell) & 1) 4205 msleep(20); 4206 else 4207 break; 4208 } 4209 } 4210 } else 4211 writel(MFI_RESET_FLAGS, 4212 &instance->reg_set->inbound_doorbell); 4213 4214 max_wait = MEGASAS_RESET_WAIT_TIME; 4215 break; 4216 4217 case MFI_STATE_UNDEFINED: 4218 /* 4219 * This state should not last for more than 2 seconds 4220 */ 4221 max_wait = MEGASAS_RESET_WAIT_TIME; 4222 break; 4223 4224 case MFI_STATE_BB_INIT: 4225 max_wait = MEGASAS_RESET_WAIT_TIME; 4226 break; 4227 4228 case MFI_STATE_FW_INIT: 4229 max_wait = MEGASAS_RESET_WAIT_TIME; 4230 break; 4231 4232 case MFI_STATE_FW_INIT_2: 4233 max_wait = MEGASAS_RESET_WAIT_TIME; 4234 break; 4235 4236 case MFI_STATE_DEVICE_SCAN: 4237 max_wait = MEGASAS_RESET_WAIT_TIME; 4238 break; 4239 4240 case MFI_STATE_FLUSH_CACHE: 4241 max_wait = MEGASAS_RESET_WAIT_TIME; 4242 break; 4243 4244 default: 4245 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Unknown state 0x%x\n", 4246 fw_state); 4247 dev_printk(KERN_DEBUG, 
&instance->pdev->dev, "System Register set:\n"); 4248 megasas_dump_reg_set(instance->reg_set); 4249 return -ENODEV; 4250 } 4251 4252 /* 4253 * The cur_state should not last for more than max_wait secs 4254 */ 4255 for (i = 0; i < max_wait * 50; i++) { 4256 curr_abs_state = instance->instancet-> 4257 read_fw_status_reg(instance); 4258 4259 if (abs_state == curr_abs_state) { 4260 msleep(20); 4261 } else 4262 break; 4263 } 4264 4265 /* 4266 * Return error if fw_state hasn't changed after max_wait 4267 */ 4268 if (curr_abs_state == abs_state) { 4269 dev_printk(KERN_DEBUG, &instance->pdev->dev, "FW state [%d] hasn't changed " 4270 "in %d secs\n", fw_state, max_wait); 4271 dev_printk(KERN_DEBUG, &instance->pdev->dev, "System Register set:\n"); 4272 megasas_dump_reg_set(instance->reg_set); 4273 return -ENODEV; 4274 } 4275 4276 abs_state = curr_abs_state; 4277 fw_state = curr_abs_state & MFI_STATE_MASK; 4278 } 4279 dev_info(&instance->pdev->dev, "FW now in Ready state\n"); 4280 4281 return 0; 4282 } 4283 4284 /** 4285 * megasas_teardown_frame_pool - Destroy the cmd frame DMA pool 4286 * @instance: Adapter soft state 4287 */ 4288 static void megasas_teardown_frame_pool(struct megasas_instance *instance) 4289 { 4290 int i; 4291 u16 max_cmd = instance->max_mfi_cmds; 4292 struct megasas_cmd *cmd; 4293 4294 if (!instance->frame_dma_pool) 4295 return; 4296 4297 /* 4298 * Return all frames to pool 4299 */ 4300 for (i = 0; i < max_cmd; i++) { 4301 4302 cmd = instance->cmd_list[i]; 4303 4304 if (cmd->frame) 4305 dma_pool_free(instance->frame_dma_pool, cmd->frame, 4306 cmd->frame_phys_addr); 4307 4308 if (cmd->sense) 4309 dma_pool_free(instance->sense_dma_pool, cmd->sense, 4310 cmd->sense_phys_addr); 4311 } 4312 4313 /* 4314 * Now destroy the pool itself 4315 */ 4316 dma_pool_destroy(instance->frame_dma_pool); 4317 dma_pool_destroy(instance->sense_dma_pool); 4318 4319 instance->frame_dma_pool = NULL; 4320 instance->sense_dma_pool = NULL; 4321 } 4322 4323 /** 4324 * megasas_create_frame_pool - Creates DMA pool for cmd frames 4325 * @instance: Adapter soft state 4326 * 4327 * Each command packet has an embedded DMA memory buffer that is used for 4328 * filling MFI frame and the SG list that immediately follows the frame. This 4329 * function creates those DMA memory buffers for each command packet by using 4330 * PCI pool facility. 4331 */ 4332 static int megasas_create_frame_pool(struct megasas_instance *instance) 4333 { 4334 int i; 4335 u16 max_cmd; 4336 u32 frame_count; 4337 struct megasas_cmd *cmd; 4338 4339 max_cmd = instance->max_mfi_cmds; 4340 4341 /* 4342 * For MFI controllers. 4343 * max_num_sge = 60 4344 * max_sge_sz = 16 byte (sizeof megasas_sge_skinny) 4345 * Total 960 byte (15 MFI frame of 64 byte) 4346 * 4347 * Fusion adapter require only 3 extra frame. 4348 * max_num_sge = 16 (defined as MAX_IOCTL_SGE) 4349 * max_sge_sz = 12 byte (sizeof megasas_sge64) 4350 * Total 192 byte (3 MFI frame of 64 byte) 4351 */ 4352 frame_count = (instance->adapter_type == MFI_SERIES) ? 
4353 (15 + 1) : (3 + 1); 4354 instance->mfi_frame_size = MEGAMFI_FRAME_SIZE * frame_count; 4355 /* 4356 * Use DMA pool facility provided by PCI layer 4357 */ 4358 instance->frame_dma_pool = dma_pool_create("megasas frame pool", 4359 &instance->pdev->dev, 4360 instance->mfi_frame_size, 256, 0); 4361 4362 if (!instance->frame_dma_pool) { 4363 dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup frame pool\n"); 4364 return -ENOMEM; 4365 } 4366 4367 instance->sense_dma_pool = dma_pool_create("megasas sense pool", 4368 &instance->pdev->dev, 128, 4369 4, 0); 4370 4371 if (!instance->sense_dma_pool) { 4372 dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup sense pool\n"); 4373 4374 dma_pool_destroy(instance->frame_dma_pool); 4375 instance->frame_dma_pool = NULL; 4376 4377 return -ENOMEM; 4378 } 4379 4380 /* 4381 * Allocate and attach a frame to each of the commands in cmd_list. 4382 * By making cmd->index as the context instead of the &cmd, we can 4383 * always use 32bit context regardless of the architecture 4384 */ 4385 for (i = 0; i < max_cmd; i++) { 4386 4387 cmd = instance->cmd_list[i]; 4388 4389 cmd->frame = dma_pool_zalloc(instance->frame_dma_pool, 4390 GFP_KERNEL, &cmd->frame_phys_addr); 4391 4392 cmd->sense = dma_pool_alloc(instance->sense_dma_pool, 4393 GFP_KERNEL, &cmd->sense_phys_addr); 4394 4395 /* 4396 * megasas_teardown_frame_pool() takes care of freeing 4397 * whatever has been allocated 4398 */ 4399 if (!cmd->frame || !cmd->sense) { 4400 dev_printk(KERN_DEBUG, &instance->pdev->dev, "dma_pool_alloc failed\n"); 4401 megasas_teardown_frame_pool(instance); 4402 return -ENOMEM; 4403 } 4404 4405 cmd->frame->io.context = cpu_to_le32(cmd->index); 4406 cmd->frame->io.pad_0 = 0; 4407 if ((instance->adapter_type == MFI_SERIES) && reset_devices) 4408 cmd->frame->hdr.cmd = MFI_CMD_INVALID; 4409 } 4410 4411 return 0; 4412 } 4413 4414 /** 4415 * megasas_free_cmds - Free all the cmds in the free cmd pool 4416 * @instance: Adapter soft state 4417 */ 4418 void megasas_free_cmds(struct megasas_instance *instance) 4419 { 4420 int i; 4421 4422 /* First free the MFI frame pool */ 4423 megasas_teardown_frame_pool(instance); 4424 4425 /* Free all the commands in the cmd_list */ 4426 for (i = 0; i < instance->max_mfi_cmds; i++) 4427 4428 kfree(instance->cmd_list[i]); 4429 4430 /* Free the cmd_list buffer itself */ 4431 kfree(instance->cmd_list); 4432 instance->cmd_list = NULL; 4433 4434 INIT_LIST_HEAD(&instance->cmd_pool); 4435 } 4436 4437 /** 4438 * megasas_alloc_cmds - Allocates the command packets 4439 * @instance: Adapter soft state 4440 * 4441 * Each command that is issued to the FW, whether IO commands from the OS or 4442 * internal commands like IOCTLs, are wrapped in local data structure called 4443 * megasas_cmd. The frame embedded in this megasas_cmd is actually issued to 4444 * the FW. 4445 * 4446 * Each frame has a 32-bit field called context (tag). This context is used 4447 * to get back the megasas_cmd from the frame when a frame gets completed in 4448 * the ISR. Typically the address of the megasas_cmd itself would be used as 4449 * the context. But we wanted to keep the differences between 32 and 64 bit 4450 * systems to the mininum. We always use 32 bit integers for the context. In 4451 * this driver, the 32 bit values are the indices into an array cmd_list. 4452 * This array is used only to look up the megasas_cmd given the context. The 4453 * free commands themselves are maintained in a linked list called cmd_pool. 
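 *
 * In the completion path the lookup is therefore a plain array index,
 * roughly (sketch, bounds checking omitted):
 *
 *	cmd = instance->cmd_list[context];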
4454 */ 4455 int megasas_alloc_cmds(struct megasas_instance *instance) 4456 { 4457 int i; 4458 int j; 4459 u16 max_cmd; 4460 struct megasas_cmd *cmd; 4461 4462 max_cmd = instance->max_mfi_cmds; 4463 4464 /* 4465 * instance->cmd_list is an array of struct megasas_cmd pointers. 4466 * Allocate the dynamic array first and then allocate individual 4467 * commands. 4468 */ 4469 instance->cmd_list = kcalloc(max_cmd, sizeof(struct megasas_cmd*), GFP_KERNEL); 4470 4471 if (!instance->cmd_list) { 4472 dev_printk(KERN_DEBUG, &instance->pdev->dev, "out of memory\n"); 4473 return -ENOMEM; 4474 } 4475 4476 for (i = 0; i < max_cmd; i++) { 4477 instance->cmd_list[i] = kmalloc(sizeof(struct megasas_cmd), 4478 GFP_KERNEL); 4479 4480 if (!instance->cmd_list[i]) { 4481 4482 for (j = 0; j < i; j++) 4483 kfree(instance->cmd_list[j]); 4484 4485 kfree(instance->cmd_list); 4486 instance->cmd_list = NULL; 4487 4488 return -ENOMEM; 4489 } 4490 } 4491 4492 for (i = 0; i < max_cmd; i++) { 4493 cmd = instance->cmd_list[i]; 4494 memset(cmd, 0, sizeof(struct megasas_cmd)); 4495 cmd->index = i; 4496 cmd->scmd = NULL; 4497 cmd->instance = instance; 4498 4499 list_add_tail(&cmd->list, &instance->cmd_pool); 4500 } 4501 4502 /* 4503 * Create a frame pool and assign one frame to each cmd 4504 */ 4505 if (megasas_create_frame_pool(instance)) { 4506 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error creating frame DMA pool\n"); 4507 megasas_free_cmds(instance); 4508 return -ENOMEM; 4509 } 4510 4511 return 0; 4512 } 4513 4514 /* 4515 * dcmd_timeout_ocr_possible - Check if OCR is possible based on Driver/FW state. 4516 * @instance: Adapter soft state 4517 * 4518 * Return 0 for only Fusion adapter, if driver load/unload is not in progress 4519 * or FW is not under OCR. 4520 */ 4521 inline int 4522 dcmd_timeout_ocr_possible(struct megasas_instance *instance) { 4523 4524 if (instance->adapter_type == MFI_SERIES) 4525 return KILL_ADAPTER; 4526 else if (instance->unload || 4527 test_bit(MEGASAS_FUSION_OCR_NOT_POSSIBLE, 4528 &instance->reset_flags)) 4529 return IGNORE_TIMEOUT; 4530 else 4531 return INITIATE_OCR; 4532 } 4533 4534 static void 4535 megasas_get_pd_info(struct megasas_instance *instance, struct scsi_device *sdev) 4536 { 4537 int ret; 4538 struct megasas_cmd *cmd; 4539 struct megasas_dcmd_frame *dcmd; 4540 4541 struct MR_PRIV_DEVICE *mr_device_priv_data; 4542 u16 device_id = 0; 4543 4544 device_id = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id; 4545 cmd = megasas_get_cmd(instance); 4546 4547 if (!cmd) { 4548 dev_err(&instance->pdev->dev, "Failed to get cmd %s\n", __func__); 4549 return; 4550 } 4551 4552 dcmd = &cmd->frame->dcmd; 4553 4554 memset(instance->pd_info, 0, sizeof(*instance->pd_info)); 4555 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 4556 4557 dcmd->mbox.s[0] = cpu_to_le16(device_id); 4558 dcmd->cmd = MFI_CMD_DCMD; 4559 dcmd->cmd_status = 0xFF; 4560 dcmd->sge_count = 1; 4561 dcmd->flags = MFI_FRAME_DIR_READ; 4562 dcmd->timeout = 0; 4563 dcmd->pad_0 = 0; 4564 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_PD_INFO)); 4565 dcmd->opcode = cpu_to_le32(MR_DCMD_PD_GET_INFO); 4566 4567 megasas_set_dma_settings(instance, dcmd, instance->pd_info_h, 4568 sizeof(struct MR_PD_INFO)); 4569 4570 if ((instance->adapter_type != MFI_SERIES) && 4571 !instance->mask_interrupts) 4572 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS); 4573 else 4574 ret = megasas_issue_polled(instance, cmd); 4575 4576 switch (ret) { 4577 case DCMD_SUCCESS: 4578 mr_device_priv_data = sdev->hostdata; 4579 le16_to_cpus((u16 
*)&instance->pd_info->state.ddf.pdType); 4580 mr_device_priv_data->interface_type = 4581 instance->pd_info->state.ddf.pdType.intf; 4582 break; 4583 4584 case DCMD_TIMEOUT: 4585 4586 switch (dcmd_timeout_ocr_possible(instance)) { 4587 case INITIATE_OCR: 4588 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 4589 mutex_unlock(&instance->reset_mutex); 4590 megasas_reset_fusion(instance->host, 4591 MFI_IO_TIMEOUT_OCR); 4592 mutex_lock(&instance->reset_mutex); 4593 break; 4594 case KILL_ADAPTER: 4595 megaraid_sas_kill_hba(instance); 4596 break; 4597 case IGNORE_TIMEOUT: 4598 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n", 4599 __func__, __LINE__); 4600 break; 4601 } 4602 4603 break; 4604 } 4605 4606 if (ret != DCMD_TIMEOUT) 4607 megasas_return_cmd(instance, cmd); 4608 4609 return; 4610 } 4611 /* 4612 * megasas_get_pd_list_info - Returns FW's pd_list structure 4613 * @instance: Adapter soft state 4614 * @pd_list: pd_list structure 4615 * 4616 * Issues an internal command (DCMD) to get the FW's controller PD 4617 * list structure. This information is mainly used to find out SYSTEM 4618 * supported by the FW. 4619 */ 4620 static int 4621 megasas_get_pd_list(struct megasas_instance *instance) 4622 { 4623 int ret = 0, pd_index = 0; 4624 struct megasas_cmd *cmd; 4625 struct megasas_dcmd_frame *dcmd; 4626 struct MR_PD_LIST *ci; 4627 struct MR_PD_ADDRESS *pd_addr; 4628 4629 if (instance->pd_list_not_supported) { 4630 dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY " 4631 "not supported by firmware\n"); 4632 return ret; 4633 } 4634 4635 ci = instance->pd_list_buf; 4636 4637 cmd = megasas_get_cmd(instance); 4638 4639 if (!cmd) { 4640 dev_printk(KERN_DEBUG, &instance->pdev->dev, "(get_pd_list): Failed to get cmd\n"); 4641 return -ENOMEM; 4642 } 4643 4644 dcmd = &cmd->frame->dcmd; 4645 4646 memset(ci, 0, sizeof(*ci)); 4647 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 4648 4649 dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST; 4650 dcmd->mbox.b[1] = 0; 4651 dcmd->cmd = MFI_CMD_DCMD; 4652 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 4653 dcmd->sge_count = 1; 4654 dcmd->flags = MFI_FRAME_DIR_READ; 4655 dcmd->timeout = 0; 4656 dcmd->pad_0 = 0; 4657 dcmd->data_xfer_len = cpu_to_le32(MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST)); 4658 dcmd->opcode = cpu_to_le32(MR_DCMD_PD_LIST_QUERY); 4659 4660 megasas_set_dma_settings(instance, dcmd, instance->pd_list_buf_h, 4661 (MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST))); 4662 4663 if ((instance->adapter_type != MFI_SERIES) && 4664 !instance->mask_interrupts) 4665 ret = megasas_issue_blocked_cmd(instance, cmd, 4666 MFI_IO_TIMEOUT_SECS); 4667 else 4668 ret = megasas_issue_polled(instance, cmd); 4669 4670 switch (ret) { 4671 case DCMD_FAILED: 4672 dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY " 4673 "failed/not supported by firmware\n"); 4674 4675 if (instance->adapter_type != MFI_SERIES) 4676 megaraid_sas_kill_hba(instance); 4677 else 4678 instance->pd_list_not_supported = 1; 4679 break; 4680 case DCMD_TIMEOUT: 4681 4682 switch (dcmd_timeout_ocr_possible(instance)) { 4683 case INITIATE_OCR: 4684 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 4685 /* 4686 * DCMD failed from AEN path. 4687 * AEN path already hold reset_mutex to avoid PCI access 4688 * while OCR is in progress. 
4689 */ 4690 mutex_unlock(&instance->reset_mutex); 4691 megasas_reset_fusion(instance->host, 4692 MFI_IO_TIMEOUT_OCR); 4693 mutex_lock(&instance->reset_mutex); 4694 break; 4695 case KILL_ADAPTER: 4696 megaraid_sas_kill_hba(instance); 4697 break; 4698 case IGNORE_TIMEOUT: 4699 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d \n", 4700 __func__, __LINE__); 4701 break; 4702 } 4703 4704 break; 4705 4706 case DCMD_SUCCESS: 4707 pd_addr = ci->addr; 4708 if (megasas_dbg_lvl & LD_PD_DEBUG) 4709 dev_info(&instance->pdev->dev, "%s, sysPD count: 0x%x\n", 4710 __func__, le32_to_cpu(ci->count)); 4711 4712 if ((le32_to_cpu(ci->count) > 4713 (MEGASAS_MAX_PD_CHANNELS * MEGASAS_MAX_DEV_PER_CHANNEL))) 4714 break; 4715 4716 memset(instance->local_pd_list, 0, 4717 MEGASAS_MAX_PD * sizeof(struct megasas_pd_list)); 4718 4719 for (pd_index = 0; pd_index < le32_to_cpu(ci->count); pd_index++) { 4720 instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].tid = 4721 le16_to_cpu(pd_addr->deviceId); 4722 instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveType = 4723 pd_addr->scsiDevType; 4724 instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveState = 4725 MR_PD_STATE_SYSTEM; 4726 if (megasas_dbg_lvl & LD_PD_DEBUG) 4727 dev_info(&instance->pdev->dev, 4728 "PD%d: targetID: 0x%03x deviceType:0x%x\n", 4729 pd_index, le16_to_cpu(pd_addr->deviceId), 4730 pd_addr->scsiDevType); 4731 pd_addr++; 4732 } 4733 4734 memcpy(instance->pd_list, instance->local_pd_list, 4735 sizeof(instance->pd_list)); 4736 break; 4737 4738 } 4739 4740 if (ret != DCMD_TIMEOUT) 4741 megasas_return_cmd(instance, cmd); 4742 4743 return ret; 4744 } 4745 4746 /* 4747 * megasas_get_ld_list_info - Returns FW's ld_list structure 4748 * @instance: Adapter soft state 4749 * @ld_list: ld_list structure 4750 * 4751 * Issues an internal command (DCMD) to get the FW's controller PD 4752 * list structure. This information is mainly used to find out SYSTEM 4753 * supported by the FW. 
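 *
 * In practice this issues MR_DCMD_LD_GET_LIST and records the target ID of
 * every logical drive reported with a non-zero state in instance->ld_ids.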
4754 */ 4755 static int 4756 megasas_get_ld_list(struct megasas_instance *instance) 4757 { 4758 int ret = 0, ld_index = 0, ids = 0; 4759 struct megasas_cmd *cmd; 4760 struct megasas_dcmd_frame *dcmd; 4761 struct MR_LD_LIST *ci; 4762 dma_addr_t ci_h = 0; 4763 u32 ld_count; 4764 4765 ci = instance->ld_list_buf; 4766 ci_h = instance->ld_list_buf_h; 4767 4768 cmd = megasas_get_cmd(instance); 4769 4770 if (!cmd) { 4771 dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_list: Failed to get cmd\n"); 4772 return -ENOMEM; 4773 } 4774 4775 dcmd = &cmd->frame->dcmd; 4776 4777 memset(ci, 0, sizeof(*ci)); 4778 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 4779 4780 if (instance->supportmax256vd) 4781 dcmd->mbox.b[0] = 1; 4782 dcmd->cmd = MFI_CMD_DCMD; 4783 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 4784 dcmd->sge_count = 1; 4785 dcmd->flags = MFI_FRAME_DIR_READ; 4786 dcmd->timeout = 0; 4787 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_LIST)); 4788 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_GET_LIST); 4789 dcmd->pad_0 = 0; 4790 4791 megasas_set_dma_settings(instance, dcmd, ci_h, 4792 sizeof(struct MR_LD_LIST)); 4793 4794 if ((instance->adapter_type != MFI_SERIES) && 4795 !instance->mask_interrupts) 4796 ret = megasas_issue_blocked_cmd(instance, cmd, 4797 MFI_IO_TIMEOUT_SECS); 4798 else 4799 ret = megasas_issue_polled(instance, cmd); 4800 4801 ld_count = le32_to_cpu(ci->ldCount); 4802 4803 switch (ret) { 4804 case DCMD_FAILED: 4805 megaraid_sas_kill_hba(instance); 4806 break; 4807 case DCMD_TIMEOUT: 4808 4809 switch (dcmd_timeout_ocr_possible(instance)) { 4810 case INITIATE_OCR: 4811 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 4812 /* 4813 * DCMD failed from AEN path. 4814 * AEN path already hold reset_mutex to avoid PCI access 4815 * while OCR is in progress. 4816 */ 4817 mutex_unlock(&instance->reset_mutex); 4818 megasas_reset_fusion(instance->host, 4819 MFI_IO_TIMEOUT_OCR); 4820 mutex_lock(&instance->reset_mutex); 4821 break; 4822 case KILL_ADAPTER: 4823 megaraid_sas_kill_hba(instance); 4824 break; 4825 case IGNORE_TIMEOUT: 4826 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n", 4827 __func__, __LINE__); 4828 break; 4829 } 4830 4831 break; 4832 4833 case DCMD_SUCCESS: 4834 if (megasas_dbg_lvl & LD_PD_DEBUG) 4835 dev_info(&instance->pdev->dev, "%s, LD count: 0x%x\n", 4836 __func__, ld_count); 4837 4838 if (ld_count > instance->fw_supported_vd_count) 4839 break; 4840 4841 memset(instance->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT); 4842 4843 for (ld_index = 0; ld_index < ld_count; ld_index++) { 4844 if (ci->ldList[ld_index].state != 0) { 4845 ids = ci->ldList[ld_index].ref.targetId; 4846 instance->ld_ids[ids] = ci->ldList[ld_index].ref.targetId; 4847 if (megasas_dbg_lvl & LD_PD_DEBUG) 4848 dev_info(&instance->pdev->dev, 4849 "LD%d: targetID: 0x%03x\n", 4850 ld_index, ids); 4851 } 4852 } 4853 4854 break; 4855 } 4856 4857 if (ret != DCMD_TIMEOUT) 4858 megasas_return_cmd(instance, cmd); 4859 4860 return ret; 4861 } 4862 4863 /** 4864 * megasas_ld_list_query - Returns FW's ld_list structure 4865 * @instance: Adapter soft state 4866 * @query_type: ld_list structure type 4867 * 4868 * Issues an internal command (DCMD) to get the FW's controller PD 4869 * list structure. This information is mainly used to find out SYSTEM 4870 * supported by the FW. 
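 *
 * If the firmware does not support MR_DCMD_LD_LIST_QUERY, the DCMD_FAILED
 * case below falls back to megasas_get_ld_list().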
4871 */ 4872 static int 4873 megasas_ld_list_query(struct megasas_instance *instance, u8 query_type) 4874 { 4875 int ret = 0, ld_index = 0, ids = 0; 4876 struct megasas_cmd *cmd; 4877 struct megasas_dcmd_frame *dcmd; 4878 struct MR_LD_TARGETID_LIST *ci; 4879 dma_addr_t ci_h = 0; 4880 u32 tgtid_count; 4881 4882 ci = instance->ld_targetid_list_buf; 4883 ci_h = instance->ld_targetid_list_buf_h; 4884 4885 cmd = megasas_get_cmd(instance); 4886 4887 if (!cmd) { 4888 dev_warn(&instance->pdev->dev, 4889 "megasas_ld_list_query: Failed to get cmd\n"); 4890 return -ENOMEM; 4891 } 4892 4893 dcmd = &cmd->frame->dcmd; 4894 4895 memset(ci, 0, sizeof(*ci)); 4896 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 4897 4898 dcmd->mbox.b[0] = query_type; 4899 if (instance->supportmax256vd) 4900 dcmd->mbox.b[2] = 1; 4901 4902 dcmd->cmd = MFI_CMD_DCMD; 4903 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 4904 dcmd->sge_count = 1; 4905 dcmd->flags = MFI_FRAME_DIR_READ; 4906 dcmd->timeout = 0; 4907 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_TARGETID_LIST)); 4908 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_LIST_QUERY); 4909 dcmd->pad_0 = 0; 4910 4911 megasas_set_dma_settings(instance, dcmd, ci_h, 4912 sizeof(struct MR_LD_TARGETID_LIST)); 4913 4914 if ((instance->adapter_type != MFI_SERIES) && 4915 !instance->mask_interrupts) 4916 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS); 4917 else 4918 ret = megasas_issue_polled(instance, cmd); 4919 4920 switch (ret) { 4921 case DCMD_FAILED: 4922 dev_info(&instance->pdev->dev, 4923 "DCMD not supported by firmware - %s %d\n", 4924 __func__, __LINE__); 4925 ret = megasas_get_ld_list(instance); 4926 break; 4927 case DCMD_TIMEOUT: 4928 switch (dcmd_timeout_ocr_possible(instance)) { 4929 case INITIATE_OCR: 4930 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 4931 /* 4932 * DCMD failed from AEN path. 4933 * AEN path already hold reset_mutex to avoid PCI access 4934 * while OCR is in progress. 
4935 */ 4936 mutex_unlock(&instance->reset_mutex); 4937 megasas_reset_fusion(instance->host, 4938 MFI_IO_TIMEOUT_OCR); 4939 mutex_lock(&instance->reset_mutex); 4940 break; 4941 case KILL_ADAPTER: 4942 megaraid_sas_kill_hba(instance); 4943 break; 4944 case IGNORE_TIMEOUT: 4945 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n", 4946 __func__, __LINE__); 4947 break; 4948 } 4949 4950 break; 4951 case DCMD_SUCCESS: 4952 tgtid_count = le32_to_cpu(ci->count); 4953 4954 if (megasas_dbg_lvl & LD_PD_DEBUG) 4955 dev_info(&instance->pdev->dev, "%s, LD count: 0x%x\n", 4956 __func__, tgtid_count); 4957 4958 if ((tgtid_count > (instance->fw_supported_vd_count))) 4959 break; 4960 4961 memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS); 4962 for (ld_index = 0; ld_index < tgtid_count; ld_index++) { 4963 ids = ci->targetId[ld_index]; 4964 instance->ld_ids[ids] = ci->targetId[ld_index]; 4965 if (megasas_dbg_lvl & LD_PD_DEBUG) 4966 dev_info(&instance->pdev->dev, "LD%d: targetID: 0x%03x\n", 4967 ld_index, ci->targetId[ld_index]); 4968 } 4969 4970 break; 4971 } 4972 4973 if (ret != DCMD_TIMEOUT) 4974 megasas_return_cmd(instance, cmd); 4975 4976 return ret; 4977 } 4978 4979 /** 4980 * megasas_host_device_list_query 4981 * dcmd.opcode - MR_DCMD_CTRL_DEVICE_LIST_GET 4982 * dcmd.mbox - reserved 4983 * dcmd.sge IN - ptr to return MR_HOST_DEVICE_LIST structure 4984 * Desc: This DCMD will return the combined device list 4985 * Status: MFI_STAT_OK - List returned successfully 4986 * MFI_STAT_INVALID_CMD - Firmware support for the feature has been 4987 * disabled 4988 * @instance: Adapter soft state 4989 * @is_probe: Driver probe check 4990 * Return: 0 if DCMD succeeded 4991 * non-zero if failed 4992 */ 4993 static int 4994 megasas_host_device_list_query(struct megasas_instance *instance, 4995 bool is_probe) 4996 { 4997 int ret, i, target_id; 4998 struct megasas_cmd *cmd; 4999 struct megasas_dcmd_frame *dcmd; 5000 struct MR_HOST_DEVICE_LIST *ci; 5001 u32 count; 5002 dma_addr_t ci_h; 5003 5004 ci = instance->host_device_list_buf; 5005 ci_h = instance->host_device_list_buf_h; 5006 5007 cmd = megasas_get_cmd(instance); 5008 5009 if (!cmd) { 5010 dev_warn(&instance->pdev->dev, 5011 "%s: failed to get cmd\n", 5012 __func__); 5013 return -ENOMEM; 5014 } 5015 5016 dcmd = &cmd->frame->dcmd; 5017 5018 memset(ci, 0, sizeof(*ci)); 5019 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 5020 5021 dcmd->mbox.b[0] = is_probe ? 
0 : 1; 5022 dcmd->cmd = MFI_CMD_DCMD; 5023 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 5024 dcmd->sge_count = 1; 5025 dcmd->flags = MFI_FRAME_DIR_READ; 5026 dcmd->timeout = 0; 5027 dcmd->pad_0 = 0; 5028 dcmd->data_xfer_len = cpu_to_le32(HOST_DEVICE_LIST_SZ); 5029 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_DEVICE_LIST_GET); 5030 5031 megasas_set_dma_settings(instance, dcmd, ci_h, HOST_DEVICE_LIST_SZ); 5032 5033 if (!instance->mask_interrupts) { 5034 ret = megasas_issue_blocked_cmd(instance, cmd, 5035 MFI_IO_TIMEOUT_SECS); 5036 } else { 5037 ret = megasas_issue_polled(instance, cmd); 5038 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 5039 } 5040 5041 switch (ret) { 5042 case DCMD_SUCCESS: 5043 /* Fill the internal pd_list and ld_ids array based on 5044 * targetIds returned by FW 5045 */ 5046 count = le32_to_cpu(ci->count); 5047 5048 if (count > (MEGASAS_MAX_PD + MAX_LOGICAL_DRIVES_EXT)) 5049 break; 5050 5051 if (megasas_dbg_lvl & LD_PD_DEBUG) 5052 dev_info(&instance->pdev->dev, "%s, Device count: 0x%x\n", 5053 __func__, count); 5054 5055 memset(instance->local_pd_list, 0, 5056 MEGASAS_MAX_PD * sizeof(struct megasas_pd_list)); 5057 memset(instance->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT); 5058 for (i = 0; i < count; i++) { 5059 target_id = le16_to_cpu(ci->host_device_list[i].target_id); 5060 if (ci->host_device_list[i].flags.u.bits.is_sys_pd) { 5061 instance->local_pd_list[target_id].tid = target_id; 5062 instance->local_pd_list[target_id].driveType = 5063 ci->host_device_list[i].scsi_type; 5064 instance->local_pd_list[target_id].driveState = 5065 MR_PD_STATE_SYSTEM; 5066 if (megasas_dbg_lvl & LD_PD_DEBUG) 5067 dev_info(&instance->pdev->dev, 5068 "Device %d: PD targetID: 0x%03x deviceType:0x%x\n", 5069 i, target_id, ci->host_device_list[i].scsi_type); 5070 } else { 5071 instance->ld_ids[target_id] = target_id; 5072 if (megasas_dbg_lvl & LD_PD_DEBUG) 5073 dev_info(&instance->pdev->dev, 5074 "Device %d: LD targetID: 0x%03x\n", 5075 i, target_id); 5076 } 5077 } 5078 5079 memcpy(instance->pd_list, instance->local_pd_list, 5080 sizeof(instance->pd_list)); 5081 break; 5082 5083 case DCMD_TIMEOUT: 5084 switch (dcmd_timeout_ocr_possible(instance)) { 5085 case INITIATE_OCR: 5086 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 5087 mutex_unlock(&instance->reset_mutex); 5088 megasas_reset_fusion(instance->host, 5089 MFI_IO_TIMEOUT_OCR); 5090 mutex_lock(&instance->reset_mutex); 5091 break; 5092 case KILL_ADAPTER: 5093 megaraid_sas_kill_hba(instance); 5094 break; 5095 case IGNORE_TIMEOUT: 5096 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n", 5097 __func__, __LINE__); 5098 break; 5099 } 5100 break; 5101 case DCMD_FAILED: 5102 dev_err(&instance->pdev->dev, 5103 "%s: MR_DCMD_CTRL_DEVICE_LIST_GET failed\n", 5104 __func__); 5105 break; 5106 } 5107 5108 if (ret != DCMD_TIMEOUT) 5109 megasas_return_cmd(instance, cmd); 5110 5111 return ret; 5112 } 5113 5114 /* 5115 * megasas_update_ext_vd_details : Update details w.r.t Extended VD 5116 * instance : Controller's instance 5117 */ 5118 static void megasas_update_ext_vd_details(struct megasas_instance *instance) 5119 { 5120 struct fusion_context *fusion; 5121 u32 ventura_map_sz = 0; 5122 5123 fusion = instance->ctrl_context; 5124 /* For MFI based controllers return dummy success */ 5125 if (!fusion) 5126 return; 5127 5128 instance->supportmax256vd = 5129 instance->ctrl_info_buf->adapterOperations3.supportMaxExtLDs; 5130 /* Below is additional check to address future FW enhancement */ 5131 if (instance->ctrl_info_buf->max_lds > 64) 5132 instance->supportmax256vd = 1; 5133 5134 
instance->drv_supported_vd_count = MEGASAS_MAX_LD_CHANNELS 5135 * MEGASAS_MAX_DEV_PER_CHANNEL; 5136 instance->drv_supported_pd_count = MEGASAS_MAX_PD_CHANNELS 5137 * MEGASAS_MAX_DEV_PER_CHANNEL; 5138 if (instance->supportmax256vd) { 5139 instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES_EXT; 5140 instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES; 5141 } else { 5142 instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES; 5143 instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES; 5144 } 5145 5146 dev_info(&instance->pdev->dev, 5147 "FW provided supportMaxExtLDs: %d\tmax_lds: %d\n", 5148 instance->ctrl_info_buf->adapterOperations3.supportMaxExtLDs ? 1 : 0, 5149 instance->ctrl_info_buf->max_lds); 5150 5151 if (instance->max_raid_mapsize) { 5152 ventura_map_sz = instance->max_raid_mapsize * 5153 MR_MIN_MAP_SIZE; /* 64k */ 5154 fusion->current_map_sz = ventura_map_sz; 5155 fusion->max_map_sz = ventura_map_sz; 5156 } else { 5157 fusion->old_map_sz = 5158 struct_size_t(struct MR_FW_RAID_MAP, ldSpanMap, 5159 instance->fw_supported_vd_count); 5160 fusion->new_map_sz = sizeof(struct MR_FW_RAID_MAP_EXT); 5161 5162 fusion->max_map_sz = 5163 max(fusion->old_map_sz, fusion->new_map_sz); 5164 5165 if (instance->supportmax256vd) 5166 fusion->current_map_sz = fusion->new_map_sz; 5167 else 5168 fusion->current_map_sz = fusion->old_map_sz; 5169 } 5170 /* irrespective of FW raid maps, driver raid map is constant */ 5171 fusion->drv_map_sz = sizeof(struct MR_DRV_RAID_MAP_ALL); 5172 } 5173 5174 /* 5175 * dcmd.opcode - MR_DCMD_CTRL_SNAPDUMP_GET_PROPERTIES 5176 * dcmd.hdr.length - number of bytes to read 5177 * dcmd.sge - Ptr to MR_SNAPDUMP_PROPERTIES 5178 * Desc: Fill in snapdump properties 5179 * Status: MFI_STAT_OK- Command successful 5180 */ 5181 void megasas_get_snapdump_properties(struct megasas_instance *instance) 5182 { 5183 int ret = 0; 5184 struct megasas_cmd *cmd; 5185 struct megasas_dcmd_frame *dcmd; 5186 struct MR_SNAPDUMP_PROPERTIES *ci; 5187 dma_addr_t ci_h = 0; 5188 5189 ci = instance->snapdump_prop; 5190 ci_h = instance->snapdump_prop_h; 5191 5192 if (!ci) 5193 return; 5194 5195 cmd = megasas_get_cmd(instance); 5196 5197 if (!cmd) { 5198 dev_dbg(&instance->pdev->dev, "Failed to get a free cmd\n"); 5199 return; 5200 } 5201 5202 dcmd = &cmd->frame->dcmd; 5203 5204 memset(ci, 0, sizeof(*ci)); 5205 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 5206 5207 dcmd->cmd = MFI_CMD_DCMD; 5208 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 5209 dcmd->sge_count = 1; 5210 dcmd->flags = MFI_FRAME_DIR_READ; 5211 dcmd->timeout = 0; 5212 dcmd->pad_0 = 0; 5213 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_SNAPDUMP_PROPERTIES)); 5214 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SNAPDUMP_GET_PROPERTIES); 5215 5216 megasas_set_dma_settings(instance, dcmd, ci_h, 5217 sizeof(struct MR_SNAPDUMP_PROPERTIES)); 5218 5219 if (!instance->mask_interrupts) { 5220 ret = megasas_issue_blocked_cmd(instance, cmd, 5221 MFI_IO_TIMEOUT_SECS); 5222 } else { 5223 ret = megasas_issue_polled(instance, cmd); 5224 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 5225 } 5226 5227 switch (ret) { 5228 case DCMD_SUCCESS: 5229 instance->snapdump_wait_time = 5230 min_t(u8, ci->trigger_min_num_sec_before_ocr, 5231 MEGASAS_MAX_SNAP_DUMP_WAIT_TIME); 5232 break; 5233 5234 case DCMD_TIMEOUT: 5235 switch (dcmd_timeout_ocr_possible(instance)) { 5236 case INITIATE_OCR: 5237 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 5238 mutex_unlock(&instance->reset_mutex); 5239 megasas_reset_fusion(instance->host, 5240 MFI_IO_TIMEOUT_OCR); 5241 mutex_lock(&instance->reset_mutex); 5242 break; 
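		/*
		 * MFI-series adapters have no OCR path, so the adapter is
		 * taken offline instead (see dcmd_timeout_ocr_possible()).
		 */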
5243 case KILL_ADAPTER: 5244 megaraid_sas_kill_hba(instance); 5245 break; 5246 case IGNORE_TIMEOUT: 5247 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n", 5248 __func__, __LINE__); 5249 break; 5250 } 5251 } 5252 5253 if (ret != DCMD_TIMEOUT) 5254 megasas_return_cmd(instance, cmd); 5255 } 5256 5257 /** 5258 * megasas_get_ctrl_info - Returns FW's controller structure 5259 * @instance: Adapter soft state 5260 * 5261 * Issues an internal command (DCMD) to get the FW's controller structure. 5262 * This information is mainly used to find out the maximum IO transfer per 5263 * command supported by the FW. 5264 */ 5265 int 5266 megasas_get_ctrl_info(struct megasas_instance *instance) 5267 { 5268 int ret = 0; 5269 struct megasas_cmd *cmd; 5270 struct megasas_dcmd_frame *dcmd; 5271 struct megasas_ctrl_info *ci; 5272 dma_addr_t ci_h = 0; 5273 5274 ci = instance->ctrl_info_buf; 5275 ci_h = instance->ctrl_info_buf_h; 5276 5277 cmd = megasas_get_cmd(instance); 5278 5279 if (!cmd) { 5280 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get a free cmd\n"); 5281 return -ENOMEM; 5282 } 5283 5284 dcmd = &cmd->frame->dcmd; 5285 5286 memset(ci, 0, sizeof(*ci)); 5287 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 5288 5289 dcmd->cmd = MFI_CMD_DCMD; 5290 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 5291 dcmd->sge_count = 1; 5292 dcmd->flags = MFI_FRAME_DIR_READ; 5293 dcmd->timeout = 0; 5294 dcmd->pad_0 = 0; 5295 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_ctrl_info)); 5296 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_GET_INFO); 5297 dcmd->mbox.b[0] = 1; 5298 5299 megasas_set_dma_settings(instance, dcmd, ci_h, 5300 sizeof(struct megasas_ctrl_info)); 5301 5302 if ((instance->adapter_type != MFI_SERIES) && 5303 !instance->mask_interrupts) { 5304 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS); 5305 } else { 5306 ret = megasas_issue_polled(instance, cmd); 5307 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 5308 } 5309 5310 switch (ret) { 5311 case DCMD_SUCCESS: 5312 /* Save required controller information in 5313 * CPU endianness format. 5314 */ 5315 le32_to_cpus((u32 *)&ci->properties.OnOffProperties); 5316 le16_to_cpus((u16 *)&ci->properties.on_off_properties2); 5317 le32_to_cpus((u32 *)&ci->adapterOperations2); 5318 le32_to_cpus((u32 *)&ci->adapterOperations3); 5319 le16_to_cpus((u16 *)&ci->adapter_operations4); 5320 le32_to_cpus((u32 *)&ci->adapter_operations5); 5321 5322 /* Update the latest Ext VD info. 5323 * From Init path, store current firmware details. 5324 * From OCR path, detect any firmware properties changes. 5325 * in case of Firmware upgrade without system reboot. 5326 */ 5327 megasas_update_ext_vd_details(instance); 5328 instance->support_seqnum_jbod_fp = 5329 ci->adapterOperations3.useSeqNumJbodFP; 5330 instance->support_morethan256jbod = 5331 ci->adapter_operations4.support_pd_map_target_id; 5332 instance->support_nvme_passthru = 5333 ci->adapter_operations4.support_nvme_passthru; 5334 instance->support_pci_lane_margining = 5335 ci->adapter_operations5.support_pci_lane_margining; 5336 instance->task_abort_tmo = ci->TaskAbortTO; 5337 instance->max_reset_tmo = ci->MaxResetTO; 5338 5339 /*Check whether controller is iMR or MR */ 5340 instance->is_imr = (ci->memory_size ? 0 : 1); 5341 5342 instance->snapdump_wait_time = 5343 (ci->properties.on_off_properties2.enable_snap_dump ? 
5344 MEGASAS_DEFAULT_SNAP_DUMP_WAIT_TIME : 0); 5345 5346 instance->enable_fw_dev_list = 5347 ci->properties.on_off_properties2.enable_fw_dev_list; 5348 5349 dev_info(&instance->pdev->dev, 5350 "controller type\t: %s(%dMB)\n", 5351 instance->is_imr ? "iMR" : "MR", 5352 le16_to_cpu(ci->memory_size)); 5353 5354 instance->disableOnlineCtrlReset = 5355 ci->properties.OnOffProperties.disableOnlineCtrlReset; 5356 instance->secure_jbod_support = 5357 ci->adapterOperations3.supportSecurityonJBOD; 5358 dev_info(&instance->pdev->dev, "Online Controller Reset(OCR)\t: %s\n", 5359 instance->disableOnlineCtrlReset ? "Disabled" : "Enabled"); 5360 dev_info(&instance->pdev->dev, "Secure JBOD support\t: %s\n", 5361 instance->secure_jbod_support ? "Yes" : "No"); 5362 dev_info(&instance->pdev->dev, "NVMe passthru support\t: %s\n", 5363 instance->support_nvme_passthru ? "Yes" : "No"); 5364 dev_info(&instance->pdev->dev, 5365 "FW provided TM TaskAbort/Reset timeout\t: %d secs/%d secs\n", 5366 instance->task_abort_tmo, instance->max_reset_tmo); 5367 dev_info(&instance->pdev->dev, "JBOD sequence map support\t: %s\n", 5368 instance->support_seqnum_jbod_fp ? "Yes" : "No"); 5369 dev_info(&instance->pdev->dev, "PCI Lane Margining support\t: %s\n", 5370 instance->support_pci_lane_margining ? "Yes" : "No"); 5371 5372 break; 5373 5374 case DCMD_TIMEOUT: 5375 switch (dcmd_timeout_ocr_possible(instance)) { 5376 case INITIATE_OCR: 5377 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 5378 mutex_unlock(&instance->reset_mutex); 5379 megasas_reset_fusion(instance->host, 5380 MFI_IO_TIMEOUT_OCR); 5381 mutex_lock(&instance->reset_mutex); 5382 break; 5383 case KILL_ADAPTER: 5384 megaraid_sas_kill_hba(instance); 5385 break; 5386 case IGNORE_TIMEOUT: 5387 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n", 5388 __func__, __LINE__); 5389 break; 5390 } 5391 break; 5392 case DCMD_FAILED: 5393 megaraid_sas_kill_hba(instance); 5394 break; 5395 5396 } 5397 5398 if (ret != DCMD_TIMEOUT) 5399 megasas_return_cmd(instance, cmd); 5400 5401 return ret; 5402 } 5403 5404 /* 5405 * megasas_set_crash_dump_params - Sends address of crash dump DMA buffer 5406 * to firmware 5407 * 5408 * @instance: Adapter soft state 5409 * @crash_buf_state - tell FW to turn ON/OFF crash dump feature 5410 MR_CRASH_BUF_TURN_OFF = 0 5411 MR_CRASH_BUF_TURN_ON = 1 5412 * @return 0 on success non-zero on failure. 5413 * Issues an internal command (DCMD) to set parameters for crash dump feature. 5414 * Driver will send address of crash dump DMA buffer and set mbox to tell FW 5415 * that driver supports crash dump feature. This DCMD will be sent only if 5416 * crash dump feature is supported by the FW. 
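 * Note: the crash dump DMA buffer itself is allocated elsewhere; this
 * routine only hands its DMA address (instance->crash_dump_h, of size
 * CRASH_DMA_BUF_SIZE) and the requested ON/OFF state to the firmware.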
5417 * 5418 */ 5419 int megasas_set_crash_dump_params(struct megasas_instance *instance, 5420 u8 crash_buf_state) 5421 { 5422 int ret = 0; 5423 struct megasas_cmd *cmd; 5424 struct megasas_dcmd_frame *dcmd; 5425 5426 cmd = megasas_get_cmd(instance); 5427 5428 if (!cmd) { 5429 dev_err(&instance->pdev->dev, "Failed to get a free cmd\n"); 5430 return -ENOMEM; 5431 } 5432 5433 5434 dcmd = &cmd->frame->dcmd; 5435 5436 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 5437 dcmd->mbox.b[0] = crash_buf_state; 5438 dcmd->cmd = MFI_CMD_DCMD; 5439 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 5440 dcmd->sge_count = 1; 5441 dcmd->flags = MFI_FRAME_DIR_NONE; 5442 dcmd->timeout = 0; 5443 dcmd->pad_0 = 0; 5444 dcmd->data_xfer_len = cpu_to_le32(CRASH_DMA_BUF_SIZE); 5445 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SET_CRASH_DUMP_PARAMS); 5446 5447 megasas_set_dma_settings(instance, dcmd, instance->crash_dump_h, 5448 CRASH_DMA_BUF_SIZE); 5449 5450 if ((instance->adapter_type != MFI_SERIES) && 5451 !instance->mask_interrupts) 5452 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS); 5453 else 5454 ret = megasas_issue_polled(instance, cmd); 5455 5456 if (ret == DCMD_TIMEOUT) { 5457 switch (dcmd_timeout_ocr_possible(instance)) { 5458 case INITIATE_OCR: 5459 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 5460 megasas_reset_fusion(instance->host, 5461 MFI_IO_TIMEOUT_OCR); 5462 break; 5463 case KILL_ADAPTER: 5464 megaraid_sas_kill_hba(instance); 5465 break; 5466 case IGNORE_TIMEOUT: 5467 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n", 5468 __func__, __LINE__); 5469 break; 5470 } 5471 } else 5472 megasas_return_cmd(instance, cmd); 5473 5474 return ret; 5475 } 5476 5477 /** 5478 * megasas_issue_init_mfi - Initializes the FW 5479 * @instance: Adapter soft state 5480 * 5481 * Issues the INIT MFI cmd 5482 */ 5483 static int 5484 megasas_issue_init_mfi(struct megasas_instance *instance) 5485 { 5486 __le32 context; 5487 struct megasas_cmd *cmd; 5488 struct megasas_init_frame *init_frame; 5489 struct megasas_init_queue_info *initq_info; 5490 dma_addr_t init_frame_h; 5491 dma_addr_t initq_info_h; 5492 5493 /* 5494 * Prepare a init frame. Note the init frame points to queue info 5495 * structure. Each frame has SGL allocated after first 64 bytes. For 5496 * this frame - since we don't need any SGL - we use SGL's space as 5497 * queue info structure 5498 * 5499 * We will not get a NULL command below. We just created the pool. 
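 *
 * Frame layout used here (sketch):
 *	frame_phys_addr + 0  : struct megasas_init_frame (first 64 bytes)
 *	frame_phys_addr + 64 : struct megasas_init_queue_info (in SGL space)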
5500 */ 5501 cmd = megasas_get_cmd(instance); 5502 5503 init_frame = (struct megasas_init_frame *)cmd->frame; 5504 initq_info = (struct megasas_init_queue_info *) 5505 ((unsigned long)init_frame + 64); 5506 5507 init_frame_h = cmd->frame_phys_addr; 5508 initq_info_h = init_frame_h + 64; 5509 5510 context = init_frame->context; 5511 memset(init_frame, 0, MEGAMFI_FRAME_SIZE); 5512 memset(initq_info, 0, sizeof(struct megasas_init_queue_info)); 5513 init_frame->context = context; 5514 5515 initq_info->reply_queue_entries = cpu_to_le32(instance->max_fw_cmds + 1); 5516 initq_info->reply_queue_start_phys_addr_lo = cpu_to_le32(instance->reply_queue_h); 5517 5518 initq_info->producer_index_phys_addr_lo = cpu_to_le32(instance->producer_h); 5519 initq_info->consumer_index_phys_addr_lo = cpu_to_le32(instance->consumer_h); 5520 5521 init_frame->cmd = MFI_CMD_INIT; 5522 init_frame->cmd_status = MFI_STAT_INVALID_STATUS; 5523 init_frame->queue_info_new_phys_addr_lo = 5524 cpu_to_le32(lower_32_bits(initq_info_h)); 5525 init_frame->queue_info_new_phys_addr_hi = 5526 cpu_to_le32(upper_32_bits(initq_info_h)); 5527 5528 init_frame->data_xfer_len = cpu_to_le32(sizeof(struct megasas_init_queue_info)); 5529 5530 /* 5531 * disable the intr before firing the init frame to FW 5532 */ 5533 instance->instancet->disable_intr(instance); 5534 5535 /* 5536 * Issue the init frame in polled mode 5537 */ 5538 5539 if (megasas_issue_polled(instance, cmd)) { 5540 dev_err(&instance->pdev->dev, "Failed to init firmware\n"); 5541 megasas_return_cmd(instance, cmd); 5542 goto fail_fw_init; 5543 } 5544 5545 megasas_return_cmd(instance, cmd); 5546 5547 return 0; 5548 5549 fail_fw_init: 5550 return -EINVAL; 5551 } 5552 5553 static u32 5554 megasas_init_adapter_mfi(struct megasas_instance *instance) 5555 { 5556 u32 context_sz; 5557 u32 reply_q_sz; 5558 5559 /* 5560 * Get various operational parameters from status register 5561 */ 5562 instance->max_fw_cmds = instance->instancet->read_fw_status_reg(instance) & 0x00FFFF; 5563 /* 5564 * Reduce the max supported cmds by 1. This is to ensure that the 5565 * reply_q_sz (1 more than the max cmd that driver may send) 5566 * does not exceed max cmds that the FW can support 5567 */ 5568 instance->max_fw_cmds = instance->max_fw_cmds-1; 5569 instance->max_mfi_cmds = instance->max_fw_cmds; 5570 instance->max_num_sge = (instance->instancet->read_fw_status_reg(instance) & 0xFF0000) >> 5571 0x10; 5572 /* 5573 * For MFI skinny adapters, MEGASAS_SKINNY_INT_CMDS commands 5574 * are reserved for IOCTL + driver's internal DCMDs. 5575 */ 5576 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) || 5577 (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) { 5578 instance->max_scsi_cmds = (instance->max_fw_cmds - 5579 MEGASAS_SKINNY_INT_CMDS); 5580 sema_init(&instance->ioctl_sem, MEGASAS_SKINNY_INT_CMDS); 5581 } else { 5582 instance->max_scsi_cmds = (instance->max_fw_cmds - 5583 MEGASAS_INT_CMDS); 5584 sema_init(&instance->ioctl_sem, (MEGASAS_MFI_IOCTL_CMDS)); 5585 } 5586 5587 instance->cur_can_queue = instance->max_scsi_cmds; 5588 /* 5589 * Create a pool of commands 5590 */ 5591 if (megasas_alloc_cmds(instance)) 5592 goto fail_alloc_cmds; 5593 5594 /* 5595 * Allocate memory for reply queue. Length of reply queue should 5596 * be _one_ more than the maximum commands handled by the firmware. 5597 * 5598 * Note: When FW completes commands, it places corresponding contex 5599 * values in this circular reply queue. This circular queue is a fairly 5600 * typical producer-consumer queue. 
FW is the producer (of completed 5601 * commands) and the driver is the consumer. 5602 */ 5603 context_sz = sizeof(u32); 5604 reply_q_sz = context_sz * (instance->max_fw_cmds + 1); 5605 5606 instance->reply_queue = dma_alloc_coherent(&instance->pdev->dev, 5607 reply_q_sz, &instance->reply_queue_h, GFP_KERNEL); 5608 5609 if (!instance->reply_queue) { 5610 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Out of DMA mem for reply queue\n"); 5611 goto fail_reply_queue; 5612 } 5613 5614 if (megasas_issue_init_mfi(instance)) 5615 goto fail_fw_init; 5616 5617 if (megasas_get_ctrl_info(instance)) { 5618 dev_err(&instance->pdev->dev, "(%d): Could get controller info " 5619 "Fail from %s %d\n", instance->unique_id, 5620 __func__, __LINE__); 5621 goto fail_fw_init; 5622 } 5623 5624 instance->fw_support_ieee = 0; 5625 instance->fw_support_ieee = 5626 (instance->instancet->read_fw_status_reg(instance) & 5627 0x04000000); 5628 5629 dev_notice(&instance->pdev->dev, "megasas_init_mfi: fw_support_ieee=%d", 5630 instance->fw_support_ieee); 5631 5632 if (instance->fw_support_ieee) 5633 instance->flag_ieee = 1; 5634 5635 return 0; 5636 5637 fail_fw_init: 5638 5639 dma_free_coherent(&instance->pdev->dev, reply_q_sz, 5640 instance->reply_queue, instance->reply_queue_h); 5641 fail_reply_queue: 5642 megasas_free_cmds(instance); 5643 5644 fail_alloc_cmds: 5645 return 1; 5646 } 5647 5648 static 5649 void megasas_setup_irq_poll(struct megasas_instance *instance) 5650 { 5651 struct megasas_irq_context *irq_ctx; 5652 u32 count, i; 5653 5654 count = instance->msix_vectors > 0 ? instance->msix_vectors : 1; 5655 5656 /* Initialize IRQ poll */ 5657 for (i = 0; i < count; i++) { 5658 irq_ctx = &instance->irq_context[i]; 5659 irq_ctx->os_irq = pci_irq_vector(instance->pdev, i); 5660 irq_ctx->irq_poll_scheduled = false; 5661 irq_poll_init(&irq_ctx->irqpoll, 5662 instance->threshold_reply_count, 5663 megasas_irqpoll); 5664 } 5665 } 5666 5667 /* 5668 * megasas_setup_irqs_ioapic - register legacy interrupts. 5669 * @instance: Adapter soft state 5670 * 5671 * Do not enable interrupt, only setup ISRs. 5672 * 5673 * Return 0 on success. 5674 */ 5675 static int 5676 megasas_setup_irqs_ioapic(struct megasas_instance *instance) 5677 { 5678 struct pci_dev *pdev; 5679 5680 pdev = instance->pdev; 5681 instance->irq_context[0].instance = instance; 5682 instance->irq_context[0].MSIxIndex = 0; 5683 snprintf(instance->irq_context->name, MEGASAS_MSIX_NAME_LEN, "%s%u", 5684 "megasas", instance->host->host_no); 5685 if (request_irq(pci_irq_vector(pdev, 0), 5686 instance->instancet->service_isr, IRQF_SHARED, 5687 instance->irq_context->name, &instance->irq_context[0])) { 5688 dev_err(&instance->pdev->dev, 5689 "Failed to register IRQ from %s %d\n", 5690 __func__, __LINE__); 5691 return -1; 5692 } 5693 instance->perf_mode = MR_LATENCY_PERF_MODE; 5694 instance->low_latency_index_start = 0; 5695 return 0; 5696 } 5697 5698 /** 5699 * megasas_setup_irqs_msix - register MSI-x interrupts. 5700 * @instance: Adapter soft state 5701 * @is_probe: Driver probe check 5702 * 5703 * Do not enable interrupt, only setup ISRs. 5704 * 5705 * Return 0 on success. 
5706 */ 5707 static int 5708 megasas_setup_irqs_msix(struct megasas_instance *instance, u8 is_probe) 5709 { 5710 int i, j; 5711 struct pci_dev *pdev; 5712 5713 pdev = instance->pdev; 5714 5715 /* Try MSI-x */ 5716 for (i = 0; i < instance->msix_vectors; i++) { 5717 instance->irq_context[i].instance = instance; 5718 instance->irq_context[i].MSIxIndex = i; 5719 snprintf(instance->irq_context[i].name, MEGASAS_MSIX_NAME_LEN, "%s%u-msix%u", 5720 "megasas", instance->host->host_no, i); 5721 if (request_irq(pci_irq_vector(pdev, i), 5722 instance->instancet->service_isr, 0, instance->irq_context[i].name, 5723 &instance->irq_context[i])) { 5724 dev_err(&instance->pdev->dev, 5725 "Failed to register IRQ for vector %d.\n", i); 5726 for (j = 0; j < i; j++) { 5727 if (j < instance->low_latency_index_start) 5728 irq_update_affinity_hint( 5729 pci_irq_vector(pdev, j), NULL); 5730 free_irq(pci_irq_vector(pdev, j), 5731 &instance->irq_context[j]); 5732 } 5733 /* Retry irq register for IO_APIC*/ 5734 instance->msix_vectors = 0; 5735 instance->msix_load_balance = false; 5736 if (is_probe) { 5737 pci_free_irq_vectors(instance->pdev); 5738 return megasas_setup_irqs_ioapic(instance); 5739 } else { 5740 return -1; 5741 } 5742 } 5743 } 5744 5745 return 0; 5746 } 5747 5748 /* 5749 * megasas_destroy_irqs- unregister interrupts. 5750 * @instance: Adapter soft state 5751 * return: void 5752 */ 5753 static void 5754 megasas_destroy_irqs(struct megasas_instance *instance) { 5755 5756 int i; 5757 int count; 5758 struct megasas_irq_context *irq_ctx; 5759 5760 count = instance->msix_vectors > 0 ? instance->msix_vectors : 1; 5761 if (instance->adapter_type != MFI_SERIES) { 5762 for (i = 0; i < count; i++) { 5763 irq_ctx = &instance->irq_context[i]; 5764 irq_poll_disable(&irq_ctx->irqpoll); 5765 } 5766 } 5767 5768 if (instance->msix_vectors) 5769 for (i = 0; i < instance->msix_vectors; i++) { 5770 if (i < instance->low_latency_index_start) 5771 irq_update_affinity_hint( 5772 pci_irq_vector(instance->pdev, i), NULL); 5773 free_irq(pci_irq_vector(instance->pdev, i), 5774 &instance->irq_context[i]); 5775 } 5776 else 5777 free_irq(pci_irq_vector(instance->pdev, 0), 5778 &instance->irq_context[0]); 5779 } 5780 5781 /** 5782 * megasas_setup_jbod_map - setup jbod map for FP seq_number. 5783 * @instance: Adapter soft state 5784 * 5785 * Return 0 on success. 
5786 */ 5787 void 5788 megasas_setup_jbod_map(struct megasas_instance *instance) 5789 { 5790 int i; 5791 struct fusion_context *fusion = instance->ctrl_context; 5792 size_t pd_seq_map_sz; 5793 5794 pd_seq_map_sz = struct_size_t(struct MR_PD_CFG_SEQ_NUM_SYNC, seq, 5795 MAX_PHYSICAL_DEVICES); 5796 5797 instance->use_seqnum_jbod_fp = 5798 instance->support_seqnum_jbod_fp; 5799 if (reset_devices || !fusion || 5800 !instance->support_seqnum_jbod_fp) { 5801 dev_info(&instance->pdev->dev, 5802 "JBOD sequence map is disabled %s %d\n", 5803 __func__, __LINE__); 5804 instance->use_seqnum_jbod_fp = false; 5805 return; 5806 } 5807 5808 if (fusion->pd_seq_sync[0]) 5809 goto skip_alloc; 5810 5811 for (i = 0; i < JBOD_MAPS_COUNT; i++) { 5812 fusion->pd_seq_sync[i] = dma_alloc_coherent 5813 (&instance->pdev->dev, pd_seq_map_sz, 5814 &fusion->pd_seq_phys[i], GFP_KERNEL); 5815 if (!fusion->pd_seq_sync[i]) { 5816 dev_err(&instance->pdev->dev, 5817 "Failed to allocate memory from %s %d\n", 5818 __func__, __LINE__); 5819 if (i == 1) { 5820 dma_free_coherent(&instance->pdev->dev, 5821 pd_seq_map_sz, fusion->pd_seq_sync[0], 5822 fusion->pd_seq_phys[0]); 5823 fusion->pd_seq_sync[0] = NULL; 5824 } 5825 instance->use_seqnum_jbod_fp = false; 5826 return; 5827 } 5828 } 5829 5830 skip_alloc: 5831 if (!megasas_sync_pd_seq_num(instance, false) && 5832 !megasas_sync_pd_seq_num(instance, true)) 5833 instance->use_seqnum_jbod_fp = true; 5834 else 5835 instance->use_seqnum_jbod_fp = false; 5836 } 5837 5838 static void megasas_setup_reply_map(struct megasas_instance *instance) 5839 { 5840 const struct cpumask *mask; 5841 unsigned int queue, cpu, low_latency_index_start; 5842 5843 low_latency_index_start = instance->low_latency_index_start; 5844 5845 for (queue = low_latency_index_start; queue < instance->msix_vectors; queue++) { 5846 mask = pci_irq_get_affinity(instance->pdev, queue); 5847 if (!mask) 5848 goto fallback; 5849 5850 for_each_cpu(cpu, mask) 5851 instance->reply_map[cpu] = queue; 5852 } 5853 return; 5854 5855 fallback: 5856 queue = low_latency_index_start; 5857 for_each_possible_cpu(cpu) { 5858 instance->reply_map[cpu] = queue; 5859 if (queue == (instance->msix_vectors - 1)) 5860 queue = low_latency_index_start; 5861 else 5862 queue++; 5863 } 5864 } 5865 5866 /** 5867 * megasas_get_device_list - Get the PD and LD device list from FW. 5868 * @instance: Adapter soft state 5869 * @return: Success or failure 5870 * 5871 * Issue DCMDs to Firmware to get the PD and LD list. 5872 * Based on the FW support, driver sends the HOST_DEVICE_LIST or combination 5873 * of PD_LIST/LD_LIST_QUERY DCMDs to get the device list. 
5874 */ 5875 static 5876 int megasas_get_device_list(struct megasas_instance *instance) 5877 { 5878 if (instance->enable_fw_dev_list) { 5879 if (megasas_host_device_list_query(instance, true)) 5880 return FAILED; 5881 } else { 5882 if (megasas_get_pd_list(instance) < 0) { 5883 dev_err(&instance->pdev->dev, "failed to get PD list\n"); 5884 return FAILED; 5885 } 5886 5887 if (megasas_ld_list_query(instance, 5888 MR_LD_QUERY_TYPE_EXPOSED_TO_HOST)) { 5889 dev_err(&instance->pdev->dev, "failed to get LD list\n"); 5890 return FAILED; 5891 } 5892 } 5893 5894 return SUCCESS; 5895 } 5896 5897 /** 5898 * megasas_set_high_iops_queue_affinity_and_hint - Set affinity and hint 5899 * for high IOPS queues 5900 * @instance: Adapter soft state 5901 * return: void 5902 */ 5903 static inline void 5904 megasas_set_high_iops_queue_affinity_and_hint(struct megasas_instance *instance) 5905 { 5906 int i; 5907 unsigned int irq; 5908 const struct cpumask *mask; 5909 5910 if (instance->perf_mode == MR_BALANCED_PERF_MODE) { 5911 mask = cpumask_of_node(dev_to_node(&instance->pdev->dev)); 5912 5913 for (i = 0; i < instance->low_latency_index_start; i++) { 5914 irq = pci_irq_vector(instance->pdev, i); 5915 irq_set_affinity_and_hint(irq, mask); 5916 } 5917 } 5918 } 5919 5920 static int 5921 __megasas_alloc_irq_vectors(struct megasas_instance *instance) 5922 { 5923 int i, irq_flags; 5924 struct irq_affinity desc = { .pre_vectors = instance->low_latency_index_start }; 5925 struct irq_affinity *descp = &desc; 5926 5927 irq_flags = PCI_IRQ_MSIX; 5928 5929 if (instance->smp_affinity_enable) 5930 irq_flags |= PCI_IRQ_AFFINITY | PCI_IRQ_ALL_TYPES; 5931 else 5932 descp = NULL; 5933 5934 /* Do not allocate msix vectors for poll_queues. 5935 * msix_vectors is always within the range of FW supported reply queues. 5936 */ 5937 i = pci_alloc_irq_vectors_affinity(instance->pdev, 5938 instance->low_latency_index_start, 5939 instance->msix_vectors - instance->iopoll_q_count, irq_flags, descp); 5940 5941 return i; 5942 } 5943 5944 /** 5945 * megasas_alloc_irq_vectors - Allocate IRQ vectors/enable MSI-x vectors 5946 * @instance: Adapter soft state 5947 * return: void 5948 */ 5949 static void 5950 megasas_alloc_irq_vectors(struct megasas_instance *instance) 5951 { 5952 int i; 5953 unsigned int num_msix_req; 5954 5955 instance->iopoll_q_count = 0; 5956 if ((instance->adapter_type != MFI_SERIES) && 5957 poll_queues) { 5958 5959 instance->perf_mode = MR_LATENCY_PERF_MODE; 5960 instance->low_latency_index_start = 1; 5961 5962 /* reserve for default and non-managed pre-vector.
*/ 5963 if (instance->msix_vectors > (poll_queues + 2)) 5964 instance->iopoll_q_count = poll_queues; 5965 else 5966 instance->iopoll_q_count = 0; 5967 5968 num_msix_req = num_online_cpus() + instance->low_latency_index_start; 5969 instance->msix_vectors = min(num_msix_req, 5970 instance->msix_vectors); 5971 5972 } 5973 5974 i = __megasas_alloc_irq_vectors(instance); 5975 5976 if (((instance->perf_mode == MR_BALANCED_PERF_MODE) 5977 || instance->iopoll_q_count) && 5978 (i != (instance->msix_vectors - instance->iopoll_q_count))) { 5979 if (instance->msix_vectors) 5980 pci_free_irq_vectors(instance->pdev); 5981 /* Disable Balanced IOPS mode and try realloc vectors */ 5982 instance->perf_mode = MR_LATENCY_PERF_MODE; 5983 instance->low_latency_index_start = 1; 5984 num_msix_req = num_online_cpus() + instance->low_latency_index_start; 5985 5986 instance->msix_vectors = min(num_msix_req, 5987 instance->msix_vectors); 5988 5989 instance->iopoll_q_count = 0; 5990 i = __megasas_alloc_irq_vectors(instance); 5991 5992 } 5993 5994 dev_info(&instance->pdev->dev, 5995 "requested/available msix %d/%d poll_queue %d\n", 5996 instance->msix_vectors - instance->iopoll_q_count, 5997 i, instance->iopoll_q_count); 5998 5999 if (i > 0) 6000 instance->msix_vectors = i; 6001 else 6002 instance->msix_vectors = 0; 6003 6004 if (instance->smp_affinity_enable) 6005 megasas_set_high_iops_queue_affinity_and_hint(instance); 6006 } 6007 6008 /** 6009 * megasas_init_fw - Initializes the FW 6010 * @instance: Adapter soft state 6011 * 6012 * This is the main function for initializing firmware 6013 */ 6014 6015 static int megasas_init_fw(struct megasas_instance *instance) 6016 { 6017 u32 max_sectors_1; 6018 u32 max_sectors_2, tmp_sectors, msix_enable; 6019 u32 scratch_pad_1, scratch_pad_2, scratch_pad_3, status_reg; 6020 resource_size_t base_addr; 6021 void *base_addr_phys; 6022 struct megasas_ctrl_info *ctrl_info = NULL; 6023 unsigned long bar_list; 6024 int i, j, loop; 6025 struct IOV_111 *iovPtr; 6026 struct fusion_context *fusion; 6027 bool intr_coalescing; 6028 unsigned int num_msix_req; 6029 u16 lnksta, speed; 6030 6031 fusion = instance->ctrl_context; 6032 6033 /* Find first memory bar */ 6034 bar_list = pci_select_bars(instance->pdev, IORESOURCE_MEM); 6035 instance->bar = find_first_bit(&bar_list, BITS_PER_LONG); 6036 if (pci_request_selected_regions(instance->pdev, 1<<instance->bar, 6037 "megasas: LSI")) { 6038 dev_printk(KERN_DEBUG, &instance->pdev->dev, "IO memory region busy!\n"); 6039 return -EBUSY; 6040 } 6041 6042 base_addr = pci_resource_start(instance->pdev, instance->bar); 6043 instance->reg_set = ioremap(base_addr, 8192); 6044 6045 if (!instance->reg_set) { 6046 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to map IO mem\n"); 6047 goto fail_ioremap; 6048 } 6049 6050 base_addr_phys = &base_addr; 6051 dev_printk(KERN_DEBUG, &instance->pdev->dev, 6052 "BAR:0x%lx BAR's base_addr(phys):%pa mapped virt_addr:0x%p\n", 6053 instance->bar, base_addr_phys, instance->reg_set); 6054 6055 if (instance->adapter_type != MFI_SERIES) 6056 instance->instancet = &megasas_instance_template_fusion; 6057 else { 6058 switch (instance->pdev->device) { 6059 case PCI_DEVICE_ID_LSI_SAS1078R: 6060 case PCI_DEVICE_ID_LSI_SAS1078DE: 6061 instance->instancet = &megasas_instance_template_ppc; 6062 break; 6063 case PCI_DEVICE_ID_LSI_SAS1078GEN2: 6064 case PCI_DEVICE_ID_LSI_SAS0079GEN2: 6065 instance->instancet = &megasas_instance_template_gen2; 6066 break; 6067 case PCI_DEVICE_ID_LSI_SAS0073SKINNY: 6068 case 
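/*
 * Illustrative sketch (not compiled into the driver): the BAR discovery and
 * mapping performed at the top of megasas_init_fw() above - pick the first
 * memory BAR, reserve it, and ioremap an 8K window for the register set.
 * The helper name is made up for this example; the calls mirror the code
 * above.
 */
#if 0
static void __iomem *example_map_regs(struct pci_dev *pdev, int *bar)
{
	unsigned long bars = pci_select_bars(pdev, IORESOURCE_MEM);

	*bar = find_first_bit(&bars, BITS_PER_LONG);
	if (pci_request_selected_regions(pdev, 1 << *bar, "megasas: LSI"))
		return NULL;

	return ioremap(pci_resource_start(pdev, *bar), 8192);
}
#endif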
PCI_DEVICE_ID_LSI_SAS0071SKINNY: 6069 instance->instancet = &megasas_instance_template_skinny; 6070 break; 6071 case PCI_DEVICE_ID_LSI_SAS1064R: 6072 case PCI_DEVICE_ID_DELL_PERC5: 6073 default: 6074 instance->instancet = &megasas_instance_template_xscale; 6075 instance->pd_list_not_supported = 1; 6076 break; 6077 } 6078 } 6079 6080 if (megasas_transition_to_ready(instance, 0)) { 6081 dev_info(&instance->pdev->dev, 6082 "Failed to transition controller to ready from %s!\n", 6083 __func__); 6084 if (instance->adapter_type != MFI_SERIES) { 6085 status_reg = instance->instancet->read_fw_status_reg( 6086 instance); 6087 if (status_reg & MFI_RESET_ADAPTER) { 6088 if (megasas_adp_reset_wait_for_ready 6089 (instance, true, 0) == FAILED) 6090 goto fail_ready_state; 6091 } else { 6092 goto fail_ready_state; 6093 } 6094 } else { 6095 atomic_set(&instance->fw_reset_no_pci_access, 1); 6096 instance->instancet->adp_reset 6097 (instance, instance->reg_set); 6098 atomic_set(&instance->fw_reset_no_pci_access, 0); 6099 6100 /*waiting for about 30 second before retry*/ 6101 ssleep(30); 6102 6103 if (megasas_transition_to_ready(instance, 0)) 6104 goto fail_ready_state; 6105 } 6106 6107 dev_info(&instance->pdev->dev, 6108 "FW restarted successfully from %s!\n", 6109 __func__); 6110 } 6111 6112 megasas_init_ctrl_params(instance); 6113 6114 if (megasas_set_dma_mask(instance)) 6115 goto fail_ready_state; 6116 6117 if (megasas_alloc_ctrl_mem(instance)) 6118 goto fail_alloc_dma_buf; 6119 6120 if (megasas_alloc_ctrl_dma_buffers(instance)) 6121 goto fail_alloc_dma_buf; 6122 6123 fusion = instance->ctrl_context; 6124 6125 if (instance->adapter_type >= VENTURA_SERIES) { 6126 scratch_pad_2 = 6127 megasas_readl(instance, 6128 &instance->reg_set->outbound_scratch_pad_2); 6129 instance->max_raid_mapsize = ((scratch_pad_2 >> 6130 MR_MAX_RAID_MAP_SIZE_OFFSET_SHIFT) & 6131 MR_MAX_RAID_MAP_SIZE_MASK); 6132 } 6133 6134 instance->enable_sdev_max_qd = enable_sdev_max_qd; 6135 6136 switch (instance->adapter_type) { 6137 case VENTURA_SERIES: 6138 fusion->pcie_bw_limitation = true; 6139 break; 6140 case AERO_SERIES: 6141 fusion->r56_div_offload = true; 6142 break; 6143 default: 6144 break; 6145 } 6146 6147 /* Check if MSI-X is supported while in ready state */ 6148 msix_enable = (instance->instancet->read_fw_status_reg(instance) & 6149 0x4000000) >> 0x1a; 6150 if (msix_enable && !msix_disable) { 6151 6152 scratch_pad_1 = megasas_readl 6153 (instance, &instance->reg_set->outbound_scratch_pad_1); 6154 /* Check max MSI-X vectors */ 6155 if (fusion) { 6156 if (instance->adapter_type == THUNDERBOLT_SERIES) { 6157 /* Thunderbolt Series*/ 6158 instance->msix_vectors = (scratch_pad_1 6159 & MR_MAX_REPLY_QUEUES_OFFSET) + 1; 6160 } else { 6161 instance->msix_vectors = ((scratch_pad_1 6162 & MR_MAX_REPLY_QUEUES_EXT_OFFSET) 6163 >> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1; 6164 6165 /* 6166 * For Invader series, > 8 MSI-x vectors 6167 * supported by FW/HW implies combined 6168 * reply queue mode is enabled. 6169 * For Ventura series, > 16 MSI-x vectors 6170 * supported by FW/HW implies combined 6171 * reply queue mode is enabled. 6172 */ 6173 switch (instance->adapter_type) { 6174 case INVADER_SERIES: 6175 if (instance->msix_vectors > 8) 6176 instance->msix_combined = true; 6177 break; 6178 case AERO_SERIES: 6179 case VENTURA_SERIES: 6180 if (instance->msix_vectors > 16) 6181 instance->msix_combined = true; 6182 break; 6183 } 6184 6185 if (rdpq_enable) 6186 instance->is_rdpq = (scratch_pad_1 & MR_RDPQ_MODE_OFFSET) ? 
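/*
 * Illustrative sketch (not compiled into the driver): the scratch pad decode
 * used just above. Firmware reports the supported reply queue count in
 * outbound_scratch_pad_1; Thunderbolt parts use the low bits directly while
 * newer parts use a wider, shifted field. Mask/shift names are the driver's
 * own; the helper itself is only an example.
 */
#if 0
static unsigned int example_decode_reply_queues(u32 scratch_pad_1,
						bool thunderbolt)
{
	if (thunderbolt)
		return (scratch_pad_1 & MR_MAX_REPLY_QUEUES_OFFSET) + 1;

	return ((scratch_pad_1 & MR_MAX_REPLY_QUEUES_EXT_OFFSET) >>
		MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1;
}
#endif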
6187 1 : 0; 6188 6189 if (instance->adapter_type >= INVADER_SERIES && 6190 !instance->msix_combined) { 6191 instance->msix_load_balance = true; 6192 instance->smp_affinity_enable = false; 6193 } 6194 6195 /* Save 1-15 reply post index address to local memory 6196 * Index 0 is already saved from reg offset 6197 * MPI2_REPLY_POST_HOST_INDEX_OFFSET 6198 */ 6199 for (loop = 1; loop < MR_MAX_MSIX_REG_ARRAY; loop++) { 6200 instance->reply_post_host_index_addr[loop] = 6201 (u32 __iomem *) 6202 ((u8 __iomem *)instance->reg_set + 6203 MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET 6204 + (loop * 0x10)); 6205 } 6206 } 6207 6208 dev_info(&instance->pdev->dev, 6209 "firmware supports msix\t: (%d)", 6210 instance->msix_vectors); 6211 if (msix_vectors) 6212 instance->msix_vectors = min(msix_vectors, 6213 instance->msix_vectors); 6214 } else /* MFI adapters */ 6215 instance->msix_vectors = 1; 6216 6217 6218 /* 6219 * For Aero (if some conditions are met), driver will configure a 6220 * few additional reply queues with interrupt coalescing enabled. 6221 * These queues with interrupt coalescing enabled are called 6222 * High IOPS queues and the rest of the reply queues (based on the number of 6223 * logical CPUs) are termed Low latency queues. 6224 * 6225 * Total Number of reply queues = High IOPS queues + low latency queues 6226 * 6227 * For the rest of the fusion adapters, 1 additional reply queue will be 6228 * reserved for management commands, and the rest of the reply queues 6229 * (based on the number of logical CPUs) will be used for IOs and 6230 * referenced as IO queues. 6231 * Total Number of reply queues = 1 + IO queues 6232 * 6233 * MFI adapters support a single MSI-x vector, so a single reply queue 6234 * will be used for both IO and management commands. 6235 */ 6236 6237 intr_coalescing = (scratch_pad_1 & MR_INTR_COALESCING_SUPPORT_OFFSET) ? 6238 true : false; 6239 if (intr_coalescing && 6240 (num_online_cpus() >= MR_HIGH_IOPS_QUEUE_COUNT) && 6241 (instance->msix_vectors == MEGASAS_MAX_MSIX_QUEUES)) 6242 instance->perf_mode = MR_BALANCED_PERF_MODE; 6243 else 6244 instance->perf_mode = MR_LATENCY_PERF_MODE; 6245 6246 6247 if (instance->adapter_type == AERO_SERIES) { 6248 pcie_capability_read_word(instance->pdev, PCI_EXP_LNKSTA, &lnksta); 6249 speed = lnksta & PCI_EXP_LNKSTA_CLS; 6250 6251 /* 6252 * For Aero, if PCIe link speed is <16 GT/s, then driver should operate 6253 * in latency perf mode and enable R1 PCI bandwidth algorithm 6254 */ 6255 if (speed < 0x4) { 6256 instance->perf_mode = MR_LATENCY_PERF_MODE; 6257 fusion->pcie_bw_limitation = true; 6258 } 6259 6260 /* 6261 * Performance mode settings provided through the module parameter perf_mode will 6262 * take effect only for: 6263 * 1. Aero family of adapters. 6264 * 2. When the user sets the module parameter perf_mode in the range 0-2. 6265 */ 6266 if ((perf_mode >= MR_BALANCED_PERF_MODE) && 6267 (perf_mode <= MR_LATENCY_PERF_MODE)) 6268 instance->perf_mode = perf_mode; 6269 /* 6270 * If intr coalescing is not supported by controller FW, then IOPS 6271 * and Balanced modes are not feasible.
6272 */ 6273 if (!intr_coalescing) 6274 instance->perf_mode = MR_LATENCY_PERF_MODE; 6275 6276 } 6277 6278 if (instance->perf_mode == MR_BALANCED_PERF_MODE) 6279 instance->low_latency_index_start = 6280 MR_HIGH_IOPS_QUEUE_COUNT; 6281 else 6282 instance->low_latency_index_start = 1; 6283 6284 num_msix_req = num_online_cpus() + instance->low_latency_index_start; 6285 6286 instance->msix_vectors = min(num_msix_req, 6287 instance->msix_vectors); 6288 6289 megasas_alloc_irq_vectors(instance); 6290 if (!instance->msix_vectors) 6291 instance->msix_load_balance = false; 6292 } 6293 /* 6294 * MSI-X host index 0 is common for all adapter. 6295 * It is used for all MPT based Adapters. 6296 */ 6297 if (instance->msix_combined) { 6298 instance->reply_post_host_index_addr[0] = 6299 (u32 *)((u8 *)instance->reg_set + 6300 MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET); 6301 } else { 6302 instance->reply_post_host_index_addr[0] = 6303 (u32 *)((u8 *)instance->reg_set + 6304 MPI2_REPLY_POST_HOST_INDEX_OFFSET); 6305 } 6306 6307 if (!instance->msix_vectors) { 6308 i = pci_alloc_irq_vectors(instance->pdev, 1, 1, PCI_IRQ_INTX); 6309 if (i < 0) 6310 goto fail_init_adapter; 6311 } 6312 6313 megasas_setup_reply_map(instance); 6314 6315 dev_info(&instance->pdev->dev, 6316 "current msix/online cpus\t: (%d/%d)\n", 6317 instance->msix_vectors, (unsigned int)num_online_cpus()); 6318 dev_info(&instance->pdev->dev, 6319 "RDPQ mode\t: (%s)\n", instance->is_rdpq ? "enabled" : "disabled"); 6320 6321 tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet, 6322 (unsigned long)instance); 6323 6324 /* 6325 * Below are default value for legacy Firmware. 6326 * non-fusion based controllers 6327 */ 6328 instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES; 6329 instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES; 6330 /* Get operational params, sge flags, send init cmd to controller */ 6331 if (instance->instancet->init_adapter(instance)) 6332 goto fail_init_adapter; 6333 6334 if (instance->adapter_type >= VENTURA_SERIES) { 6335 scratch_pad_3 = 6336 megasas_readl(instance, 6337 &instance->reg_set->outbound_scratch_pad_3); 6338 if ((scratch_pad_3 & MR_NVME_PAGE_SIZE_MASK) >= 6339 MR_DEFAULT_NVME_PAGE_SHIFT) 6340 instance->nvme_page_size = 6341 (1 << (scratch_pad_3 & MR_NVME_PAGE_SIZE_MASK)); 6342 6343 dev_info(&instance->pdev->dev, 6344 "NVME page size\t: (%d)\n", instance->nvme_page_size); 6345 } 6346 6347 if (instance->msix_vectors ? 
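/*
 * Illustrative sketch (not compiled into the driver): the NVMe page size
 * decode done above for Ventura and later controllers. Firmware reports a
 * page *shift* in the low bits of outbound_scratch_pad_3, so a reported
 * value of 12, for example, corresponds to 4096 bytes; if the reported
 * shift is below the default, the driver keeps its default page size.
 * Constant names are the driver's; the helper is only an example.
 */
#if 0
static u32 example_nvme_page_size(u32 scratch_pad_3, u32 default_size)
{
	u32 shift = scratch_pad_3 & MR_NVME_PAGE_SIZE_MASK;

	if (shift < MR_DEFAULT_NVME_PAGE_SHIFT)
		return default_size;

	return 1U << shift;
}
#endif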
6348 megasas_setup_irqs_msix(instance, 1) : 6349 megasas_setup_irqs_ioapic(instance)) 6350 goto fail_init_adapter; 6351 6352 if (instance->adapter_type != MFI_SERIES) 6353 megasas_setup_irq_poll(instance); 6354 6355 instance->instancet->enable_intr(instance); 6356 6357 dev_info(&instance->pdev->dev, "INIT adapter done\n"); 6358 6359 megasas_setup_jbod_map(instance); 6360 6361 if (megasas_get_device_list(instance) != SUCCESS) { 6362 dev_err(&instance->pdev->dev, 6363 "%s: megasas_get_device_list failed\n", 6364 __func__); 6365 goto fail_get_ld_pd_list; 6366 } 6367 6368 /* stream detection initialization */ 6369 if (instance->adapter_type >= VENTURA_SERIES) { 6370 fusion->stream_detect_by_ld = 6371 kcalloc(MAX_LOGICAL_DRIVES_EXT, 6372 sizeof(struct LD_STREAM_DETECT *), 6373 GFP_KERNEL); 6374 if (!fusion->stream_detect_by_ld) { 6375 dev_err(&instance->pdev->dev, 6376 "unable to allocate stream detection for pool of LDs\n"); 6377 goto fail_get_ld_pd_list; 6378 } 6379 for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i) { 6380 fusion->stream_detect_by_ld[i] = 6381 kzalloc(sizeof(struct LD_STREAM_DETECT), 6382 GFP_KERNEL); 6383 if (!fusion->stream_detect_by_ld[i]) { 6384 dev_err(&instance->pdev->dev, 6385 "unable to allocate stream detect by LD\n "); 6386 for (j = 0; j < i; ++j) 6387 kfree(fusion->stream_detect_by_ld[j]); 6388 kfree(fusion->stream_detect_by_ld); 6389 fusion->stream_detect_by_ld = NULL; 6390 goto fail_get_ld_pd_list; 6391 } 6392 fusion->stream_detect_by_ld[i]->mru_bit_map 6393 = MR_STREAM_BITMAP; 6394 } 6395 } 6396 6397 /* 6398 * Compute the max allowed sectors per IO: The controller info has two 6399 * limits on max sectors. Driver should use the minimum of these two. 6400 * 6401 * 1 << stripe_sz_ops.min = max sectors per strip 6402 * 6403 * Note that older firmwares ( < FW ver 30) didn't report information 6404 * to calculate max_sectors_1. So the number ended up as zero always. 6405 */ 6406 tmp_sectors = 0; 6407 ctrl_info = instance->ctrl_info_buf; 6408 6409 max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) * 6410 le16_to_cpu(ctrl_info->max_strips_per_io); 6411 max_sectors_2 = le32_to_cpu(ctrl_info->max_request_size); 6412 6413 tmp_sectors = min_t(u32, max_sectors_1, max_sectors_2); 6414 6415 instance->peerIsPresent = ctrl_info->cluster.peerIsPresent; 6416 instance->passive = ctrl_info->cluster.passive; 6417 memcpy(instance->clusterId, ctrl_info->clusterId, sizeof(instance->clusterId)); 6418 instance->UnevenSpanSupport = 6419 ctrl_info->adapterOperations2.supportUnevenSpans; 6420 if (instance->UnevenSpanSupport) { 6421 struct fusion_context *fusion = instance->ctrl_context; 6422 if (MR_ValidateMapInfo(instance, instance->map_id)) 6423 fusion->fast_path_io = 1; 6424 else 6425 fusion->fast_path_io = 0; 6426 6427 } 6428 if (ctrl_info->host_interface.SRIOV) { 6429 instance->requestorId = ctrl_info->iov.requestorId; 6430 if (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) { 6431 if (!ctrl_info->adapterOperations2.activePassive) 6432 instance->PlasmaFW111 = 1; 6433 6434 dev_info(&instance->pdev->dev, "SR-IOV: firmware type: %s\n", 6435 instance->PlasmaFW111 ? 
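/*
 * Illustrative sketch (not compiled into the driver): the max-sectors
 * calculation described in the comment above. With a minimum stripe size
 * exponent of 3 (8-sector strips) and max_strips_per_io of 42, the first
 * limit is 8 * 42 = 336 sectors; the final value is the smaller of that and
 * the controller's max_request_size. The numbers are made up for the
 * example, not real controller data.
 */
#if 0
static u32 example_max_sectors(u8 min_stripe_shift, u16 max_strips_per_io,
			       u32 max_request_size)
{
	u32 limit_from_strips = (1U << min_stripe_shift) * max_strips_per_io;

	return min(limit_from_strips, max_request_size);
}
#endif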
"1.11" : "new"); 6436 6437 if (instance->PlasmaFW111) { 6438 iovPtr = (struct IOV_111 *) 6439 ((unsigned char *)ctrl_info + IOV_111_OFFSET); 6440 instance->requestorId = iovPtr->requestorId; 6441 } 6442 } 6443 dev_info(&instance->pdev->dev, "SRIOV: VF requestorId %d\n", 6444 instance->requestorId); 6445 } 6446 6447 instance->crash_dump_fw_support = 6448 ctrl_info->adapterOperations3.supportCrashDump; 6449 instance->crash_dump_drv_support = 6450 (instance->crash_dump_fw_support && 6451 instance->crash_dump_buf); 6452 if (instance->crash_dump_drv_support) 6453 megasas_set_crash_dump_params(instance, 6454 MR_CRASH_BUF_TURN_OFF); 6455 6456 else { 6457 if (instance->crash_dump_buf) 6458 dma_free_coherent(&instance->pdev->dev, 6459 CRASH_DMA_BUF_SIZE, 6460 instance->crash_dump_buf, 6461 instance->crash_dump_h); 6462 instance->crash_dump_buf = NULL; 6463 } 6464 6465 if (instance->snapdump_wait_time) { 6466 megasas_get_snapdump_properties(instance); 6467 dev_info(&instance->pdev->dev, "Snap dump wait time\t: %d\n", 6468 instance->snapdump_wait_time); 6469 } 6470 6471 dev_info(&instance->pdev->dev, 6472 "pci id\t\t: (0x%04x)/(0x%04x)/(0x%04x)/(0x%04x)\n", 6473 le16_to_cpu(ctrl_info->pci.vendor_id), 6474 le16_to_cpu(ctrl_info->pci.device_id), 6475 le16_to_cpu(ctrl_info->pci.sub_vendor_id), 6476 le16_to_cpu(ctrl_info->pci.sub_device_id)); 6477 dev_info(&instance->pdev->dev, "unevenspan support : %s\n", 6478 instance->UnevenSpanSupport ? "yes" : "no"); 6479 dev_info(&instance->pdev->dev, "firmware crash dump : %s\n", 6480 instance->crash_dump_drv_support ? "yes" : "no"); 6481 dev_info(&instance->pdev->dev, "JBOD sequence map : %s\n", 6482 instance->use_seqnum_jbod_fp ? "enabled" : "disabled"); 6483 6484 instance->max_sectors_per_req = instance->max_num_sge * 6485 SGE_BUFFER_SIZE / 512; 6486 if (tmp_sectors && (instance->max_sectors_per_req > tmp_sectors)) 6487 instance->max_sectors_per_req = tmp_sectors; 6488 6489 /* Check for valid throttlequeuedepth module parameter */ 6490 if (throttlequeuedepth && 6491 throttlequeuedepth <= instance->max_scsi_cmds) 6492 instance->throttlequeuedepth = throttlequeuedepth; 6493 else 6494 instance->throttlequeuedepth = 6495 MEGASAS_THROTTLE_QUEUE_DEPTH; 6496 6497 if ((resetwaittime < 1) || 6498 (resetwaittime > MEGASAS_RESET_WAIT_TIME)) 6499 resetwaittime = MEGASAS_RESET_WAIT_TIME; 6500 6501 if ((scmd_timeout < 10) || (scmd_timeout > MEGASAS_DEFAULT_CMD_TIMEOUT)) 6502 scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT; 6503 6504 /* Launch SR-IOV heartbeat timer */ 6505 if (instance->requestorId) { 6506 if (!megasas_sriov_start_heartbeat(instance, 1)) { 6507 megasas_start_timer(instance); 6508 } else { 6509 instance->skip_heartbeat_timer_del = 1; 6510 goto fail_get_ld_pd_list; 6511 } 6512 } 6513 6514 /* 6515 * Create and start watchdog thread which will monitor 6516 * controller state every 1 sec and trigger OCR when 6517 * it enters fault state 6518 */ 6519 if (instance->adapter_type != MFI_SERIES) 6520 if (megasas_fusion_start_watchdog(instance) != SUCCESS) 6521 goto fail_start_watchdog; 6522 6523 return 0; 6524 6525 fail_start_watchdog: 6526 if (instance->requestorId && !instance->skip_heartbeat_timer_del) 6527 del_timer_sync(&instance->sriov_heartbeat_timer); 6528 fail_get_ld_pd_list: 6529 instance->instancet->disable_intr(instance); 6530 megasas_destroy_irqs(instance); 6531 fail_init_adapter: 6532 if (instance->msix_vectors) 6533 pci_free_irq_vectors(instance->pdev); 6534 instance->msix_vectors = 0; 6535 fail_alloc_dma_buf: 6536 megasas_free_ctrl_dma_buffers(instance); 
6537 megasas_free_ctrl_mem(instance); 6538 fail_ready_state: 6539 iounmap(instance->reg_set); 6540 6541 fail_ioremap: 6542 pci_release_selected_regions(instance->pdev, 1<<instance->bar); 6543 6544 dev_err(&instance->pdev->dev, "Failed from %s %d\n", 6545 __func__, __LINE__); 6546 return -EINVAL; 6547 } 6548 6549 /** 6550 * megasas_release_mfi - Reverses the FW initialization 6551 * @instance: Adapter soft state 6552 */ 6553 static void megasas_release_mfi(struct megasas_instance *instance) 6554 { 6555 u32 reply_q_sz = sizeof(u32) *(instance->max_mfi_cmds + 1); 6556 6557 if (instance->reply_queue) 6558 dma_free_coherent(&instance->pdev->dev, reply_q_sz, 6559 instance->reply_queue, instance->reply_queue_h); 6560 6561 megasas_free_cmds(instance); 6562 6563 iounmap(instance->reg_set); 6564 6565 pci_release_selected_regions(instance->pdev, 1<<instance->bar); 6566 } 6567 6568 /** 6569 * megasas_get_seq_num - Gets latest event sequence numbers 6570 * @instance: Adapter soft state 6571 * @eli: FW event log sequence numbers information 6572 * 6573 * FW maintains a log of all events in a non-volatile area. Upper layers would 6574 * usually find out the latest sequence number of the events, the seq number at 6575 * boot, etc. They would "read" all the events below the latest seq number 6576 * by issuing a direct fw cmd (DCMD). For the future events (beyond latest seq 6577 * number), they would subscribe to AEN (asynchronous event notification) and 6578 * wait for the events to happen. 6579 */ 6580 static int 6581 megasas_get_seq_num(struct megasas_instance *instance, 6582 struct megasas_evt_log_info *eli) 6583 { 6584 struct megasas_cmd *cmd; 6585 struct megasas_dcmd_frame *dcmd; 6586 struct megasas_evt_log_info *el_info; 6587 dma_addr_t el_info_h = 0; 6588 int ret; 6589 6590 cmd = megasas_get_cmd(instance); 6591 6592 if (!cmd) { 6593 return -ENOMEM; 6594 } 6595 6596 dcmd = &cmd->frame->dcmd; 6597 el_info = dma_alloc_coherent(&instance->pdev->dev, 6598 sizeof(struct megasas_evt_log_info), 6599 &el_info_h, GFP_KERNEL); 6600 if (!el_info) { 6601 megasas_return_cmd(instance, cmd); 6602 return -ENOMEM; 6603 } 6604 6605 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 6606 6607 dcmd->cmd = MFI_CMD_DCMD; 6608 dcmd->cmd_status = 0x0; 6609 dcmd->sge_count = 1; 6610 dcmd->flags = MFI_FRAME_DIR_READ; 6611 dcmd->timeout = 0; 6612 dcmd->pad_0 = 0; 6613 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_log_info)); 6614 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_GET_INFO); 6615 6616 megasas_set_dma_settings(instance, dcmd, el_info_h, 6617 sizeof(struct megasas_evt_log_info)); 6618 6619 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS); 6620 if (ret != DCMD_SUCCESS) { 6621 dev_err(&instance->pdev->dev, "Failed from %s %d\n", 6622 __func__, __LINE__); 6623 goto dcmd_failed; 6624 } 6625 6626 /* 6627 * Copy the data back into the caller's buffer 6628 */ 6629 eli->newest_seq_num = el_info->newest_seq_num; 6630 eli->oldest_seq_num = el_info->oldest_seq_num; 6631 eli->clear_seq_num = el_info->clear_seq_num; 6632 eli->shutdown_seq_num = el_info->shutdown_seq_num; 6633 eli->boot_seq_num = el_info->boot_seq_num; 6634 6635 dcmd_failed: 6636 dma_free_coherent(&instance->pdev->dev, 6637 sizeof(struct megasas_evt_log_info), 6638 el_info, el_info_h); 6639 6640 megasas_return_cmd(instance, cmd); 6641 6642 return ret; 6643 } 6644 6645 /** 6646 * megasas_register_aen - Registers for asynchronous event notification 6647 * @instance: Adapter soft state 6648 * @seq_num: The starting sequence number 6649 *
@class_locale_word: Class of the event 6650 * 6651 * This function subscribes for AEN for events beyond the @seq_num. It requests 6652 * to be notified if and only if the event is of type @class_locale 6653 */ 6654 static int 6655 megasas_register_aen(struct megasas_instance *instance, u32 seq_num, 6656 u32 class_locale_word) 6657 { 6658 int ret_val; 6659 struct megasas_cmd *cmd; 6660 struct megasas_dcmd_frame *dcmd; 6661 union megasas_evt_class_locale curr_aen; 6662 union megasas_evt_class_locale prev_aen; 6663 6664 /* 6665 * If there is an AEN pending already (aen_cmd), check if the 6666 * class_locale of that pending AEN is inclusive of the new 6667 * AEN request we currently have. If it is, then we don't have 6668 * to do anything. In other words, whichever events the current 6669 * AEN request is subscribing to have already been subscribed 6670 * to. 6671 * 6672 * If the old_cmd is _not_ inclusive, then we have to abort 6673 * that command, form a class_locale that is a superset of both 6674 * the old and current ones and re-issue it to the FW 6675 */ 6676 6677 curr_aen.word = class_locale_word; 6678 6679 if (instance->aen_cmd) { 6680 6681 prev_aen.word = 6682 le32_to_cpu(instance->aen_cmd->frame->dcmd.mbox.w[1]); 6683 6684 if ((curr_aen.members.class < MFI_EVT_CLASS_DEBUG) || 6685 (curr_aen.members.class > MFI_EVT_CLASS_DEAD)) { 6686 dev_info(&instance->pdev->dev, 6687 "%s %d out of range class %d sent by application\n", 6688 __func__, __LINE__, curr_aen.members.class); 6689 return 0; 6690 } 6691 6692 /* 6693 * A class whose enum value is smaller is inclusive of all 6694 * higher values. If a PROGRESS (= -1) was previously 6695 * registered, then new registration requests for higher 6696 * classes need not be sent to FW. They are automatically 6697 * included. 6698 * 6699 * Locale numbers don't have such a hierarchy. They are bitmap 6700 * values 6701 */ 6702 if ((prev_aen.members.class <= curr_aen.members.class) && 6703 !((prev_aen.members.locale & curr_aen.members.locale) ^ 6704 curr_aen.members.locale)) { 6705 /* 6706 * Previously issued event registration includes 6707 * current request. Nothing to do.
6708 */ 6709 return 0; 6710 } else { 6711 curr_aen.members.locale |= prev_aen.members.locale; 6712 6713 if (prev_aen.members.class < curr_aen.members.class) 6714 curr_aen.members.class = prev_aen.members.class; 6715 6716 instance->aen_cmd->abort_aen = 1; 6717 ret_val = megasas_issue_blocked_abort_cmd(instance, 6718 instance-> 6719 aen_cmd, 30); 6720 6721 if (ret_val) { 6722 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to abort " 6723 "previous AEN command\n"); 6724 return ret_val; 6725 } 6726 } 6727 } 6728 6729 cmd = megasas_get_cmd(instance); 6730 6731 if (!cmd) 6732 return -ENOMEM; 6733 6734 dcmd = &cmd->frame->dcmd; 6735 6736 memset(instance->evt_detail, 0, sizeof(struct megasas_evt_detail)); 6737 6738 /* 6739 * Prepare DCMD for aen registration 6740 */ 6741 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 6742 6743 dcmd->cmd = MFI_CMD_DCMD; 6744 dcmd->cmd_status = 0x0; 6745 dcmd->sge_count = 1; 6746 dcmd->flags = MFI_FRAME_DIR_READ; 6747 dcmd->timeout = 0; 6748 dcmd->pad_0 = 0; 6749 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_detail)); 6750 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_WAIT); 6751 dcmd->mbox.w[0] = cpu_to_le32(seq_num); 6752 instance->last_seq_num = seq_num; 6753 dcmd->mbox.w[1] = cpu_to_le32(curr_aen.word); 6754 6755 megasas_set_dma_settings(instance, dcmd, instance->evt_detail_h, 6756 sizeof(struct megasas_evt_detail)); 6757 6758 if (instance->aen_cmd != NULL) { 6759 megasas_return_cmd(instance, cmd); 6760 return 0; 6761 } 6762 6763 /* 6764 * Store reference to the cmd used to register for AEN. When an 6765 * application wants us to register for AEN, we have to abort this 6766 * cmd and re-register with a new EVENT LOCALE supplied by that app 6767 */ 6768 instance->aen_cmd = cmd; 6769 6770 /* 6771 * Issue the aen registration frame 6772 */ 6773 instance->instancet->issue_dcmd(instance, cmd); 6774 6775 return 0; 6776 } 6777 6778 /* megasas_get_target_prop - Send DCMD with below details to firmware. 6779 * 6780 * This DCMD will fetch few properties of LD/system PD defined 6781 * in MR_TARGET_DEV_PROPERTIES. eg. Queue Depth, MDTS value. 6782 * 6783 * DCMD send by drivers whenever new target is added to the OS. 6784 * 6785 * dcmd.opcode - MR_DCMD_DEV_GET_TARGET_PROP 6786 * dcmd.mbox.b[0] - DCMD is to be fired for LD or system PD. 6787 * 0 = system PD, 1 = LD. 6788 * dcmd.mbox.s[1] - TargetID for LD/system PD. 6789 * dcmd.sge IN - Pointer to return MR_TARGET_DEV_PROPERTIES. 6790 * 6791 * @instance: Adapter soft state 6792 * @sdev: OS provided scsi device 6793 * 6794 * Returns 0 on success non-zero on failure. 
6795 */ 6796 int 6797 megasas_get_target_prop(struct megasas_instance *instance, 6798 struct scsi_device *sdev) 6799 { 6800 int ret; 6801 struct megasas_cmd *cmd; 6802 struct megasas_dcmd_frame *dcmd; 6803 u16 targetId = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) + 6804 sdev->id; 6805 6806 cmd = megasas_get_cmd(instance); 6807 6808 if (!cmd) { 6809 dev_err(&instance->pdev->dev, 6810 "Failed to get cmd %s\n", __func__); 6811 return -ENOMEM; 6812 } 6813 6814 dcmd = &cmd->frame->dcmd; 6815 6816 memset(instance->tgt_prop, 0, sizeof(*instance->tgt_prop)); 6817 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 6818 dcmd->mbox.b[0] = MEGASAS_IS_LOGICAL(sdev); 6819 6820 dcmd->mbox.s[1] = cpu_to_le16(targetId); 6821 dcmd->cmd = MFI_CMD_DCMD; 6822 dcmd->cmd_status = 0xFF; 6823 dcmd->sge_count = 1; 6824 dcmd->flags = MFI_FRAME_DIR_READ; 6825 dcmd->timeout = 0; 6826 dcmd->pad_0 = 0; 6827 dcmd->data_xfer_len = 6828 cpu_to_le32(sizeof(struct MR_TARGET_PROPERTIES)); 6829 dcmd->opcode = cpu_to_le32(MR_DCMD_DRV_GET_TARGET_PROP); 6830 6831 megasas_set_dma_settings(instance, dcmd, instance->tgt_prop_h, 6832 sizeof(struct MR_TARGET_PROPERTIES)); 6833 6834 if ((instance->adapter_type != MFI_SERIES) && 6835 !instance->mask_interrupts) 6836 ret = megasas_issue_blocked_cmd(instance, 6837 cmd, MFI_IO_TIMEOUT_SECS); 6838 else 6839 ret = megasas_issue_polled(instance, cmd); 6840 6841 switch (ret) { 6842 case DCMD_TIMEOUT: 6843 switch (dcmd_timeout_ocr_possible(instance)) { 6844 case INITIATE_OCR: 6845 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 6846 mutex_unlock(&instance->reset_mutex); 6847 megasas_reset_fusion(instance->host, 6848 MFI_IO_TIMEOUT_OCR); 6849 mutex_lock(&instance->reset_mutex); 6850 break; 6851 case KILL_ADAPTER: 6852 megaraid_sas_kill_hba(instance); 6853 break; 6854 case IGNORE_TIMEOUT: 6855 dev_info(&instance->pdev->dev, 6856 "Ignore DCMD timeout: %s %d\n", 6857 __func__, __LINE__); 6858 break; 6859 } 6860 break; 6861 6862 default: 6863 megasas_return_cmd(instance, cmd); 6864 } 6865 if (ret != DCMD_SUCCESS) 6866 dev_err(&instance->pdev->dev, 6867 "return from %s %d return value %d\n", 6868 __func__, __LINE__, ret); 6869 6870 return ret; 6871 } 6872 6873 /** 6874 * megasas_start_aen - Subscribes to AEN during driver load time 6875 * @instance: Adapter soft state 6876 */ 6877 static int megasas_start_aen(struct megasas_instance *instance) 6878 { 6879 struct megasas_evt_log_info eli; 6880 union megasas_evt_class_locale class_locale; 6881 6882 /* 6883 * Get the latest sequence number from FW 6884 */ 6885 memset(&eli, 0, sizeof(eli)); 6886 6887 if (megasas_get_seq_num(instance, &eli)) 6888 return -1; 6889 6890 /* 6891 * Register AEN with FW for latest sequence number plus 1 6892 */ 6893 class_locale.members.reserved = 0; 6894 class_locale.members.locale = MR_EVT_LOCALE_ALL; 6895 class_locale.members.class = MR_EVT_CLASS_DEBUG; 6896 6897 return megasas_register_aen(instance, 6898 le32_to_cpu(eli.newest_seq_num) + 1, 6899 class_locale.word); 6900 } 6901 6902 /** 6903 * megasas_io_attach - Attaches this driver to SCSI mid-layer 6904 * @instance: Adapter soft state 6905 */ 6906 static int megasas_io_attach(struct megasas_instance *instance) 6907 { 6908 struct Scsi_Host *host = instance->host; 6909 6910 /* 6911 * Export parameters required by SCSI mid-layer 6912 */ 6913 host->unique_id = instance->unique_id; 6914 host->can_queue = instance->max_scsi_cmds; 6915 host->this_id = instance->init_id; 6916 host->sg_tablesize = instance->max_num_sge; 6917 6918 if (instance->fw_support_ieee) 6919 instance->max_sectors_per_req 
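/*
 * Illustrative sketch (not compiled into the driver): how
 * megasas_get_target_prop() above derives the firmware target id from the
 * SCSI address. Each device class (system PD or LD) spans two SCSI
 * channels, so the (channel % 2) term selects the lower or upper half of
 * that class's target id range. The helper is an example only.
 */
#if 0
static u16 example_target_id(struct scsi_device *sdev)
{
	return ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id;
}
#endif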
= MEGASAS_MAX_SECTORS_IEEE; 6920 6921 /* 6922 * Check if the module parameter value for max_sectors can be used 6923 */ 6924 if (max_sectors && max_sectors < instance->max_sectors_per_req) 6925 instance->max_sectors_per_req = max_sectors; 6926 else { 6927 if (max_sectors) { 6928 if (((instance->pdev->device == 6929 PCI_DEVICE_ID_LSI_SAS1078GEN2) || 6930 (instance->pdev->device == 6931 PCI_DEVICE_ID_LSI_SAS0079GEN2)) && 6932 (max_sectors <= MEGASAS_MAX_SECTORS)) { 6933 instance->max_sectors_per_req = max_sectors; 6934 } else { 6935 dev_info(&instance->pdev->dev, "max_sectors should be > 0 " 6936 "and <= %d (or < 1MB for GEN2 controller)\n", 6937 instance->max_sectors_per_req); 6938 } 6939 } 6940 } 6941 6942 host->max_sectors = instance->max_sectors_per_req; 6943 host->cmd_per_lun = MEGASAS_DEFAULT_CMD_PER_LUN; 6944 host->max_channel = MEGASAS_MAX_CHANNELS - 1; 6945 host->max_id = MEGASAS_MAX_DEV_PER_CHANNEL; 6946 host->max_lun = MEGASAS_MAX_LUN; 6947 host->max_cmd_len = 16; 6948 6949 /* Use shared host tagset only for fusion adapters 6950 * if there are managed interrupts (smp affinity enabled case). 6951 * Single msix_vectors in kdump, so shared host tag is also disabled. 6952 */ 6953 6954 host->host_tagset = 0; 6955 host->nr_hw_queues = 1; 6956 6957 if ((instance->adapter_type != MFI_SERIES) && 6958 (instance->msix_vectors > instance->low_latency_index_start) && 6959 host_tagset_enable && 6960 instance->smp_affinity_enable) { 6961 host->host_tagset = 1; 6962 host->nr_hw_queues = instance->msix_vectors - 6963 instance->low_latency_index_start + instance->iopoll_q_count; 6964 if (instance->iopoll_q_count) 6965 host->nr_maps = 3; 6966 } else { 6967 instance->iopoll_q_count = 0; 6968 } 6969 6970 dev_info(&instance->pdev->dev, 6971 "Max firmware commands: %d shared with default " 6972 "hw_queues = %d poll_queues %d\n", instance->max_fw_cmds, 6973 host->nr_hw_queues - instance->iopoll_q_count, 6974 instance->iopoll_q_count); 6975 /* 6976 * Notify the mid-layer about the new controller 6977 */ 6978 if (scsi_add_host(host, &instance->pdev->dev)) { 6979 dev_err(&instance->pdev->dev, 6980 "Failed to add host from %s %d\n", 6981 __func__, __LINE__); 6982 return -ENODEV; 6983 } 6984 6985 return 0; 6986 } 6987 6988 /** 6989 * megasas_set_dma_mask - Set DMA mask for supported controllers 6990 * 6991 * @instance: Adapter soft state 6992 * Description: 6993 * 6994 * For Ventura, driver/FW will operate in 63bit DMA addresses. 6995 * 6996 * For invader- 6997 * By default, driver/FW will operate in 32bit DMA addresses 6998 * for consistent DMA mapping but if 32 bit consistent 6999 * DMA mask fails, driver will try with 63 bit consistent 7000 * mask, provided the FW is truly 63-bit DMA capable 7001 * 7002 * For older controllers (Thunderbolt and MFI based adapters)- 7003 * driver/FW will operate in 32 bit consistent DMA addresses. 7004 */ 7005 static int 7006 megasas_set_dma_mask(struct megasas_instance *instance) 7007 { 7008 u64 consistent_mask; 7009 struct pci_dev *pdev; 7010 u32 scratch_pad_1; 7011 7012 pdev = instance->pdev; 7013 consistent_mask = (instance->adapter_type >= VENTURA_SERIES) ?
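/*
 * Illustrative sketch (not compiled into the driver): a compressed view of
 * the coherent mask selection described in the comment above. Ventura and
 * newer controllers start with a 63-bit coherent mask; older fusion parts
 * start at 32 bits and are upgraded to 63 bits only when the 32-bit
 * coherent mask cannot be set and scratch pad 1 advertises 64-bit DMA
 * capability. This is a simplification of megasas_set_dma_mask(), not a
 * drop-in replacement.
 */
#if 0
static u64 example_pick_coherent_mask(int adapter_type, bool mask32_failed,
				      u32 scratch_pad_1)
{
	if (adapter_type >= VENTURA_SERIES)
		return DMA_BIT_MASK(63);

	if (mask32_failed && (scratch_pad_1 & MR_CAN_HANDLE_64_BIT_DMA_OFFSET))
		return DMA_BIT_MASK(63);

	return DMA_BIT_MASK(32);
}
#endif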
7014 DMA_BIT_MASK(63) : DMA_BIT_MASK(32); 7015 7016 if (IS_DMA64) { 7017 if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(63)) && 7018 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) 7019 goto fail_set_dma_mask; 7020 7021 if ((*pdev->dev.dma_mask == DMA_BIT_MASK(63)) && 7022 (dma_set_coherent_mask(&pdev->dev, consistent_mask) && 7023 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))) { 7024 /* 7025 * If 32 bit DMA mask fails, then try for 64 bit mask 7026 * for FW capable of handling 64 bit DMA. 7027 */ 7028 scratch_pad_1 = megasas_readl 7029 (instance, &instance->reg_set->outbound_scratch_pad_1); 7030 7031 if (!(scratch_pad_1 & MR_CAN_HANDLE_64_BIT_DMA_OFFSET)) 7032 goto fail_set_dma_mask; 7033 else if (dma_set_mask_and_coherent(&pdev->dev, 7034 DMA_BIT_MASK(63))) 7035 goto fail_set_dma_mask; 7036 } 7037 } else if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) 7038 goto fail_set_dma_mask; 7039 7040 if (pdev->dev.coherent_dma_mask == DMA_BIT_MASK(32)) 7041 instance->consistent_mask_64bit = false; 7042 else 7043 instance->consistent_mask_64bit = true; 7044 7045 dev_info(&pdev->dev, "%s bit DMA mask and %s bit consistent mask\n", 7046 ((*pdev->dev.dma_mask == DMA_BIT_MASK(63)) ? "63" : "32"), 7047 (instance->consistent_mask_64bit ? "63" : "32")); 7048 7049 return 0; 7050 7051 fail_set_dma_mask: 7052 dev_err(&pdev->dev, "Failed to set DMA mask\n"); 7053 return -1; 7054 7055 } 7056 7057 /* 7058 * megasas_set_adapter_type - Set adapter type. 7059 * Supported controllers can be divided in 7060 * different categories- 7061 * enum MR_ADAPTER_TYPE { 7062 * MFI_SERIES = 1, 7063 * THUNDERBOLT_SERIES = 2, 7064 * INVADER_SERIES = 3, 7065 * VENTURA_SERIES = 4, 7066 * AERO_SERIES = 5, 7067 * }; 7068 * @instance: Adapter soft state 7069 * return: void 7070 */ 7071 static inline void megasas_set_adapter_type(struct megasas_instance *instance) 7072 { 7073 if ((instance->pdev->vendor == PCI_VENDOR_ID_DELL) && 7074 (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5)) { 7075 instance->adapter_type = MFI_SERIES; 7076 } else { 7077 switch (instance->pdev->device) { 7078 case PCI_DEVICE_ID_LSI_AERO_10E1: 7079 case PCI_DEVICE_ID_LSI_AERO_10E2: 7080 case PCI_DEVICE_ID_LSI_AERO_10E5: 7081 case PCI_DEVICE_ID_LSI_AERO_10E6: 7082 instance->adapter_type = AERO_SERIES; 7083 break; 7084 case PCI_DEVICE_ID_LSI_VENTURA: 7085 case PCI_DEVICE_ID_LSI_CRUSADER: 7086 case PCI_DEVICE_ID_LSI_HARPOON: 7087 case PCI_DEVICE_ID_LSI_TOMCAT: 7088 case PCI_DEVICE_ID_LSI_VENTURA_4PORT: 7089 case PCI_DEVICE_ID_LSI_CRUSADER_4PORT: 7090 instance->adapter_type = VENTURA_SERIES; 7091 break; 7092 case PCI_DEVICE_ID_LSI_FUSION: 7093 case PCI_DEVICE_ID_LSI_PLASMA: 7094 instance->adapter_type = THUNDERBOLT_SERIES; 7095 break; 7096 case PCI_DEVICE_ID_LSI_INVADER: 7097 case PCI_DEVICE_ID_LSI_INTRUDER: 7098 case PCI_DEVICE_ID_LSI_INTRUDER_24: 7099 case PCI_DEVICE_ID_LSI_CUTLASS_52: 7100 case PCI_DEVICE_ID_LSI_CUTLASS_53: 7101 case PCI_DEVICE_ID_LSI_FURY: 7102 instance->adapter_type = INVADER_SERIES; 7103 break; 7104 default: /* For all other supported controllers */ 7105 instance->adapter_type = MFI_SERIES; 7106 break; 7107 } 7108 } 7109 } 7110 7111 static inline int megasas_alloc_mfi_ctrl_mem(struct megasas_instance *instance) 7112 { 7113 instance->producer = dma_alloc_coherent(&instance->pdev->dev, 7114 sizeof(u32), &instance->producer_h, GFP_KERNEL); 7115 instance->consumer = dma_alloc_coherent(&instance->pdev->dev, 7116 sizeof(u32), &instance->consumer_h, GFP_KERNEL); 7117 7118 if (!instance->producer || !instance->consumer) 
{ 7119 dev_err(&instance->pdev->dev, 7120 "Failed to allocate memory for producer, consumer\n"); 7121 return -1; 7122 } 7123 7124 *instance->producer = 0; 7125 *instance->consumer = 0; 7126 return 0; 7127 } 7128 7129 /** 7130 * megasas_alloc_ctrl_mem - Allocate per controller memory for core data 7131 * structures which are not common across MFI 7132 * adapters and fusion adapters. 7133 * For MFI based adapters, allocate producer and 7134 * consumer buffers. For fusion adapters, allocate 7135 * memory for fusion context. 7136 * @instance: Adapter soft state 7137 * return: 0 for SUCCESS 7138 */ 7139 static int megasas_alloc_ctrl_mem(struct megasas_instance *instance) 7140 { 7141 instance->reply_map = kcalloc(nr_cpu_ids, sizeof(unsigned int), 7142 GFP_KERNEL); 7143 if (!instance->reply_map) 7144 return -ENOMEM; 7145 7146 switch (instance->adapter_type) { 7147 case MFI_SERIES: 7148 if (megasas_alloc_mfi_ctrl_mem(instance)) 7149 return -ENOMEM; 7150 break; 7151 case AERO_SERIES: 7152 case VENTURA_SERIES: 7153 case THUNDERBOLT_SERIES: 7154 case INVADER_SERIES: 7155 if (megasas_alloc_fusion_context(instance)) 7156 return -ENOMEM; 7157 break; 7158 } 7159 7160 return 0; 7161 } 7162 7163 /* 7164 * megasas_free_ctrl_mem - Free fusion context for fusion adapters and 7165 * producer, consumer buffers for MFI adapters 7166 * 7167 * @instance - Adapter soft instance 7168 * 7169 */ 7170 static inline void megasas_free_ctrl_mem(struct megasas_instance *instance) 7171 { 7172 kfree(instance->reply_map); 7173 if (instance->adapter_type == MFI_SERIES) { 7174 if (instance->producer) 7175 dma_free_coherent(&instance->pdev->dev, sizeof(u32), 7176 instance->producer, 7177 instance->producer_h); 7178 if (instance->consumer) 7179 dma_free_coherent(&instance->pdev->dev, sizeof(u32), 7180 instance->consumer, 7181 instance->consumer_h); 7182 } else { 7183 megasas_free_fusion_context(instance); 7184 } 7185 } 7186 7187 /** 7188 * megasas_alloc_ctrl_dma_buffers - Allocate consistent DMA buffers during 7189 * driver load time 7190 * 7191 * @instance: Adapter soft instance 7192 * 7193 * @return: O for SUCCESS 7194 */ 7195 static inline 7196 int megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance) 7197 { 7198 struct pci_dev *pdev = instance->pdev; 7199 struct fusion_context *fusion = instance->ctrl_context; 7200 7201 instance->evt_detail = dma_alloc_coherent(&pdev->dev, 7202 sizeof(struct megasas_evt_detail), 7203 &instance->evt_detail_h, GFP_KERNEL); 7204 7205 if (!instance->evt_detail) { 7206 dev_err(&instance->pdev->dev, 7207 "Failed to allocate event detail buffer\n"); 7208 return -ENOMEM; 7209 } 7210 7211 if (fusion) { 7212 fusion->ioc_init_request = 7213 dma_alloc_coherent(&pdev->dev, 7214 sizeof(struct MPI2_IOC_INIT_REQUEST), 7215 &fusion->ioc_init_request_phys, 7216 GFP_KERNEL); 7217 7218 if (!fusion->ioc_init_request) { 7219 dev_err(&pdev->dev, 7220 "Failed to allocate ioc init request\n"); 7221 return -ENOMEM; 7222 } 7223 7224 instance->snapdump_prop = dma_alloc_coherent(&pdev->dev, 7225 sizeof(struct MR_SNAPDUMP_PROPERTIES), 7226 &instance->snapdump_prop_h, GFP_KERNEL); 7227 7228 if (!instance->snapdump_prop) 7229 dev_err(&pdev->dev, 7230 "Failed to allocate snapdump properties buffer\n"); 7231 7232 instance->host_device_list_buf = dma_alloc_coherent(&pdev->dev, 7233 HOST_DEVICE_LIST_SZ, 7234 &instance->host_device_list_buf_h, 7235 GFP_KERNEL); 7236 7237 if (!instance->host_device_list_buf) { 7238 dev_err(&pdev->dev, 7239 "Failed to allocate targetid list buffer\n"); 7240 return -ENOMEM; 7241 } 7242 
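/*
 * Illustrative sketch (not compiled into the driver): every buffer in this
 * function follows the same pattern - dma_alloc_coherent() plus a NULL
 * check. Error paths can simply return -ENOMEM because the caller unwinds
 * through megasas_free_ctrl_dma_buffers(), which checks each pointer before
 * freeing it. The buffer name and size below are placeholders.
 */
#if 0
	example_buf = dma_alloc_coherent(&pdev->dev, EXAMPLE_BUF_SZ,
					 &example_buf_h, GFP_KERNEL);
	if (!example_buf) {
		dev_err(&pdev->dev, "Failed to allocate example buffer\n");
		return -ENOMEM;	/* caller frees earlier allocations */
	}
#endif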
7243 } 7244 7245 instance->pd_list_buf = 7246 dma_alloc_coherent(&pdev->dev, 7247 MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST), 7248 &instance->pd_list_buf_h, GFP_KERNEL); 7249 7250 if (!instance->pd_list_buf) { 7251 dev_err(&pdev->dev, "Failed to allocate PD list buffer\n"); 7252 return -ENOMEM; 7253 } 7254 7255 instance->ctrl_info_buf = 7256 dma_alloc_coherent(&pdev->dev, 7257 sizeof(struct megasas_ctrl_info), 7258 &instance->ctrl_info_buf_h, GFP_KERNEL); 7259 7260 if (!instance->ctrl_info_buf) { 7261 dev_err(&pdev->dev, 7262 "Failed to allocate controller info buffer\n"); 7263 return -ENOMEM; 7264 } 7265 7266 instance->ld_list_buf = 7267 dma_alloc_coherent(&pdev->dev, 7268 sizeof(struct MR_LD_LIST), 7269 &instance->ld_list_buf_h, GFP_KERNEL); 7270 7271 if (!instance->ld_list_buf) { 7272 dev_err(&pdev->dev, "Failed to allocate LD list buffer\n"); 7273 return -ENOMEM; 7274 } 7275 7276 instance->ld_targetid_list_buf = 7277 dma_alloc_coherent(&pdev->dev, 7278 sizeof(struct MR_LD_TARGETID_LIST), 7279 &instance->ld_targetid_list_buf_h, GFP_KERNEL); 7280 7281 if (!instance->ld_targetid_list_buf) { 7282 dev_err(&pdev->dev, 7283 "Failed to allocate LD targetid list buffer\n"); 7284 return -ENOMEM; 7285 } 7286 7287 if (!reset_devices) { 7288 instance->system_info_buf = 7289 dma_alloc_coherent(&pdev->dev, 7290 sizeof(struct MR_DRV_SYSTEM_INFO), 7291 &instance->system_info_h, GFP_KERNEL); 7292 instance->pd_info = 7293 dma_alloc_coherent(&pdev->dev, 7294 sizeof(struct MR_PD_INFO), 7295 &instance->pd_info_h, GFP_KERNEL); 7296 instance->tgt_prop = 7297 dma_alloc_coherent(&pdev->dev, 7298 sizeof(struct MR_TARGET_PROPERTIES), 7299 &instance->tgt_prop_h, GFP_KERNEL); 7300 instance->crash_dump_buf = 7301 dma_alloc_coherent(&pdev->dev, CRASH_DMA_BUF_SIZE, 7302 &instance->crash_dump_h, GFP_KERNEL); 7303 7304 if (!instance->system_info_buf) 7305 dev_err(&instance->pdev->dev, 7306 "Failed to allocate system info buffer\n"); 7307 7308 if (!instance->pd_info) 7309 dev_err(&instance->pdev->dev, 7310 "Failed to allocate pd_info buffer\n"); 7311 7312 if (!instance->tgt_prop) 7313 dev_err(&instance->pdev->dev, 7314 "Failed to allocate tgt_prop buffer\n"); 7315 7316 if (!instance->crash_dump_buf) 7317 dev_err(&instance->pdev->dev, 7318 "Failed to allocate crash dump buffer\n"); 7319 } 7320 7321 return 0; 7322 } 7323 7324 /* 7325 * megasas_free_ctrl_dma_buffers - Free consistent DMA buffers allocated 7326 * during driver load time 7327 * 7328 * @instance- Adapter soft instance 7329 * 7330 */ 7331 static inline 7332 void megasas_free_ctrl_dma_buffers(struct megasas_instance *instance) 7333 { 7334 struct pci_dev *pdev = instance->pdev; 7335 struct fusion_context *fusion = instance->ctrl_context; 7336 7337 if (instance->evt_detail) 7338 dma_free_coherent(&pdev->dev, sizeof(struct megasas_evt_detail), 7339 instance->evt_detail, 7340 instance->evt_detail_h); 7341 7342 if (fusion && fusion->ioc_init_request) 7343 dma_free_coherent(&pdev->dev, 7344 sizeof(struct MPI2_IOC_INIT_REQUEST), 7345 fusion->ioc_init_request, 7346 fusion->ioc_init_request_phys); 7347 7348 if (instance->pd_list_buf) 7349 dma_free_coherent(&pdev->dev, 7350 MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST), 7351 instance->pd_list_buf, 7352 instance->pd_list_buf_h); 7353 7354 if (instance->ld_list_buf) 7355 dma_free_coherent(&pdev->dev, sizeof(struct MR_LD_LIST), 7356 instance->ld_list_buf, 7357 instance->ld_list_buf_h); 7358 7359 if (instance->ld_targetid_list_buf) 7360 dma_free_coherent(&pdev->dev, sizeof(struct MR_LD_TARGETID_LIST), 7361 
instance->ld_targetid_list_buf, 7362 instance->ld_targetid_list_buf_h); 7363 7364 if (instance->ctrl_info_buf) 7365 dma_free_coherent(&pdev->dev, sizeof(struct megasas_ctrl_info), 7366 instance->ctrl_info_buf, 7367 instance->ctrl_info_buf_h); 7368 7369 if (instance->system_info_buf) 7370 dma_free_coherent(&pdev->dev, sizeof(struct MR_DRV_SYSTEM_INFO), 7371 instance->system_info_buf, 7372 instance->system_info_h); 7373 7374 if (instance->pd_info) 7375 dma_free_coherent(&pdev->dev, sizeof(struct MR_PD_INFO), 7376 instance->pd_info, instance->pd_info_h); 7377 7378 if (instance->tgt_prop) 7379 dma_free_coherent(&pdev->dev, sizeof(struct MR_TARGET_PROPERTIES), 7380 instance->tgt_prop, instance->tgt_prop_h); 7381 7382 if (instance->crash_dump_buf) 7383 dma_free_coherent(&pdev->dev, CRASH_DMA_BUF_SIZE, 7384 instance->crash_dump_buf, 7385 instance->crash_dump_h); 7386 7387 if (instance->snapdump_prop) 7388 dma_free_coherent(&pdev->dev, 7389 sizeof(struct MR_SNAPDUMP_PROPERTIES), 7390 instance->snapdump_prop, 7391 instance->snapdump_prop_h); 7392 7393 if (instance->host_device_list_buf) 7394 dma_free_coherent(&pdev->dev, 7395 HOST_DEVICE_LIST_SZ, 7396 instance->host_device_list_buf, 7397 instance->host_device_list_buf_h); 7398 7399 } 7400 7401 /* 7402 * megasas_init_ctrl_params - Initialize controller's instance 7403 * parameters before FW init 7404 * @instance - Adapter soft instance 7405 * @return - void 7406 */ 7407 static inline void megasas_init_ctrl_params(struct megasas_instance *instance) 7408 { 7409 instance->fw_crash_state = UNAVAILABLE; 7410 7411 megasas_poll_wait_aen = 0; 7412 instance->issuepend_done = 1; 7413 atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL); 7414 7415 /* 7416 * Initialize locks and queues 7417 */ 7418 INIT_LIST_HEAD(&instance->cmd_pool); 7419 INIT_LIST_HEAD(&instance->internal_reset_pending_q); 7420 7421 atomic_set(&instance->fw_outstanding, 0); 7422 atomic64_set(&instance->total_io_count, 0); 7423 7424 init_waitqueue_head(&instance->int_cmd_wait_q); 7425 init_waitqueue_head(&instance->abort_cmd_wait_q); 7426 7427 mutex_init(&instance->crashdump_lock); 7428 spin_lock_init(&instance->mfi_pool_lock); 7429 spin_lock_init(&instance->hba_lock); 7430 spin_lock_init(&instance->stream_lock); 7431 spin_lock_init(&instance->completion_lock); 7432 7433 mutex_init(&instance->reset_mutex); 7434 7435 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) || 7436 (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) 7437 instance->flag_ieee = 1; 7438 7439 instance->flag = 0; 7440 instance->unload = 1; 7441 instance->last_time = 0; 7442 instance->disableOnlineCtrlReset = 1; 7443 instance->UnevenSpanSupport = 0; 7444 instance->smp_affinity_enable = smp_affinity_enable ? 
true : false; 7445 instance->msix_load_balance = false; 7446 7447 if (instance->adapter_type != MFI_SERIES) 7448 INIT_WORK(&instance->work_init, megasas_fusion_ocr_wq); 7449 else 7450 INIT_WORK(&instance->work_init, process_fw_state_change_wq); 7451 } 7452 7453 /** 7454 * megasas_probe_one - PCI hotplug entry point 7455 * @pdev: PCI device structure 7456 * @id: PCI ids of supported hotplugged adapter 7457 */ 7458 static int megasas_probe_one(struct pci_dev *pdev, 7459 const struct pci_device_id *id) 7460 { 7461 int rval, pos; 7462 struct Scsi_Host *host; 7463 struct megasas_instance *instance; 7464 u16 control = 0; 7465 7466 switch (pdev->device) { 7467 case PCI_DEVICE_ID_LSI_AERO_10E0: 7468 case PCI_DEVICE_ID_LSI_AERO_10E3: 7469 case PCI_DEVICE_ID_LSI_AERO_10E4: 7470 case PCI_DEVICE_ID_LSI_AERO_10E7: 7471 dev_err(&pdev->dev, "Adapter is in non secure mode\n"); 7472 return 1; 7473 case PCI_DEVICE_ID_LSI_AERO_10E1: 7474 case PCI_DEVICE_ID_LSI_AERO_10E5: 7475 dev_info(&pdev->dev, "Adapter is in configurable secure mode\n"); 7476 break; 7477 } 7478 7479 /* Reset MSI-X in the kdump kernel */ 7480 if (reset_devices) { 7481 pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX); 7482 if (pos) { 7483 pci_read_config_word(pdev, pos + PCI_MSIX_FLAGS, 7484 &control); 7485 if (control & PCI_MSIX_FLAGS_ENABLE) { 7486 dev_info(&pdev->dev, "resetting MSI-X\n"); 7487 pci_write_config_word(pdev, 7488 pos + PCI_MSIX_FLAGS, 7489 control & 7490 ~PCI_MSIX_FLAGS_ENABLE); 7491 } 7492 } 7493 } 7494 7495 /* 7496 * PCI prepping: enable device set bus mastering and dma mask 7497 */ 7498 rval = pci_enable_device_mem(pdev); 7499 7500 if (rval) { 7501 return rval; 7502 } 7503 7504 pci_set_master(pdev); 7505 7506 host = scsi_host_alloc(&megasas_template, 7507 sizeof(struct megasas_instance)); 7508 7509 if (!host) { 7510 dev_printk(KERN_DEBUG, &pdev->dev, "scsi_host_alloc failed\n"); 7511 goto fail_alloc_instance; 7512 } 7513 7514 instance = (struct megasas_instance *)host->hostdata; 7515 memset(instance, 0, sizeof(*instance)); 7516 atomic_set(&instance->fw_reset_no_pci_access, 0); 7517 7518 /* 7519 * Initialize PCI related and misc parameters 7520 */ 7521 instance->pdev = pdev; 7522 instance->host = host; 7523 instance->unique_id = pci_dev_id(pdev); 7524 instance->init_id = MEGASAS_DEFAULT_INIT_ID; 7525 7526 megasas_set_adapter_type(instance); 7527 7528 /* 7529 * Initialize MFI Firmware 7530 */ 7531 if (megasas_init_fw(instance)) 7532 goto fail_init_mfi; 7533 7534 if (instance->requestorId) { 7535 if (instance->PlasmaFW111) { 7536 instance->vf_affiliation_111 = 7537 dma_alloc_coherent(&pdev->dev, 7538 sizeof(struct MR_LD_VF_AFFILIATION_111), 7539 &instance->vf_affiliation_111_h, 7540 GFP_KERNEL); 7541 if (!instance->vf_affiliation_111) 7542 dev_warn(&pdev->dev, "Can't allocate " 7543 "memory for VF affiliation buffer\n"); 7544 } else { 7545 instance->vf_affiliation = 7546 dma_alloc_coherent(&pdev->dev, 7547 (MAX_LOGICAL_DRIVES + 1) * 7548 sizeof(struct MR_LD_VF_AFFILIATION), 7549 &instance->vf_affiliation_h, 7550 GFP_KERNEL); 7551 if (!instance->vf_affiliation) 7552 dev_warn(&pdev->dev, "Can't allocate " 7553 "memory for VF affiliation buffer\n"); 7554 } 7555 } 7556 7557 /* 7558 * Store instance in PCI softstate 7559 */ 7560 pci_set_drvdata(pdev, instance); 7561 7562 /* 7563 * Add this controller to megasas_mgmt_info structure so that it 7564 * can be exported to management applications 7565 */ 7566 megasas_mgmt_info.count++; 7567 megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = instance; 7568 
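	/*
	 * Note: max_index only grows here (and is rolled back in the
	 * fail_io_attach path below); megasas_detach_one() clears the slot
	 * but deliberately leaves the array sparse, so users of
	 * megasas_mgmt_info.instance[] must tolerate NULL entries.
	 */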
megasas_mgmt_info.max_index++; 7569 7570 /* 7571 * Register with SCSI mid-layer 7572 */ 7573 if (megasas_io_attach(instance)) 7574 goto fail_io_attach; 7575 7576 instance->unload = 0; 7577 /* 7578 * Trigger SCSI to scan our drives 7579 */ 7580 if (!instance->enable_fw_dev_list || 7581 (instance->host_device_list_buf->count > 0)) 7582 scsi_scan_host(host); 7583 7584 /* 7585 * Initiate AEN (Asynchronous Event Notification) 7586 */ 7587 if (megasas_start_aen(instance)) { 7588 dev_printk(KERN_DEBUG, &pdev->dev, "start aen failed\n"); 7589 goto fail_start_aen; 7590 } 7591 7592 megasas_setup_debugfs(instance); 7593 7594 /* Get current SR-IOV LD/VF affiliation */ 7595 if (instance->requestorId) 7596 megasas_get_ld_vf_affiliation(instance, 1); 7597 7598 return 0; 7599 7600 fail_start_aen: 7601 instance->unload = 1; 7602 scsi_remove_host(instance->host); 7603 fail_io_attach: 7604 megasas_mgmt_info.count--; 7605 megasas_mgmt_info.max_index--; 7606 megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = NULL; 7607 7608 if (instance->requestorId && !instance->skip_heartbeat_timer_del) 7609 del_timer_sync(&instance->sriov_heartbeat_timer); 7610 7611 instance->instancet->disable_intr(instance); 7612 megasas_destroy_irqs(instance); 7613 7614 if (instance->adapter_type != MFI_SERIES) 7615 megasas_release_fusion(instance); 7616 else 7617 megasas_release_mfi(instance); 7618 7619 if (instance->msix_vectors) 7620 pci_free_irq_vectors(instance->pdev); 7621 instance->msix_vectors = 0; 7622 7623 if (instance->fw_crash_state != UNAVAILABLE) 7624 megasas_free_host_crash_buffer(instance); 7625 7626 if (instance->adapter_type != MFI_SERIES) 7627 megasas_fusion_stop_watchdog(instance); 7628 fail_init_mfi: 7629 scsi_host_put(host); 7630 fail_alloc_instance: 7631 pci_disable_device(pdev); 7632 7633 return -ENODEV; 7634 } 7635 7636 /** 7637 * megasas_flush_cache - Requests FW to flush all its caches 7638 * @instance: Adapter soft state 7639 */ 7640 static void megasas_flush_cache(struct megasas_instance *instance) 7641 { 7642 struct megasas_cmd *cmd; 7643 struct megasas_dcmd_frame *dcmd; 7644 7645 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) 7646 return; 7647 7648 cmd = megasas_get_cmd(instance); 7649 7650 if (!cmd) 7651 return; 7652 7653 dcmd = &cmd->frame->dcmd; 7654 7655 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 7656 7657 dcmd->cmd = MFI_CMD_DCMD; 7658 dcmd->cmd_status = 0x0; 7659 dcmd->sge_count = 0; 7660 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE); 7661 dcmd->timeout = 0; 7662 dcmd->pad_0 = 0; 7663 dcmd->data_xfer_len = 0; 7664 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_CACHE_FLUSH); 7665 dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE; 7666 7667 if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS) 7668 != DCMD_SUCCESS) { 7669 dev_err(&instance->pdev->dev, 7670 "return from %s %d\n", __func__, __LINE__); 7671 return; 7672 } 7673 7674 megasas_return_cmd(instance, cmd); 7675 } 7676 7677 /** 7678 * megasas_shutdown_controller - Instructs FW to shutdown the controller 7679 * @instance: Adapter soft state 7680 * @opcode: Shutdown/Hibernate 7681 */ 7682 static void megasas_shutdown_controller(struct megasas_instance *instance, 7683 u32 opcode) 7684 { 7685 struct megasas_cmd *cmd; 7686 struct megasas_dcmd_frame *dcmd; 7687 7688 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) 7689 return; 7690 7691 cmd = megasas_get_cmd(instance); 7692 7693 if (!cmd) 7694 return; 7695 7696 if (instance->aen_cmd) 7697 megasas_issue_blocked_abort_cmd(instance, 7698 
instance->aen_cmd, MFI_IO_TIMEOUT_SECS); 7699 if (instance->map_update_cmd) 7700 megasas_issue_blocked_abort_cmd(instance, 7701 instance->map_update_cmd, MFI_IO_TIMEOUT_SECS); 7702 if (instance->jbod_seq_cmd) 7703 megasas_issue_blocked_abort_cmd(instance, 7704 instance->jbod_seq_cmd, MFI_IO_TIMEOUT_SECS); 7705 7706 dcmd = &cmd->frame->dcmd; 7707 7708 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 7709 7710 dcmd->cmd = MFI_CMD_DCMD; 7711 dcmd->cmd_status = 0x0; 7712 dcmd->sge_count = 0; 7713 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE); 7714 dcmd->timeout = 0; 7715 dcmd->pad_0 = 0; 7716 dcmd->data_xfer_len = 0; 7717 dcmd->opcode = cpu_to_le32(opcode); 7718 7719 if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS) 7720 != DCMD_SUCCESS) { 7721 dev_err(&instance->pdev->dev, 7722 "return from %s %d\n", __func__, __LINE__); 7723 return; 7724 } 7725 7726 megasas_return_cmd(instance, cmd); 7727 } 7728 7729 /** 7730 * megasas_suspend - driver suspend entry point 7731 * @dev: Device structure 7732 */ 7733 static int __maybe_unused 7734 megasas_suspend(struct device *dev) 7735 { 7736 struct megasas_instance *instance; 7737 7738 instance = dev_get_drvdata(dev); 7739 7740 if (!instance) 7741 return 0; 7742 7743 instance->unload = 1; 7744 7745 dev_info(dev, "%s is called\n", __func__); 7746 7747 /* Shutdown SR-IOV heartbeat timer */ 7748 if (instance->requestorId && !instance->skip_heartbeat_timer_del) 7749 del_timer_sync(&instance->sriov_heartbeat_timer); 7750 7751 /* Stop the FW fault detection watchdog */ 7752 if (instance->adapter_type != MFI_SERIES) 7753 megasas_fusion_stop_watchdog(instance); 7754 7755 megasas_flush_cache(instance); 7756 megasas_shutdown_controller(instance, MR_DCMD_HIBERNATE_SHUTDOWN); 7757 7758 /* cancel the delayed work if this work still in queue */ 7759 if (instance->ev != NULL) { 7760 struct megasas_aen_event *ev = instance->ev; 7761 cancel_delayed_work_sync(&ev->hotplug_work); 7762 instance->ev = NULL; 7763 } 7764 7765 tasklet_kill(&instance->isr_tasklet); 7766 7767 pci_set_drvdata(instance->pdev, instance); 7768 instance->instancet->disable_intr(instance); 7769 7770 megasas_destroy_irqs(instance); 7771 7772 if (instance->msix_vectors) 7773 pci_free_irq_vectors(instance->pdev); 7774 7775 return 0; 7776 } 7777 7778 /** 7779 * megasas_resume- driver resume entry point 7780 * @dev: Device structure 7781 */ 7782 static int __maybe_unused 7783 megasas_resume(struct device *dev) 7784 { 7785 int rval; 7786 struct Scsi_Host *host; 7787 struct megasas_instance *instance; 7788 u32 status_reg; 7789 7790 instance = dev_get_drvdata(dev); 7791 7792 if (!instance) 7793 return 0; 7794 7795 host = instance->host; 7796 7797 dev_info(dev, "%s is called\n", __func__); 7798 7799 /* 7800 * We expect the FW state to be READY 7801 */ 7802 7803 if (megasas_transition_to_ready(instance, 0)) { 7804 dev_info(&instance->pdev->dev, 7805 "Failed to transition controller to ready from %s!\n", 7806 __func__); 7807 if (instance->adapter_type != MFI_SERIES) { 7808 status_reg = 7809 instance->instancet->read_fw_status_reg(instance); 7810 if (!(status_reg & MFI_RESET_ADAPTER) || 7811 ((megasas_adp_reset_wait_for_ready 7812 (instance, true, 0)) == FAILED)) 7813 goto fail_ready_state; 7814 } else { 7815 atomic_set(&instance->fw_reset_no_pci_access, 1); 7816 instance->instancet->adp_reset 7817 (instance, instance->reg_set); 7818 atomic_set(&instance->fw_reset_no_pci_access, 0); 7819 7820 /* waiting for about 30 seconds before retry */ 7821 ssleep(30); 7822 7823 if (megasas_transition_to_ready(instance, 0)) 
7824 goto fail_ready_state; 7825 } 7826 7827 dev_info(&instance->pdev->dev, 7828 "FW restarted successfully from %s!\n", 7829 __func__); 7830 } 7831 if (megasas_set_dma_mask(instance)) 7832 goto fail_set_dma_mask; 7833 7834 /* 7835 * Initialize MFI Firmware 7836 */ 7837 7838 atomic_set(&instance->fw_outstanding, 0); 7839 atomic_set(&instance->ldio_outstanding, 0); 7840 7841 /* Now re-enable MSI-X */ 7842 if (instance->msix_vectors) 7843 megasas_alloc_irq_vectors(instance); 7844 7845 if (!instance->msix_vectors) { 7846 rval = pci_alloc_irq_vectors(instance->pdev, 1, 1, 7847 PCI_IRQ_INTX); 7848 if (rval < 0) 7849 goto fail_reenable_msix; 7850 } 7851 7852 megasas_setup_reply_map(instance); 7853 7854 if (instance->adapter_type != MFI_SERIES) { 7855 megasas_reset_reply_desc(instance); 7856 if (megasas_ioc_init_fusion(instance)) { 7857 megasas_free_cmds(instance); 7858 megasas_free_cmds_fusion(instance); 7859 goto fail_init_mfi; 7860 } 7861 if (!megasas_get_map_info(instance)) 7862 megasas_sync_map_info(instance); 7863 } else { 7864 *instance->producer = 0; 7865 *instance->consumer = 0; 7866 if (megasas_issue_init_mfi(instance)) 7867 goto fail_init_mfi; 7868 } 7869 7870 if (megasas_get_ctrl_info(instance) != DCMD_SUCCESS) 7871 goto fail_init_mfi; 7872 7873 tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet, 7874 (unsigned long)instance); 7875 7876 if (instance->msix_vectors ? 7877 megasas_setup_irqs_msix(instance, 0) : 7878 megasas_setup_irqs_ioapic(instance)) 7879 goto fail_init_mfi; 7880 7881 if (instance->adapter_type != MFI_SERIES) 7882 megasas_setup_irq_poll(instance); 7883 7884 /* Re-launch SR-IOV heartbeat timer */ 7885 if (instance->requestorId) { 7886 if (!megasas_sriov_start_heartbeat(instance, 0)) 7887 megasas_start_timer(instance); 7888 else { 7889 instance->skip_heartbeat_timer_del = 1; 7890 goto fail_init_mfi; 7891 } 7892 } 7893 7894 instance->instancet->enable_intr(instance); 7895 megasas_setup_jbod_map(instance); 7896 instance->unload = 0; 7897 7898 /* 7899 * Initiate AEN (Asynchronous Event Notification) 7900 */ 7901 if (megasas_start_aen(instance)) 7902 dev_err(&instance->pdev->dev, "Start AEN failed\n"); 7903 7904 /* Re-launch FW fault watchdog */ 7905 if (instance->adapter_type != MFI_SERIES) 7906 if (megasas_fusion_start_watchdog(instance) != SUCCESS) 7907 goto fail_start_watchdog; 7908 7909 return 0; 7910 7911 fail_start_watchdog: 7912 if (instance->requestorId && !instance->skip_heartbeat_timer_del) 7913 del_timer_sync(&instance->sriov_heartbeat_timer); 7914 fail_init_mfi: 7915 megasas_free_ctrl_dma_buffers(instance); 7916 megasas_free_ctrl_mem(instance); 7917 scsi_host_put(host); 7918 7919 fail_reenable_msix: 7920 fail_set_dma_mask: 7921 fail_ready_state: 7922 7923 return -ENODEV; 7924 } 7925 7926 static inline int 7927 megasas_wait_for_adapter_operational(struct megasas_instance *instance) 7928 { 7929 int wait_time = MEGASAS_RESET_WAIT_TIME * 2; 7930 int i; 7931 u8 adp_state; 7932 7933 for (i = 0; i < wait_time; i++) { 7934 adp_state = atomic_read(&instance->adprecovery); 7935 if ((adp_state == MEGASAS_HBA_OPERATIONAL) || 7936 (adp_state == MEGASAS_HW_CRITICAL_ERROR)) 7937 break; 7938 7939 if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) 7940 dev_notice(&instance->pdev->dev, "waiting for controller reset to finish\n"); 7941 7942 msleep(1000); 7943 } 7944 7945 if (adp_state != MEGASAS_HBA_OPERATIONAL) { 7946 dev_info(&instance->pdev->dev, 7947 "%s HBA failed to become operational, adp_state %d\n", 7948 __func__, adp_state); 7949 return 1; 7950 } 7951 7952 return 
0; 7953 } 7954 7955 /** 7956 * megasas_detach_one - PCI hot"un"plug entry point 7957 * @pdev: PCI device structure 7958 */ 7959 static void megasas_detach_one(struct pci_dev *pdev) 7960 { 7961 int i; 7962 struct Scsi_Host *host; 7963 struct megasas_instance *instance; 7964 struct fusion_context *fusion; 7965 size_t pd_seq_map_sz; 7966 7967 instance = pci_get_drvdata(pdev); 7968 7969 if (!instance) 7970 return; 7971 7972 host = instance->host; 7973 fusion = instance->ctrl_context; 7974 7975 /* Shutdown SR-IOV heartbeat timer */ 7976 if (instance->requestorId && !instance->skip_heartbeat_timer_del) 7977 del_timer_sync(&instance->sriov_heartbeat_timer); 7978 7979 /* Stop the FW fault detection watchdog */ 7980 if (instance->adapter_type != MFI_SERIES) 7981 megasas_fusion_stop_watchdog(instance); 7982 7983 if (instance->fw_crash_state != UNAVAILABLE) 7984 megasas_free_host_crash_buffer(instance); 7985 scsi_remove_host(instance->host); 7986 instance->unload = 1; 7987 7988 if (megasas_wait_for_adapter_operational(instance)) 7989 goto skip_firing_dcmds; 7990 7991 megasas_flush_cache(instance); 7992 megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN); 7993 7994 skip_firing_dcmds: 7995 /* cancel the delayed work if this work still in queue*/ 7996 if (instance->ev != NULL) { 7997 struct megasas_aen_event *ev = instance->ev; 7998 cancel_delayed_work_sync(&ev->hotplug_work); 7999 instance->ev = NULL; 8000 } 8001 8002 /* cancel all wait events */ 8003 wake_up_all(&instance->int_cmd_wait_q); 8004 8005 tasklet_kill(&instance->isr_tasklet); 8006 8007 /* 8008 * Take the instance off the instance array. Note that we will not 8009 * decrement the max_index. We let this array be sparse array 8010 */ 8011 for (i = 0; i < megasas_mgmt_info.max_index; i++) { 8012 if (megasas_mgmt_info.instance[i] == instance) { 8013 megasas_mgmt_info.count--; 8014 megasas_mgmt_info.instance[i] = NULL; 8015 8016 break; 8017 } 8018 } 8019 8020 instance->instancet->disable_intr(instance); 8021 8022 megasas_destroy_irqs(instance); 8023 8024 if (instance->msix_vectors) 8025 pci_free_irq_vectors(instance->pdev); 8026 8027 if (instance->adapter_type >= VENTURA_SERIES) { 8028 for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i) 8029 kfree(fusion->stream_detect_by_ld[i]); 8030 kfree(fusion->stream_detect_by_ld); 8031 fusion->stream_detect_by_ld = NULL; 8032 } 8033 8034 8035 if (instance->adapter_type != MFI_SERIES) { 8036 megasas_release_fusion(instance); 8037 pd_seq_map_sz = 8038 struct_size_t(struct MR_PD_CFG_SEQ_NUM_SYNC, 8039 seq, MAX_PHYSICAL_DEVICES); 8040 for (i = 0; i < 2 ; i++) { 8041 if (fusion->ld_map[i]) 8042 dma_free_coherent(&instance->pdev->dev, 8043 fusion->max_map_sz, 8044 fusion->ld_map[i], 8045 fusion->ld_map_phys[i]); 8046 if (fusion->ld_drv_map[i]) { 8047 if (is_vmalloc_addr(fusion->ld_drv_map[i])) 8048 vfree(fusion->ld_drv_map[i]); 8049 else 8050 free_pages((ulong)fusion->ld_drv_map[i], 8051 fusion->drv_map_pages); 8052 } 8053 8054 if (fusion->pd_seq_sync[i]) 8055 dma_free_coherent(&instance->pdev->dev, 8056 pd_seq_map_sz, 8057 fusion->pd_seq_sync[i], 8058 fusion->pd_seq_phys[i]); 8059 } 8060 } else { 8061 megasas_release_mfi(instance); 8062 } 8063 8064 if (instance->vf_affiliation) 8065 dma_free_coherent(&pdev->dev, (MAX_LOGICAL_DRIVES + 1) * 8066 sizeof(struct MR_LD_VF_AFFILIATION), 8067 instance->vf_affiliation, 8068 instance->vf_affiliation_h); 8069 8070 if (instance->vf_affiliation_111) 8071 dma_free_coherent(&pdev->dev, 8072 sizeof(struct MR_LD_VF_AFFILIATION_111), 8073 instance->vf_affiliation_111, 8074 
instance->vf_affiliation_111_h); 8075 8076 if (instance->hb_host_mem) 8077 dma_free_coherent(&pdev->dev, sizeof(struct MR_CTRL_HB_HOST_MEM), 8078 instance->hb_host_mem, 8079 instance->hb_host_mem_h); 8080 8081 megasas_free_ctrl_dma_buffers(instance); 8082 8083 megasas_free_ctrl_mem(instance); 8084 8085 megasas_destroy_debugfs(instance); 8086 8087 scsi_host_put(host); 8088 8089 pci_disable_device(pdev); 8090 } 8091 8092 /** 8093 * megasas_shutdown - Shutdown entry point 8094 * @pdev: PCI device structure 8095 */ 8096 static void megasas_shutdown(struct pci_dev *pdev) 8097 { 8098 struct megasas_instance *instance = pci_get_drvdata(pdev); 8099 8100 if (!instance) 8101 return; 8102 8103 instance->unload = 1; 8104 8105 if (megasas_wait_for_adapter_operational(instance)) 8106 goto skip_firing_dcmds; 8107 8108 megasas_flush_cache(instance); 8109 megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN); 8110 8111 skip_firing_dcmds: 8112 instance->instancet->disable_intr(instance); 8113 megasas_destroy_irqs(instance); 8114 8115 if (instance->msix_vectors) 8116 pci_free_irq_vectors(instance->pdev); 8117 } 8118 8119 /* 8120 * megasas_mgmt_open - char node "open" entry point 8121 * @inode: char node inode 8122 * @filep: char node file 8123 */ 8124 static int megasas_mgmt_open(struct inode *inode, struct file *filep) 8125 { 8126 /* 8127 * Allow only those users with admin rights 8128 */ 8129 if (!capable(CAP_SYS_ADMIN)) 8130 return -EACCES; 8131 8132 return 0; 8133 } 8134 8135 /* 8136 * megasas_mgmt_fasync - Async notifier registration from applications 8137 * @fd: char node file descriptor number 8138 * @filep: char node file 8139 * @mode: notifier on/off 8140 * 8141 * This function adds the calling process to a driver global queue. When an 8142 * event occurs, SIGIO will be sent to all processes in this queue. 
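 *
 * On successful registration, filep->private_data is stamped with the file
 * pointer itself; megasas_mgmt_ioctl_aen() later checks this to verify that
 * fasync registration happened before the AEN ioctl was issued.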
8143 */ 8144 static int megasas_mgmt_fasync(int fd, struct file *filep, int mode) 8145 { 8146 int rc; 8147 8148 mutex_lock(&megasas_async_queue_mutex); 8149 8150 rc = fasync_helper(fd, filep, mode, &megasas_async_queue); 8151 8152 mutex_unlock(&megasas_async_queue_mutex); 8153 8154 if (rc >= 0) { 8155 /* For sanity check when we get ioctl */ 8156 filep->private_data = filep; 8157 return 0; 8158 } 8159 8160 printk(KERN_DEBUG "megasas: fasync_helper failed [%d]\n", rc); 8161 8162 return rc; 8163 } 8164 8165 /* 8166 * megasas_mgmt_poll - char node "poll" entry point 8167 * @filep: char node file 8168 * @wait: Events to poll for 8169 */ 8170 static __poll_t megasas_mgmt_poll(struct file *file, poll_table *wait) 8171 { 8172 __poll_t mask; 8173 unsigned long flags; 8174 8175 poll_wait(file, &megasas_poll_wait, wait); 8176 spin_lock_irqsave(&poll_aen_lock, flags); 8177 if (megasas_poll_wait_aen) 8178 mask = (EPOLLIN | EPOLLRDNORM); 8179 else 8180 mask = 0; 8181 megasas_poll_wait_aen = 0; 8182 spin_unlock_irqrestore(&poll_aen_lock, flags); 8183 return mask; 8184 } 8185 8186 /* 8187 * megasas_set_crash_dump_params_ioctl: 8188 * Send CRASH_DUMP_MODE DCMD to all controllers 8189 * @cmd: MFI command frame 8190 */ 8191 8192 static int megasas_set_crash_dump_params_ioctl(struct megasas_cmd *cmd) 8193 { 8194 struct megasas_instance *local_instance; 8195 int i, error = 0; 8196 int crash_support; 8197 8198 crash_support = cmd->frame->dcmd.mbox.w[0]; 8199 8200 for (i = 0; i < megasas_mgmt_info.max_index; i++) { 8201 local_instance = megasas_mgmt_info.instance[i]; 8202 if (local_instance && local_instance->crash_dump_drv_support) { 8203 if ((atomic_read(&local_instance->adprecovery) == 8204 MEGASAS_HBA_OPERATIONAL) && 8205 !megasas_set_crash_dump_params(local_instance, 8206 crash_support)) { 8207 local_instance->crash_dump_app_support = 8208 crash_support; 8209 dev_info(&local_instance->pdev->dev, 8210 "Application firmware crash " 8211 "dump mode set success\n"); 8212 error = 0; 8213 } else { 8214 dev_info(&local_instance->pdev->dev, 8215 "Application firmware crash " 8216 "dump mode set failed\n"); 8217 error = -1; 8218 } 8219 } 8220 } 8221 return error; 8222 } 8223 8224 /** 8225 * megasas_mgmt_fw_ioctl - Issues management ioctls to FW 8226 * @instance: Adapter soft state 8227 * @user_ioc: User's ioctl packet 8228 * @ioc: ioctl packet 8229 */ 8230 static int 8231 megasas_mgmt_fw_ioctl(struct megasas_instance *instance, 8232 struct megasas_iocpacket __user * user_ioc, 8233 struct megasas_iocpacket *ioc) 8234 { 8235 struct megasas_sge64 *kern_sge64 = NULL; 8236 struct megasas_sge32 *kern_sge32 = NULL; 8237 struct megasas_cmd *cmd; 8238 void *kbuff_arr[MAX_IOCTL_SGE]; 8239 dma_addr_t buf_handle = 0; 8240 int error = 0, i; 8241 void *sense = NULL; 8242 dma_addr_t sense_handle; 8243 void *sense_ptr; 8244 u32 opcode = 0; 8245 int ret = DCMD_SUCCESS; 8246 8247 memset(kbuff_arr, 0, sizeof(kbuff_arr)); 8248 8249 if (ioc->sge_count > MAX_IOCTL_SGE) { 8250 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SGE count [%d] > max limit [%d]\n", 8251 ioc->sge_count, MAX_IOCTL_SGE); 8252 return -EINVAL; 8253 } 8254 8255 if ((ioc->frame.hdr.cmd >= MFI_CMD_OP_COUNT) || 8256 ((ioc->frame.hdr.cmd == MFI_CMD_NVME) && 8257 !instance->support_nvme_passthru) || 8258 ((ioc->frame.hdr.cmd == MFI_CMD_TOOLBOX) && 8259 !instance->support_pci_lane_margining)) { 8260 dev_err(&instance->pdev->dev, 8261 "Received invalid ioctl command 0x%x\n", 8262 ioc->frame.hdr.cmd); 8263 return -ENOTSUPP; 8264 } 8265 8266 cmd = megasas_get_cmd(instance); 
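	/*
	 * The MFI frame borrowed here must be handed back via
	 * megasas_return_cmd() on every exit path below, including the early
	 * returns taken for the shutdown and crash-dump-mode DCMDs.
	 */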
8267 if (!cmd) { 8268 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get a cmd packet\n"); 8269 return -ENOMEM; 8270 } 8271 8272 /* 8273 * User's IOCTL packet has 2 frames (maximum). Copy those two 8274 * frames into our cmd's frames. cmd->frame's context will get 8275 * overwritten when we copy from user's frames. So set that value 8276 * alone separately 8277 */ 8278 memcpy(cmd->frame, ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE); 8279 cmd->frame->hdr.context = cpu_to_le32(cmd->index); 8280 cmd->frame->hdr.pad_0 = 0; 8281 8282 cmd->frame->hdr.flags &= (~MFI_FRAME_IEEE); 8283 8284 if (instance->consistent_mask_64bit) 8285 cmd->frame->hdr.flags |= cpu_to_le16((MFI_FRAME_SGL64 | 8286 MFI_FRAME_SENSE64)); 8287 else 8288 cmd->frame->hdr.flags &= cpu_to_le16(~(MFI_FRAME_SGL64 | 8289 MFI_FRAME_SENSE64)); 8290 8291 if (cmd->frame->hdr.cmd == MFI_CMD_DCMD) 8292 opcode = le32_to_cpu(cmd->frame->dcmd.opcode); 8293 8294 if (opcode == MR_DCMD_CTRL_SHUTDOWN) { 8295 mutex_lock(&instance->reset_mutex); 8296 if (megasas_get_ctrl_info(instance) != DCMD_SUCCESS) { 8297 megasas_return_cmd(instance, cmd); 8298 mutex_unlock(&instance->reset_mutex); 8299 return -1; 8300 } 8301 mutex_unlock(&instance->reset_mutex); 8302 } 8303 8304 if (opcode == MR_DRIVER_SET_APP_CRASHDUMP_MODE) { 8305 error = megasas_set_crash_dump_params_ioctl(cmd); 8306 megasas_return_cmd(instance, cmd); 8307 return error; 8308 } 8309 8310 /* 8311 * The management interface between applications and the fw uses 8312 * MFI frames. E.g, RAID configuration changes, LD property changes 8313 * etc are accomplishes through different kinds of MFI frames. The 8314 * driver needs to care only about substituting user buffers with 8315 * kernel buffers in SGLs. The location of SGL is embedded in the 8316 * struct iocpacket itself. 8317 */ 8318 if (instance->consistent_mask_64bit) 8319 kern_sge64 = (struct megasas_sge64 *) 8320 ((unsigned long)cmd->frame + ioc->sgl_off); 8321 else 8322 kern_sge32 = (struct megasas_sge32 *) 8323 ((unsigned long)cmd->frame + ioc->sgl_off); 8324 8325 /* 8326 * For each user buffer, create a mirror buffer and copy in 8327 */ 8328 for (i = 0; i < ioc->sge_count; i++) { 8329 if (!ioc->sgl[i].iov_len) 8330 continue; 8331 8332 kbuff_arr[i] = dma_alloc_coherent(&instance->pdev->dev, 8333 ioc->sgl[i].iov_len, 8334 &buf_handle, GFP_KERNEL); 8335 if (!kbuff_arr[i]) { 8336 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc " 8337 "kernel SGL buffer for IOCTL\n"); 8338 error = -ENOMEM; 8339 goto out; 8340 } 8341 8342 /* 8343 * We don't change the dma_coherent_mask, so 8344 * dma_alloc_coherent only returns 32bit addresses 8345 */ 8346 if (instance->consistent_mask_64bit) { 8347 kern_sge64[i].phys_addr = cpu_to_le64(buf_handle); 8348 kern_sge64[i].length = cpu_to_le32(ioc->sgl[i].iov_len); 8349 } else { 8350 kern_sge32[i].phys_addr = cpu_to_le32(buf_handle); 8351 kern_sge32[i].length = cpu_to_le32(ioc->sgl[i].iov_len); 8352 } 8353 8354 /* 8355 * We created a kernel buffer corresponding to the 8356 * user buffer. 
Now copy in from the user buffer 8357 */ 8358 if (copy_from_user(kbuff_arr[i], ioc->sgl[i].iov_base, 8359 (u32) (ioc->sgl[i].iov_len))) { 8360 error = -EFAULT; 8361 goto out; 8362 } 8363 } 8364 8365 if (ioc->sense_len) { 8366 /* make sure the pointer is part of the frame */ 8367 if (ioc->sense_off > 8368 (sizeof(union megasas_frame) - sizeof(__le64))) { 8369 error = -EINVAL; 8370 goto out; 8371 } 8372 8373 sense = dma_alloc_coherent(&instance->pdev->dev, ioc->sense_len, 8374 &sense_handle, GFP_KERNEL); 8375 if (!sense) { 8376 error = -ENOMEM; 8377 goto out; 8378 } 8379 8380 /* always store 64 bits regardless of addressing */ 8381 sense_ptr = (void *)cmd->frame + ioc->sense_off; 8382 put_unaligned_le64(sense_handle, sense_ptr); 8383 } 8384 8385 /* 8386 * Set the sync_cmd flag so that the ISR knows not to complete this 8387 * cmd to the SCSI mid-layer 8388 */ 8389 cmd->sync_cmd = 1; 8390 8391 ret = megasas_issue_blocked_cmd(instance, cmd, 0); 8392 switch (ret) { 8393 case DCMD_INIT: 8394 case DCMD_BUSY: 8395 cmd->sync_cmd = 0; 8396 dev_err(&instance->pdev->dev, 8397 "return -EBUSY from %s %d cmd 0x%x opcode 0x%x cmd->cmd_status_drv 0x%x\n", 8398 __func__, __LINE__, cmd->frame->hdr.cmd, opcode, 8399 cmd->cmd_status_drv); 8400 error = -EBUSY; 8401 goto out; 8402 } 8403 8404 cmd->sync_cmd = 0; 8405 8406 if (instance->unload == 1) { 8407 dev_info(&instance->pdev->dev, "Driver unload is in progress " 8408 "don't submit data to application\n"); 8409 goto out; 8410 } 8411 /* 8412 * copy out the kernel buffers to user buffers 8413 */ 8414 for (i = 0; i < ioc->sge_count; i++) { 8415 if (copy_to_user(ioc->sgl[i].iov_base, kbuff_arr[i], 8416 ioc->sgl[i].iov_len)) { 8417 error = -EFAULT; 8418 goto out; 8419 } 8420 } 8421 8422 /* 8423 * copy out the sense 8424 */ 8425 if (ioc->sense_len) { 8426 void __user *uptr; 8427 /* 8428 * sense_ptr points to the location that has the user 8429 * sense buffer address 8430 */ 8431 sense_ptr = (void *)ioc->frame.raw + ioc->sense_off; 8432 if (in_compat_syscall()) 8433 uptr = compat_ptr(get_unaligned((compat_uptr_t *) 8434 sense_ptr)); 8435 else 8436 uptr = get_unaligned((void __user **)sense_ptr); 8437 8438 if (copy_to_user(uptr, sense, ioc->sense_len)) { 8439 dev_err(&instance->pdev->dev, "Failed to copy out to user " 8440 "sense data\n"); 8441 error = -EFAULT; 8442 goto out; 8443 } 8444 } 8445 8446 /* 8447 * copy the status codes returned by the fw 8448 */ 8449 if (copy_to_user(&user_ioc->frame.hdr.cmd_status, 8450 &cmd->frame->hdr.cmd_status, sizeof(u8))) { 8451 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error copying out cmd_status\n"); 8452 error = -EFAULT; 8453 } 8454 8455 out: 8456 if (sense) { 8457 dma_free_coherent(&instance->pdev->dev, ioc->sense_len, 8458 sense, sense_handle); 8459 } 8460 8461 for (i = 0; i < ioc->sge_count; i++) { 8462 if (kbuff_arr[i]) { 8463 if (instance->consistent_mask_64bit) 8464 dma_free_coherent(&instance->pdev->dev, 8465 le32_to_cpu(kern_sge64[i].length), 8466 kbuff_arr[i], 8467 le64_to_cpu(kern_sge64[i].phys_addr)); 8468 else 8469 dma_free_coherent(&instance->pdev->dev, 8470 le32_to_cpu(kern_sge32[i].length), 8471 kbuff_arr[i], 8472 le32_to_cpu(kern_sge32[i].phys_addr)); 8473 kbuff_arr[i] = NULL; 8474 } 8475 } 8476 8477 megasas_return_cmd(instance, cmd); 8478 return error; 8479 } 8480 8481 static struct megasas_iocpacket * 8482 megasas_compat_iocpacket_get_user(void __user *arg) 8483 { 8484 struct megasas_iocpacket *ioc; 8485 struct compat_megasas_iocpacket __user *cioc = arg; 8486 size_t size; 8487 int err = -EFAULT; 8488 int 
i; 8489 8490 ioc = kzalloc(sizeof(*ioc), GFP_KERNEL); 8491 if (!ioc) 8492 return ERR_PTR(-ENOMEM); 8493 size = offsetof(struct megasas_iocpacket, frame) + sizeof(ioc->frame); 8494 if (copy_from_user(ioc, arg, size)) 8495 goto out; 8496 8497 for (i = 0; i < MAX_IOCTL_SGE; i++) { 8498 compat_uptr_t iov_base; 8499 8500 if (get_user(iov_base, &cioc->sgl[i].iov_base) || 8501 get_user(ioc->sgl[i].iov_len, &cioc->sgl[i].iov_len)) 8502 goto out; 8503 8504 ioc->sgl[i].iov_base = compat_ptr(iov_base); 8505 } 8506 8507 return ioc; 8508 out: 8509 kfree(ioc); 8510 return ERR_PTR(err); 8511 } 8512 8513 static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg) 8514 { 8515 struct megasas_iocpacket __user *user_ioc = 8516 (struct megasas_iocpacket __user *)arg; 8517 struct megasas_iocpacket *ioc; 8518 struct megasas_instance *instance; 8519 int error; 8520 8521 if (in_compat_syscall()) 8522 ioc = megasas_compat_iocpacket_get_user(user_ioc); 8523 else 8524 ioc = memdup_user(user_ioc, sizeof(struct megasas_iocpacket)); 8525 8526 if (IS_ERR(ioc)) 8527 return PTR_ERR(ioc); 8528 8529 instance = megasas_lookup_instance(ioc->host_no); 8530 if (!instance) { 8531 error = -ENODEV; 8532 goto out_kfree_ioc; 8533 } 8534 8535 /* Block ioctls in VF mode */ 8536 if (instance->requestorId && !allow_vf_ioctls) { 8537 error = -ENODEV; 8538 goto out_kfree_ioc; 8539 } 8540 8541 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { 8542 dev_err(&instance->pdev->dev, "Controller in crit error\n"); 8543 error = -ENODEV; 8544 goto out_kfree_ioc; 8545 } 8546 8547 if (instance->unload == 1) { 8548 error = -ENODEV; 8549 goto out_kfree_ioc; 8550 } 8551 8552 if (down_interruptible(&instance->ioctl_sem)) { 8553 error = -ERESTARTSYS; 8554 goto out_kfree_ioc; 8555 } 8556 8557 if (megasas_wait_for_adapter_operational(instance)) { 8558 error = -ENODEV; 8559 goto out_up; 8560 } 8561 8562 error = megasas_mgmt_fw_ioctl(instance, user_ioc, ioc); 8563 out_up: 8564 up(&instance->ioctl_sem); 8565 8566 out_kfree_ioc: 8567 kfree(ioc); 8568 return error; 8569 } 8570 8571 static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg) 8572 { 8573 struct megasas_instance *instance; 8574 struct megasas_aen aen; 8575 int error; 8576 8577 if (file->private_data != file) { 8578 printk(KERN_DEBUG "megasas: fasync_helper was not " 8579 "called first\n"); 8580 return -EINVAL; 8581 } 8582 8583 if (copy_from_user(&aen, (void __user *)arg, sizeof(aen))) 8584 return -EFAULT; 8585 8586 instance = megasas_lookup_instance(aen.host_no); 8587 8588 if (!instance) 8589 return -ENODEV; 8590 8591 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { 8592 return -ENODEV; 8593 } 8594 8595 if (instance->unload == 1) { 8596 return -ENODEV; 8597 } 8598 8599 if (megasas_wait_for_adapter_operational(instance)) 8600 return -ENODEV; 8601 8602 mutex_lock(&instance->reset_mutex); 8603 error = megasas_register_aen(instance, aen.seq_num, 8604 aen.class_locale_word); 8605 mutex_unlock(&instance->reset_mutex); 8606 return error; 8607 } 8608 8609 /** 8610 * megasas_mgmt_ioctl - char node ioctl entry point 8611 * @file: char device file pointer 8612 * @cmd: ioctl command 8613 * @arg: ioctl command arguments address 8614 */ 8615 static long 8616 megasas_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 8617 { 8618 switch (cmd) { 8619 case MEGASAS_IOC_FIRMWARE: 8620 return megasas_mgmt_ioctl_fw(file, arg); 8621 8622 case MEGASAS_IOC_GET_AEN: 8623 return megasas_mgmt_ioctl_aen(file, arg); 8624 } 8625 8626 return 
-ENOTTY; 8627 } 8628 8629 #ifdef CONFIG_COMPAT 8630 static long 8631 megasas_mgmt_compat_ioctl(struct file *file, unsigned int cmd, 8632 unsigned long arg) 8633 { 8634 switch (cmd) { 8635 case MEGASAS_IOC_FIRMWARE32: 8636 return megasas_mgmt_ioctl_fw(file, arg); 8637 case MEGASAS_IOC_GET_AEN: 8638 return megasas_mgmt_ioctl_aen(file, arg); 8639 } 8640 8641 return -ENOTTY; 8642 } 8643 #endif 8644 8645 /* 8646 * File operations structure for management interface 8647 */ 8648 static const struct file_operations megasas_mgmt_fops = { 8649 .owner = THIS_MODULE, 8650 .open = megasas_mgmt_open, 8651 .fasync = megasas_mgmt_fasync, 8652 .unlocked_ioctl = megasas_mgmt_ioctl, 8653 .poll = megasas_mgmt_poll, 8654 #ifdef CONFIG_COMPAT 8655 .compat_ioctl = megasas_mgmt_compat_ioctl, 8656 #endif 8657 .llseek = noop_llseek, 8658 }; 8659 8660 static SIMPLE_DEV_PM_OPS(megasas_pm_ops, megasas_suspend, megasas_resume); 8661 8662 /* 8663 * PCI hotplug support registration structure 8664 */ 8665 static struct pci_driver megasas_pci_driver = { 8666 8667 .name = "megaraid_sas", 8668 .id_table = megasas_pci_table, 8669 .probe = megasas_probe_one, 8670 .remove = megasas_detach_one, 8671 .driver.pm = &megasas_pm_ops, 8672 .shutdown = megasas_shutdown, 8673 }; 8674 8675 /* 8676 * Sysfs driver attributes 8677 */ 8678 static ssize_t version_show(struct device_driver *dd, char *buf) 8679 { 8680 return snprintf(buf, strlen(MEGASAS_VERSION) + 2, "%s\n", 8681 MEGASAS_VERSION); 8682 } 8683 static DRIVER_ATTR_RO(version); 8684 8685 static ssize_t release_date_show(struct device_driver *dd, char *buf) 8686 { 8687 return snprintf(buf, strlen(MEGASAS_RELDATE) + 2, "%s\n", 8688 MEGASAS_RELDATE); 8689 } 8690 static DRIVER_ATTR_RO(release_date); 8691 8692 static ssize_t support_poll_for_event_show(struct device_driver *dd, char *buf) 8693 { 8694 return sprintf(buf, "%u\n", support_poll_for_event); 8695 } 8696 static DRIVER_ATTR_RO(support_poll_for_event); 8697 8698 static ssize_t support_device_change_show(struct device_driver *dd, char *buf) 8699 { 8700 return sprintf(buf, "%u\n", support_device_change); 8701 } 8702 static DRIVER_ATTR_RO(support_device_change); 8703 8704 static ssize_t dbg_lvl_show(struct device_driver *dd, char *buf) 8705 { 8706 return sprintf(buf, "%u\n", megasas_dbg_lvl); 8707 } 8708 8709 static ssize_t dbg_lvl_store(struct device_driver *dd, const char *buf, 8710 size_t count) 8711 { 8712 int retval = count; 8713 8714 if (sscanf(buf, "%u", &megasas_dbg_lvl) < 1) { 8715 printk(KERN_ERR "megasas: could not set dbg_lvl\n"); 8716 retval = -EINVAL; 8717 } 8718 return retval; 8719 } 8720 static DRIVER_ATTR_RW(dbg_lvl); 8721 8722 static ssize_t 8723 support_nvme_encapsulation_show(struct device_driver *dd, char *buf) 8724 { 8725 return sprintf(buf, "%u\n", support_nvme_encapsulation); 8726 } 8727 8728 static DRIVER_ATTR_RO(support_nvme_encapsulation); 8729 8730 static ssize_t 8731 support_pci_lane_margining_show(struct device_driver *dd, char *buf) 8732 { 8733 return sprintf(buf, "%u\n", support_pci_lane_margining); 8734 } 8735 8736 static DRIVER_ATTR_RO(support_pci_lane_margining); 8737 8738 static inline void megasas_remove_scsi_device(struct scsi_device *sdev) 8739 { 8740 sdev_printk(KERN_INFO, sdev, "SCSI device is removed\n"); 8741 scsi_remove_device(sdev); 8742 scsi_device_put(sdev); 8743 } 8744 8745 /** 8746 * megasas_update_device_list - Update the PD and LD device list from FW 8747 * after an AEN event notification 8748 * @instance: Adapter soft state 8749 * @event_type: Indicates type of event (PD or LD 
event) 8750 * 8751 * @return: Success or failure 8752 * 8753 * Issue DCMDs to Firmware to update the internal device list in driver. 8754 * Based on the FW support, driver sends the HOST_DEVICE_LIST or combination 8755 * of PD_LIST/LD_LIST_QUERY DCMDs to get the device list. 8756 */ 8757 static 8758 int megasas_update_device_list(struct megasas_instance *instance, 8759 int event_type) 8760 { 8761 int dcmd_ret; 8762 8763 if (instance->enable_fw_dev_list) { 8764 return megasas_host_device_list_query(instance, false); 8765 } else { 8766 if (event_type & SCAN_PD_CHANNEL) { 8767 dcmd_ret = megasas_get_pd_list(instance); 8768 if (dcmd_ret != DCMD_SUCCESS) 8769 return dcmd_ret; 8770 } 8771 8772 if (event_type & SCAN_VD_CHANNEL) { 8773 if (!instance->requestorId || 8774 megasas_get_ld_vf_affiliation(instance, 0)) { 8775 return megasas_ld_list_query(instance, 8776 MR_LD_QUERY_TYPE_EXPOSED_TO_HOST); 8777 } 8778 } 8779 } 8780 return DCMD_SUCCESS; 8781 } 8782 8783 /** 8784 * megasas_add_remove_devices - Add/remove devices to SCSI mid-layer 8785 * after an AEN event notification 8786 * @instance: Adapter soft state 8787 * @scan_type: Indicates type of devices (PD/LD) to add 8788 * @return void 8789 */ 8790 static 8791 void megasas_add_remove_devices(struct megasas_instance *instance, 8792 int scan_type) 8793 { 8794 int i, j; 8795 u16 pd_index = 0; 8796 u16 ld_index = 0; 8797 u16 channel = 0, id = 0; 8798 struct Scsi_Host *host; 8799 struct scsi_device *sdev1; 8800 struct MR_HOST_DEVICE_LIST *targetid_list = NULL; 8801 struct MR_HOST_DEVICE_LIST_ENTRY *targetid_entry = NULL; 8802 8803 host = instance->host; 8804 8805 if (instance->enable_fw_dev_list) { 8806 targetid_list = instance->host_device_list_buf; 8807 for (i = 0; i < targetid_list->count; i++) { 8808 targetid_entry = &targetid_list->host_device_list[i]; 8809 if (targetid_entry->flags.u.bits.is_sys_pd) { 8810 channel = le16_to_cpu(targetid_entry->target_id) / 8811 MEGASAS_MAX_DEV_PER_CHANNEL; 8812 id = le16_to_cpu(targetid_entry->target_id) % 8813 MEGASAS_MAX_DEV_PER_CHANNEL; 8814 } else { 8815 channel = MEGASAS_MAX_PD_CHANNELS + 8816 (le16_to_cpu(targetid_entry->target_id) / 8817 MEGASAS_MAX_DEV_PER_CHANNEL); 8818 id = le16_to_cpu(targetid_entry->target_id) % 8819 MEGASAS_MAX_DEV_PER_CHANNEL; 8820 } 8821 sdev1 = scsi_device_lookup(host, channel, id, 0); 8822 if (!sdev1) { 8823 scsi_add_device(host, channel, id, 0); 8824 } else { 8825 scsi_device_put(sdev1); 8826 } 8827 } 8828 } 8829 8830 if (scan_type & SCAN_PD_CHANNEL) { 8831 for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) { 8832 for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) { 8833 pd_index = i * MEGASAS_MAX_DEV_PER_CHANNEL + j; 8834 sdev1 = scsi_device_lookup(host, i, j, 0); 8835 if (instance->pd_list[pd_index].driveState == 8836 MR_PD_STATE_SYSTEM) { 8837 if (!sdev1) 8838 scsi_add_device(host, i, j, 0); 8839 else 8840 scsi_device_put(sdev1); 8841 } else { 8842 if (sdev1) 8843 megasas_remove_scsi_device(sdev1); 8844 } 8845 } 8846 } 8847 } 8848 8849 if (scan_type & SCAN_VD_CHANNEL) { 8850 for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) { 8851 for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) { 8852 ld_index = (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j; 8853 sdev1 = scsi_device_lookup(host, 8854 MEGASAS_MAX_PD_CHANNELS + i, j, 0); 8855 if (instance->ld_ids[ld_index] != 0xff) { 8856 if (!sdev1) 8857 scsi_add_device(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0); 8858 else 8859 scsi_device_put(sdev1); 8860 } else { 8861 if (sdev1) 8862 megasas_remove_scsi_device(sdev1); 8863 } 8864 } 8865 } 8866 } 8867 8868 
} 8869 8870 static void 8871 megasas_aen_polling(struct work_struct *work) 8872 { 8873 struct megasas_aen_event *ev = 8874 container_of(work, struct megasas_aen_event, hotplug_work.work); 8875 struct megasas_instance *instance = ev->instance; 8876 union megasas_evt_class_locale class_locale; 8877 int event_type = 0; 8878 u32 seq_num; 8879 u16 ld_target_id; 8880 int error; 8881 u8 dcmd_ret = DCMD_SUCCESS; 8882 struct scsi_device *sdev1; 8883 8884 if (!instance) { 8885 printk(KERN_ERR "invalid instance!\n"); 8886 kfree(ev); 8887 return; 8888 } 8889 8890 /* Don't run the event workqueue thread if OCR is running */ 8891 mutex_lock(&instance->reset_mutex); 8892 8893 instance->ev = NULL; 8894 if (instance->evt_detail) { 8895 megasas_decode_evt(instance); 8896 8897 switch (le32_to_cpu(instance->evt_detail->code)) { 8898 8899 case MR_EVT_PD_INSERTED: 8900 case MR_EVT_PD_REMOVED: 8901 event_type = SCAN_PD_CHANNEL; 8902 break; 8903 8904 case MR_EVT_LD_OFFLINE: 8905 case MR_EVT_LD_DELETED: 8906 ld_target_id = instance->evt_detail->args.ld.target_id; 8907 sdev1 = scsi_device_lookup(instance->host, 8908 MEGASAS_MAX_PD_CHANNELS + 8909 (ld_target_id / MEGASAS_MAX_DEV_PER_CHANNEL), 8910 (ld_target_id % MEGASAS_MAX_DEV_PER_CHANNEL), 8911 0); 8912 if (sdev1) 8913 megasas_remove_scsi_device(sdev1); 8914 8915 event_type = SCAN_VD_CHANNEL; 8916 break; 8917 case MR_EVT_LD_CREATED: 8918 event_type = SCAN_VD_CHANNEL; 8919 break; 8920 8921 case MR_EVT_CFG_CLEARED: 8922 case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED: 8923 case MR_EVT_FOREIGN_CFG_IMPORTED: 8924 case MR_EVT_LD_STATE_CHANGE: 8925 event_type = SCAN_PD_CHANNEL | SCAN_VD_CHANNEL; 8926 dev_info(&instance->pdev->dev, "scanning for scsi%d...\n", 8927 instance->host->host_no); 8928 break; 8929 8930 case MR_EVT_CTRL_PROP_CHANGED: 8931 dcmd_ret = megasas_get_ctrl_info(instance); 8932 if (dcmd_ret == DCMD_SUCCESS && 8933 instance->snapdump_wait_time) { 8934 megasas_get_snapdump_properties(instance); 8935 dev_info(&instance->pdev->dev, 8936 "Snap dump wait time\t: %d\n", 8937 instance->snapdump_wait_time); 8938 } 8939 break; 8940 default: 8941 event_type = 0; 8942 break; 8943 } 8944 } else { 8945 dev_err(&instance->pdev->dev, "invalid evt_detail!\n"); 8946 mutex_unlock(&instance->reset_mutex); 8947 kfree(ev); 8948 return; 8949 } 8950 8951 if (event_type) 8952 dcmd_ret = megasas_update_device_list(instance, event_type); 8953 8954 mutex_unlock(&instance->reset_mutex); 8955 8956 if (event_type && dcmd_ret == DCMD_SUCCESS) 8957 megasas_add_remove_devices(instance, event_type); 8958 8959 if (dcmd_ret == DCMD_SUCCESS) 8960 seq_num = le32_to_cpu(instance->evt_detail->seq_num) + 1; 8961 else 8962 seq_num = instance->last_seq_num; 8963 8964 /* Register AEN with FW for latest sequence number plus 1 */ 8965 class_locale.members.reserved = 0; 8966 class_locale.members.locale = MR_EVT_LOCALE_ALL; 8967 class_locale.members.class = MR_EVT_CLASS_DEBUG; 8968 8969 if (instance->aen_cmd != NULL) { 8970 kfree(ev); 8971 return; 8972 } 8973 8974 mutex_lock(&instance->reset_mutex); 8975 error = megasas_register_aen(instance, seq_num, 8976 class_locale.word); 8977 if (error) 8978 dev_err(&instance->pdev->dev, 8979 "register aen failed error %x\n", error); 8980 8981 mutex_unlock(&instance->reset_mutex); 8982 kfree(ev); 8983 } 8984 8985 /** 8986 * megasas_init - Driver load entry point 8987 */ 8988 static int __init megasas_init(void) 8989 { 8990 int rval; 8991 8992 /* 8993 * Booted in kdump kernel, minimize memory footprints by 8994 * disabling few features 8995 */ 8996 if (reset_devices) { 
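		/*
		 * These overrides are applied before pci_register_driver(),
		 * so every adapter probed by the crash kernel sees the
		 * reduced settings.
		 */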
8997 msix_vectors = 1; 8998 rdpq_enable = 0; 8999 dual_qdepth_disable = 1; 9000 poll_queues = 0; 9001 } 9002 9003 /* 9004 * Announce driver version and other information 9005 */ 9006 pr_info("megasas: %s\n", MEGASAS_VERSION); 9007 9008 megasas_dbg_lvl = 0; 9009 support_poll_for_event = 2; 9010 support_device_change = 1; 9011 support_nvme_encapsulation = true; 9012 support_pci_lane_margining = true; 9013 9014 memset(&megasas_mgmt_info, 0, sizeof(megasas_mgmt_info)); 9015 9016 /* 9017 * Register character device node 9018 */ 9019 rval = register_chrdev(0, "megaraid_sas_ioctl", &megasas_mgmt_fops); 9020 9021 if (rval < 0) { 9022 printk(KERN_DEBUG "megasas: failed to open device node\n"); 9023 return rval; 9024 } 9025 9026 megasas_mgmt_majorno = rval; 9027 9028 megasas_init_debugfs(); 9029 9030 /* 9031 * Register ourselves as PCI hotplug module 9032 */ 9033 rval = pci_register_driver(&megasas_pci_driver); 9034 9035 if (rval) { 9036 printk(KERN_DEBUG "megasas: PCI hotplug registration failed \n"); 9037 goto err_pcidrv; 9038 } 9039 9040 if ((event_log_level < MFI_EVT_CLASS_DEBUG) || 9041 (event_log_level > MFI_EVT_CLASS_DEAD)) { 9042 pr_warn("megaraid_sas: provided event log level is out of range, setting it to default 2(CLASS_CRITICAL), permissible range is: -2 to 4\n"); 9043 event_log_level = MFI_EVT_CLASS_CRITICAL; 9044 } 9045 9046 rval = driver_create_file(&megasas_pci_driver.driver, 9047 &driver_attr_version); 9048 if (rval) 9049 goto err_dcf_attr_ver; 9050 9051 rval = driver_create_file(&megasas_pci_driver.driver, 9052 &driver_attr_release_date); 9053 if (rval) 9054 goto err_dcf_rel_date; 9055 9056 rval = driver_create_file(&megasas_pci_driver.driver, 9057 &driver_attr_support_poll_for_event); 9058 if (rval) 9059 goto err_dcf_support_poll_for_event; 9060 9061 rval = driver_create_file(&megasas_pci_driver.driver, 9062 &driver_attr_dbg_lvl); 9063 if (rval) 9064 goto err_dcf_dbg_lvl; 9065 rval = driver_create_file(&megasas_pci_driver.driver, 9066 &driver_attr_support_device_change); 9067 if (rval) 9068 goto err_dcf_support_device_change; 9069 9070 rval = driver_create_file(&megasas_pci_driver.driver, 9071 &driver_attr_support_nvme_encapsulation); 9072 if (rval) 9073 goto err_dcf_support_nvme_encapsulation; 9074 9075 rval = driver_create_file(&megasas_pci_driver.driver, 9076 &driver_attr_support_pci_lane_margining); 9077 if (rval) 9078 goto err_dcf_support_pci_lane_margining; 9079 9080 return rval; 9081 9082 err_dcf_support_pci_lane_margining: 9083 driver_remove_file(&megasas_pci_driver.driver, 9084 &driver_attr_support_nvme_encapsulation); 9085 9086 err_dcf_support_nvme_encapsulation: 9087 driver_remove_file(&megasas_pci_driver.driver, 9088 &driver_attr_support_device_change); 9089 9090 err_dcf_support_device_change: 9091 driver_remove_file(&megasas_pci_driver.driver, 9092 &driver_attr_dbg_lvl); 9093 err_dcf_dbg_lvl: 9094 driver_remove_file(&megasas_pci_driver.driver, 9095 &driver_attr_support_poll_for_event); 9096 err_dcf_support_poll_for_event: 9097 driver_remove_file(&megasas_pci_driver.driver, 9098 &driver_attr_release_date); 9099 err_dcf_rel_date: 9100 driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version); 9101 err_dcf_attr_ver: 9102 pci_unregister_driver(&megasas_pci_driver); 9103 err_pcidrv: 9104 megasas_exit_debugfs(); 9105 unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl"); 9106 return rval; 9107 } 9108 9109 /** 9110 * megasas_exit - Driver unload entry point 9111 */ 9112 static void __exit megasas_exit(void) 9113 { 9114 
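	/*
	 * Unwind megasas_init() in reverse: drop the sysfs driver
	 * attributes, unregister the PCI driver, tear down debugfs and
	 * finally release the management character device node.
	 */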
driver_remove_file(&megasas_pci_driver.driver, 9115 &driver_attr_dbg_lvl); 9116 driver_remove_file(&megasas_pci_driver.driver, 9117 &driver_attr_support_poll_for_event); 9118 driver_remove_file(&megasas_pci_driver.driver, 9119 &driver_attr_support_device_change); 9120 driver_remove_file(&megasas_pci_driver.driver, 9121 &driver_attr_release_date); 9122 driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version); 9123 driver_remove_file(&megasas_pci_driver.driver, 9124 &driver_attr_support_nvme_encapsulation); 9125 driver_remove_file(&megasas_pci_driver.driver, 9126 &driver_attr_support_pci_lane_margining); 9127 9128 pci_unregister_driver(&megasas_pci_driver); 9129 megasas_exit_debugfs(); 9130 unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl"); 9131 } 9132 9133 module_init(megasas_init); 9134 module_exit(megasas_exit); 9135