// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Linux MegaRAID driver for SAS based RAID controllers
 *
 * Copyright (c) 2003-2013 LSI Corporation
 * Copyright (c) 2013-2016 Avago Technologies
 * Copyright (c) 2016-2018 Broadcom Inc.
 *
 * Authors: Broadcom Inc.
 *          Sreenivas Bagalkote
 *          Sumant Patro
 *          Bo Yang
 *          Adam Radford
 *          Kashyap Desai <kashyap.desai@broadcom.com>
 *          Sumit Saxena <sumit.saxena@broadcom.com>
 *
 * Send feedback to: megaraidlinux.pdl@broadcom.com
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/list.h>
#include <linux/moduleparam.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/uio.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/unaligned.h>
#include <linux/fs.h>
#include <linux/compat.h>
#include <linux/blkdev.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/vmalloc.h>
#include <linux/irq_poll.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_dbg.h>
#include "megaraid_sas_fusion.h"
#include "megaraid_sas.h"

/*
 * Number of sectors per IO command
 * Will be set in megasas_init_mfi if user does not provide
 */
static unsigned int max_sectors;
module_param_named(max_sectors, max_sectors, int, 0444);
MODULE_PARM_DESC(max_sectors,
	"Maximum number of sectors per IO command");

static int msix_disable;
module_param(msix_disable, int, 0444);
MODULE_PARM_DESC(msix_disable, "Disable MSI-X interrupt handling. Default: 0");

static unsigned int msix_vectors;
module_param(msix_vectors, int, 0444);
MODULE_PARM_DESC(msix_vectors, "MSI-X max vector count. Default: Set by FW");

static int allow_vf_ioctls;
module_param(allow_vf_ioctls, int, 0444);
MODULE_PARM_DESC(allow_vf_ioctls, "Allow ioctls in SR-IOV VF mode. Default: 0");

static unsigned int throttlequeuedepth = MEGASAS_THROTTLE_QUEUE_DEPTH;
module_param(throttlequeuedepth, int, 0444);
MODULE_PARM_DESC(throttlequeuedepth,
	"Adapter queue depth when throttled due to I/O timeout. Default: 16");

unsigned int resetwaittime = MEGASAS_RESET_WAIT_TIME;
module_param(resetwaittime, int, 0444);
MODULE_PARM_DESC(resetwaittime, "Wait time in (1-180s) after I/O timeout before resetting adapter. Default: 180s");

static int smp_affinity_enable = 1;
module_param(smp_affinity_enable, int, 0444);
MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable Default: enable(1)");

static int rdpq_enable = 1;
module_param(rdpq_enable, int, 0444);
MODULE_PARM_DESC(rdpq_enable, "Allocate reply queue in chunks for large queue depth enable/disable Default: enable(1)");

unsigned int dual_qdepth_disable;
module_param(dual_qdepth_disable, int, 0444);
MODULE_PARM_DESC(dual_qdepth_disable, "Disable dual queue depth feature. Default: 0");

static unsigned int scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT;
module_param(scmd_timeout, int, 0444);
MODULE_PARM_DESC(scmd_timeout, "scsi command timeout (10-90s), default 90s. See megasas_reset_timer.");

int perf_mode = -1;
module_param(perf_mode, int, 0444);
MODULE_PARM_DESC(perf_mode, "Performance mode (only for Aero adapters), options:\n\t\t"
	"0 - balanced: High iops and low latency queues are allocated &\n\t\t"
	"interrupt coalescing is enabled only on high iops queues\n\t\t"
	"1 - iops: High iops queues are not allocated &\n\t\t"
	"interrupt coalescing is enabled on all queues\n\t\t"
	"2 - latency: High iops queues are not allocated &\n\t\t"
	"interrupt coalescing is disabled on all queues\n\t\t"
	"default mode is 'balanced'"
	);

int event_log_level = MFI_EVT_CLASS_CRITICAL;
module_param(event_log_level, int, 0644);
MODULE_PARM_DESC(event_log_level, "Asynchronous event logging level- range is: -2(CLASS_DEBUG) to 4(CLASS_DEAD), Default: 2(CLASS_CRITICAL)");

unsigned int enable_sdev_max_qd;
module_param(enable_sdev_max_qd, int, 0444);
MODULE_PARM_DESC(enable_sdev_max_qd, "Enable sdev max qd as can_queue. Default: 0");

int poll_queues;
module_param(poll_queues, int, 0444);
MODULE_PARM_DESC(poll_queues, "Number of queues to be used for io_uring poll mode.\n\t\t"
	"This parameter is effective only if host_tagset_enable=1 &\n\t\t"
	"It is not applicable for MFI_SERIES. &\n\t\t"
	"Driver will work in latency mode. &\n\t\t"
	"High iops queues are not allocated &\n\t\t"
	);

int host_tagset_enable = 1;
module_param(host_tagset_enable, int, 0444);
MODULE_PARM_DESC(host_tagset_enable, "Shared host tagset enable/disable Default: enable(1)");

MODULE_LICENSE("GPL");
MODULE_VERSION(MEGASAS_VERSION);
MODULE_AUTHOR("megaraidlinux.pdl@broadcom.com");
MODULE_DESCRIPTION("Broadcom MegaRAID SAS Driver");

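/*
 * Example (illustrative only) of overriding the defaults above at module
 * load time; the values shown are hypothetical and any of the parameters
 * declared in this file can be combined the same way:
 *
 *	modprobe megaraid_sas max_sectors=128 perf_mode=2 poll_queues=4
 */
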
int megasas_transition_to_ready(struct megasas_instance *instance, int ocr);
static int megasas_get_pd_list(struct megasas_instance *instance);
static int megasas_ld_list_query(struct megasas_instance *instance,
				 u8 query_type);
static int megasas_issue_init_mfi(struct megasas_instance *instance);
static int megasas_register_aen(struct megasas_instance *instance,
				u32 seq_num, u32 class_locale_word);
static void megasas_get_pd_info(struct megasas_instance *instance,
				struct scsi_device *sdev);
static void
megasas_set_ld_removed_by_fw(struct megasas_instance *instance);

/*
 * PCI ID table for all supported controllers
 */
static struct pci_device_id megasas_pci_table[] = {

	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1064R)},
	/* xscale IOP */
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078R)},
	/* ppc IOP */
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078DE)},
	/* ppc IOP */
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078GEN2)},
	/* gen2*/
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0079GEN2)},
	/* gen2*/
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0073SKINNY)},
	/* skinny*/
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0071SKINNY)},
	/* skinny*/
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VERDE_ZCR)},
	/* xscale IOP, vega */
	{PCI_DEVICE(PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_DELL_PERC5)},
	/* xscale IOP */
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FUSION)},
	/* Fusion */
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_PLASMA)},
	/* Plasma */
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INVADER)},
	/* Invader */
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FURY)},
	/* Fury */
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INTRUDER)},
	/* Intruder */
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INTRUDER_24)},
	/* Intruder 24 port*/
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CUTLASS_52)},
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CUTLASS_53)},
	/* VENTURA */
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VENTURA)},
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CRUSADER)},
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_HARPOON)},
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_TOMCAT)},
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VENTURA_4PORT)},
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CRUSADER_4PORT)},
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E1)},
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E2)},
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E5)},
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E6)},
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E0)},
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E3)},
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E4)},
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E7)},
	{}
};

MODULE_DEVICE_TABLE(pci, megasas_pci_table);

static int megasas_mgmt_majorno;
struct megasas_mgmt_info megasas_mgmt_info;
static struct fasync_struct *megasas_async_queue;
static DEFINE_MUTEX(megasas_async_queue_mutex);

static int megasas_poll_wait_aen;
static DECLARE_WAIT_QUEUE_HEAD(megasas_poll_wait);
static u32 support_poll_for_event;
u32 megasas_dbg_lvl;
static u32 support_device_change;
static bool support_nvme_encapsulation;
static bool support_pci_lane_margining;

/* define lock for aen poll */
static DEFINE_SPINLOCK(poll_aen_lock);

extern struct dentry *megasas_debugfs_root;
extern int megasas_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num);

void
megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
		     u8 alt_status);
static u32
megasas_read_fw_status_reg_gen2(struct megasas_instance *instance);
static int
megasas_adp_reset_gen2(struct megasas_instance *instance,
		       struct megasas_register_set __iomem *reg_set);
static irqreturn_t megasas_isr(int irq, void *devp);
static u32
megasas_init_adapter_mfi(struct megasas_instance *instance);
u32
megasas_build_and_issue_cmd(struct megasas_instance *instance,
			    struct scsi_cmnd *scmd);
static void megasas_complete_cmd_dpc(unsigned long instance_addr);
int
wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
	      int seconds);
void megasas_fusion_ocr_wq(struct work_struct *work);
static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
					 int initial);
static int
megasas_set_dma_mask(struct megasas_instance *instance);
static int
megasas_alloc_ctrl_mem(struct megasas_instance *instance);
static inline void
megasas_free_ctrl_mem(struct megasas_instance *instance);
static inline int
megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance);
static inline void
megasas_free_ctrl_dma_buffers(struct megasas_instance *instance);
static inline void
megasas_init_ctrl_params(struct megasas_instance *instance);

u32 megasas_readl(struct megasas_instance *instance,
		  const volatile void __iomem *addr)
{
	u32 i = 0, ret_val;
	/*
	 * Due to a HW errata in Aero controllers, reads to certain
	 * Fusion registers could intermittently return all zeroes.
	 * This behavior is transient in nature and subsequent reads will
	 * return valid value. As a workaround in driver, retry readl for
	 * up to thirty times until a non-zero value is read.
	 */
	if (instance->adapter_type == AERO_SERIES) {
		do {
			ret_val = readl(addr);
			i++;
		} while (ret_val == 0 && i < 30);
		return ret_val;
	} else {
		return readl(addr);
	}
}

/**
 * megasas_set_dma_settings -	Populate DMA address, length and flags for DCMDs
 * @instance:			Adapter soft state
 * @dcmd:			DCMD frame inside MFI command
 * @dma_addr:			DMA address of buffer to be passed to FW
 * @dma_len:			Length of DMA buffer to be passed to FW
 * @return:			void
 */
void megasas_set_dma_settings(struct megasas_instance *instance,
			      struct megasas_dcmd_frame *dcmd,
			      dma_addr_t dma_addr, u32 dma_len)
{
	if (instance->consistent_mask_64bit) {
		dcmd->sgl.sge64[0].phys_addr = cpu_to_le64(dma_addr);
		dcmd->sgl.sge64[0].length = cpu_to_le32(dma_len);
		dcmd->flags = cpu_to_le16(dcmd->flags | MFI_FRAME_SGL64);

	} else {
		dcmd->sgl.sge32[0].phys_addr =
			cpu_to_le32(lower_32_bits(dma_addr));
		dcmd->sgl.sge32[0].length = cpu_to_le32(dma_len);
		dcmd->flags = cpu_to_le16(dcmd->flags);
	}
}

static void
megasas_issue_dcmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
{
	instance->instancet->fire_cmd(instance,
		cmd->frame_phys_addr, 0, instance->reg_set);
	return;
}

/**
 * megasas_get_cmd -	Get a command from the free pool
 * @instance:		Adapter soft state
 *
 * Returns a free command from the pool
 */
struct megasas_cmd *megasas_get_cmd(struct megasas_instance
						  *instance)
{
	unsigned long flags;
	struct megasas_cmd *cmd = NULL;

	spin_lock_irqsave(&instance->mfi_pool_lock, flags);

	if (!list_empty(&instance->cmd_pool)) {
		cmd = list_entry((&instance->cmd_pool)->next,
				 struct megasas_cmd, list);
		list_del_init(&cmd->list);
	} else {
		dev_err(&instance->pdev->dev, "Command pool empty!\n");
	}

	spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
	return cmd;
}

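/*
 * Illustrative sketch (not a call site in this file) of how the command
 * pool above is typically used by internal DCMD paths in this driver:
 *
 *	cmd = megasas_get_cmd(instance);
 *	if (!cmd)
 *		return -ENOMEM;
 *	... build the DCMD frame, issue it and wait for completion ...
 *	megasas_return_cmd(instance, cmd);
 */
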
/**
 * megasas_return_cmd -	Return a cmd to free command pool
 * @instance:		Adapter soft state
 * @cmd:		Command packet to be returned to free command pool
 */
void
megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
{
	unsigned long flags;
	u32 blk_tags;
	struct megasas_cmd_fusion *cmd_fusion;
	struct fusion_context *fusion = instance->ctrl_context;

	/* This flag is used only for fusion adapter.
	 * Wait for Interrupt for Polled mode DCMD
	 */
	if (cmd->flags & DRV_DCMD_POLLED_MODE)
		return;

	spin_lock_irqsave(&instance->mfi_pool_lock, flags);

	if (fusion) {
		blk_tags = instance->max_scsi_cmds + cmd->index;
		cmd_fusion = fusion->cmd_list[blk_tags];
		megasas_return_cmd_fusion(instance, cmd_fusion);
	}
	cmd->scmd = NULL;
	cmd->frame_count = 0;
	cmd->flags = 0;
	memset(cmd->frame, 0, instance->mfi_frame_size);
	cmd->frame->io.context = cpu_to_le32(cmd->index);
	if (!fusion && reset_devices)
		cmd->frame->hdr.cmd = MFI_CMD_INVALID;
	list_add(&cmd->list, (&instance->cmd_pool)->next);

	spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);

}

static const char *
format_timestamp(uint32_t timestamp)
{
	static char buffer[32];

	if ((timestamp & 0xff000000) == 0xff000000)
		snprintf(buffer, sizeof(buffer), "boot + %us", timestamp &
		0x00ffffff);
	else
		snprintf(buffer, sizeof(buffer), "%us", timestamp);
	return buffer;
}

static const char *
format_class(int8_t class)
{
	static char buffer[6];

	switch (class) {
	case MFI_EVT_CLASS_DEBUG:
		return "debug";
	case MFI_EVT_CLASS_PROGRESS:
		return "progress";
	case MFI_EVT_CLASS_INFO:
		return "info";
	case MFI_EVT_CLASS_WARNING:
		return "WARN";
	case MFI_EVT_CLASS_CRITICAL:
		return "CRIT";
	case MFI_EVT_CLASS_FATAL:
		return "FATAL";
	case MFI_EVT_CLASS_DEAD:
		return "DEAD";
	default:
		snprintf(buffer, sizeof(buffer), "%d", class);
		return buffer;
	}
}

/**
 * megasas_decode_evt: Decode FW AEN event and print critical event
 * for information.
 * @instance:			Adapter soft state
 */
static void
megasas_decode_evt(struct megasas_instance *instance)
{
	struct megasas_evt_detail *evt_detail = instance->evt_detail;
	union megasas_evt_class_locale class_locale;
	class_locale.word = le32_to_cpu(evt_detail->cl.word);

	if ((event_log_level < MFI_EVT_CLASS_DEBUG) ||
	    (event_log_level > MFI_EVT_CLASS_DEAD)) {
		printk(KERN_WARNING "megaraid_sas: provided event log level is out of range, setting it to default 2(CLASS_CRITICAL), permissible range is: -2 to 4\n");
		event_log_level = MFI_EVT_CLASS_CRITICAL;
	}

	if (class_locale.members.class >= event_log_level)
		dev_info(&instance->pdev->dev, "%d (%s/0x%04x/%s) - %s\n",
			le32_to_cpu(evt_detail->seq_num),
			format_timestamp(le32_to_cpu(evt_detail->time_stamp)),
			(class_locale.members.locale),
			format_class(class_locale.members.class),
			evt_detail->description);

	if (megasas_dbg_lvl & LD_PD_DEBUG)
		dev_info(&instance->pdev->dev,
			 "evt_detail.args.ld.target_id/index %d/%d\n",
			 evt_detail->args.ld.target_id, evt_detail->args.ld.ld_index);

}

/*
 * The following functions are defined for xscale
 * (deviceid : 1064R, PERC5) controllers
 */

/**
 * megasas_enable_intr_xscale -	Enables interrupts
 * @instance:	Adapter soft state
 */
static inline void
megasas_enable_intr_xscale(struct megasas_instance *instance)
{
	struct megasas_register_set __iomem *regs;

	regs = instance->reg_set;
	writel(0, &(regs)->outbound_intr_mask);

	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_mask);
}

/**
 * megasas_disable_intr_xscale - Disables interrupt
 * @instance:	Adapter soft state
 */
static inline void
megasas_disable_intr_xscale(struct megasas_instance *instance)
{
	struct megasas_register_set __iomem *regs;
	u32 mask = 0x1f;

	regs = instance->reg_set;
	writel(mask, &regs->outbound_intr_mask);
	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_mask);
}

/**
 * megasas_read_fw_status_reg_xscale - returns the current FW status value
 * @instance:	Adapter soft state
 */
static u32
megasas_read_fw_status_reg_xscale(struct megasas_instance *instance)
{
	return readl(&instance->reg_set->outbound_msg_0);
}

/**
 * megasas_clear_intr_xscale -	Check & clear interrupt
 * @instance:	Adapter soft state
 */
static int
megasas_clear_intr_xscale(struct megasas_instance *instance)
{
	u32 status;
	u32 mfiStatus = 0;
	struct megasas_register_set __iomem *regs;
	regs = instance->reg_set;

	/*
	 * Check if it is our interrupt
	 */
	status = readl(&regs->outbound_intr_status);

	if (status & MFI_OB_INTR_STATUS_MASK)
		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
	if (status & MFI_XSCALE_OMR0_CHANGE_INTERRUPT)
		mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;

	/*
	 * Clear the interrupt by writing back the same value
	 */
	if (mfiStatus)
		writel(status, &regs->outbound_intr_status);

	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_status);

	return mfiStatus;
}

/**
 * megasas_fire_cmd_xscale -	Sends command to the FW
 * @instance:		Adapter soft state
 * @frame_phys_addr :	Physical address of cmd
 * @frame_count :	Number of frames for the command
 * @regs :		MFI register set
 */
static inline void
megasas_fire_cmd_xscale(struct megasas_instance *instance,
		dma_addr_t frame_phys_addr,
		u32 frame_count,
		struct megasas_register_set __iomem *regs)
{
	unsigned long flags;

	spin_lock_irqsave(&instance->hba_lock, flags);
	writel((frame_phys_addr >> 3)|(frame_count),
	       &(regs)->inbound_queue_port);
	spin_unlock_irqrestore(&instance->hba_lock, flags);
}

/**
 * megasas_adp_reset_xscale -  For controller reset
 * @instance:	Adapter soft state
 * @regs:	MFI register set
 */
static int
megasas_adp_reset_xscale(struct megasas_instance *instance,
	struct megasas_register_set __iomem *regs)
{
	u32 i;
	u32 pcidata;

	writel(MFI_ADP_RESET, &regs->inbound_doorbell);

	for (i = 0; i < 3; i++)
		msleep(1000); /* sleep for 3 secs */
	pcidata = 0;
	pci_read_config_dword(instance->pdev, MFI_1068_PCSR_OFFSET, &pcidata);
	dev_notice(&instance->pdev->dev, "pcidata = %x\n", pcidata);
	if (pcidata & 0x2) {
		dev_notice(&instance->pdev->dev, "mfi 1068 offset read=%x\n", pcidata);
		pcidata &= ~0x2;
		pci_write_config_dword(instance->pdev,
				MFI_1068_PCSR_OFFSET, pcidata);

		for (i = 0; i < 2; i++)
			msleep(1000); /* need to wait 2 secs again */

		pcidata = 0;
		pci_read_config_dword(instance->pdev,
				MFI_1068_FW_HANDSHAKE_OFFSET, &pcidata);
		dev_notice(&instance->pdev->dev, "1068 offset handshake read=%x\n", pcidata);
		if ((pcidata & 0xffff0000) == MFI_1068_FW_READY) {
			dev_notice(&instance->pdev->dev, "1068 offset pcidt=%x\n", pcidata);
			pcidata = 0;
			pci_write_config_dword(instance->pdev,
					MFI_1068_FW_HANDSHAKE_OFFSET, pcidata);
		}
	}
	return 0;
}

/**
 * megasas_check_reset_xscale -	For controller reset check
 * @instance:	Adapter soft state
 * @regs:	MFI register set
 */
static int
megasas_check_reset_xscale(struct megasas_instance *instance,
		struct megasas_register_set __iomem *regs)
{
	if ((atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) &&
	    (le32_to_cpu(*instance->consumer) ==
		MEGASAS_ADPRESET_INPROG_SIGN))
		return 1;
	return 0;
}

static struct megasas_instance_template megasas_instance_template_xscale = {

	.fire_cmd = megasas_fire_cmd_xscale,
	.enable_intr = megasas_enable_intr_xscale,
	.disable_intr = megasas_disable_intr_xscale,
	.clear_intr = megasas_clear_intr_xscale,
	.read_fw_status_reg = megasas_read_fw_status_reg_xscale,
	.adp_reset = megasas_adp_reset_xscale,
	.check_reset = megasas_check_reset_xscale,
	.service_isr = megasas_isr,
	.tasklet = megasas_complete_cmd_dpc,
	.init_adapter = megasas_init_adapter_mfi,
	.build_and_issue_cmd = megasas_build_and_issue_cmd,
	.issue_dcmd = megasas_issue_dcmd,
};

/*
 * This is the end of set of functions & definitions specific
 * to xscale (deviceid : 1064R, PERC5) controllers
 */

/*
 * The following functions are defined for ppc (deviceid : 0x60)
 * controllers
 */

/**
 * megasas_enable_intr_ppc -	Enables interrupts
 * @instance:	Adapter soft state
 */
static inline void
megasas_enable_intr_ppc(struct megasas_instance *instance)
{
	struct megasas_register_set __iomem *regs;

	regs = instance->reg_set;
	writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);

	writel(~0x80000000, &(regs)->outbound_intr_mask);

	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_mask);
}

/**
 * megasas_disable_intr_ppc -	Disable interrupt
 * @instance:	Adapter soft state
 */
static inline void
megasas_disable_intr_ppc(struct megasas_instance *instance)
{
	struct megasas_register_set __iomem *regs;
	u32 mask = 0xFFFFFFFF;

	regs = instance->reg_set;
	writel(mask, &regs->outbound_intr_mask);
	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_mask);
}

/**
 * megasas_read_fw_status_reg_ppc - returns the current FW status value
 * @instance:	Adapter soft state
 */
static u32
megasas_read_fw_status_reg_ppc(struct megasas_instance *instance)
{
	return readl(&instance->reg_set->outbound_scratch_pad_0);
}

/**
 * megasas_clear_intr_ppc -	Check & clear interrupt
 * @instance:	Adapter soft state
 */
static int
megasas_clear_intr_ppc(struct megasas_instance *instance)
{
	u32 status, mfiStatus = 0;
	struct megasas_register_set __iomem *regs;
	regs = instance->reg_set;

	/*
	 * Check if it is our interrupt
	 */
	status = readl(&regs->outbound_intr_status);

	if (status & MFI_REPLY_1078_MESSAGE_INTERRUPT)
		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;

	if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT)
		mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;

	/*
	 * Clear the interrupt by writing back the same value
	 */
	writel(status, &regs->outbound_doorbell_clear);

	/* Dummy readl to force pci flush */
	readl(&regs->outbound_doorbell_clear);

	return mfiStatus;
}

/**
 * megasas_fire_cmd_ppc -	Sends command to the FW
 * @instance:		Adapter soft state
 * @frame_phys_addr:	Physical address of cmd
 * @frame_count:	Number of frames for the command
 * @regs:		MFI register set
 */
static inline void
megasas_fire_cmd_ppc(struct megasas_instance *instance,
		dma_addr_t frame_phys_addr,
		u32 frame_count,
		struct megasas_register_set __iomem *regs)
{
	unsigned long flags;

	spin_lock_irqsave(&instance->hba_lock, flags);
	writel((frame_phys_addr | (frame_count<<1))|1,
			&(regs)->inbound_queue_port);
	spin_unlock_irqrestore(&instance->hba_lock, flags);
}

/**
 * megasas_check_reset_ppc -	For controller reset check
 * @instance:	Adapter soft state
 * @regs:	MFI register set
 */
static int
megasas_check_reset_ppc(struct megasas_instance *instance,
		struct megasas_register_set __iomem *regs)
{
	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
		return 1;

	return 0;
}

static struct megasas_instance_template megasas_instance_template_ppc = {

	.fire_cmd = megasas_fire_cmd_ppc,
	.enable_intr = megasas_enable_intr_ppc,
	.disable_intr = megasas_disable_intr_ppc,
	.clear_intr = megasas_clear_intr_ppc,
	.read_fw_status_reg = megasas_read_fw_status_reg_ppc,
	.adp_reset = megasas_adp_reset_xscale,
	.check_reset = megasas_check_reset_ppc,
	.service_isr = megasas_isr,
	.tasklet = megasas_complete_cmd_dpc,
	.init_adapter = megasas_init_adapter_mfi,
	.build_and_issue_cmd = megasas_build_and_issue_cmd,
	.issue_dcmd = megasas_issue_dcmd,
};

/**
 * megasas_enable_intr_skinny -	Enables interrupts
 * @instance:	Adapter soft state
 */
static inline void
megasas_enable_intr_skinny(struct megasas_instance *instance)
{
	struct megasas_register_set __iomem *regs;

	regs = instance->reg_set;
	writel(0xFFFFFFFF, &(regs)->outbound_intr_mask);

	writel(~MFI_SKINNY_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);

	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_mask);
}

/**
 * megasas_disable_intr_skinny -	Disables interrupt
 * @instance:	Adapter soft state
 */
static inline void
megasas_disable_intr_skinny(struct megasas_instance *instance)
{
	struct megasas_register_set __iomem *regs;
	u32 mask = 0xFFFFFFFF;

	regs = instance->reg_set;
	writel(mask, &regs->outbound_intr_mask);
	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_mask);
}

/**
 * megasas_read_fw_status_reg_skinny - returns the current FW status value
 * @instance:	Adapter soft state
 */
static u32
megasas_read_fw_status_reg_skinny(struct megasas_instance *instance)
{
	return readl(&instance->reg_set->outbound_scratch_pad_0);
}

/**
 * megasas_clear_intr_skinny -	Check & clear interrupt
 * @instance:	Adapter soft state
 */
static int
megasas_clear_intr_skinny(struct megasas_instance *instance)
{
	u32 status;
	u32 mfiStatus = 0;
	struct megasas_register_set __iomem *regs;
	regs = instance->reg_set;

	/*
	 * Check if it is our interrupt
	 */
	status = readl(&regs->outbound_intr_status);

	if (!(status & MFI_SKINNY_ENABLE_INTERRUPT_MASK)) {
		return 0;
	}

	/*
	 * Check if it is our interrupt
	 */
	if ((megasas_read_fw_status_reg_skinny(instance) & MFI_STATE_MASK) ==
	    MFI_STATE_FAULT) {
		mfiStatus = MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
	} else
		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;

	/*
	 * Clear the interrupt by writing back the same value
	 */
	writel(status, &regs->outbound_intr_status);

	/*
	 * dummy read to flush PCI
	 */
	readl(&regs->outbound_intr_status);

	return mfiStatus;
}

/**
 * megasas_fire_cmd_skinny -	Sends command to the FW
 * @instance:		Adapter soft state
 * @frame_phys_addr:	Physical address of cmd
 * @frame_count:	Number of frames for the command
 * @regs:		MFI register set
 */
static inline void
megasas_fire_cmd_skinny(struct megasas_instance *instance,
			dma_addr_t frame_phys_addr,
			u32 frame_count,
			struct megasas_register_set __iomem *regs)
{
	unsigned long flags;

	spin_lock_irqsave(&instance->hba_lock, flags);
	writel(upper_32_bits(frame_phys_addr),
	       &(regs)->inbound_high_queue_port);
	writel((lower_32_bits(frame_phys_addr) | (frame_count<<1))|1,
	       &(regs)->inbound_low_queue_port);
	spin_unlock_irqrestore(&instance->hba_lock, flags);
}

/**
 * megasas_check_reset_skinny -	For controller reset check
 * @instance:	Adapter soft state
 * @regs:	MFI register set
 */
static int
megasas_check_reset_skinny(struct megasas_instance *instance,
				struct megasas_register_set __iomem *regs)
{
	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
		return 1;

	return 0;
}

static struct megasas_instance_template megasas_instance_template_skinny = {

	.fire_cmd = megasas_fire_cmd_skinny,
	.enable_intr = megasas_enable_intr_skinny,
	.disable_intr = megasas_disable_intr_skinny,
	.clear_intr = megasas_clear_intr_skinny,
	.read_fw_status_reg = megasas_read_fw_status_reg_skinny,
	.adp_reset = megasas_adp_reset_gen2,
	.check_reset = megasas_check_reset_skinny,
	.service_isr = megasas_isr,
	.tasklet = megasas_complete_cmd_dpc,
	.init_adapter = megasas_init_adapter_mfi,
	.build_and_issue_cmd = megasas_build_and_issue_cmd,
	.issue_dcmd = megasas_issue_dcmd,
};


/*
 * The following functions are defined for gen2 (deviceid : 0x78 0x79)
 * controllers
 */

/**
 * megasas_enable_intr_gen2 -  Enables interrupts
 * @instance:	Adapter soft state
 */
static inline void
megasas_enable_intr_gen2(struct megasas_instance *instance)
{
	struct megasas_register_set __iomem *regs;

	regs = instance->reg_set;
	writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);

	/* write ~0x00000005 (4 & 1) to the intr mask*/
	writel(~MFI_GEN2_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);

	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_mask);
}

/**
 * megasas_disable_intr_gen2 - Disables interrupt
 * @instance:	Adapter soft state
 */
static inline void
megasas_disable_intr_gen2(struct megasas_instance *instance)
{
	struct megasas_register_set __iomem *regs;
	u32 mask = 0xFFFFFFFF;

	regs = instance->reg_set;
	writel(mask, &regs->outbound_intr_mask);
	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_mask);
}

/**
 * megasas_read_fw_status_reg_gen2 - returns the current FW status value
 * @instance:	Adapter soft state
 */
static u32
megasas_read_fw_status_reg_gen2(struct megasas_instance *instance)
{
	return readl(&instance->reg_set->outbound_scratch_pad_0);
}

/**
 * megasas_clear_intr_gen2 -      Check & clear interrupt
 * @instance:	Adapter soft state
 */
static int
megasas_clear_intr_gen2(struct megasas_instance *instance)
{
	u32 status;
	u32 mfiStatus = 0;
	struct megasas_register_set __iomem *regs;
	regs = instance->reg_set;

	/*
	 * Check if it is our interrupt
	 */
	status = readl(&regs->outbound_intr_status);

	if (status & MFI_INTR_FLAG_REPLY_MESSAGE) {
		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
	}
	if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT) {
		mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
	}

	/*
	 * Clear the interrupt by writing back the same value
	 */
	if (mfiStatus)
		writel(status, &regs->outbound_doorbell_clear);

	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_status);

	return mfiStatus;
}

/**
 * megasas_fire_cmd_gen2 -     Sends command to the FW
 * @instance:		Adapter soft state
 * @frame_phys_addr:	Physical address of cmd
 * @frame_count:	Number of frames for the command
 * @regs:		MFI register set
 */
static inline void
megasas_fire_cmd_gen2(struct megasas_instance *instance,
			dma_addr_t frame_phys_addr,
			u32 frame_count,
			struct megasas_register_set __iomem *regs)
{
	unsigned long flags;

	spin_lock_irqsave(&instance->hba_lock, flags);
	writel((frame_phys_addr | (frame_count<<1))|1,
			&(regs)->inbound_queue_port);
	spin_unlock_irqrestore(&instance->hba_lock, flags);
}

/**
 * megasas_adp_reset_gen2 -	For controller reset
 * @instance:	Adapter soft state
 * @reg_set:	MFI register set
 */
static int
megasas_adp_reset_gen2(struct megasas_instance *instance,
			struct megasas_register_set __iomem *reg_set)
{
	u32 retry = 0 ;
	u32 HostDiag;
	u32 __iomem *seq_offset = &reg_set->seq_offset;
	u32 __iomem *hostdiag_offset = &reg_set->host_diag;

	if (instance->instancet == &megasas_instance_template_skinny) {
		seq_offset = &reg_set->fusion_seq_offset;
		hostdiag_offset = &reg_set->fusion_host_diag;
	}

	writel(0, seq_offset);
	writel(4, seq_offset);
	writel(0xb, seq_offset);
	writel(2, seq_offset);
	writel(7, seq_offset);
	writel(0xd, seq_offset);

	msleep(1000);

	HostDiag = (u32)readl(hostdiag_offset);

	while (!(HostDiag & DIAG_WRITE_ENABLE)) {
		msleep(100);
		HostDiag = (u32)readl(hostdiag_offset);
		dev_notice(&instance->pdev->dev, "RESETGEN2: retry=%x, hostdiag=%x\n",
			   retry, HostDiag);

		if (retry++ >= 100)
			return 1;

	}

	dev_notice(&instance->pdev->dev, "ADP_RESET_GEN2: HostDiag=%x\n", HostDiag);

	writel((HostDiag | DIAG_RESET_ADAPTER), hostdiag_offset);

	ssleep(10);

	HostDiag = (u32)readl(hostdiag_offset);
	while (HostDiag & DIAG_RESET_ADAPTER) {
		msleep(100);
		HostDiag = (u32)readl(hostdiag_offset);
		dev_notice(&instance->pdev->dev, "RESET_GEN2: retry=%x, hostdiag=%x\n",
			   retry, HostDiag);

		if (retry++ >= 1000)
			return 1;

	}
	return 0;
}

/**
 * megasas_check_reset_gen2 -	For controller reset check
 * @instance:	Adapter soft state
 * @regs:	MFI register set
 */
static int
megasas_check_reset_gen2(struct megasas_instance *instance,
		struct megasas_register_set __iomem *regs)
{
	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
		return 1;

	return 0;
}

static struct megasas_instance_template megasas_instance_template_gen2 = {

	.fire_cmd = megasas_fire_cmd_gen2,
	.enable_intr = megasas_enable_intr_gen2,
	.disable_intr = megasas_disable_intr_gen2,
	.clear_intr = megasas_clear_intr_gen2,
	.read_fw_status_reg = megasas_read_fw_status_reg_gen2,
	.adp_reset = megasas_adp_reset_gen2,
	.check_reset = megasas_check_reset_gen2,
	.service_isr = megasas_isr,
	.tasklet = megasas_complete_cmd_dpc,
	.init_adapter = megasas_init_adapter_mfi,
	.build_and_issue_cmd = megasas_build_and_issue_cmd,
	.issue_dcmd = megasas_issue_dcmd,
};

/*
 * This is the end of set of functions & definitions
 * specific to gen2 (deviceid : 0x78, 0x79) controllers
 */

/*
 * Template added for TB (Fusion)
 */
extern struct megasas_instance_template megasas_instance_template_fusion;

/**
 * megasas_issue_polled -	Issues a polling command
 * @instance:			Adapter soft state
 * @cmd:			Command packet to be issued
 *
 * For polling, MFI requires the cmd_status to be set to MFI_STAT_INVALID_STATUS before posting.
 */
int
megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd)
{
	struct megasas_header *frame_hdr = &cmd->frame->hdr;

	frame_hdr->cmd_status = MFI_STAT_INVALID_STATUS;
	frame_hdr->flags |= cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);

	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
		dev_err(&instance->pdev->dev, "Failed from %s %d\n",
			__func__, __LINE__);
		return DCMD_INIT;
	}

	instance->instancet->issue_dcmd(instance, cmd);

	return wait_and_poll(instance, cmd, instance->requestorId ?
			MEGASAS_ROUTINE_WAIT_TIME_VF : MFI_IO_TIMEOUT_SECS);
}

/**
 * megasas_issue_blocked_cmd -	Synchronous wrapper around regular FW cmds
 * @instance:			Adapter soft state
 * @cmd:			Command to be issued
 * @timeout:			Timeout in seconds
 *
 * This function waits on an event for the command to be returned from ISR.
 * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs
 * Used to issue ioctl commands.
 */
int
megasas_issue_blocked_cmd(struct megasas_instance *instance,
			  struct megasas_cmd *cmd, int timeout)
{
	int ret = 0;
	cmd->cmd_status_drv = DCMD_INIT;

	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
		dev_err(&instance->pdev->dev, "Failed from %s %d\n",
			__func__, __LINE__);
		return DCMD_INIT;
	}

	instance->instancet->issue_dcmd(instance, cmd);

	if (timeout) {
		ret = wait_event_timeout(instance->int_cmd_wait_q,
			cmd->cmd_status_drv != DCMD_INIT, timeout * HZ);
		if (!ret) {
			dev_err(&instance->pdev->dev,
				"DCMD(opcode: 0x%x) is timed out, func:%s\n",
				cmd->frame->dcmd.opcode, __func__);
			return DCMD_TIMEOUT;
		}
	} else
		wait_event(instance->int_cmd_wait_q,
			cmd->cmd_status_drv != DCMD_INIT);

	return cmd->cmd_status_drv;
}

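/*
 * Illustrative call pattern for the blocking wrapper above (a sketch, not a
 * call site from this file): a caller builds a DCMD frame on a pool command
 * and then blocks for its completion:
 *
 *	ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
 *	if (ret == DCMD_TIMEOUT)
 *		... escalate (e.g. OCR) depending on the DCMD ...
 */
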
/**
 * megasas_issue_blocked_abort_cmd -	Aborts previously issued cmd
 * @instance:				Adapter soft state
 * @cmd_to_abort:			Previously issued cmd to be aborted
 * @timeout:				Timeout in seconds
 *
 * MFI firmware can abort previously issued AEN command (automatic event
 * notification). The megasas_issue_blocked_abort_cmd() issues such abort
 * cmd and waits for return status.
 * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs
 */
static int
megasas_issue_blocked_abort_cmd(struct megasas_instance *instance,
				struct megasas_cmd *cmd_to_abort, int timeout)
{
	struct megasas_cmd *cmd;
	struct megasas_abort_frame *abort_fr;
	int ret = 0;
	u32 opcode;

	cmd = megasas_get_cmd(instance);

	if (!cmd)
		return -1;

	abort_fr = &cmd->frame->abort;

	/*
	 * Prepare and issue the abort frame
	 */
	abort_fr->cmd = MFI_CMD_ABORT;
	abort_fr->cmd_status = MFI_STAT_INVALID_STATUS;
	abort_fr->flags = cpu_to_le16(0);
	abort_fr->abort_context = cpu_to_le32(cmd_to_abort->index);
	abort_fr->abort_mfi_phys_addr_lo =
		cpu_to_le32(lower_32_bits(cmd_to_abort->frame_phys_addr));
	abort_fr->abort_mfi_phys_addr_hi =
		cpu_to_le32(upper_32_bits(cmd_to_abort->frame_phys_addr));

	cmd->sync_cmd = 1;
	cmd->cmd_status_drv = DCMD_INIT;

	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
		dev_err(&instance->pdev->dev, "Failed from %s %d\n",
			__func__, __LINE__);
		return DCMD_INIT;
	}

	instance->instancet->issue_dcmd(instance, cmd);

	if (timeout) {
		ret = wait_event_timeout(instance->abort_cmd_wait_q,
			cmd->cmd_status_drv != DCMD_INIT, timeout * HZ);
		if (!ret) {
			opcode = cmd_to_abort->frame->dcmd.opcode;
			dev_err(&instance->pdev->dev,
				"Abort(to be aborted DCMD opcode: 0x%x) is timed out func:%s\n",
				opcode, __func__);
			return DCMD_TIMEOUT;
		}
	} else
		wait_event(instance->abort_cmd_wait_q,
			cmd->cmd_status_drv != DCMD_INIT);

	cmd->sync_cmd = 0;

	megasas_return_cmd(instance, cmd);
	return cmd->cmd_status_drv;
}

/**
 * megasas_make_sgl32 -	Prepares 32-bit SGL
 * @instance:		Adapter soft state
 * @scp:		SCSI command from the mid-layer
 * @mfi_sgl:		SGL to be filled in
 *
 * If successful, this function returns the number of SG elements. Otherwise,
 * it returns -1.
 */
static int
megasas_make_sgl32(struct megasas_instance *instance, struct scsi_cmnd *scp,
		   union megasas_sgl *mfi_sgl)
{
	int i;
	int sge_count;
	struct scatterlist *os_sgl;

	sge_count = scsi_dma_map(scp);
	BUG_ON(sge_count < 0);

	if (sge_count) {
		scsi_for_each_sg(scp, os_sgl, sge_count, i) {
			mfi_sgl->sge32[i].length = cpu_to_le32(sg_dma_len(os_sgl));
			mfi_sgl->sge32[i].phys_addr = cpu_to_le32(sg_dma_address(os_sgl));
		}
	}
	return sge_count;
}

/**
 * megasas_make_sgl64 -	Prepares 64-bit SGL
 * @instance:		Adapter soft state
 * @scp:		SCSI command from the mid-layer
 * @mfi_sgl:		SGL to be filled in
 *
 * If successful, this function returns the number of SG elements. Otherwise,
 * it returns -1.
 */
static int
megasas_make_sgl64(struct megasas_instance *instance, struct scsi_cmnd *scp,
		   union megasas_sgl *mfi_sgl)
{
	int i;
	int sge_count;
	struct scatterlist *os_sgl;

	sge_count = scsi_dma_map(scp);
	BUG_ON(sge_count < 0);

	if (sge_count) {
		scsi_for_each_sg(scp, os_sgl, sge_count, i) {
			mfi_sgl->sge64[i].length = cpu_to_le32(sg_dma_len(os_sgl));
			mfi_sgl->sge64[i].phys_addr = cpu_to_le64(sg_dma_address(os_sgl));
		}
	}
	return sge_count;
}

/**
 * megasas_make_sgl_skinny - Prepares IEEE SGL
 * @instance:		Adapter soft state
 * @scp:		SCSI command from the mid-layer
 * @mfi_sgl:		SGL to be filled in
 *
 * If successful, this function returns the number of SG elements. Otherwise,
 * it returns -1.
 */
static int
megasas_make_sgl_skinny(struct megasas_instance *instance,
			struct scsi_cmnd *scp, union megasas_sgl *mfi_sgl)
{
	int i;
	int sge_count;
	struct scatterlist *os_sgl;

	sge_count = scsi_dma_map(scp);

	if (sge_count) {
		scsi_for_each_sg(scp, os_sgl, sge_count, i) {
			mfi_sgl->sge_skinny[i].length =
				cpu_to_le32(sg_dma_len(os_sgl));
			mfi_sgl->sge_skinny[i].phys_addr =
				cpu_to_le64(sg_dma_address(os_sgl));
			mfi_sgl->sge_skinny[i].flag = cpu_to_le32(0);
		}
	}
	return sge_count;
}

/**
 * megasas_get_frame_count - Computes the number of frames
 * @frame_type		: type of frame- io or pthru frame
 * @sge_count		: number of sg elements
 *
 * Returns the number of frames required for number of sge's (sge_count)
 */

static u32 megasas_get_frame_count(struct megasas_instance *instance,
			u8 sge_count, u8 frame_type)
{
	int num_cnt;
	int sge_bytes;
	u32 sge_sz;
	u32 frame_count = 0;

	sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) :
	    sizeof(struct megasas_sge32);

	if (instance->flag_ieee) {
		sge_sz = sizeof(struct megasas_sge_skinny);
	}

	/*
	 * Main frame can contain 2 SGEs for 64-bit SGLs and
	 * 3 SGEs for 32-bit SGLs for ldio &
	 * 1 SGEs for 64-bit SGLs and
	 * 2 SGEs for 32-bit SGLs for pthru frame
	 */
	if (unlikely(frame_type == PTHRU_FRAME)) {
		if (instance->flag_ieee == 1) {
			num_cnt = sge_count - 1;
		} else if (IS_DMA64)
			num_cnt = sge_count - 1;
		else
			num_cnt = sge_count - 2;
	} else {
		if (instance->flag_ieee == 1) {
			num_cnt = sge_count - 1;
		} else if (IS_DMA64)
			num_cnt = sge_count - 2;
		else
			num_cnt = sge_count - 3;
	}

	if (num_cnt > 0) {
		sge_bytes = sge_sz * num_cnt;

		frame_count = (sge_bytes / MEGAMFI_FRAME_SIZE) +
		    ((sge_bytes % MEGAMFI_FRAME_SIZE) ? 1 : 0) ;
	}
	/* Main frame */
	frame_count += 1;

	if (frame_count > 7)
		frame_count = 8;
	return frame_count;
}

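/*
 * Worked example for the computation above (the SGE size is an assumption
 * based on the skinny SGE definition, shown only to illustrate the formula):
 * an IEEE/skinny LDIO with sge_count = 10 leaves num_cnt = 9 SGEs outside
 * the main frame. With a 16-byte skinny SGE that is 144 bytes, i.e. three
 * additional 64-byte MFI frames, so frame_count = 1 + 3 = 4 (capped at 8).
 */
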
/**
 * megasas_build_dcdb -	Prepares a direct cdb (DCDB) command
 * @instance:		Adapter soft state
 * @scp:		SCSI command
 * @cmd:		Command to be prepared in
 *
 * This function prepares CDB commands. These are typically pass-through
 * commands to the devices.
 */
static int
megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
		   struct megasas_cmd *cmd)
{
	u32 is_logical;
	u32 device_id;
	u16 flags = 0;
	struct megasas_pthru_frame *pthru;

	is_logical = MEGASAS_IS_LOGICAL(scp->device);
	device_id = MEGASAS_DEV_INDEX(scp);
	pthru = (struct megasas_pthru_frame *)cmd->frame;

	if (scp->sc_data_direction == DMA_TO_DEVICE)
		flags = MFI_FRAME_DIR_WRITE;
	else if (scp->sc_data_direction == DMA_FROM_DEVICE)
		flags = MFI_FRAME_DIR_READ;
	else if (scp->sc_data_direction == DMA_NONE)
		flags = MFI_FRAME_DIR_NONE;

	if (instance->flag_ieee == 1) {
		flags |= MFI_FRAME_IEEE;
	}

	/*
	 * Prepare the DCDB frame
	 */
	pthru->cmd = (is_logical) ? MFI_CMD_LD_SCSI_IO : MFI_CMD_PD_SCSI_IO;
	pthru->cmd_status = 0x0;
	pthru->scsi_status = 0x0;
	pthru->target_id = device_id;
	pthru->lun = scp->device->lun;
	pthru->cdb_len = scp->cmd_len;
	pthru->timeout = 0;
	pthru->pad_0 = 0;
	pthru->flags = cpu_to_le16(flags);
	pthru->data_xfer_len = cpu_to_le32(scsi_bufflen(scp));

	memcpy(pthru->cdb, scp->cmnd, scp->cmd_len);

	/*
	 * If the command is for the tape device, set the
	 * pthru timeout to the os layer timeout value.
	 */
	if (scp->device->type == TYPE_TAPE) {
		if (scsi_cmd_to_rq(scp)->timeout / HZ > 0xFFFF)
			pthru->timeout = cpu_to_le16(0xFFFF);
		else
			pthru->timeout = cpu_to_le16(scsi_cmd_to_rq(scp)->timeout / HZ);
	}

	/*
	 * Construct SGL
	 */
	if (instance->flag_ieee == 1) {
		pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64);
		pthru->sge_count = megasas_make_sgl_skinny(instance, scp,
						      &pthru->sgl);
	} else if (IS_DMA64) {
		pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64);
		pthru->sge_count = megasas_make_sgl64(instance, scp,
						      &pthru->sgl);
	} else
		pthru->sge_count = megasas_make_sgl32(instance, scp,
						      &pthru->sgl);

	if (pthru->sge_count > instance->max_num_sge) {
		dev_err(&instance->pdev->dev, "DCDB too many SGE NUM=%x\n",
			pthru->sge_count);
		return 0;
	}

	/*
	 * Sense info specific
	 */
	pthru->sense_len = SCSI_SENSE_BUFFERSIZE;
	pthru->sense_buf_phys_addr_hi =
		cpu_to_le32(upper_32_bits(cmd->sense_phys_addr));
	pthru->sense_buf_phys_addr_lo =
		cpu_to_le32(lower_32_bits(cmd->sense_phys_addr));

	/*
	 * Compute the total number of frames this command consumes. FW uses
	 * this number to pull sufficient number of frames from host memory.
	 */
	cmd->frame_count = megasas_get_frame_count(instance, pthru->sge_count,
							PTHRU_FRAME);

	return cmd->frame_count;
}

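/*
 * CDB decoding example for the routine below (hypothetical values, shown
 * only to illustrate the byte arithmetic): a READ(10) with
 * cmnd[2..5] = 00 12 34 56 and cmnd[7..8] = 00 08 yields
 * start_lba_lo = 0x00123456 and lba_count = 8 in the logical IO frame.
 */
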
/**
 * megasas_build_ldio -	Prepares IOs to logical devices
 * @instance:		Adapter soft state
 * @scp:		SCSI command
 * @cmd:		Command to be prepared
 *
 * Frames (and accompanying SGLs) for regular SCSI IOs use this function.
 */
static int
megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp,
		   struct megasas_cmd *cmd)
{
	u32 device_id;
	u8 sc = scp->cmnd[0];
	u16 flags = 0;
	struct megasas_io_frame *ldio;

	device_id = MEGASAS_DEV_INDEX(scp);
	ldio = (struct megasas_io_frame *)cmd->frame;

	if (scp->sc_data_direction == DMA_TO_DEVICE)
		flags = MFI_FRAME_DIR_WRITE;
	else if (scp->sc_data_direction == DMA_FROM_DEVICE)
		flags = MFI_FRAME_DIR_READ;

	if (instance->flag_ieee == 1) {
		flags |= MFI_FRAME_IEEE;
	}

	/*
	 * Prepare the Logical IO frame: 2nd bit is zero for all read cmds
	 */
	ldio->cmd = (sc & 0x02) ? MFI_CMD_LD_WRITE : MFI_CMD_LD_READ;
	ldio->cmd_status = 0x0;
	ldio->scsi_status = 0x0;
	ldio->target_id = device_id;
	ldio->timeout = 0;
	ldio->reserved_0 = 0;
	ldio->pad_0 = 0;
	ldio->flags = cpu_to_le16(flags);
	ldio->start_lba_hi = 0;
	ldio->access_byte = (scp->cmd_len != 6) ? scp->cmnd[1] : 0;

	/*
	 * 6-byte READ(0x08) or WRITE(0x0A) cdb
	 */
	if (scp->cmd_len == 6) {
		ldio->lba_count = cpu_to_le32((u32) scp->cmnd[4]);
		ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[1] << 16) |
						 ((u32) scp->cmnd[2] << 8) |
						 (u32) scp->cmnd[3]);

		ldio->start_lba_lo &= cpu_to_le32(0x1FFFFF);
	}

	/*
	 * 10-byte READ(0x28) or WRITE(0x2A) cdb
	 */
	else if (scp->cmd_len == 10) {
		ldio->lba_count = cpu_to_le32((u32) scp->cmnd[8] |
					      ((u32) scp->cmnd[7] << 8));
		ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
						 ((u32) scp->cmnd[3] << 16) |
						 ((u32) scp->cmnd[4] << 8) |
						 (u32) scp->cmnd[5]);
	}

	/*
	 * 12-byte READ(0xA8) or WRITE(0xAA) cdb
	 */
	else if (scp->cmd_len == 12) {
		ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[6] << 24) |
					      ((u32) scp->cmnd[7] << 16) |
					      ((u32) scp->cmnd[8] << 8) |
					      (u32) scp->cmnd[9]);

		ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
						 ((u32) scp->cmnd[3] << 16) |
						 ((u32) scp->cmnd[4] << 8) |
						 (u32) scp->cmnd[5]);
	}

	/*
	 * 16-byte READ(0x88) or WRITE(0x8A) cdb
	 */
	else if (scp->cmd_len == 16) {
		ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[10] << 24) |
					      ((u32) scp->cmnd[11] << 16) |
					      ((u32) scp->cmnd[12] << 8) |
					      (u32) scp->cmnd[13]);

		ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[6] << 24) |
						 ((u32) scp->cmnd[7] << 16) |
						 ((u32) scp->cmnd[8] << 8) |
						 (u32) scp->cmnd[9]);

		ldio->start_lba_hi = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
						 ((u32) scp->cmnd[3] << 16) |
						 ((u32) scp->cmnd[4] << 8) |
						 (u32) scp->cmnd[5]);

	}

	/*
	 * Construct SGL
	 */
	if (instance->flag_ieee) {
		ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64);
		ldio->sge_count = megasas_make_sgl_skinny(instance, scp,
					      &ldio->sgl);
	} else if (IS_DMA64) {
		ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64);
		ldio->sge_count = megasas_make_sgl64(instance, scp, &ldio->sgl);
	} else
		ldio->sge_count = megasas_make_sgl32(instance, scp, &ldio->sgl);

	if (ldio->sge_count > instance->max_num_sge) {
		dev_err(&instance->pdev->dev, "build_ld_io: sge_count = %x\n",
			ldio->sge_count);
		return 0;
	}

	/*
	 * Sense info specific
	 */
	ldio->sense_len = SCSI_SENSE_BUFFERSIZE;
	ldio->sense_buf_phys_addr_hi = 0;
	ldio->sense_buf_phys_addr_lo = cpu_to_le32(cmd->sense_phys_addr);

	/*
	 * Compute the total number of frames this command consumes. FW uses
	 * this number to pull sufficient number of frames from host memory.
	 */
	cmd->frame_count = megasas_get_frame_count(instance,
			ldio->sge_count, IO_FRAME);

	return cmd->frame_count;
}

/**
 * megasas_cmd_type -		Checks if the cmd is for logical drive/sysPD
 *				and whether it's RW or non RW
 * @cmd:			SCSI command
 *
 */
inline int megasas_cmd_type(struct scsi_cmnd *cmd)
{
	int ret;

	switch (cmd->cmnd[0]) {
	case READ_10:
	case WRITE_10:
	case READ_12:
	case WRITE_12:
	case READ_6:
	case WRITE_6:
	case READ_16:
	case WRITE_16:
		ret = (MEGASAS_IS_LOGICAL(cmd->device)) ?
			READ_WRITE_LDIO : READ_WRITE_SYSPDIO;
		break;
	default:
		ret = (MEGASAS_IS_LOGICAL(cmd->device)) ?
			NON_READ_WRITE_LDIO : NON_READ_WRITE_SYSPDIO;
	}
	return ret;
}

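/*
 * Classification examples for megasas_cmd_type() above (following directly
 * from the switch statement): READ_10 to a logical device maps to
 * READ_WRITE_LDIO, WRITE_16 to a system PD maps to READ_WRITE_SYSPDIO, and
 * a non-R/W opcode such as INQUIRY to a logical device maps to
 * NON_READ_WRITE_LDIO.
 */
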
/**
 * megasas_dump_pending_frames -	Dumps the frame address of all pending cmds
 *					in FW
 * @instance:				Adapter soft state
 */
static inline void
megasas_dump_pending_frames(struct megasas_instance *instance)
{
	struct megasas_cmd *cmd;
	int i,n;
	union megasas_sgl *mfi_sgl;
	struct megasas_io_frame *ldio;
	struct megasas_pthru_frame *pthru;
	u32 sgcount;
	u16 max_cmd = instance->max_fw_cmds;

	dev_err(&instance->pdev->dev, "[%d]: Dumping Frame Phys Address of all pending cmds in FW\n",instance->host->host_no);
	dev_err(&instance->pdev->dev, "[%d]: Total OS Pending cmds : %d\n",instance->host->host_no,atomic_read(&instance->fw_outstanding));
	if (IS_DMA64)
		dev_err(&instance->pdev->dev, "[%d]: 64 bit SGLs were sent to FW\n",instance->host->host_no);
	else
		dev_err(&instance->pdev->dev, "[%d]: 32 bit SGLs were sent to FW\n",instance->host->host_no);

	dev_err(&instance->pdev->dev, "[%d]: Pending OS cmds in FW : \n",instance->host->host_no);
	for (i = 0; i < max_cmd; i++) {
		cmd = instance->cmd_list[i];
		if (!cmd->scmd)
			continue;
		dev_err(&instance->pdev->dev, "[%d]: Frame addr :0x%08lx : ",instance->host->host_no,(unsigned long)cmd->frame_phys_addr);
		if (megasas_cmd_type(cmd->scmd) == READ_WRITE_LDIO) {
			ldio = (struct megasas_io_frame *)cmd->frame;
			mfi_sgl = &ldio->sgl;
			sgcount = ldio->sge_count;
			dev_err(&instance->pdev->dev, "[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x,"
			" lba lo : 0x%x, lba_hi : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",
			instance->host->host_no, cmd->frame_count, ldio->cmd, ldio->target_id,
			le32_to_cpu(ldio->start_lba_lo), le32_to_cpu(ldio->start_lba_hi),
			le32_to_cpu(ldio->sense_buf_phys_addr_lo), sgcount);
		} else {
			pthru = (struct megasas_pthru_frame *) cmd->frame;
			mfi_sgl = &pthru->sgl;
			sgcount = pthru->sge_count;
			dev_err(&instance->pdev->dev, "[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x, "
			"lun : 0x%x, cdb_len : 0x%x, data xfer len : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",
			instance->host->host_no, cmd->frame_count, pthru->cmd, pthru->target_id,
			pthru->lun, pthru->cdb_len, le32_to_cpu(pthru->data_xfer_len),
			le32_to_cpu(pthru->sense_buf_phys_addr_lo), sgcount);
		}
		if (megasas_dbg_lvl & MEGASAS_DBG_LVL) {
			for (n = 0; n < sgcount; n++) {
				if (IS_DMA64)
					dev_err(&instance->pdev->dev, "sgl len : 0x%x, sgl addr : 0x%llx\n",
						le32_to_cpu(mfi_sgl->sge64[n].length),
						le64_to_cpu(mfi_sgl->sge64[n].phys_addr));
				else
					dev_err(&instance->pdev->dev, "sgl len : 0x%x, sgl addr : 0x%x\n",
						le32_to_cpu(mfi_sgl->sge32[n].length),
						le32_to_cpu(mfi_sgl->sge32[n].phys_addr));
			}
		}
	} /*for max_cmd*/
	dev_err(&instance->pdev->dev, "[%d]: Pending Internal cmds in FW : \n",instance->host->host_no);
	for (i = 0; i < max_cmd; i++) {

		cmd = instance->cmd_list[i];

		if (cmd->sync_cmd == 1)
			dev_err(&instance->pdev->dev, "0x%08lx : ", (unsigned long)cmd->frame_phys_addr);
	}
	dev_err(&instance->pdev->dev, "[%d]: Dumping Done\n\n",instance->host->host_no);
}

u32
megasas_build_and_issue_cmd(struct megasas_instance *instance,
			    struct scsi_cmnd *scmd)
{
	struct megasas_cmd *cmd;
	u32 frame_count;

	cmd = megasas_get_cmd(instance);
	if (!cmd)
		return SCSI_MLQUEUE_HOST_BUSY;

	/*
	 * Logical drive command
	 */
	if (megasas_cmd_type(scmd) == READ_WRITE_LDIO)
		frame_count = megasas_build_ldio(instance, scmd, cmd);
	else
		frame_count = megasas_build_dcdb(instance, scmd, cmd);

	if (!frame_count)
		goto out_return_cmd;

	cmd->scmd = scmd;
	megasas_priv(scmd)->cmd_priv = cmd;

	/*
	 * Issue the command to the FW
	 */
	atomic_inc(&instance->fw_outstanding);

	instance->instancet->fire_cmd(instance, cmd->frame_phys_addr,
				cmd->frame_count-1, instance->reg_set);

	return 0;
out_return_cmd:
	megasas_return_cmd(instance, cmd);
	return SCSI_MLQUEUE_HOST_BUSY;
}


/**
 * megasas_queue_command -	Queue entry point
 * @shost:			adapter SCSI host
 * @scmd:			SCSI command to be queued
 */
static int
megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
{
	struct megasas_instance *instance;
	struct MR_PRIV_DEVICE *mr_device_priv_data;
	u32 ld_tgt_id;

	instance = (struct megasas_instance *)
	    scmd->device->host->hostdata;

	if (instance->unload == 1) {
		scmd->result = DID_NO_CONNECT << 16;
		scsi_done(scmd);
		return 0;
	}

	if (instance->issuepend_done == 0)
		return SCSI_MLQUEUE_HOST_BUSY;


	/* Check for an mpio path and adjust behavior */
	if (atomic_read(&instance->adprecovery) == MEGASAS_ADPRESET_SM_INFAULT) {
		if (megasas_check_mpio_paths(instance, scmd) ==
		    (DID_REQUEUE << 16)) {
			return SCSI_MLQUEUE_HOST_BUSY;
		} else {
			scmd->result = DID_NO_CONNECT << 16;
			scsi_done(scmd);
			return 0;
		}
	}

	mr_device_priv_data = scmd->device->hostdata;
	if (!mr_device_priv_data ||
	    (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)) {
		scmd->result = DID_NO_CONNECT << 16;
		scsi_done(scmd);
		return 0;
	}

	if (MEGASAS_IS_LOGICAL(scmd->device)) {
		ld_tgt_id = MEGASAS_TARGET_ID(scmd->device);
		if (instance->ld_tgtid_status[ld_tgt_id] == LD_TARGET_ID_DELETED) {
			scmd->result = DID_NO_CONNECT << 16;
			scsi_done(scmd);
			return 0;
		}
	}

	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
		return SCSI_MLQUEUE_HOST_BUSY;

	if (mr_device_priv_data->tm_busy)
		return SCSI_MLQUEUE_DEVICE_BUSY;


	scmd->result = 0;

instance->fw_supported_vd_count || 1844 scmd->device->lun)) { 1845 scmd->result = DID_BAD_TARGET << 16; 1846 goto out_done; 1847 } 1848 1849 if ((scmd->cmnd[0] == SYNCHRONIZE_CACHE) && 1850 MEGASAS_IS_LOGICAL(scmd->device) && 1851 (!instance->fw_sync_cache_support)) { 1852 scmd->result = DID_OK << 16; 1853 goto out_done; 1854 } 1855 1856 return instance->instancet->build_and_issue_cmd(instance, scmd); 1857 1858 out_done: 1859 scsi_done(scmd); 1860 return 0; 1861 } 1862 1863 static struct megasas_instance *megasas_lookup_instance(u16 host_no) 1864 { 1865 int i; 1866 1867 for (i = 0; i < megasas_mgmt_info.max_index; i++) { 1868 1869 if ((megasas_mgmt_info.instance[i]) && 1870 (megasas_mgmt_info.instance[i]->host->host_no == host_no)) 1871 return megasas_mgmt_info.instance[i]; 1872 } 1873 1874 return NULL; 1875 } 1876 1877 /* 1878 * megasas_set_dynamic_target_properties - 1879 * Device property set by driver may not be static and it is required to be 1880 * updated after OCR 1881 * 1882 * set tm_capable. 1883 * set dma alignment (only for eedp protection enable vd). 1884 * 1885 * @sdev: OS provided scsi device 1886 * 1887 * Returns void 1888 */ 1889 void megasas_set_dynamic_target_properties(struct scsi_device *sdev, 1890 struct queue_limits *lim, bool is_target_prop) 1891 { 1892 u16 pd_index = 0, ld; 1893 u32 device_id; 1894 struct megasas_instance *instance; 1895 struct fusion_context *fusion; 1896 struct MR_PRIV_DEVICE *mr_device_priv_data; 1897 struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync; 1898 struct MR_LD_RAID *raid; 1899 struct MR_DRV_RAID_MAP_ALL *local_map_ptr; 1900 1901 instance = megasas_lookup_instance(sdev->host->host_no); 1902 fusion = instance->ctrl_context; 1903 mr_device_priv_data = sdev->hostdata; 1904 1905 if (!fusion || !mr_device_priv_data) 1906 return; 1907 1908 if (MEGASAS_IS_LOGICAL(sdev)) { 1909 device_id = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) 1910 + sdev->id; 1911 local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)]; 1912 ld = MR_TargetIdToLdGet(device_id, local_map_ptr); 1913 if (ld >= instance->fw_supported_vd_count) 1914 return; 1915 raid = MR_LdRaidGet(ld, local_map_ptr); 1916 1917 if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER) { 1918 if (lim) 1919 lim->dma_alignment = 0x7; 1920 } 1921 1922 mr_device_priv_data->is_tm_capable = 1923 raid->capability.tmCapable; 1924 1925 if (!raid->flags.isEPD) 1926 sdev->no_write_same = 1; 1927 1928 } else if (instance->use_seqnum_jbod_fp) { 1929 pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + 1930 sdev->id; 1931 pd_sync = (void *)fusion->pd_seq_sync 1932 [(instance->pd_seq_map_id - 1) & 1]; 1933 mr_device_priv_data->is_tm_capable = 1934 pd_sync->seq[pd_index].capability.tmCapable; 1935 } 1936 1937 if (is_target_prop && instance->tgt_prop->reset_tmo) { 1938 /* 1939 * If FW provides a target reset timeout value, driver will use 1940 * it. If not set, fallback to default values. 1941 */ 1942 mr_device_priv_data->target_reset_tmo = 1943 min_t(u8, instance->max_reset_tmo, 1944 instance->tgt_prop->reset_tmo); 1945 mr_device_priv_data->task_abort_tmo = instance->task_abort_tmo; 1946 } else { 1947 mr_device_priv_data->target_reset_tmo = 1948 MEGASAS_DEFAULT_TM_TIMEOUT; 1949 mr_device_priv_data->task_abort_tmo = 1950 MEGASAS_DEFAULT_TM_TIMEOUT; 1951 } 1952 } 1953 1954 /* 1955 * megasas_set_nvme_device_properties - 1956 * set nomerges=2 1957 * set virtual page boundary = 4K (current mr_nvme_pg_size is 4K). 1958 * set maximum io transfer = MDTS of NVME device provided by MR firmware. 
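 * (lim->max_hw_sectors is specified in 512-byte sectors, hence the byte
 * count passed to this helper is divided by 512.)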
1959 * 1960 * MR firmware provides value in KB. Caller of this function converts 1961 * kb into bytes. 1962 * 1963 * e.a MDTS=5 means 2^5 * nvme page size. (In case of 4K page size, 1964 * MR firmware provides value 128 as (32 * 4K) = 128K. 1965 * 1966 * @sdev: scsi device 1967 * @max_io_size: maximum io transfer size 1968 * 1969 */ 1970 static inline void 1971 megasas_set_nvme_device_properties(struct scsi_device *sdev, 1972 struct queue_limits *lim, u32 max_io_size) 1973 { 1974 struct megasas_instance *instance; 1975 u32 mr_nvme_pg_size; 1976 1977 instance = (struct megasas_instance *)sdev->host->hostdata; 1978 mr_nvme_pg_size = max_t(u32, instance->nvme_page_size, 1979 MR_DEFAULT_NVME_PAGE_SIZE); 1980 1981 lim->max_hw_sectors = max_io_size / 512; 1982 lim->virt_boundary_mask = mr_nvme_pg_size - 1; 1983 } 1984 1985 /* 1986 * megasas_set_fw_assisted_qd - 1987 * set device queue depth to can_queue 1988 * set device queue depth to fw assisted qd 1989 * 1990 * @sdev: scsi device 1991 * @is_target_prop true, if fw provided target properties. 1992 */ 1993 static void megasas_set_fw_assisted_qd(struct scsi_device *sdev, 1994 bool is_target_prop) 1995 { 1996 u8 interface_type; 1997 u32 device_qd = MEGASAS_DEFAULT_CMD_PER_LUN; 1998 u32 tgt_device_qd; 1999 struct megasas_instance *instance; 2000 struct MR_PRIV_DEVICE *mr_device_priv_data; 2001 2002 instance = megasas_lookup_instance(sdev->host->host_no); 2003 mr_device_priv_data = sdev->hostdata; 2004 interface_type = mr_device_priv_data->interface_type; 2005 2006 switch (interface_type) { 2007 case SAS_PD: 2008 device_qd = MEGASAS_SAS_QD; 2009 break; 2010 case SATA_PD: 2011 device_qd = MEGASAS_SATA_QD; 2012 break; 2013 case NVME_PD: 2014 device_qd = MEGASAS_NVME_QD; 2015 break; 2016 } 2017 2018 if (is_target_prop) { 2019 tgt_device_qd = le32_to_cpu(instance->tgt_prop->device_qdepth); 2020 if (tgt_device_qd) 2021 device_qd = min(instance->host->can_queue, 2022 (int)tgt_device_qd); 2023 } 2024 2025 if (instance->enable_sdev_max_qd && interface_type != UNKNOWN_DRIVE) 2026 device_qd = instance->host->can_queue; 2027 2028 scsi_change_queue_depth(sdev, device_qd); 2029 } 2030 2031 /* 2032 * megasas_set_static_target_properties - 2033 * Device property set by driver are static and it is not required to be 2034 * updated after OCR. 2035 * 2036 * set io timeout 2037 * set device queue depth 2038 * set nvme device properties. see - megasas_set_nvme_device_properties 2039 * 2040 * @sdev: scsi device 2041 * @is_target_prop true, if fw provided target properties. 2042 */ 2043 static void megasas_set_static_target_properties(struct scsi_device *sdev, 2044 struct queue_limits *lim, bool is_target_prop) 2045 { 2046 u32 max_io_size_kb = MR_DEFAULT_NVME_MDTS_KB; 2047 struct megasas_instance *instance; 2048 2049 instance = megasas_lookup_instance(sdev->host->host_no); 2050 2051 /* 2052 * The RAID firmware may require extended timeouts. 2053 */ 2054 blk_queue_rq_timeout(sdev->request_queue, scmd_timeout * HZ); 2055 2056 /* max_io_size_kb will be set to non zero for 2057 * nvme based vd and syspd. 
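 * The NVMe-specific limits are applied below only when both
 * instance->nvme_page_size and max_io_size_kb are non-zero.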
2058 */ 2059 if (is_target_prop) 2060 max_io_size_kb = le32_to_cpu(instance->tgt_prop->max_io_size_kb); 2061 2062 if (instance->nvme_page_size && max_io_size_kb) 2063 megasas_set_nvme_device_properties(sdev, lim, 2064 max_io_size_kb << 10); 2065 2066 megasas_set_fw_assisted_qd(sdev, is_target_prop); 2067 } 2068 2069 2070 static int megasas_device_configure(struct scsi_device *sdev, 2071 struct queue_limits *lim) 2072 { 2073 u16 pd_index = 0; 2074 struct megasas_instance *instance; 2075 int ret_target_prop = DCMD_FAILED; 2076 bool is_target_prop = false; 2077 2078 instance = megasas_lookup_instance(sdev->host->host_no); 2079 if (instance->pd_list_not_supported) { 2080 if (!MEGASAS_IS_LOGICAL(sdev) && sdev->type == TYPE_DISK) { 2081 pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + 2082 sdev->id; 2083 if (instance->pd_list[pd_index].driveState != 2084 MR_PD_STATE_SYSTEM) 2085 return -ENXIO; 2086 } 2087 } 2088 2089 mutex_lock(&instance->reset_mutex); 2090 /* Send DCMD to Firmware and cache the information */ 2091 if ((instance->pd_info) && !MEGASAS_IS_LOGICAL(sdev)) 2092 megasas_get_pd_info(instance, sdev); 2093 2094 /* Some ventura firmware may not have instance->nvme_page_size set. 2095 * Do not send MR_DCMD_DRV_GET_TARGET_PROP 2096 */ 2097 if ((instance->tgt_prop) && (instance->nvme_page_size)) 2098 ret_target_prop = megasas_get_target_prop(instance, sdev); 2099 2100 is_target_prop = (ret_target_prop == DCMD_SUCCESS) ? true : false; 2101 megasas_set_static_target_properties(sdev, lim, is_target_prop); 2102 2103 /* This sdev property may change post OCR */ 2104 megasas_set_dynamic_target_properties(sdev, lim, is_target_prop); 2105 2106 mutex_unlock(&instance->reset_mutex); 2107 2108 return 0; 2109 } 2110 2111 static int megasas_slave_alloc(struct scsi_device *sdev) 2112 { 2113 u16 pd_index = 0, ld_tgt_id; 2114 struct megasas_instance *instance ; 2115 struct MR_PRIV_DEVICE *mr_device_priv_data; 2116 2117 instance = megasas_lookup_instance(sdev->host->host_no); 2118 if (!MEGASAS_IS_LOGICAL(sdev)) { 2119 /* 2120 * Open the OS scan to the SYSTEM PD 2121 */ 2122 pd_index = 2123 (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + 2124 sdev->id; 2125 if ((instance->pd_list_not_supported || 2126 instance->pd_list[pd_index].driveState == 2127 MR_PD_STATE_SYSTEM)) { 2128 goto scan_target; 2129 } 2130 return -ENXIO; 2131 } else if (!MEGASAS_IS_LUN_VALID(sdev)) { 2132 sdev_printk(KERN_INFO, sdev, "%s: invalid LUN\n", __func__); 2133 return -ENXIO; 2134 } 2135 2136 scan_target: 2137 mr_device_priv_data = kzalloc(sizeof(*mr_device_priv_data), 2138 GFP_KERNEL); 2139 if (!mr_device_priv_data) 2140 return -ENOMEM; 2141 2142 if (MEGASAS_IS_LOGICAL(sdev)) { 2143 ld_tgt_id = MEGASAS_TARGET_ID(sdev); 2144 instance->ld_tgtid_status[ld_tgt_id] = LD_TARGET_ID_ACTIVE; 2145 if (megasas_dbg_lvl & LD_PD_DEBUG) 2146 sdev_printk(KERN_INFO, sdev, "LD target ID %d created.\n", ld_tgt_id); 2147 } 2148 2149 sdev->hostdata = mr_device_priv_data; 2150 2151 atomic_set(&mr_device_priv_data->r1_ldio_hint, 2152 instance->r1_ldio_hint_default); 2153 return 0; 2154 } 2155 2156 static void megasas_slave_destroy(struct scsi_device *sdev) 2157 { 2158 u16 ld_tgt_id; 2159 struct megasas_instance *instance; 2160 2161 instance = megasas_lookup_instance(sdev->host->host_no); 2162 2163 if (MEGASAS_IS_LOGICAL(sdev)) { 2164 if (!MEGASAS_IS_LUN_VALID(sdev)) { 2165 sdev_printk(KERN_INFO, sdev, "%s: invalid LUN\n", __func__); 2166 return; 2167 } 2168 ld_tgt_id = MEGASAS_TARGET_ID(sdev); 2169 instance->ld_tgtid_status[ld_tgt_id] = 
LD_TARGET_ID_DELETED; 2170 if (megasas_dbg_lvl & LD_PD_DEBUG) 2171 sdev_printk(KERN_INFO, sdev, 2172 "LD target ID %d removed from OS stack\n", ld_tgt_id); 2173 } 2174 2175 kfree(sdev->hostdata); 2176 sdev->hostdata = NULL; 2177 } 2178 2179 /* 2180 * megasas_complete_outstanding_ioctls - Complete outstanding ioctls after a 2181 * kill adapter 2182 * @instance: Adapter soft state 2183 * 2184 */ 2185 static void megasas_complete_outstanding_ioctls(struct megasas_instance *instance) 2186 { 2187 int i; 2188 struct megasas_cmd *cmd_mfi; 2189 struct megasas_cmd_fusion *cmd_fusion; 2190 struct fusion_context *fusion = instance->ctrl_context; 2191 2192 /* Find all outstanding ioctls */ 2193 if (fusion) { 2194 for (i = 0; i < instance->max_fw_cmds; i++) { 2195 cmd_fusion = fusion->cmd_list[i]; 2196 if (cmd_fusion->sync_cmd_idx != (u32)ULONG_MAX) { 2197 cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx]; 2198 if (cmd_mfi->sync_cmd && 2199 (cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT)) { 2200 cmd_mfi->frame->hdr.cmd_status = 2201 MFI_STAT_WRONG_STATE; 2202 megasas_complete_cmd(instance, 2203 cmd_mfi, DID_OK); 2204 } 2205 } 2206 } 2207 } else { 2208 for (i = 0; i < instance->max_fw_cmds; i++) { 2209 cmd_mfi = instance->cmd_list[i]; 2210 if (cmd_mfi->sync_cmd && cmd_mfi->frame->hdr.cmd != 2211 MFI_CMD_ABORT) 2212 megasas_complete_cmd(instance, cmd_mfi, DID_OK); 2213 } 2214 } 2215 } 2216 2217 2218 void megaraid_sas_kill_hba(struct megasas_instance *instance) 2219 { 2220 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { 2221 dev_warn(&instance->pdev->dev, 2222 "Adapter already dead, skipping kill HBA\n"); 2223 return; 2224 } 2225 2226 /* Set critical error to block I/O & ioctls in case caller didn't */ 2227 atomic_set(&instance->adprecovery, MEGASAS_HW_CRITICAL_ERROR); 2228 /* Wait 1 second to ensure IO or ioctls in build have posted */ 2229 msleep(1000); 2230 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) || 2231 (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) || 2232 (instance->adapter_type != MFI_SERIES)) { 2233 if (!instance->requestorId) { 2234 writel(MFI_STOP_ADP, &instance->reg_set->doorbell); 2235 /* Flush */ 2236 readl(&instance->reg_set->doorbell); 2237 } 2238 if (instance->requestorId && instance->peerIsPresent) 2239 memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS); 2240 } else { 2241 writel(MFI_STOP_ADP, 2242 &instance->reg_set->inbound_doorbell); 2243 } 2244 /* Complete outstanding ioctls when adapter is killed */ 2245 megasas_complete_outstanding_ioctls(instance); 2246 } 2247 2248 /** 2249 * megasas_check_and_restore_queue_depth - Check if queue depth needs to be 2250 * restored to max value 2251 * @instance: Adapter soft state 2252 * 2253 */ 2254 void 2255 megasas_check_and_restore_queue_depth(struct megasas_instance *instance) 2256 { 2257 unsigned long flags; 2258 2259 if (instance->flag & MEGASAS_FW_BUSY 2260 && time_after(jiffies, instance->last_time + 5 * HZ) 2261 && atomic_read(&instance->fw_outstanding) < 2262 instance->throttlequeuedepth + 1) { 2263 2264 spin_lock_irqsave(instance->host->host_lock, flags); 2265 instance->flag &= ~MEGASAS_FW_BUSY; 2266 2267 instance->host->can_queue = instance->cur_can_queue; 2268 spin_unlock_irqrestore(instance->host->host_lock, flags); 2269 } 2270 } 2271 2272 /** 2273 * megasas_complete_cmd_dpc - Returns FW's controller structure 2274 * @instance_addr: Address of adapter soft state 2275 * 2276 * Tasklet to complete cmds 2277 */ 2278 static void megasas_complete_cmd_dpc(unsigned long 
instance_addr) 2279 { 2280 u32 producer; 2281 u32 consumer; 2282 u32 context; 2283 struct megasas_cmd *cmd; 2284 struct megasas_instance *instance = 2285 (struct megasas_instance *)instance_addr; 2286 unsigned long flags; 2287 2288 /* If we have already declared adapter dead, donot complete cmds */ 2289 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) 2290 return; 2291 2292 spin_lock_irqsave(&instance->completion_lock, flags); 2293 2294 producer = le32_to_cpu(*instance->producer); 2295 consumer = le32_to_cpu(*instance->consumer); 2296 2297 while (consumer != producer) { 2298 context = le32_to_cpu(instance->reply_queue[consumer]); 2299 if (context >= instance->max_fw_cmds) { 2300 dev_err(&instance->pdev->dev, "Unexpected context value %x\n", 2301 context); 2302 BUG(); 2303 } 2304 2305 cmd = instance->cmd_list[context]; 2306 2307 megasas_complete_cmd(instance, cmd, DID_OK); 2308 2309 consumer++; 2310 if (consumer == (instance->max_fw_cmds + 1)) { 2311 consumer = 0; 2312 } 2313 } 2314 2315 *instance->consumer = cpu_to_le32(producer); 2316 2317 spin_unlock_irqrestore(&instance->completion_lock, flags); 2318 2319 /* 2320 * Check if we can restore can_queue 2321 */ 2322 megasas_check_and_restore_queue_depth(instance); 2323 } 2324 2325 static void megasas_sriov_heartbeat_handler(struct timer_list *t); 2326 2327 /** 2328 * megasas_start_timer - Initializes sriov heartbeat timer object 2329 * @instance: Adapter soft state 2330 * 2331 */ 2332 void megasas_start_timer(struct megasas_instance *instance) 2333 { 2334 struct timer_list *timer = &instance->sriov_heartbeat_timer; 2335 2336 timer_setup(timer, megasas_sriov_heartbeat_handler, 0); 2337 timer->expires = jiffies + MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF; 2338 add_timer(timer); 2339 } 2340 2341 static void 2342 megasas_internal_reset_defer_cmds(struct megasas_instance *instance); 2343 2344 static void 2345 process_fw_state_change_wq(struct work_struct *work); 2346 2347 static void megasas_do_ocr(struct megasas_instance *instance) 2348 { 2349 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) || 2350 (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) || 2351 (instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)) { 2352 *instance->consumer = cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN); 2353 } 2354 instance->instancet->disable_intr(instance); 2355 atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT); 2356 instance->issuepend_done = 0; 2357 2358 atomic_set(&instance->fw_outstanding, 0); 2359 megasas_internal_reset_defer_cmds(instance); 2360 process_fw_state_change_wq(&instance->work_init); 2361 } 2362 2363 static int megasas_get_ld_vf_affiliation_111(struct megasas_instance *instance, 2364 int initial) 2365 { 2366 struct megasas_cmd *cmd; 2367 struct megasas_dcmd_frame *dcmd; 2368 struct MR_LD_VF_AFFILIATION_111 *new_affiliation_111 = NULL; 2369 dma_addr_t new_affiliation_111_h; 2370 int ld, retval = 0; 2371 u8 thisVf; 2372 2373 cmd = megasas_get_cmd(instance); 2374 2375 if (!cmd) { 2376 dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_vf_affiliation_111:" 2377 "Failed to get cmd for scsi%d\n", 2378 instance->host->host_no); 2379 return -ENOMEM; 2380 } 2381 2382 dcmd = &cmd->frame->dcmd; 2383 2384 if (!instance->vf_affiliation_111) { 2385 dev_warn(&instance->pdev->dev, "SR-IOV: Couldn't get LD/VF " 2386 "affiliation for scsi%d\n", instance->host->host_no); 2387 megasas_return_cmd(instance, cmd); 2388 return -ENOMEM; 2389 } 2390 2391 if (initial) 2392 memset(instance->vf_affiliation_111, 0, 2393 
sizeof(struct MR_LD_VF_AFFILIATION_111)); 2394 else { 2395 new_affiliation_111 = 2396 dma_alloc_coherent(&instance->pdev->dev, 2397 sizeof(struct MR_LD_VF_AFFILIATION_111), 2398 &new_affiliation_111_h, GFP_KERNEL); 2399 if (!new_affiliation_111) { 2400 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate " 2401 "memory for new affiliation for scsi%d\n", 2402 instance->host->host_no); 2403 megasas_return_cmd(instance, cmd); 2404 return -ENOMEM; 2405 } 2406 } 2407 2408 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 2409 2410 dcmd->cmd = MFI_CMD_DCMD; 2411 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 2412 dcmd->sge_count = 1; 2413 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH); 2414 dcmd->timeout = 0; 2415 dcmd->pad_0 = 0; 2416 dcmd->data_xfer_len = 2417 cpu_to_le32(sizeof(struct MR_LD_VF_AFFILIATION_111)); 2418 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_VF_MAP_GET_ALL_LDS_111); 2419 2420 if (initial) 2421 dcmd->sgl.sge32[0].phys_addr = 2422 cpu_to_le32(instance->vf_affiliation_111_h); 2423 else 2424 dcmd->sgl.sge32[0].phys_addr = 2425 cpu_to_le32(new_affiliation_111_h); 2426 2427 dcmd->sgl.sge32[0].length = cpu_to_le32( 2428 sizeof(struct MR_LD_VF_AFFILIATION_111)); 2429 2430 dev_warn(&instance->pdev->dev, "SR-IOV: Getting LD/VF affiliation for " 2431 "scsi%d\n", instance->host->host_no); 2432 2433 if (megasas_issue_blocked_cmd(instance, cmd, 0) != DCMD_SUCCESS) { 2434 dev_warn(&instance->pdev->dev, "SR-IOV: LD/VF affiliation DCMD" 2435 " failed with status 0x%x for scsi%d\n", 2436 dcmd->cmd_status, instance->host->host_no); 2437 retval = 1; /* Do a scan if we couldn't get affiliation */ 2438 goto out; 2439 } 2440 2441 if (!initial) { 2442 thisVf = new_affiliation_111->thisVf; 2443 for (ld = 0 ; ld < new_affiliation_111->vdCount; ld++) 2444 if (instance->vf_affiliation_111->map[ld].policy[thisVf] != 2445 new_affiliation_111->map[ld].policy[thisVf]) { 2446 dev_warn(&instance->pdev->dev, "SR-IOV: " 2447 "Got new LD/VF affiliation for scsi%d\n", 2448 instance->host->host_no); 2449 memcpy(instance->vf_affiliation_111, 2450 new_affiliation_111, 2451 sizeof(struct MR_LD_VF_AFFILIATION_111)); 2452 retval = 1; 2453 goto out; 2454 } 2455 } 2456 out: 2457 if (new_affiliation_111) { 2458 dma_free_coherent(&instance->pdev->dev, 2459 sizeof(struct MR_LD_VF_AFFILIATION_111), 2460 new_affiliation_111, 2461 new_affiliation_111_h); 2462 } 2463 2464 megasas_return_cmd(instance, cmd); 2465 2466 return retval; 2467 } 2468 2469 static int megasas_get_ld_vf_affiliation_12(struct megasas_instance *instance, 2470 int initial) 2471 { 2472 struct megasas_cmd *cmd; 2473 struct megasas_dcmd_frame *dcmd; 2474 struct MR_LD_VF_AFFILIATION *new_affiliation = NULL; 2475 struct MR_LD_VF_MAP *newmap = NULL, *savedmap = NULL; 2476 dma_addr_t new_affiliation_h; 2477 int i, j, retval = 0, found = 0, doscan = 0; 2478 u8 thisVf; 2479 2480 cmd = megasas_get_cmd(instance); 2481 2482 if (!cmd) { 2483 dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_vf_affiliation12: " 2484 "Failed to get cmd for scsi%d\n", 2485 instance->host->host_no); 2486 return -ENOMEM; 2487 } 2488 2489 dcmd = &cmd->frame->dcmd; 2490 2491 if (!instance->vf_affiliation) { 2492 dev_warn(&instance->pdev->dev, "SR-IOV: Couldn't get LD/VF " 2493 "affiliation for scsi%d\n", instance->host->host_no); 2494 megasas_return_cmd(instance, cmd); 2495 return -ENOMEM; 2496 } 2497 2498 if (initial) 2499 memset(instance->vf_affiliation, 0, (MAX_LOGICAL_DRIVES + 1) * 2500 sizeof(struct MR_LD_VF_AFFILIATION)); 2501 else { 2502 new_affiliation = 2503 
dma_alloc_coherent(&instance->pdev->dev, 2504 (MAX_LOGICAL_DRIVES + 1) * sizeof(struct MR_LD_VF_AFFILIATION), 2505 &new_affiliation_h, GFP_KERNEL); 2506 if (!new_affiliation) { 2507 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate " 2508 "memory for new affiliation for scsi%d\n", 2509 instance->host->host_no); 2510 megasas_return_cmd(instance, cmd); 2511 return -ENOMEM; 2512 } 2513 } 2514 2515 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 2516 2517 dcmd->cmd = MFI_CMD_DCMD; 2518 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 2519 dcmd->sge_count = 1; 2520 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH); 2521 dcmd->timeout = 0; 2522 dcmd->pad_0 = 0; 2523 dcmd->data_xfer_len = cpu_to_le32((MAX_LOGICAL_DRIVES + 1) * 2524 sizeof(struct MR_LD_VF_AFFILIATION)); 2525 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_VF_MAP_GET_ALL_LDS); 2526 2527 if (initial) 2528 dcmd->sgl.sge32[0].phys_addr = 2529 cpu_to_le32(instance->vf_affiliation_h); 2530 else 2531 dcmd->sgl.sge32[0].phys_addr = 2532 cpu_to_le32(new_affiliation_h); 2533 2534 dcmd->sgl.sge32[0].length = cpu_to_le32((MAX_LOGICAL_DRIVES + 1) * 2535 sizeof(struct MR_LD_VF_AFFILIATION)); 2536 2537 dev_warn(&instance->pdev->dev, "SR-IOV: Getting LD/VF affiliation for " 2538 "scsi%d\n", instance->host->host_no); 2539 2540 2541 if (megasas_issue_blocked_cmd(instance, cmd, 0) != DCMD_SUCCESS) { 2542 dev_warn(&instance->pdev->dev, "SR-IOV: LD/VF affiliation DCMD" 2543 " failed with status 0x%x for scsi%d\n", 2544 dcmd->cmd_status, instance->host->host_no); 2545 retval = 1; /* Do a scan if we couldn't get affiliation */ 2546 goto out; 2547 } 2548 2549 if (!initial) { 2550 if (!new_affiliation->ldCount) { 2551 dev_warn(&instance->pdev->dev, "SR-IOV: Got new LD/VF " 2552 "affiliation for passive path for scsi%d\n", 2553 instance->host->host_no); 2554 retval = 1; 2555 goto out; 2556 } 2557 newmap = new_affiliation->map; 2558 savedmap = instance->vf_affiliation->map; 2559 thisVf = new_affiliation->thisVf; 2560 for (i = 0 ; i < new_affiliation->ldCount; i++) { 2561 found = 0; 2562 for (j = 0; j < instance->vf_affiliation->ldCount; 2563 j++) { 2564 if (newmap->ref.targetId == 2565 savedmap->ref.targetId) { 2566 found = 1; 2567 if (newmap->policy[thisVf] != 2568 savedmap->policy[thisVf]) { 2569 doscan = 1; 2570 goto out; 2571 } 2572 } 2573 savedmap = (struct MR_LD_VF_MAP *) 2574 ((unsigned char *)savedmap + 2575 savedmap->size); 2576 } 2577 if (!found && newmap->policy[thisVf] != 2578 MR_LD_ACCESS_HIDDEN) { 2579 doscan = 1; 2580 goto out; 2581 } 2582 newmap = (struct MR_LD_VF_MAP *) 2583 ((unsigned char *)newmap + newmap->size); 2584 } 2585 2586 newmap = new_affiliation->map; 2587 savedmap = instance->vf_affiliation->map; 2588 2589 for (i = 0 ; i < instance->vf_affiliation->ldCount; i++) { 2590 found = 0; 2591 for (j = 0 ; j < new_affiliation->ldCount; j++) { 2592 if (savedmap->ref.targetId == 2593 newmap->ref.targetId) { 2594 found = 1; 2595 if (savedmap->policy[thisVf] != 2596 newmap->policy[thisVf]) { 2597 doscan = 1; 2598 goto out; 2599 } 2600 } 2601 newmap = (struct MR_LD_VF_MAP *) 2602 ((unsigned char *)newmap + 2603 newmap->size); 2604 } 2605 if (!found && savedmap->policy[thisVf] != 2606 MR_LD_ACCESS_HIDDEN) { 2607 doscan = 1; 2608 goto out; 2609 } 2610 savedmap = (struct MR_LD_VF_MAP *) 2611 ((unsigned char *)savedmap + 2612 savedmap->size); 2613 } 2614 } 2615 out: 2616 if (doscan) { 2617 dev_warn(&instance->pdev->dev, "SR-IOV: Got new LD/VF " 2618 "affiliation for scsi%d\n", instance->host->host_no); 2619 memcpy(instance->vf_affiliation, 
new_affiliation, 2620 new_affiliation->size); 2621 retval = 1; 2622 } 2623 2624 if (new_affiliation) 2625 dma_free_coherent(&instance->pdev->dev, 2626 (MAX_LOGICAL_DRIVES + 1) * 2627 sizeof(struct MR_LD_VF_AFFILIATION), 2628 new_affiliation, new_affiliation_h); 2629 megasas_return_cmd(instance, cmd); 2630 2631 return retval; 2632 } 2633 2634 /* This function will get the current SR-IOV LD/VF affiliation */ 2635 static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance, 2636 int initial) 2637 { 2638 int retval; 2639 2640 if (instance->PlasmaFW111) 2641 retval = megasas_get_ld_vf_affiliation_111(instance, initial); 2642 else 2643 retval = megasas_get_ld_vf_affiliation_12(instance, initial); 2644 return retval; 2645 } 2646 2647 /* This function will tell FW to start the SR-IOV heartbeat */ 2648 int megasas_sriov_start_heartbeat(struct megasas_instance *instance, 2649 int initial) 2650 { 2651 struct megasas_cmd *cmd; 2652 struct megasas_dcmd_frame *dcmd; 2653 int retval = 0; 2654 2655 cmd = megasas_get_cmd(instance); 2656 2657 if (!cmd) { 2658 dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_sriov_start_heartbeat: " 2659 "Failed to get cmd for scsi%d\n", 2660 instance->host->host_no); 2661 return -ENOMEM; 2662 } 2663 2664 dcmd = &cmd->frame->dcmd; 2665 2666 if (initial) { 2667 instance->hb_host_mem = 2668 dma_alloc_coherent(&instance->pdev->dev, 2669 sizeof(struct MR_CTRL_HB_HOST_MEM), 2670 &instance->hb_host_mem_h, 2671 GFP_KERNEL); 2672 if (!instance->hb_host_mem) { 2673 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate" 2674 " memory for heartbeat host memory for scsi%d\n", 2675 instance->host->host_no); 2676 retval = -ENOMEM; 2677 goto out; 2678 } 2679 } 2680 2681 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 2682 2683 dcmd->mbox.s[0] = cpu_to_le16(sizeof(struct MR_CTRL_HB_HOST_MEM)); 2684 dcmd->cmd = MFI_CMD_DCMD; 2685 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 2686 dcmd->sge_count = 1; 2687 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH); 2688 dcmd->timeout = 0; 2689 dcmd->pad_0 = 0; 2690 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_CTRL_HB_HOST_MEM)); 2691 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SHARED_HOST_MEM_ALLOC); 2692 2693 megasas_set_dma_settings(instance, dcmd, instance->hb_host_mem_h, 2694 sizeof(struct MR_CTRL_HB_HOST_MEM)); 2695 2696 dev_warn(&instance->pdev->dev, "SR-IOV: Starting heartbeat for scsi%d\n", 2697 instance->host->host_no); 2698 2699 if ((instance->adapter_type != MFI_SERIES) && 2700 !instance->mask_interrupts) 2701 retval = megasas_issue_blocked_cmd(instance, cmd, 2702 MEGASAS_ROUTINE_WAIT_TIME_VF); 2703 else 2704 retval = megasas_issue_polled(instance, cmd); 2705 2706 if (retval) { 2707 dev_warn(&instance->pdev->dev, "SR-IOV: MR_DCMD_CTRL_SHARED_HOST" 2708 "_MEM_ALLOC DCMD %s for scsi%d\n", 2709 (dcmd->cmd_status == MFI_STAT_INVALID_STATUS) ? 
2710 "timed out" : "failed", instance->host->host_no); 2711 retval = 1; 2712 } 2713 2714 out: 2715 megasas_return_cmd(instance, cmd); 2716 2717 return retval; 2718 } 2719 2720 /* Handler for SR-IOV heartbeat */ 2721 static void megasas_sriov_heartbeat_handler(struct timer_list *t) 2722 { 2723 struct megasas_instance *instance = 2724 from_timer(instance, t, sriov_heartbeat_timer); 2725 2726 if (instance->hb_host_mem->HB.fwCounter != 2727 instance->hb_host_mem->HB.driverCounter) { 2728 instance->hb_host_mem->HB.driverCounter = 2729 instance->hb_host_mem->HB.fwCounter; 2730 mod_timer(&instance->sriov_heartbeat_timer, 2731 jiffies + MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF); 2732 } else { 2733 dev_warn(&instance->pdev->dev, "SR-IOV: Heartbeat never " 2734 "completed for scsi%d\n", instance->host->host_no); 2735 schedule_work(&instance->work_init); 2736 } 2737 } 2738 2739 /** 2740 * megasas_wait_for_outstanding - Wait for all outstanding cmds 2741 * @instance: Adapter soft state 2742 * 2743 * This function waits for up to MEGASAS_RESET_WAIT_TIME seconds for FW to 2744 * complete all its outstanding commands. Returns error if one or more IOs 2745 * are pending after this time period. It also marks the controller dead. 2746 */ 2747 static int megasas_wait_for_outstanding(struct megasas_instance *instance) 2748 { 2749 int i, sl, outstanding; 2750 u32 reset_index; 2751 u32 wait_time = MEGASAS_RESET_WAIT_TIME; 2752 unsigned long flags; 2753 struct list_head clist_local; 2754 struct megasas_cmd *reset_cmd; 2755 u32 fw_state; 2756 2757 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { 2758 dev_info(&instance->pdev->dev, "%s:%d HBA is killed.\n", 2759 __func__, __LINE__); 2760 return FAILED; 2761 } 2762 2763 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) { 2764 2765 INIT_LIST_HEAD(&clist_local); 2766 spin_lock_irqsave(&instance->hba_lock, flags); 2767 list_splice_init(&instance->internal_reset_pending_q, 2768 &clist_local); 2769 spin_unlock_irqrestore(&instance->hba_lock, flags); 2770 2771 dev_notice(&instance->pdev->dev, "HBA reset wait ...\n"); 2772 for (i = 0; i < wait_time; i++) { 2773 msleep(1000); 2774 if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL) 2775 break; 2776 } 2777 2778 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) { 2779 dev_notice(&instance->pdev->dev, "reset: Stopping HBA.\n"); 2780 atomic_set(&instance->adprecovery, MEGASAS_HW_CRITICAL_ERROR); 2781 return FAILED; 2782 } 2783 2784 reset_index = 0; 2785 while (!list_empty(&clist_local)) { 2786 reset_cmd = list_entry((&clist_local)->next, 2787 struct megasas_cmd, list); 2788 list_del_init(&reset_cmd->list); 2789 if (reset_cmd->scmd) { 2790 reset_cmd->scmd->result = DID_REQUEUE << 16; 2791 dev_notice(&instance->pdev->dev, "%d:%p reset [%02x]\n", 2792 reset_index, reset_cmd, 2793 reset_cmd->scmd->cmnd[0]); 2794 2795 scsi_done(reset_cmd->scmd); 2796 megasas_return_cmd(instance, reset_cmd); 2797 } else if (reset_cmd->sync_cmd) { 2798 dev_notice(&instance->pdev->dev, "%p synch cmds" 2799 "reset queue\n", 2800 reset_cmd); 2801 2802 reset_cmd->cmd_status_drv = DCMD_INIT; 2803 instance->instancet->fire_cmd(instance, 2804 reset_cmd->frame_phys_addr, 2805 0, instance->reg_set); 2806 } else { 2807 dev_notice(&instance->pdev->dev, "%p unexpected" 2808 "cmds lst\n", 2809 reset_cmd); 2810 } 2811 reset_index++; 2812 } 2813 2814 return SUCCESS; 2815 } 2816 2817 for (i = 0; i < resetwaittime; i++) { 2818 outstanding = atomic_read(&instance->fw_outstanding); 2819 2820 if 
(!outstanding)
2821 break;
2822
2823 if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
2824 dev_notice(&instance->pdev->dev, "[%2d]waiting for %d "
2825 "commands to complete\n", i, outstanding);
2826 /*
2827 * Call the cmd completion routine directly, without
2828 * depending on the ISR.
2829 */
2830 megasas_complete_cmd_dpc((unsigned long)instance);
2831 }
2832
2833 msleep(1000);
2834 }
2835
2836 i = 0;
2837 outstanding = atomic_read(&instance->fw_outstanding);
2838 fw_state = instance->instancet->read_fw_status_reg(instance) & MFI_STATE_MASK;
2839
2840 if ((!outstanding && (fw_state == MFI_STATE_OPERATIONAL)))
2841 goto no_outstanding;
2842
2843 if (instance->disableOnlineCtrlReset)
2844 goto kill_hba_and_failed;
2845 do {
2846 if ((fw_state == MFI_STATE_FAULT) || atomic_read(&instance->fw_outstanding)) {
2847 dev_info(&instance->pdev->dev,
2848 "%s:%d waiting_for_outstanding: before issue OCR. FW state = 0x%x, outstanding 0x%x\n",
2849 __func__, __LINE__, fw_state, atomic_read(&instance->fw_outstanding));
2850 if (i == 3)
2851 goto kill_hba_and_failed;
2852 megasas_do_ocr(instance);
2853
2854 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
2855 dev_info(&instance->pdev->dev, "%s:%d OCR failed and HBA is killed.\n",
2856 __func__, __LINE__);
2857 return FAILED;
2858 }
2859 dev_info(&instance->pdev->dev, "%s:%d waiting_for_outstanding: after issue OCR.\n",
2860 __func__, __LINE__);
2861
2862 for (sl = 0; sl < 10; sl++)
2863 msleep(500);
2864
2865 outstanding = atomic_read(&instance->fw_outstanding);
2866
2867 fw_state = instance->instancet->read_fw_status_reg(instance) & MFI_STATE_MASK;
2868 if ((!outstanding && (fw_state == MFI_STATE_OPERATIONAL)))
2869 goto no_outstanding;
2870 }
2871 i++;
2872 } while (i <= 3);
2873
2874 no_outstanding:
2875
2876 dev_info(&instance->pdev->dev, "%s:%d no more pending commands remain after reset handling.\n",
2877 __func__, __LINE__);
2878 return SUCCESS;
2879
2880 kill_hba_and_failed:
2881
2882 /* Reset not supported, kill adapter */
2883 dev_info(&instance->pdev->dev, "%s:%d killing adapter scsi%d"
2884 " disableOnlineCtrlReset %d fw_outstanding %d \n",
2885 __func__, __LINE__, instance->host->host_no, instance->disableOnlineCtrlReset,
2886 atomic_read(&instance->fw_outstanding));
2887 megasas_dump_pending_frames(instance);
2888 megaraid_sas_kill_hba(instance);
2889
2890 return FAILED;
2891 }
2892
2893 /**
2894 * megasas_generic_reset - Generic reset routine
2895 * @scmd: Mid-layer SCSI command
2896 *
2897 * This routine implements a generic reset handler for device, bus and host
2898 * reset requests. Device, bus and host specific reset handlers can use this
2899 * function after they do their specific tasks.
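 *
 * Returns SUCCESS if megasas_wait_for_outstanding() drains all outstanding
 * commands, FAILED otherwise.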
2900 */ 2901 static int megasas_generic_reset(struct scsi_cmnd *scmd) 2902 { 2903 int ret_val; 2904 struct megasas_instance *instance; 2905 2906 instance = (struct megasas_instance *)scmd->device->host->hostdata; 2907 2908 scmd_printk(KERN_NOTICE, scmd, "megasas: RESET cmd=%x retries=%x\n", 2909 scmd->cmnd[0], scmd->retries); 2910 2911 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { 2912 dev_err(&instance->pdev->dev, "cannot recover from previous reset failures\n"); 2913 return FAILED; 2914 } 2915 2916 ret_val = megasas_wait_for_outstanding(instance); 2917 if (ret_val == SUCCESS) 2918 dev_notice(&instance->pdev->dev, "reset successful\n"); 2919 else 2920 dev_err(&instance->pdev->dev, "failed to do reset\n"); 2921 2922 return ret_val; 2923 } 2924 2925 /** 2926 * megasas_reset_timer - quiesce the adapter if required 2927 * @scmd: scsi cmnd 2928 * 2929 * Sets the FW busy flag and reduces the host->can_queue if the 2930 * cmd has not been completed within the timeout period. 2931 */ 2932 static enum scsi_timeout_action megasas_reset_timer(struct scsi_cmnd *scmd) 2933 { 2934 struct megasas_instance *instance; 2935 unsigned long flags; 2936 2937 if (time_after(jiffies, scmd->jiffies_at_alloc + 2938 (scmd_timeout * 2) * HZ)) { 2939 return SCSI_EH_NOT_HANDLED; 2940 } 2941 2942 instance = (struct megasas_instance *)scmd->device->host->hostdata; 2943 if (!(instance->flag & MEGASAS_FW_BUSY)) { 2944 /* FW is busy, throttle IO */ 2945 spin_lock_irqsave(instance->host->host_lock, flags); 2946 2947 instance->host->can_queue = instance->throttlequeuedepth; 2948 instance->last_time = jiffies; 2949 instance->flag |= MEGASAS_FW_BUSY; 2950 2951 spin_unlock_irqrestore(instance->host->host_lock, flags); 2952 } 2953 return SCSI_EH_RESET_TIMER; 2954 } 2955 2956 /** 2957 * megasas_dump - This function will print hexdump of provided buffer. 2958 * @buf: Buffer to be dumped 2959 * @sz: Size in bytes 2960 * @format: Different formats of dumping e.g. format=n will 2961 * cause only 'n' 32 bit words to be dumped in a single 2962 * line. 
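 *
 * For example, megasas_dump(buf, 64, 8) prints sixteen 32-bit words, eight
 * per line, each line prefixed with its starting byte offset.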
2963 */
2964 inline void
2965 megasas_dump(void *buf, int sz, int format)
2966 {
2967 int i;
2968 __le32 *buf_loc = (__le32 *)buf;
2969
2970 for (i = 0; i < (sz / sizeof(__le32)); i++) {
2971 if ((i % format) == 0) {
2972 if (i != 0)
2973 printk(KERN_CONT "\n");
2974 printk(KERN_CONT "%08x: ", (i * 4));
2975 }
2976 printk(KERN_CONT "%08x ", le32_to_cpu(buf_loc[i]));
2977 }
2978 printk(KERN_CONT "\n");
2979 }
2980
2981 /**
2982 * megasas_dump_reg_set - This function will print hexdump of register set
2983 * @reg_set: Register set to be dumped
2984 */
2985 inline void
2986 megasas_dump_reg_set(void __iomem *reg_set)
2987 {
2988 unsigned int i, sz = 256;
2989 u32 __iomem *reg = (u32 __iomem *)reg_set;
2990
2991 for (i = 0; i < (sz / sizeof(u32)); i++)
2992 printk("%08x: %08x\n", (i * 4), readl(&reg[i]));
2993 }
2994
2995 /**
2996 * megasas_dump_fusion_io - This function will print key details
2997 * of SCSI IO
2998 * @scmd: SCSI command pointer of SCSI IO
2999 */
3000 void
3001 megasas_dump_fusion_io(struct scsi_cmnd *scmd)
3002 {
3003 struct megasas_cmd_fusion *cmd = megasas_priv(scmd)->cmd_priv;
3004 union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
3005 struct megasas_instance *instance;
3006
3007 instance = (struct megasas_instance *)scmd->device->host->hostdata;
3008
3009 scmd_printk(KERN_INFO, scmd,
3010 "scmd: (0x%p) retries: 0x%x allowed: 0x%x\n",
3011 scmd, scmd->retries, scmd->allowed);
3012 scsi_print_command(scmd);
3013
3014 if (cmd) {
3015 req_desc = (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)cmd->request_desc;
3016 scmd_printk(KERN_INFO, scmd, "Request descriptor details:\n");
3017 scmd_printk(KERN_INFO, scmd,
3018 "RequestFlags:0x%x MSIxIndex:0x%x SMID:0x%x LMID:0x%x DevHandle:0x%x\n",
3019 req_desc->SCSIIO.RequestFlags,
3020 req_desc->SCSIIO.MSIxIndex, req_desc->SCSIIO.SMID,
3021 req_desc->SCSIIO.LMID, req_desc->SCSIIO.DevHandle);
3022
3023 printk(KERN_INFO "IO request frame:\n");
3024 megasas_dump(cmd->io_request,
3025 MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE, 8);
3026 printk(KERN_INFO "Chain frame:\n");
3027 megasas_dump(cmd->sg_frame,
3028 instance->max_chain_frame_sz, 8);
3029 }
3030
3031 }
3032
3033 /*
3034 * megasas_dump_sys_regs - This function will dump system registers through
3035 * sysfs.
3036 * @reg_set: Pointer to System register set.
3037 * @buf: Buffer to which output is to be written.
3038 * @return: Number of bytes written to buffer.
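 * (Output is bounded by PAGE_SIZE via scnprintf().)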
3039 */
3040 static inline ssize_t
3041 megasas_dump_sys_regs(void __iomem *reg_set, char *buf)
3042 {
3043 unsigned int i, sz = 256;
3044 int bytes_wrote = 0;
3045 char *loc = (char *)buf;
3046 u32 __iomem *reg = (u32 __iomem *)reg_set;
3047
3048 for (i = 0; i < sz / sizeof(u32); i++) {
3049 bytes_wrote += scnprintf(loc + bytes_wrote,
3050 PAGE_SIZE - bytes_wrote,
3051 "%08x: %08x\n", (i * 4),
3052 readl(&reg[i]));
3053 }
3054 return bytes_wrote;
3055 }
3056
3057 /**
3058 * megasas_reset_bus_host - Bus & host reset handler entry point
3059 * @scmd: Mid-layer SCSI command
3060 */
3061 static int megasas_reset_bus_host(struct scsi_cmnd *scmd)
3062 {
3063 int ret;
3064 struct megasas_instance *instance;
3065
3066 instance = (struct megasas_instance *)scmd->device->host->hostdata;
3067
3068 scmd_printk(KERN_INFO, scmd,
3069 "OCR is requested due to IO timeout!!\n");
3070
3071 scmd_printk(KERN_INFO, scmd,
3072 "SCSI host state: %d SCSI host busy: %d FW outstanding: %d\n",
3073 scmd->device->host->shost_state,
3074 scsi_host_busy(scmd->device->host),
3075 atomic_read(&instance->fw_outstanding));
3076 /*
3077 * First wait for all commands to complete
3078 */
3079 if (instance->adapter_type == MFI_SERIES) {
3080 ret = megasas_generic_reset(scmd);
3081 } else {
3082 megasas_dump_fusion_io(scmd);
3083 ret = megasas_reset_fusion(scmd->device->host,
3084 SCSIIO_TIMEOUT_OCR);
3085 }
3086
3087 return ret;
3088 }
3089
3090 /**
3091 * megasas_task_abort - Issues task abort request to firmware
3092 * (supported only for fusion adapters)
3093 * @scmd: SCSI command pointer
3094 */
3095 static int megasas_task_abort(struct scsi_cmnd *scmd)
3096 {
3097 int ret;
3098 struct megasas_instance *instance;
3099
3100 instance = (struct megasas_instance *)scmd->device->host->hostdata;
3101
3102 if (instance->adapter_type != MFI_SERIES)
3103 ret = megasas_task_abort_fusion(scmd);
3104 else {
3105 sdev_printk(KERN_NOTICE, scmd->device, "TASK ABORT not supported\n");
3106 ret = FAILED;
3107 }
3108
3109 return ret;
3110 }
3111
3112 /**
3113 * megasas_reset_target: Issues target reset request to firmware
3114 * (supported only for fusion adapters)
3115 * @scmd: SCSI command pointer
3116 */
3117 static int megasas_reset_target(struct scsi_cmnd *scmd)
3118 {
3119 int ret;
3120 struct megasas_instance *instance;
3121
3122 instance = (struct megasas_instance *)scmd->device->host->hostdata;
3123
3124 if (instance->adapter_type != MFI_SERIES)
3125 ret = megasas_reset_target_fusion(scmd);
3126 else {
3127 sdev_printk(KERN_NOTICE, scmd->device, "TARGET RESET not supported\n");
3128 ret = FAILED;
3129 }
3130
3131 return ret;
3132 }
3133
3134 /**
3135 * megasas_bios_param - Returns disk geometry for a disk
3136 * @sdev: device handle
3137 * @bdev: block device
3138 * @capacity: drive capacity
3139 * @geom: geometry parameters
3140 */
3141 static int
3142 megasas_bios_param(struct scsi_device *sdev, struct block_device *bdev,
3143 sector_t capacity, int geom[])
3144 {
3145 int heads;
3146 int sectors;
3147 sector_t cylinders;
3148 unsigned long tmp;
3149
3150 /* Default heads (64) & sectors (32) */
3151 heads = 64;
3152 sectors = 32;
3153
3154 tmp = heads * sectors;
3155 cylinders = capacity;
3156
3157 sector_div(cylinders, tmp);
3158
3159 /*
3160 * Handle extended translation size for logical drives > 1Gb
3161 */
3162
3163 if (capacity >= 0x200000) {
3164 heads = 255;
3165 sectors = 63;
3166 tmp = heads*sectors;
3167 cylinders = capacity;
3168 sector_div(cylinders, tmp);
3169 }
3170
3171 geom[0] = heads;
3172 geom[1] = sectors;
3173
geom[2] = cylinders; 3174 3175 return 0; 3176 } 3177 3178 static void megasas_map_queues(struct Scsi_Host *shost) 3179 { 3180 struct megasas_instance *instance; 3181 int qoff = 0, offset; 3182 struct blk_mq_queue_map *map; 3183 3184 instance = (struct megasas_instance *)shost->hostdata; 3185 3186 if (shost->nr_hw_queues == 1) 3187 return; 3188 3189 offset = instance->low_latency_index_start; 3190 3191 /* Setup Default hctx */ 3192 map = &shost->tag_set.map[HCTX_TYPE_DEFAULT]; 3193 map->nr_queues = instance->msix_vectors - offset; 3194 map->queue_offset = 0; 3195 blk_mq_map_hw_queues(map, &instance->pdev->dev, offset); 3196 qoff += map->nr_queues; 3197 offset += map->nr_queues; 3198 3199 /* we never use READ queue, so can't cheat blk-mq */ 3200 shost->tag_set.map[HCTX_TYPE_READ].nr_queues = 0; 3201 3202 /* Setup Poll hctx */ 3203 map = &shost->tag_set.map[HCTX_TYPE_POLL]; 3204 map->nr_queues = instance->iopoll_q_count; 3205 if (map->nr_queues) { 3206 /* 3207 * The poll queue(s) doesn't have an IRQ (and hence IRQ 3208 * affinity), so use the regular blk-mq cpu mapping 3209 */ 3210 map->queue_offset = qoff; 3211 blk_mq_map_queues(map); 3212 } 3213 } 3214 3215 static void megasas_aen_polling(struct work_struct *work); 3216 3217 /** 3218 * megasas_service_aen - Processes an event notification 3219 * @instance: Adapter soft state 3220 * @cmd: AEN command completed by the ISR 3221 * 3222 * For AEN, driver sends a command down to FW that is held by the FW till an 3223 * event occurs. When an event of interest occurs, FW completes the command 3224 * that it was previously holding. 3225 * 3226 * This routines sends SIGIO signal to processes that have registered with the 3227 * driver for AEN. 3228 */ 3229 static void 3230 megasas_service_aen(struct megasas_instance *instance, struct megasas_cmd *cmd) 3231 { 3232 unsigned long flags; 3233 3234 /* 3235 * Don't signal app if it is just an aborted previously registered aen 3236 */ 3237 if ((!cmd->abort_aen) && (instance->unload == 0)) { 3238 spin_lock_irqsave(&poll_aen_lock, flags); 3239 megasas_poll_wait_aen = 1; 3240 spin_unlock_irqrestore(&poll_aen_lock, flags); 3241 wake_up(&megasas_poll_wait); 3242 kill_fasync(&megasas_async_queue, SIGIO, POLL_IN); 3243 } 3244 else 3245 cmd->abort_aen = 0; 3246 3247 instance->aen_cmd = NULL; 3248 3249 megasas_return_cmd(instance, cmd); 3250 3251 if ((instance->unload == 0) && 3252 ((instance->issuepend_done == 1))) { 3253 struct megasas_aen_event *ev; 3254 3255 ev = kzalloc(sizeof(*ev), GFP_ATOMIC); 3256 if (!ev) { 3257 dev_err(&instance->pdev->dev, "megasas_service_aen: out of memory\n"); 3258 } else { 3259 ev->instance = instance; 3260 instance->ev = ev; 3261 INIT_DELAYED_WORK(&ev->hotplug_work, 3262 megasas_aen_polling); 3263 schedule_delayed_work(&ev->hotplug_work, 0); 3264 } 3265 } 3266 } 3267 3268 static ssize_t 3269 fw_crash_buffer_store(struct device *cdev, 3270 struct device_attribute *attr, const char *buf, size_t count) 3271 { 3272 struct Scsi_Host *shost = class_to_shost(cdev); 3273 struct megasas_instance *instance = 3274 (struct megasas_instance *) shost->hostdata; 3275 int val = 0; 3276 3277 if (kstrtoint(buf, 0, &val) != 0) 3278 return -EINVAL; 3279 3280 mutex_lock(&instance->crashdump_lock); 3281 instance->fw_crash_buffer_offset = val; 3282 mutex_unlock(&instance->crashdump_lock); 3283 return strlen(buf); 3284 } 3285 3286 static ssize_t 3287 fw_crash_buffer_show(struct device *cdev, 3288 struct device_attribute *attr, char *buf) 3289 { 3290 struct Scsi_Host *shost = class_to_shost(cdev); 3291 
struct megasas_instance *instance = 3292 (struct megasas_instance *) shost->hostdata; 3293 u32 size; 3294 unsigned long dmachunk = CRASH_DMA_BUF_SIZE; 3295 unsigned long chunk_left_bytes; 3296 unsigned long src_addr; 3297 u32 buff_offset; 3298 3299 mutex_lock(&instance->crashdump_lock); 3300 buff_offset = instance->fw_crash_buffer_offset; 3301 if (!instance->crash_dump_buf || 3302 !((instance->fw_crash_state == AVAILABLE) || 3303 (instance->fw_crash_state == COPYING))) { 3304 dev_err(&instance->pdev->dev, 3305 "Firmware crash dump is not available\n"); 3306 mutex_unlock(&instance->crashdump_lock); 3307 return -EINVAL; 3308 } 3309 3310 if (buff_offset > (instance->fw_crash_buffer_size * dmachunk)) { 3311 dev_err(&instance->pdev->dev, 3312 "Firmware crash dump offset is out of range\n"); 3313 mutex_unlock(&instance->crashdump_lock); 3314 return 0; 3315 } 3316 3317 size = (instance->fw_crash_buffer_size * dmachunk) - buff_offset; 3318 chunk_left_bytes = dmachunk - (buff_offset % dmachunk); 3319 size = (size > chunk_left_bytes) ? chunk_left_bytes : size; 3320 size = (size >= PAGE_SIZE) ? (PAGE_SIZE - 1) : size; 3321 3322 src_addr = (unsigned long)instance->crash_buf[buff_offset / dmachunk] + 3323 (buff_offset % dmachunk); 3324 memcpy(buf, (void *)src_addr, size); 3325 mutex_unlock(&instance->crashdump_lock); 3326 3327 return size; 3328 } 3329 3330 static ssize_t 3331 fw_crash_buffer_size_show(struct device *cdev, 3332 struct device_attribute *attr, char *buf) 3333 { 3334 struct Scsi_Host *shost = class_to_shost(cdev); 3335 struct megasas_instance *instance = 3336 (struct megasas_instance *) shost->hostdata; 3337 3338 return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long) 3339 ((instance->fw_crash_buffer_size) * 1024 * 1024)/PAGE_SIZE); 3340 } 3341 3342 static ssize_t 3343 fw_crash_state_store(struct device *cdev, 3344 struct device_attribute *attr, const char *buf, size_t count) 3345 { 3346 struct Scsi_Host *shost = class_to_shost(cdev); 3347 struct megasas_instance *instance = 3348 (struct megasas_instance *) shost->hostdata; 3349 int val = 0; 3350 3351 if (kstrtoint(buf, 0, &val) != 0) 3352 return -EINVAL; 3353 3354 if ((val <= AVAILABLE || val > COPY_ERROR)) { 3355 dev_err(&instance->pdev->dev, "application updates invalid " 3356 "firmware crash state\n"); 3357 return -EINVAL; 3358 } 3359 3360 instance->fw_crash_state = val; 3361 3362 if ((val == COPIED) || (val == COPY_ERROR)) { 3363 mutex_lock(&instance->crashdump_lock); 3364 megasas_free_host_crash_buffer(instance); 3365 mutex_unlock(&instance->crashdump_lock); 3366 if (val == COPY_ERROR) 3367 dev_info(&instance->pdev->dev, "application failed to " 3368 "copy Firmware crash dump\n"); 3369 else 3370 dev_info(&instance->pdev->dev, "Firmware crash dump " 3371 "copied successfully\n"); 3372 } 3373 return strlen(buf); 3374 } 3375 3376 static ssize_t 3377 fw_crash_state_show(struct device *cdev, 3378 struct device_attribute *attr, char *buf) 3379 { 3380 struct Scsi_Host *shost = class_to_shost(cdev); 3381 struct megasas_instance *instance = 3382 (struct megasas_instance *) shost->hostdata; 3383 3384 return snprintf(buf, PAGE_SIZE, "%d\n", instance->fw_crash_state); 3385 } 3386 3387 static ssize_t 3388 page_size_show(struct device *cdev, 3389 struct device_attribute *attr, char *buf) 3390 { 3391 return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long)PAGE_SIZE - 1); 3392 } 3393 3394 static ssize_t 3395 ldio_outstanding_show(struct device *cdev, struct device_attribute *attr, 3396 char *buf) 3397 { 3398 struct Scsi_Host *shost = 
class_to_shost(cdev); 3399 struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata; 3400 3401 return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&instance->ldio_outstanding)); 3402 } 3403 3404 static ssize_t 3405 fw_cmds_outstanding_show(struct device *cdev, 3406 struct device_attribute *attr, char *buf) 3407 { 3408 struct Scsi_Host *shost = class_to_shost(cdev); 3409 struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata; 3410 3411 return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&instance->fw_outstanding)); 3412 } 3413 3414 static ssize_t 3415 enable_sdev_max_qd_show(struct device *cdev, 3416 struct device_attribute *attr, char *buf) 3417 { 3418 struct Scsi_Host *shost = class_to_shost(cdev); 3419 struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata; 3420 3421 return snprintf(buf, PAGE_SIZE, "%d\n", instance->enable_sdev_max_qd); 3422 } 3423 3424 static ssize_t 3425 enable_sdev_max_qd_store(struct device *cdev, 3426 struct device_attribute *attr, const char *buf, size_t count) 3427 { 3428 struct Scsi_Host *shost = class_to_shost(cdev); 3429 struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata; 3430 u32 val = 0; 3431 bool is_target_prop; 3432 int ret_target_prop = DCMD_FAILED; 3433 struct scsi_device *sdev; 3434 3435 if (kstrtou32(buf, 0, &val) != 0) { 3436 pr_err("megasas: could not set enable_sdev_max_qd\n"); 3437 return -EINVAL; 3438 } 3439 3440 mutex_lock(&instance->reset_mutex); 3441 if (val) 3442 instance->enable_sdev_max_qd = true; 3443 else 3444 instance->enable_sdev_max_qd = false; 3445 3446 shost_for_each_device(sdev, shost) { 3447 ret_target_prop = megasas_get_target_prop(instance, sdev); 3448 is_target_prop = (ret_target_prop == DCMD_SUCCESS) ? 
true : false; 3449 megasas_set_fw_assisted_qd(sdev, is_target_prop); 3450 } 3451 mutex_unlock(&instance->reset_mutex); 3452 3453 return strlen(buf); 3454 } 3455 3456 static ssize_t 3457 dump_system_regs_show(struct device *cdev, 3458 struct device_attribute *attr, char *buf) 3459 { 3460 struct Scsi_Host *shost = class_to_shost(cdev); 3461 struct megasas_instance *instance = 3462 (struct megasas_instance *)shost->hostdata; 3463 3464 return megasas_dump_sys_regs(instance->reg_set, buf); 3465 } 3466 3467 static ssize_t 3468 raid_map_id_show(struct device *cdev, struct device_attribute *attr, 3469 char *buf) 3470 { 3471 struct Scsi_Host *shost = class_to_shost(cdev); 3472 struct megasas_instance *instance = 3473 (struct megasas_instance *)shost->hostdata; 3474 3475 return snprintf(buf, PAGE_SIZE, "%ld\n", 3476 (unsigned long)instance->map_id); 3477 } 3478 3479 static DEVICE_ATTR_RW(fw_crash_buffer); 3480 static DEVICE_ATTR_RO(fw_crash_buffer_size); 3481 static DEVICE_ATTR_RW(fw_crash_state); 3482 static DEVICE_ATTR_RO(page_size); 3483 static DEVICE_ATTR_RO(ldio_outstanding); 3484 static DEVICE_ATTR_RO(fw_cmds_outstanding); 3485 static DEVICE_ATTR_RW(enable_sdev_max_qd); 3486 static DEVICE_ATTR_RO(dump_system_regs); 3487 static DEVICE_ATTR_RO(raid_map_id); 3488 3489 static struct attribute *megaraid_host_attrs[] = { 3490 &dev_attr_fw_crash_buffer_size.attr, 3491 &dev_attr_fw_crash_buffer.attr, 3492 &dev_attr_fw_crash_state.attr, 3493 &dev_attr_page_size.attr, 3494 &dev_attr_ldio_outstanding.attr, 3495 &dev_attr_fw_cmds_outstanding.attr, 3496 &dev_attr_enable_sdev_max_qd.attr, 3497 &dev_attr_dump_system_regs.attr, 3498 &dev_attr_raid_map_id.attr, 3499 NULL, 3500 }; 3501 3502 ATTRIBUTE_GROUPS(megaraid_host); 3503 3504 /* 3505 * Scsi host template for megaraid_sas driver 3506 */ 3507 static const struct scsi_host_template megasas_template = { 3508 3509 .module = THIS_MODULE, 3510 .name = "Avago SAS based MegaRAID driver", 3511 .proc_name = "megaraid_sas", 3512 .device_configure = megasas_device_configure, 3513 .slave_alloc = megasas_slave_alloc, 3514 .slave_destroy = megasas_slave_destroy, 3515 .queuecommand = megasas_queue_command, 3516 .eh_target_reset_handler = megasas_reset_target, 3517 .eh_abort_handler = megasas_task_abort, 3518 .eh_host_reset_handler = megasas_reset_bus_host, 3519 .eh_timed_out = megasas_reset_timer, 3520 .shost_groups = megaraid_host_groups, 3521 .bios_param = megasas_bios_param, 3522 .map_queues = megasas_map_queues, 3523 .mq_poll = megasas_blk_mq_poll, 3524 .change_queue_depth = scsi_change_queue_depth, 3525 .max_segment_size = 0xffffffff, 3526 .cmd_size = sizeof(struct megasas_cmd_priv), 3527 }; 3528 3529 /** 3530 * megasas_complete_int_cmd - Completes an internal command 3531 * @instance: Adapter soft state 3532 * @cmd: Command to be completed 3533 * 3534 * The megasas_issue_blocked_cmd() function waits for a command to complete 3535 * after it issues a command. This function wakes up that waiting routine by 3536 * calling wake_up() on the wait queue. 3537 */ 3538 static void 3539 megasas_complete_int_cmd(struct megasas_instance *instance, 3540 struct megasas_cmd *cmd) 3541 { 3542 if (cmd->cmd_status_drv == DCMD_INIT) 3543 cmd->cmd_status_drv = 3544 (cmd->frame->io.cmd_status == MFI_STAT_OK) ? 
3545 DCMD_SUCCESS : DCMD_FAILED; 3546 3547 wake_up(&instance->int_cmd_wait_q); 3548 } 3549 3550 /** 3551 * megasas_complete_abort - Completes aborting a command 3552 * @instance: Adapter soft state 3553 * @cmd: Cmd that was issued to abort another cmd 3554 * 3555 * The megasas_issue_blocked_abort_cmd() function waits on abort_cmd_wait_q 3556 * after it issues an abort on a previously issued command. This function 3557 * wakes up all functions waiting on the same wait queue. 3558 */ 3559 static void 3560 megasas_complete_abort(struct megasas_instance *instance, 3561 struct megasas_cmd *cmd) 3562 { 3563 if (cmd->sync_cmd) { 3564 cmd->sync_cmd = 0; 3565 cmd->cmd_status_drv = DCMD_SUCCESS; 3566 wake_up(&instance->abort_cmd_wait_q); 3567 } 3568 } 3569 3570 static void 3571 megasas_set_ld_removed_by_fw(struct megasas_instance *instance) 3572 { 3573 uint i; 3574 3575 for (i = 0; (i < MEGASAS_MAX_LD_IDS); i++) { 3576 if (instance->ld_ids_prev[i] != 0xff && 3577 instance->ld_ids_from_raidmap[i] == 0xff) { 3578 if (megasas_dbg_lvl & LD_PD_DEBUG) 3579 dev_info(&instance->pdev->dev, 3580 "LD target ID %d removed from RAID map\n", i); 3581 instance->ld_tgtid_status[i] = LD_TARGET_ID_DELETED; 3582 } 3583 } 3584 } 3585 3586 /** 3587 * megasas_complete_cmd - Completes a command 3588 * @instance: Adapter soft state 3589 * @cmd: Command to be completed 3590 * @alt_status: If non-zero, use this value as status to 3591 * SCSI mid-layer instead of the value returned 3592 * by the FW. This should be used if caller wants 3593 * an alternate status (as in the case of aborted 3594 * commands) 3595 */ 3596 void 3597 megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd, 3598 u8 alt_status) 3599 { 3600 int exception = 0; 3601 struct megasas_header *hdr = &cmd->frame->hdr; 3602 unsigned long flags; 3603 struct fusion_context *fusion = instance->ctrl_context; 3604 u32 opcode, status; 3605 3606 /* flag for the retry reset */ 3607 cmd->retry_for_fw_reset = 0; 3608 3609 if (cmd->scmd) 3610 megasas_priv(cmd->scmd)->cmd_priv = NULL; 3611 3612 switch (hdr->cmd) { 3613 case MFI_CMD_INVALID: 3614 /* Some older 1068 controller FW may keep a pended 3615 MR_DCMD_CTRL_EVENT_GET_INFO left over from the main kernel 3616 when booting the kdump kernel. Ignore this command to 3617 prevent a kernel panic on shutdown of the kdump kernel. */ 3618 dev_warn(&instance->pdev->dev, "MFI_CMD_INVALID command " 3619 "completed\n"); 3620 dev_warn(&instance->pdev->dev, "If you have a controller " 3621 "other than PERC5, please upgrade your firmware\n"); 3622 break; 3623 case MFI_CMD_PD_SCSI_IO: 3624 case MFI_CMD_LD_SCSI_IO: 3625 3626 /* 3627 * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been 3628 * issued either through an IO path or an IOCTL path. If it 3629 * was via IOCTL, we will send it to internal completion. 
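 * In that case cmd->sync_cmd is set, which is what the check below tests.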
3630 */ 3631 if (cmd->sync_cmd) { 3632 cmd->sync_cmd = 0; 3633 megasas_complete_int_cmd(instance, cmd); 3634 break; 3635 } 3636 fallthrough; 3637 3638 case MFI_CMD_LD_READ: 3639 case MFI_CMD_LD_WRITE: 3640 3641 if (alt_status) { 3642 cmd->scmd->result = alt_status << 16; 3643 exception = 1; 3644 } 3645 3646 if (exception) { 3647 3648 atomic_dec(&instance->fw_outstanding); 3649 3650 scsi_dma_unmap(cmd->scmd); 3651 scsi_done(cmd->scmd); 3652 megasas_return_cmd(instance, cmd); 3653 3654 break; 3655 } 3656 3657 switch (hdr->cmd_status) { 3658 3659 case MFI_STAT_OK: 3660 cmd->scmd->result = DID_OK << 16; 3661 break; 3662 3663 case MFI_STAT_SCSI_IO_FAILED: 3664 case MFI_STAT_LD_INIT_IN_PROGRESS: 3665 cmd->scmd->result = 3666 (DID_ERROR << 16) | hdr->scsi_status; 3667 break; 3668 3669 case MFI_STAT_SCSI_DONE_WITH_ERROR: 3670 3671 cmd->scmd->result = (DID_OK << 16) | hdr->scsi_status; 3672 3673 if (hdr->scsi_status == SAM_STAT_CHECK_CONDITION) { 3674 memset(cmd->scmd->sense_buffer, 0, 3675 SCSI_SENSE_BUFFERSIZE); 3676 memcpy(cmd->scmd->sense_buffer, cmd->sense, 3677 hdr->sense_len); 3678 } 3679 3680 break; 3681 3682 case MFI_STAT_LD_OFFLINE: 3683 case MFI_STAT_DEVICE_NOT_FOUND: 3684 cmd->scmd->result = DID_BAD_TARGET << 16; 3685 break; 3686 3687 default: 3688 dev_printk(KERN_DEBUG, &instance->pdev->dev, "MFI FW status %#x\n", 3689 hdr->cmd_status); 3690 cmd->scmd->result = DID_ERROR << 16; 3691 break; 3692 } 3693 3694 atomic_dec(&instance->fw_outstanding); 3695 3696 scsi_dma_unmap(cmd->scmd); 3697 scsi_done(cmd->scmd); 3698 megasas_return_cmd(instance, cmd); 3699 3700 break; 3701 3702 case MFI_CMD_SMP: 3703 case MFI_CMD_STP: 3704 case MFI_CMD_NVME: 3705 case MFI_CMD_TOOLBOX: 3706 megasas_complete_int_cmd(instance, cmd); 3707 break; 3708 3709 case MFI_CMD_DCMD: 3710 opcode = le32_to_cpu(cmd->frame->dcmd.opcode); 3711 /* Check for LD map update */ 3712 if ((opcode == MR_DCMD_LD_MAP_GET_INFO) 3713 && (cmd->frame->dcmd.mbox.b[1] == 1)) { 3714 fusion->fast_path_io = 0; 3715 spin_lock_irqsave(instance->host->host_lock, flags); 3716 status = cmd->frame->hdr.cmd_status; 3717 instance->map_update_cmd = NULL; 3718 if (status != MFI_STAT_OK) { 3719 if (status != MFI_STAT_NOT_FOUND) 3720 dev_warn(&instance->pdev->dev, "map syncfailed, status = 0x%x\n", 3721 cmd->frame->hdr.cmd_status); 3722 else { 3723 megasas_return_cmd(instance, cmd); 3724 spin_unlock_irqrestore( 3725 instance->host->host_lock, 3726 flags); 3727 break; 3728 } 3729 } 3730 3731 megasas_return_cmd(instance, cmd); 3732 3733 /* 3734 * Set fast path IO to ZERO. 3735 * Validate Map will set proper value. 3736 * Meanwhile all IOs will go as LD IO. 
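 * map_id is incremented only when MR_ValidateMapInfo() below accepts the
 * new map.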
3737 */ 3738 if (status == MFI_STAT_OK && 3739 (MR_ValidateMapInfo(instance, (instance->map_id + 1)))) { 3740 instance->map_id++; 3741 fusion->fast_path_io = 1; 3742 } else { 3743 fusion->fast_path_io = 0; 3744 } 3745 3746 if (instance->adapter_type >= INVADER_SERIES) 3747 megasas_set_ld_removed_by_fw(instance); 3748 3749 megasas_sync_map_info(instance); 3750 spin_unlock_irqrestore(instance->host->host_lock, 3751 flags); 3752 3753 break; 3754 } 3755 if (opcode == MR_DCMD_CTRL_EVENT_GET_INFO || 3756 opcode == MR_DCMD_CTRL_EVENT_GET) { 3757 spin_lock_irqsave(&poll_aen_lock, flags); 3758 megasas_poll_wait_aen = 0; 3759 spin_unlock_irqrestore(&poll_aen_lock, flags); 3760 } 3761 3762 /* FW has an updated PD sequence */ 3763 if ((opcode == MR_DCMD_SYSTEM_PD_MAP_GET_INFO) && 3764 (cmd->frame->dcmd.mbox.b[0] == 1)) { 3765 3766 spin_lock_irqsave(instance->host->host_lock, flags); 3767 status = cmd->frame->hdr.cmd_status; 3768 instance->jbod_seq_cmd = NULL; 3769 megasas_return_cmd(instance, cmd); 3770 3771 if (status == MFI_STAT_OK) { 3772 instance->pd_seq_map_id++; 3773 /* Re-register a pd sync seq num cmd */ 3774 if (megasas_sync_pd_seq_num(instance, true)) 3775 instance->use_seqnum_jbod_fp = false; 3776 } else 3777 instance->use_seqnum_jbod_fp = false; 3778 3779 spin_unlock_irqrestore(instance->host->host_lock, flags); 3780 break; 3781 } 3782 3783 /* 3784 * See if got an event notification 3785 */ 3786 if (opcode == MR_DCMD_CTRL_EVENT_WAIT) 3787 megasas_service_aen(instance, cmd); 3788 else 3789 megasas_complete_int_cmd(instance, cmd); 3790 3791 break; 3792 3793 case MFI_CMD_ABORT: 3794 /* 3795 * Cmd issued to abort another cmd returned 3796 */ 3797 megasas_complete_abort(instance, cmd); 3798 break; 3799 3800 default: 3801 dev_info(&instance->pdev->dev, "Unknown command completed! [0x%X]\n", 3802 hdr->cmd); 3803 megasas_complete_int_cmd(instance, cmd); 3804 break; 3805 } 3806 } 3807 3808 /** 3809 * megasas_issue_pending_cmds_again - issue all pending cmds 3810 * in FW again because of the fw reset 3811 * @instance: Adapter soft state 3812 */ 3813 static inline void 3814 megasas_issue_pending_cmds_again(struct megasas_instance *instance) 3815 { 3816 struct megasas_cmd *cmd; 3817 struct list_head clist_local; 3818 union megasas_evt_class_locale class_locale; 3819 unsigned long flags; 3820 u32 seq_num; 3821 3822 INIT_LIST_HEAD(&clist_local); 3823 spin_lock_irqsave(&instance->hba_lock, flags); 3824 list_splice_init(&instance->internal_reset_pending_q, &clist_local); 3825 spin_unlock_irqrestore(&instance->hba_lock, flags); 3826 3827 while (!list_empty(&clist_local)) { 3828 cmd = list_entry((&clist_local)->next, 3829 struct megasas_cmd, list); 3830 list_del_init(&cmd->list); 3831 3832 if (cmd->sync_cmd || cmd->scmd) { 3833 dev_notice(&instance->pdev->dev, "command %p, %p:%d" 3834 "detected to be pending while HBA reset\n", 3835 cmd, cmd->scmd, cmd->sync_cmd); 3836 3837 cmd->retry_for_fw_reset++; 3838 3839 if (cmd->retry_for_fw_reset == 3) { 3840 dev_notice(&instance->pdev->dev, "cmd %p, %p:%d" 3841 "was tried multiple times during reset." 
3842 "Shutting down the HBA\n", 3843 cmd, cmd->scmd, cmd->sync_cmd); 3844 instance->instancet->disable_intr(instance); 3845 atomic_set(&instance->fw_reset_no_pci_access, 1); 3846 megaraid_sas_kill_hba(instance); 3847 return; 3848 } 3849 } 3850 3851 if (cmd->sync_cmd == 1) { 3852 if (cmd->scmd) { 3853 dev_notice(&instance->pdev->dev, "unexpected" 3854 "cmd attached to internal command!\n"); 3855 } 3856 dev_notice(&instance->pdev->dev, "%p synchronous cmd" 3857 "on the internal reset queue," 3858 "issue it again.\n", cmd); 3859 cmd->cmd_status_drv = DCMD_INIT; 3860 instance->instancet->fire_cmd(instance, 3861 cmd->frame_phys_addr, 3862 0, instance->reg_set); 3863 } else if (cmd->scmd) { 3864 dev_notice(&instance->pdev->dev, "%p scsi cmd [%02x]" 3865 "detected on the internal queue, issue again.\n", 3866 cmd, cmd->scmd->cmnd[0]); 3867 3868 atomic_inc(&instance->fw_outstanding); 3869 instance->instancet->fire_cmd(instance, 3870 cmd->frame_phys_addr, 3871 cmd->frame_count-1, instance->reg_set); 3872 } else { 3873 dev_notice(&instance->pdev->dev, "%p unexpected cmd on the" 3874 "internal reset defer list while re-issue!!\n", 3875 cmd); 3876 } 3877 } 3878 3879 if (instance->aen_cmd) { 3880 dev_notice(&instance->pdev->dev, "aen_cmd in def process\n"); 3881 megasas_return_cmd(instance, instance->aen_cmd); 3882 3883 instance->aen_cmd = NULL; 3884 } 3885 3886 /* 3887 * Initiate AEN (Asynchronous Event Notification) 3888 */ 3889 seq_num = instance->last_seq_num; 3890 class_locale.members.reserved = 0; 3891 class_locale.members.locale = MR_EVT_LOCALE_ALL; 3892 class_locale.members.class = MR_EVT_CLASS_DEBUG; 3893 3894 megasas_register_aen(instance, seq_num, class_locale.word); 3895 } 3896 3897 /* 3898 * Move the internal reset pending commands to a deferred queue. 3899 * 3900 * We move the commands pending at internal reset time to a 3901 * pending queue. This queue would be flushed after successful 3902 * completion of the internal reset sequence. if the internal reset 3903 * did not complete in time, the kernel reset handler would flush 3904 * these commands. 
3905 */ 3906 static void 3907 megasas_internal_reset_defer_cmds(struct megasas_instance *instance) 3908 { 3909 struct megasas_cmd *cmd; 3910 int i; 3911 u16 max_cmd = instance->max_fw_cmds; 3912 u32 defer_index; 3913 unsigned long flags; 3914 3915 defer_index = 0; 3916 spin_lock_irqsave(&instance->mfi_pool_lock, flags); 3917 for (i = 0; i < max_cmd; i++) { 3918 cmd = instance->cmd_list[i]; 3919 if (cmd->sync_cmd == 1 || cmd->scmd) { 3920 dev_notice(&instance->pdev->dev, "moving cmd[%d]:%p:%d:%p" 3921 "on the defer queue as internal\n", 3922 defer_index, cmd, cmd->sync_cmd, cmd->scmd); 3923 3924 if (!list_empty(&cmd->list)) { 3925 dev_notice(&instance->pdev->dev, "ERROR while" 3926 " moving this cmd:%p, %d %p, it was" 3927 "discovered on some list?\n", 3928 cmd, cmd->sync_cmd, cmd->scmd); 3929 3930 list_del_init(&cmd->list); 3931 } 3932 defer_index++; 3933 list_add_tail(&cmd->list, 3934 &instance->internal_reset_pending_q); 3935 } 3936 } 3937 spin_unlock_irqrestore(&instance->mfi_pool_lock, flags); 3938 } 3939 3940 3941 static void 3942 process_fw_state_change_wq(struct work_struct *work) 3943 { 3944 struct megasas_instance *instance = 3945 container_of(work, struct megasas_instance, work_init); 3946 u32 wait; 3947 unsigned long flags; 3948 3949 if (atomic_read(&instance->adprecovery) != MEGASAS_ADPRESET_SM_INFAULT) { 3950 dev_notice(&instance->pdev->dev, "error, recovery st %x\n", 3951 atomic_read(&instance->adprecovery)); 3952 return ; 3953 } 3954 3955 if (atomic_read(&instance->adprecovery) == MEGASAS_ADPRESET_SM_INFAULT) { 3956 dev_notice(&instance->pdev->dev, "FW detected to be in fault" 3957 "state, restarting it...\n"); 3958 3959 instance->instancet->disable_intr(instance); 3960 atomic_set(&instance->fw_outstanding, 0); 3961 3962 atomic_set(&instance->fw_reset_no_pci_access, 1); 3963 instance->instancet->adp_reset(instance, instance->reg_set); 3964 atomic_set(&instance->fw_reset_no_pci_access, 0); 3965 3966 dev_notice(&instance->pdev->dev, "FW restarted successfully," 3967 "initiating next stage...\n"); 3968 3969 dev_notice(&instance->pdev->dev, "HBA recovery state machine," 3970 "state 2 starting...\n"); 3971 3972 /* waiting for about 20 second before start the second init */ 3973 for (wait = 0; wait < 30; wait++) { 3974 msleep(1000); 3975 } 3976 3977 if (megasas_transition_to_ready(instance, 1)) { 3978 dev_notice(&instance->pdev->dev, "adapter not ready\n"); 3979 3980 atomic_set(&instance->fw_reset_no_pci_access, 1); 3981 megaraid_sas_kill_hba(instance); 3982 return ; 3983 } 3984 3985 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) || 3986 (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) || 3987 (instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR) 3988 ) { 3989 *instance->consumer = *instance->producer; 3990 } else { 3991 *instance->consumer = 0; 3992 *instance->producer = 0; 3993 } 3994 3995 megasas_issue_init_mfi(instance); 3996 3997 spin_lock_irqsave(&instance->hba_lock, flags); 3998 atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL); 3999 spin_unlock_irqrestore(&instance->hba_lock, flags); 4000 instance->instancet->enable_intr(instance); 4001 4002 megasas_issue_pending_cmds_again(instance); 4003 instance->issuepend_done = 1; 4004 } 4005 } 4006 4007 /** 4008 * megasas_deplete_reply_queue - Processes all completed commands 4009 * @instance: Adapter soft state 4010 * @alt_status: Alternate status to be returned to 4011 * SCSI mid-layer instead of the status 4012 * returned by the FW 4013 * Note: this must be called with hba lock held 4014 */ 4015 static 
int 4016 megasas_deplete_reply_queue(struct megasas_instance *instance, 4017 u8 alt_status) 4018 { 4019 u32 mfiStatus; 4020 u32 fw_state; 4021 4022 if (instance->instancet->check_reset(instance, instance->reg_set) == 1) 4023 return IRQ_HANDLED; 4024 4025 mfiStatus = instance->instancet->clear_intr(instance); 4026 if (mfiStatus == 0) { 4027 /* Hardware may not set outbound_intr_status in MSI-X mode */ 4028 if (!instance->msix_vectors) 4029 return IRQ_NONE; 4030 } 4031 4032 instance->mfiStatus = mfiStatus; 4033 4034 if ((mfiStatus & MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE)) { 4035 fw_state = instance->instancet->read_fw_status_reg( 4036 instance) & MFI_STATE_MASK; 4037 4038 if (fw_state != MFI_STATE_FAULT) { 4039 dev_notice(&instance->pdev->dev, "fw state:%x\n", 4040 fw_state); 4041 } 4042 4043 if ((fw_state == MFI_STATE_FAULT) && 4044 (instance->disableOnlineCtrlReset == 0)) { 4045 dev_notice(&instance->pdev->dev, "wait adp restart\n"); 4046 4047 if ((instance->pdev->device == 4048 PCI_DEVICE_ID_LSI_SAS1064R) || 4049 (instance->pdev->device == 4050 PCI_DEVICE_ID_DELL_PERC5) || 4051 (instance->pdev->device == 4052 PCI_DEVICE_ID_LSI_VERDE_ZCR)) { 4053 4054 *instance->consumer = 4055 cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN); 4056 } 4057 4058 4059 instance->instancet->disable_intr(instance); 4060 atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT); 4061 instance->issuepend_done = 0; 4062 4063 atomic_set(&instance->fw_outstanding, 0); 4064 megasas_internal_reset_defer_cmds(instance); 4065 4066 dev_notice(&instance->pdev->dev, "fwState=%x, stage:%d\n", 4067 fw_state, atomic_read(&instance->adprecovery)); 4068 4069 schedule_work(&instance->work_init); 4070 return IRQ_HANDLED; 4071 4072 } else { 4073 dev_notice(&instance->pdev->dev, "fwstate:%x, dis_OCR=%x\n", 4074 fw_state, instance->disableOnlineCtrlReset); 4075 } 4076 } 4077 4078 tasklet_schedule(&instance->isr_tasklet); 4079 return IRQ_HANDLED; 4080 } 4081 4082 /** 4083 * megasas_isr - isr entry point 4084 * @irq: IRQ number 4085 * @devp: IRQ context address 4086 */ 4087 static irqreturn_t megasas_isr(int irq, void *devp) 4088 { 4089 struct megasas_irq_context *irq_context = devp; 4090 struct megasas_instance *instance = irq_context->instance; 4091 unsigned long flags; 4092 irqreturn_t rc; 4093 4094 if (atomic_read(&instance->fw_reset_no_pci_access)) 4095 return IRQ_HANDLED; 4096 4097 spin_lock_irqsave(&instance->hba_lock, flags); 4098 rc = megasas_deplete_reply_queue(instance, DID_OK); 4099 spin_unlock_irqrestore(&instance->hba_lock, flags); 4100 4101 return rc; 4102 } 4103 4104 /** 4105 * megasas_transition_to_ready - Move the FW to READY state 4106 * @instance: Adapter soft state 4107 * @ocr: Adapter reset state 4108 * 4109 * During the initialization, FW passes can potentially be in any one of 4110 * several possible states. If the FW in operational, waiting-for-handshake 4111 * states, driver must take steps to bring it to ready state. Otherwise, it 4112 * has to wait for the ready state. 
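 *
 * The wait loop applied to every non-READY state below, condensed (the
 * function body is authoritative):
 *
 *	while (fw_state != MFI_STATE_READY) {
 *		max_wait = per-state limit in seconds;
 *		nudge the FW if needed (doorbell writes for the handshake,
 *		boot-message and operational states);
 *		poll read_fw_status_reg() every 20 ms for up to max_wait
 *		seconds (max_wait * 50 iterations of msleep(20));
 *		if the absolute state never changed, dump the register
 *		set and return -ENODEV;
 *	}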
4113 */ 4114 int 4115 megasas_transition_to_ready(struct megasas_instance *instance, int ocr) 4116 { 4117 int i; 4118 u8 max_wait; 4119 u32 fw_state; 4120 u32 abs_state, curr_abs_state; 4121 4122 abs_state = instance->instancet->read_fw_status_reg(instance); 4123 fw_state = abs_state & MFI_STATE_MASK; 4124 4125 if (fw_state != MFI_STATE_READY) 4126 dev_info(&instance->pdev->dev, "Waiting for FW to come to ready" 4127 " state\n"); 4128 4129 while (fw_state != MFI_STATE_READY) { 4130 4131 switch (fw_state) { 4132 4133 case MFI_STATE_FAULT: 4134 dev_printk(KERN_ERR, &instance->pdev->dev, 4135 "FW in FAULT state, Fault code:0x%x subcode:0x%x func:%s\n", 4136 abs_state & MFI_STATE_FAULT_CODE, 4137 abs_state & MFI_STATE_FAULT_SUBCODE, __func__); 4138 if (ocr) { 4139 max_wait = MEGASAS_RESET_WAIT_TIME; 4140 break; 4141 } else { 4142 dev_printk(KERN_DEBUG, &instance->pdev->dev, "System Register set:\n"); 4143 megasas_dump_reg_set(instance->reg_set); 4144 return -ENODEV; 4145 } 4146 4147 case MFI_STATE_WAIT_HANDSHAKE: 4148 /* 4149 * Set the CLR bit in inbound doorbell 4150 */ 4151 if ((instance->pdev->device == 4152 PCI_DEVICE_ID_LSI_SAS0073SKINNY) || 4153 (instance->pdev->device == 4154 PCI_DEVICE_ID_LSI_SAS0071SKINNY) || 4155 (instance->adapter_type != MFI_SERIES)) 4156 writel( 4157 MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG, 4158 &instance->reg_set->doorbell); 4159 else 4160 writel( 4161 MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG, 4162 &instance->reg_set->inbound_doorbell); 4163 4164 max_wait = MEGASAS_RESET_WAIT_TIME; 4165 break; 4166 4167 case MFI_STATE_BOOT_MESSAGE_PENDING: 4168 if ((instance->pdev->device == 4169 PCI_DEVICE_ID_LSI_SAS0073SKINNY) || 4170 (instance->pdev->device == 4171 PCI_DEVICE_ID_LSI_SAS0071SKINNY) || 4172 (instance->adapter_type != MFI_SERIES)) 4173 writel(MFI_INIT_HOTPLUG, 4174 &instance->reg_set->doorbell); 4175 else 4176 writel(MFI_INIT_HOTPLUG, 4177 &instance->reg_set->inbound_doorbell); 4178 4179 max_wait = MEGASAS_RESET_WAIT_TIME; 4180 break; 4181 4182 case MFI_STATE_OPERATIONAL: 4183 /* 4184 * Bring it to READY state; assuming max wait 10 secs 4185 */ 4186 instance->instancet->disable_intr(instance); 4187 if ((instance->pdev->device == 4188 PCI_DEVICE_ID_LSI_SAS0073SKINNY) || 4189 (instance->pdev->device == 4190 PCI_DEVICE_ID_LSI_SAS0071SKINNY) || 4191 (instance->adapter_type != MFI_SERIES)) { 4192 writel(MFI_RESET_FLAGS, 4193 &instance->reg_set->doorbell); 4194 4195 if (instance->adapter_type != MFI_SERIES) { 4196 for (i = 0; i < (10 * 1000); i += 20) { 4197 if (megasas_readl( 4198 instance, 4199 &instance-> 4200 reg_set-> 4201 doorbell) & 1) 4202 msleep(20); 4203 else 4204 break; 4205 } 4206 } 4207 } else 4208 writel(MFI_RESET_FLAGS, 4209 &instance->reg_set->inbound_doorbell); 4210 4211 max_wait = MEGASAS_RESET_WAIT_TIME; 4212 break; 4213 4214 case MFI_STATE_UNDEFINED: 4215 /* 4216 * This state should not last for more than 2 seconds 4217 */ 4218 max_wait = MEGASAS_RESET_WAIT_TIME; 4219 break; 4220 4221 case MFI_STATE_BB_INIT: 4222 max_wait = MEGASAS_RESET_WAIT_TIME; 4223 break; 4224 4225 case MFI_STATE_FW_INIT: 4226 max_wait = MEGASAS_RESET_WAIT_TIME; 4227 break; 4228 4229 case MFI_STATE_FW_INIT_2: 4230 max_wait = MEGASAS_RESET_WAIT_TIME; 4231 break; 4232 4233 case MFI_STATE_DEVICE_SCAN: 4234 max_wait = MEGASAS_RESET_WAIT_TIME; 4235 break; 4236 4237 case MFI_STATE_FLUSH_CACHE: 4238 max_wait = MEGASAS_RESET_WAIT_TIME; 4239 break; 4240 4241 default: 4242 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Unknown state 0x%x\n", 4243 fw_state); 4244 dev_printk(KERN_DEBUG, 
&instance->pdev->dev, "System Register set:\n"); 4245 megasas_dump_reg_set(instance->reg_set); 4246 return -ENODEV; 4247 } 4248 4249 /* 4250 * The cur_state should not last for more than max_wait secs 4251 */ 4252 for (i = 0; i < max_wait * 50; i++) { 4253 curr_abs_state = instance->instancet-> 4254 read_fw_status_reg(instance); 4255 4256 if (abs_state == curr_abs_state) { 4257 msleep(20); 4258 } else 4259 break; 4260 } 4261 4262 /* 4263 * Return error if fw_state hasn't changed after max_wait 4264 */ 4265 if (curr_abs_state == abs_state) { 4266 dev_printk(KERN_DEBUG, &instance->pdev->dev, "FW state [%d] hasn't changed " 4267 "in %d secs\n", fw_state, max_wait); 4268 dev_printk(KERN_DEBUG, &instance->pdev->dev, "System Register set:\n"); 4269 megasas_dump_reg_set(instance->reg_set); 4270 return -ENODEV; 4271 } 4272 4273 abs_state = curr_abs_state; 4274 fw_state = curr_abs_state & MFI_STATE_MASK; 4275 } 4276 dev_info(&instance->pdev->dev, "FW now in Ready state\n"); 4277 4278 return 0; 4279 } 4280 4281 /** 4282 * megasas_teardown_frame_pool - Destroy the cmd frame DMA pool 4283 * @instance: Adapter soft state 4284 */ 4285 static void megasas_teardown_frame_pool(struct megasas_instance *instance) 4286 { 4287 int i; 4288 u16 max_cmd = instance->max_mfi_cmds; 4289 struct megasas_cmd *cmd; 4290 4291 if (!instance->frame_dma_pool) 4292 return; 4293 4294 /* 4295 * Return all frames to pool 4296 */ 4297 for (i = 0; i < max_cmd; i++) { 4298 4299 cmd = instance->cmd_list[i]; 4300 4301 if (cmd->frame) 4302 dma_pool_free(instance->frame_dma_pool, cmd->frame, 4303 cmd->frame_phys_addr); 4304 4305 if (cmd->sense) 4306 dma_pool_free(instance->sense_dma_pool, cmd->sense, 4307 cmd->sense_phys_addr); 4308 } 4309 4310 /* 4311 * Now destroy the pool itself 4312 */ 4313 dma_pool_destroy(instance->frame_dma_pool); 4314 dma_pool_destroy(instance->sense_dma_pool); 4315 4316 instance->frame_dma_pool = NULL; 4317 instance->sense_dma_pool = NULL; 4318 } 4319 4320 /** 4321 * megasas_create_frame_pool - Creates DMA pool for cmd frames 4322 * @instance: Adapter soft state 4323 * 4324 * Each command packet has an embedded DMA memory buffer that is used for 4325 * filling MFI frame and the SG list that immediately follows the frame. This 4326 * function creates those DMA memory buffers for each command packet by using 4327 * PCI pool facility. 4328 */ 4329 static int megasas_create_frame_pool(struct megasas_instance *instance) 4330 { 4331 int i; 4332 u16 max_cmd; 4333 u32 frame_count; 4334 struct megasas_cmd *cmd; 4335 4336 max_cmd = instance->max_mfi_cmds; 4337 4338 /* 4339 * For MFI controllers. 4340 * max_num_sge = 60 4341 * max_sge_sz = 16 byte (sizeof megasas_sge_skinny) 4342 * Total 960 byte (15 MFI frame of 64 byte) 4343 * 4344 * Fusion adapter require only 3 extra frame. 4345 * max_num_sge = 16 (defined as MAX_IOCTL_SGE) 4346 * max_sge_sz = 12 byte (sizeof megasas_sge64) 4347 * Total 192 byte (3 MFI frame of 64 byte) 4348 */ 4349 frame_count = (instance->adapter_type == MFI_SERIES) ? 
4350 (15 + 1) : (3 + 1); 4351 instance->mfi_frame_size = MEGAMFI_FRAME_SIZE * frame_count; 4352 /* 4353 * Use DMA pool facility provided by PCI layer 4354 */ 4355 instance->frame_dma_pool = dma_pool_create("megasas frame pool", 4356 &instance->pdev->dev, 4357 instance->mfi_frame_size, 256, 0); 4358 4359 if (!instance->frame_dma_pool) { 4360 dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup frame pool\n"); 4361 return -ENOMEM; 4362 } 4363 4364 instance->sense_dma_pool = dma_pool_create("megasas sense pool", 4365 &instance->pdev->dev, 128, 4366 4, 0); 4367 4368 if (!instance->sense_dma_pool) { 4369 dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup sense pool\n"); 4370 4371 dma_pool_destroy(instance->frame_dma_pool); 4372 instance->frame_dma_pool = NULL; 4373 4374 return -ENOMEM; 4375 } 4376 4377 /* 4378 * Allocate and attach a frame to each of the commands in cmd_list. 4379 * By making cmd->index as the context instead of the &cmd, we can 4380 * always use 32bit context regardless of the architecture 4381 */ 4382 for (i = 0; i < max_cmd; i++) { 4383 4384 cmd = instance->cmd_list[i]; 4385 4386 cmd->frame = dma_pool_zalloc(instance->frame_dma_pool, 4387 GFP_KERNEL, &cmd->frame_phys_addr); 4388 4389 cmd->sense = dma_pool_alloc(instance->sense_dma_pool, 4390 GFP_KERNEL, &cmd->sense_phys_addr); 4391 4392 /* 4393 * megasas_teardown_frame_pool() takes care of freeing 4394 * whatever has been allocated 4395 */ 4396 if (!cmd->frame || !cmd->sense) { 4397 dev_printk(KERN_DEBUG, &instance->pdev->dev, "dma_pool_alloc failed\n"); 4398 megasas_teardown_frame_pool(instance); 4399 return -ENOMEM; 4400 } 4401 4402 cmd->frame->io.context = cpu_to_le32(cmd->index); 4403 cmd->frame->io.pad_0 = 0; 4404 if ((instance->adapter_type == MFI_SERIES) && reset_devices) 4405 cmd->frame->hdr.cmd = MFI_CMD_INVALID; 4406 } 4407 4408 return 0; 4409 } 4410 4411 /** 4412 * megasas_free_cmds - Free all the cmds in the free cmd pool 4413 * @instance: Adapter soft state 4414 */ 4415 void megasas_free_cmds(struct megasas_instance *instance) 4416 { 4417 int i; 4418 4419 /* First free the MFI frame pool */ 4420 megasas_teardown_frame_pool(instance); 4421 4422 /* Free all the commands in the cmd_list */ 4423 for (i = 0; i < instance->max_mfi_cmds; i++) 4424 4425 kfree(instance->cmd_list[i]); 4426 4427 /* Free the cmd_list buffer itself */ 4428 kfree(instance->cmd_list); 4429 instance->cmd_list = NULL; 4430 4431 INIT_LIST_HEAD(&instance->cmd_pool); 4432 } 4433 4434 /** 4435 * megasas_alloc_cmds - Allocates the command packets 4436 * @instance: Adapter soft state 4437 * 4438 * Each command that is issued to the FW, whether IO commands from the OS or 4439 * internal commands like IOCTLs, are wrapped in local data structure called 4440 * megasas_cmd. The frame embedded in this megasas_cmd is actually issued to 4441 * the FW. 4442 * 4443 * Each frame has a 32-bit field called context (tag). This context is used 4444 * to get back the megasas_cmd from the frame when a frame gets completed in 4445 * the ISR. Typically the address of the megasas_cmd itself would be used as 4446 * the context. But we wanted to keep the differences between 32 and 64 bit 4447 * systems to the mininum. We always use 32 bit integers for the context. In 4448 * this driver, the 32 bit values are the indices into an array cmd_list. 4449 * This array is used only to look up the megasas_cmd given the context. The 4450 * free commands themselves are maintained in a linked list called cmd_pool. 
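 *
 * Illustrative sketch only - the helper name below is hypothetical and
 * merely shows the lookup that the index-as-context scheme makes
 * possible on both 32 and 64 bit systems:
 *
 *	static struct megasas_cmd *
 *	context_to_cmd(struct megasas_instance *instance, u32 context)
 *	{
 *		return instance->cmd_list[context];
 *	}
 *
 * where the context written into each frame is simply cmd->index.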
4451 */ 4452 int megasas_alloc_cmds(struct megasas_instance *instance) 4453 { 4454 int i; 4455 int j; 4456 u16 max_cmd; 4457 struct megasas_cmd *cmd; 4458 4459 max_cmd = instance->max_mfi_cmds; 4460 4461 /* 4462 * instance->cmd_list is an array of struct megasas_cmd pointers. 4463 * Allocate the dynamic array first and then allocate individual 4464 * commands. 4465 */ 4466 instance->cmd_list = kcalloc(max_cmd, sizeof(struct megasas_cmd*), GFP_KERNEL); 4467 4468 if (!instance->cmd_list) { 4469 dev_printk(KERN_DEBUG, &instance->pdev->dev, "out of memory\n"); 4470 return -ENOMEM; 4471 } 4472 4473 for (i = 0; i < max_cmd; i++) { 4474 instance->cmd_list[i] = kmalloc(sizeof(struct megasas_cmd), 4475 GFP_KERNEL); 4476 4477 if (!instance->cmd_list[i]) { 4478 4479 for (j = 0; j < i; j++) 4480 kfree(instance->cmd_list[j]); 4481 4482 kfree(instance->cmd_list); 4483 instance->cmd_list = NULL; 4484 4485 return -ENOMEM; 4486 } 4487 } 4488 4489 for (i = 0; i < max_cmd; i++) { 4490 cmd = instance->cmd_list[i]; 4491 memset(cmd, 0, sizeof(struct megasas_cmd)); 4492 cmd->index = i; 4493 cmd->scmd = NULL; 4494 cmd->instance = instance; 4495 4496 list_add_tail(&cmd->list, &instance->cmd_pool); 4497 } 4498 4499 /* 4500 * Create a frame pool and assign one frame to each cmd 4501 */ 4502 if (megasas_create_frame_pool(instance)) { 4503 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error creating frame DMA pool\n"); 4504 megasas_free_cmds(instance); 4505 return -ENOMEM; 4506 } 4507 4508 return 0; 4509 } 4510 4511 /* 4512 * dcmd_timeout_ocr_possible - Check if OCR is possible based on Driver/FW state. 4513 * @instance: Adapter soft state 4514 * 4515 * Return 0 for only Fusion adapter, if driver load/unload is not in progress 4516 * or FW is not under OCR. 4517 */ 4518 inline int 4519 dcmd_timeout_ocr_possible(struct megasas_instance *instance) { 4520 4521 if (instance->adapter_type == MFI_SERIES) 4522 return KILL_ADAPTER; 4523 else if (instance->unload || 4524 test_bit(MEGASAS_FUSION_OCR_NOT_POSSIBLE, 4525 &instance->reset_flags)) 4526 return IGNORE_TIMEOUT; 4527 else 4528 return INITIATE_OCR; 4529 } 4530 4531 static void 4532 megasas_get_pd_info(struct megasas_instance *instance, struct scsi_device *sdev) 4533 { 4534 int ret; 4535 struct megasas_cmd *cmd; 4536 struct megasas_dcmd_frame *dcmd; 4537 4538 struct MR_PRIV_DEVICE *mr_device_priv_data; 4539 u16 device_id = 0; 4540 4541 device_id = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id; 4542 cmd = megasas_get_cmd(instance); 4543 4544 if (!cmd) { 4545 dev_err(&instance->pdev->dev, "Failed to get cmd %s\n", __func__); 4546 return; 4547 } 4548 4549 dcmd = &cmd->frame->dcmd; 4550 4551 memset(instance->pd_info, 0, sizeof(*instance->pd_info)); 4552 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 4553 4554 dcmd->mbox.s[0] = cpu_to_le16(device_id); 4555 dcmd->cmd = MFI_CMD_DCMD; 4556 dcmd->cmd_status = 0xFF; 4557 dcmd->sge_count = 1; 4558 dcmd->flags = MFI_FRAME_DIR_READ; 4559 dcmd->timeout = 0; 4560 dcmd->pad_0 = 0; 4561 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_PD_INFO)); 4562 dcmd->opcode = cpu_to_le32(MR_DCMD_PD_GET_INFO); 4563 4564 megasas_set_dma_settings(instance, dcmd, instance->pd_info_h, 4565 sizeof(struct MR_PD_INFO)); 4566 4567 if ((instance->adapter_type != MFI_SERIES) && 4568 !instance->mask_interrupts) 4569 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS); 4570 else 4571 ret = megasas_issue_polled(instance, cmd); 4572 4573 switch (ret) { 4574 case DCMD_SUCCESS: 4575 mr_device_priv_data = sdev->hostdata; 4576 le16_to_cpus((u16 
*)&instance->pd_info->state.ddf.pdType); 4577 mr_device_priv_data->interface_type = 4578 instance->pd_info->state.ddf.pdType.intf; 4579 break; 4580 4581 case DCMD_TIMEOUT: 4582 4583 switch (dcmd_timeout_ocr_possible(instance)) { 4584 case INITIATE_OCR: 4585 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 4586 mutex_unlock(&instance->reset_mutex); 4587 megasas_reset_fusion(instance->host, 4588 MFI_IO_TIMEOUT_OCR); 4589 mutex_lock(&instance->reset_mutex); 4590 break; 4591 case KILL_ADAPTER: 4592 megaraid_sas_kill_hba(instance); 4593 break; 4594 case IGNORE_TIMEOUT: 4595 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n", 4596 __func__, __LINE__); 4597 break; 4598 } 4599 4600 break; 4601 } 4602 4603 if (ret != DCMD_TIMEOUT) 4604 megasas_return_cmd(instance, cmd); 4605 4606 return; 4607 } 4608 /* 4609 * megasas_get_pd_list_info - Returns FW's pd_list structure 4610 * @instance: Adapter soft state 4611 * @pd_list: pd_list structure 4612 * 4613 * Issues an internal command (DCMD) to get the FW's controller PD 4614 * list structure. This information is mainly used to find out SYSTEM 4615 * supported by the FW. 4616 */ 4617 static int 4618 megasas_get_pd_list(struct megasas_instance *instance) 4619 { 4620 int ret = 0, pd_index = 0; 4621 struct megasas_cmd *cmd; 4622 struct megasas_dcmd_frame *dcmd; 4623 struct MR_PD_LIST *ci; 4624 struct MR_PD_ADDRESS *pd_addr; 4625 4626 if (instance->pd_list_not_supported) { 4627 dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY " 4628 "not supported by firmware\n"); 4629 return ret; 4630 } 4631 4632 ci = instance->pd_list_buf; 4633 4634 cmd = megasas_get_cmd(instance); 4635 4636 if (!cmd) { 4637 dev_printk(KERN_DEBUG, &instance->pdev->dev, "(get_pd_list): Failed to get cmd\n"); 4638 return -ENOMEM; 4639 } 4640 4641 dcmd = &cmd->frame->dcmd; 4642 4643 memset(ci, 0, sizeof(*ci)); 4644 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 4645 4646 dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST; 4647 dcmd->mbox.b[1] = 0; 4648 dcmd->cmd = MFI_CMD_DCMD; 4649 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 4650 dcmd->sge_count = 1; 4651 dcmd->flags = MFI_FRAME_DIR_READ; 4652 dcmd->timeout = 0; 4653 dcmd->pad_0 = 0; 4654 dcmd->data_xfer_len = cpu_to_le32(MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST)); 4655 dcmd->opcode = cpu_to_le32(MR_DCMD_PD_LIST_QUERY); 4656 4657 megasas_set_dma_settings(instance, dcmd, instance->pd_list_buf_h, 4658 (MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST))); 4659 4660 if ((instance->adapter_type != MFI_SERIES) && 4661 !instance->mask_interrupts) 4662 ret = megasas_issue_blocked_cmd(instance, cmd, 4663 MFI_IO_TIMEOUT_SECS); 4664 else 4665 ret = megasas_issue_polled(instance, cmd); 4666 4667 switch (ret) { 4668 case DCMD_FAILED: 4669 dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY " 4670 "failed/not supported by firmware\n"); 4671 4672 if (instance->adapter_type != MFI_SERIES) 4673 megaraid_sas_kill_hba(instance); 4674 else 4675 instance->pd_list_not_supported = 1; 4676 break; 4677 case DCMD_TIMEOUT: 4678 4679 switch (dcmd_timeout_ocr_possible(instance)) { 4680 case INITIATE_OCR: 4681 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 4682 /* 4683 * DCMD failed from AEN path. 4684 * AEN path already hold reset_mutex to avoid PCI access 4685 * while OCR is in progress. 
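 *
 * The timeout handling shared by most internal DCMDs in this file,
 * condensed:
 *
 *	case DCMD_TIMEOUT:
 *		switch (dcmd_timeout_ocr_possible(instance)) {
 *		case INITIATE_OCR:	Fusion: drop reset_mutex, run
 *					megasas_reset_fusion(), retake it
 *		case KILL_ADAPTER:	MFI series: no OCR, kill the HBA
 *		case IGNORE_TIMEOUT:	unload or OCR already in progress
 *		}
 *
 * The timed-out cmd is flagged DRV_DCMD_SKIP_REFIRE and deliberately not
 * returned to the pool, hence the trailing
 * "if (ret != DCMD_TIMEOUT) megasas_return_cmd()" in each caller.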
4686 */ 4687 mutex_unlock(&instance->reset_mutex); 4688 megasas_reset_fusion(instance->host, 4689 MFI_IO_TIMEOUT_OCR); 4690 mutex_lock(&instance->reset_mutex); 4691 break; 4692 case KILL_ADAPTER: 4693 megaraid_sas_kill_hba(instance); 4694 break; 4695 case IGNORE_TIMEOUT: 4696 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d \n", 4697 __func__, __LINE__); 4698 break; 4699 } 4700 4701 break; 4702 4703 case DCMD_SUCCESS: 4704 pd_addr = ci->addr; 4705 if (megasas_dbg_lvl & LD_PD_DEBUG) 4706 dev_info(&instance->pdev->dev, "%s, sysPD count: 0x%x\n", 4707 __func__, le32_to_cpu(ci->count)); 4708 4709 if ((le32_to_cpu(ci->count) > 4710 (MEGASAS_MAX_PD_CHANNELS * MEGASAS_MAX_DEV_PER_CHANNEL))) 4711 break; 4712 4713 memset(instance->local_pd_list, 0, 4714 MEGASAS_MAX_PD * sizeof(struct megasas_pd_list)); 4715 4716 for (pd_index = 0; pd_index < le32_to_cpu(ci->count); pd_index++) { 4717 instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].tid = 4718 le16_to_cpu(pd_addr->deviceId); 4719 instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveType = 4720 pd_addr->scsiDevType; 4721 instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveState = 4722 MR_PD_STATE_SYSTEM; 4723 if (megasas_dbg_lvl & LD_PD_DEBUG) 4724 dev_info(&instance->pdev->dev, 4725 "PD%d: targetID: 0x%03x deviceType:0x%x\n", 4726 pd_index, le16_to_cpu(pd_addr->deviceId), 4727 pd_addr->scsiDevType); 4728 pd_addr++; 4729 } 4730 4731 memcpy(instance->pd_list, instance->local_pd_list, 4732 sizeof(instance->pd_list)); 4733 break; 4734 4735 } 4736 4737 if (ret != DCMD_TIMEOUT) 4738 megasas_return_cmd(instance, cmd); 4739 4740 return ret; 4741 } 4742 4743 /* 4744 * megasas_get_ld_list_info - Returns FW's ld_list structure 4745 * @instance: Adapter soft state 4746 * @ld_list: ld_list structure 4747 * 4748 * Issues an internal command (DCMD) to get the FW's controller PD 4749 * list structure. This information is mainly used to find out SYSTEM 4750 * supported by the FW. 
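 *
 * Like most internal DCMDs in this file, the query below chooses its
 * issue path with the same test, restated here:
 *
 *	if (instance->adapter_type != MFI_SERIES &&
 *	    !instance->mask_interrupts)
 *		ret = megasas_issue_blocked_cmd(instance, cmd,
 *						MFI_IO_TIMEOUT_SECS);
 *	else
 *		ret = megasas_issue_polled(instance, cmd);
 *
 * i.e. interrupt-driven completion whenever interrupts are live, polled
 * completion for MFI-series adapters or while interrupts are masked.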
4751 */ 4752 static int 4753 megasas_get_ld_list(struct megasas_instance *instance) 4754 { 4755 int ret = 0, ld_index = 0, ids = 0; 4756 struct megasas_cmd *cmd; 4757 struct megasas_dcmd_frame *dcmd; 4758 struct MR_LD_LIST *ci; 4759 dma_addr_t ci_h = 0; 4760 u32 ld_count; 4761 4762 ci = instance->ld_list_buf; 4763 ci_h = instance->ld_list_buf_h; 4764 4765 cmd = megasas_get_cmd(instance); 4766 4767 if (!cmd) { 4768 dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_list: Failed to get cmd\n"); 4769 return -ENOMEM; 4770 } 4771 4772 dcmd = &cmd->frame->dcmd; 4773 4774 memset(ci, 0, sizeof(*ci)); 4775 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 4776 4777 if (instance->supportmax256vd) 4778 dcmd->mbox.b[0] = 1; 4779 dcmd->cmd = MFI_CMD_DCMD; 4780 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 4781 dcmd->sge_count = 1; 4782 dcmd->flags = MFI_FRAME_DIR_READ; 4783 dcmd->timeout = 0; 4784 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_LIST)); 4785 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_GET_LIST); 4786 dcmd->pad_0 = 0; 4787 4788 megasas_set_dma_settings(instance, dcmd, ci_h, 4789 sizeof(struct MR_LD_LIST)); 4790 4791 if ((instance->adapter_type != MFI_SERIES) && 4792 !instance->mask_interrupts) 4793 ret = megasas_issue_blocked_cmd(instance, cmd, 4794 MFI_IO_TIMEOUT_SECS); 4795 else 4796 ret = megasas_issue_polled(instance, cmd); 4797 4798 ld_count = le32_to_cpu(ci->ldCount); 4799 4800 switch (ret) { 4801 case DCMD_FAILED: 4802 megaraid_sas_kill_hba(instance); 4803 break; 4804 case DCMD_TIMEOUT: 4805 4806 switch (dcmd_timeout_ocr_possible(instance)) { 4807 case INITIATE_OCR: 4808 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 4809 /* 4810 * DCMD failed from AEN path. 4811 * AEN path already hold reset_mutex to avoid PCI access 4812 * while OCR is in progress. 4813 */ 4814 mutex_unlock(&instance->reset_mutex); 4815 megasas_reset_fusion(instance->host, 4816 MFI_IO_TIMEOUT_OCR); 4817 mutex_lock(&instance->reset_mutex); 4818 break; 4819 case KILL_ADAPTER: 4820 megaraid_sas_kill_hba(instance); 4821 break; 4822 case IGNORE_TIMEOUT: 4823 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n", 4824 __func__, __LINE__); 4825 break; 4826 } 4827 4828 break; 4829 4830 case DCMD_SUCCESS: 4831 if (megasas_dbg_lvl & LD_PD_DEBUG) 4832 dev_info(&instance->pdev->dev, "%s, LD count: 0x%x\n", 4833 __func__, ld_count); 4834 4835 if (ld_count > instance->fw_supported_vd_count) 4836 break; 4837 4838 memset(instance->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT); 4839 4840 for (ld_index = 0; ld_index < ld_count; ld_index++) { 4841 if (ci->ldList[ld_index].state != 0) { 4842 ids = ci->ldList[ld_index].ref.targetId; 4843 instance->ld_ids[ids] = ci->ldList[ld_index].ref.targetId; 4844 if (megasas_dbg_lvl & LD_PD_DEBUG) 4845 dev_info(&instance->pdev->dev, 4846 "LD%d: targetID: 0x%03x\n", 4847 ld_index, ids); 4848 } 4849 } 4850 4851 break; 4852 } 4853 4854 if (ret != DCMD_TIMEOUT) 4855 megasas_return_cmd(instance, cmd); 4856 4857 return ret; 4858 } 4859 4860 /** 4861 * megasas_ld_list_query - Returns FW's ld_list structure 4862 * @instance: Adapter soft state 4863 * @query_type: ld_list structure type 4864 * 4865 * Issues an internal command (DCMD) to get the FW's controller PD 4866 * list structure. This information is mainly used to find out SYSTEM 4867 * supported by the FW. 
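 *
 * Result handling, restated: the driver keeps a presence array indexed
 * by target ID, where 0xff means "no LD at this target". On success the
 * query result is folded into it as:
 *
 *	memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
 *	for (i = 0; i < tgtid_count; i++)
 *		instance->ld_ids[ci->targetId[i]] = ci->targetId[i];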
4868 */ 4869 static int 4870 megasas_ld_list_query(struct megasas_instance *instance, u8 query_type) 4871 { 4872 int ret = 0, ld_index = 0, ids = 0; 4873 struct megasas_cmd *cmd; 4874 struct megasas_dcmd_frame *dcmd; 4875 struct MR_LD_TARGETID_LIST *ci; 4876 dma_addr_t ci_h = 0; 4877 u32 tgtid_count; 4878 4879 ci = instance->ld_targetid_list_buf; 4880 ci_h = instance->ld_targetid_list_buf_h; 4881 4882 cmd = megasas_get_cmd(instance); 4883 4884 if (!cmd) { 4885 dev_warn(&instance->pdev->dev, 4886 "megasas_ld_list_query: Failed to get cmd\n"); 4887 return -ENOMEM; 4888 } 4889 4890 dcmd = &cmd->frame->dcmd; 4891 4892 memset(ci, 0, sizeof(*ci)); 4893 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 4894 4895 dcmd->mbox.b[0] = query_type; 4896 if (instance->supportmax256vd) 4897 dcmd->mbox.b[2] = 1; 4898 4899 dcmd->cmd = MFI_CMD_DCMD; 4900 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 4901 dcmd->sge_count = 1; 4902 dcmd->flags = MFI_FRAME_DIR_READ; 4903 dcmd->timeout = 0; 4904 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_TARGETID_LIST)); 4905 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_LIST_QUERY); 4906 dcmd->pad_0 = 0; 4907 4908 megasas_set_dma_settings(instance, dcmd, ci_h, 4909 sizeof(struct MR_LD_TARGETID_LIST)); 4910 4911 if ((instance->adapter_type != MFI_SERIES) && 4912 !instance->mask_interrupts) 4913 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS); 4914 else 4915 ret = megasas_issue_polled(instance, cmd); 4916 4917 switch (ret) { 4918 case DCMD_FAILED: 4919 dev_info(&instance->pdev->dev, 4920 "DCMD not supported by firmware - %s %d\n", 4921 __func__, __LINE__); 4922 ret = megasas_get_ld_list(instance); 4923 break; 4924 case DCMD_TIMEOUT: 4925 switch (dcmd_timeout_ocr_possible(instance)) { 4926 case INITIATE_OCR: 4927 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 4928 /* 4929 * DCMD failed from AEN path. 4930 * AEN path already hold reset_mutex to avoid PCI access 4931 * while OCR is in progress. 
4932 */ 4933 mutex_unlock(&instance->reset_mutex); 4934 megasas_reset_fusion(instance->host, 4935 MFI_IO_TIMEOUT_OCR); 4936 mutex_lock(&instance->reset_mutex); 4937 break; 4938 case KILL_ADAPTER: 4939 megaraid_sas_kill_hba(instance); 4940 break; 4941 case IGNORE_TIMEOUT: 4942 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n", 4943 __func__, __LINE__); 4944 break; 4945 } 4946 4947 break; 4948 case DCMD_SUCCESS: 4949 tgtid_count = le32_to_cpu(ci->count); 4950 4951 if (megasas_dbg_lvl & LD_PD_DEBUG) 4952 dev_info(&instance->pdev->dev, "%s, LD count: 0x%x\n", 4953 __func__, tgtid_count); 4954 4955 if ((tgtid_count > (instance->fw_supported_vd_count))) 4956 break; 4957 4958 memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS); 4959 for (ld_index = 0; ld_index < tgtid_count; ld_index++) { 4960 ids = ci->targetId[ld_index]; 4961 instance->ld_ids[ids] = ci->targetId[ld_index]; 4962 if (megasas_dbg_lvl & LD_PD_DEBUG) 4963 dev_info(&instance->pdev->dev, "LD%d: targetID: 0x%03x\n", 4964 ld_index, ci->targetId[ld_index]); 4965 } 4966 4967 break; 4968 } 4969 4970 if (ret != DCMD_TIMEOUT) 4971 megasas_return_cmd(instance, cmd); 4972 4973 return ret; 4974 } 4975 4976 /** 4977 * megasas_host_device_list_query 4978 * dcmd.opcode - MR_DCMD_CTRL_DEVICE_LIST_GET 4979 * dcmd.mbox - reserved 4980 * dcmd.sge IN - ptr to return MR_HOST_DEVICE_LIST structure 4981 * Desc: This DCMD will return the combined device list 4982 * Status: MFI_STAT_OK - List returned successfully 4983 * MFI_STAT_INVALID_CMD - Firmware support for the feature has been 4984 * disabled 4985 * @instance: Adapter soft state 4986 * @is_probe: Driver probe check 4987 * Return: 0 if DCMD succeeded 4988 * non-zero if failed 4989 */ 4990 static int 4991 megasas_host_device_list_query(struct megasas_instance *instance, 4992 bool is_probe) 4993 { 4994 int ret, i, target_id; 4995 struct megasas_cmd *cmd; 4996 struct megasas_dcmd_frame *dcmd; 4997 struct MR_HOST_DEVICE_LIST *ci; 4998 u32 count; 4999 dma_addr_t ci_h; 5000 5001 ci = instance->host_device_list_buf; 5002 ci_h = instance->host_device_list_buf_h; 5003 5004 cmd = megasas_get_cmd(instance); 5005 5006 if (!cmd) { 5007 dev_warn(&instance->pdev->dev, 5008 "%s: failed to get cmd\n", 5009 __func__); 5010 return -ENOMEM; 5011 } 5012 5013 dcmd = &cmd->frame->dcmd; 5014 5015 memset(ci, 0, sizeof(*ci)); 5016 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 5017 5018 dcmd->mbox.b[0] = is_probe ? 
0 : 1; 5019 dcmd->cmd = MFI_CMD_DCMD; 5020 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 5021 dcmd->sge_count = 1; 5022 dcmd->flags = MFI_FRAME_DIR_READ; 5023 dcmd->timeout = 0; 5024 dcmd->pad_0 = 0; 5025 dcmd->data_xfer_len = cpu_to_le32(HOST_DEVICE_LIST_SZ); 5026 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_DEVICE_LIST_GET); 5027 5028 megasas_set_dma_settings(instance, dcmd, ci_h, HOST_DEVICE_LIST_SZ); 5029 5030 if (!instance->mask_interrupts) { 5031 ret = megasas_issue_blocked_cmd(instance, cmd, 5032 MFI_IO_TIMEOUT_SECS); 5033 } else { 5034 ret = megasas_issue_polled(instance, cmd); 5035 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 5036 } 5037 5038 switch (ret) { 5039 case DCMD_SUCCESS: 5040 /* Fill the internal pd_list and ld_ids array based on 5041 * targetIds returned by FW 5042 */ 5043 count = le32_to_cpu(ci->count); 5044 5045 if (count > (MEGASAS_MAX_PD + MAX_LOGICAL_DRIVES_EXT)) 5046 break; 5047 5048 if (megasas_dbg_lvl & LD_PD_DEBUG) 5049 dev_info(&instance->pdev->dev, "%s, Device count: 0x%x\n", 5050 __func__, count); 5051 5052 memset(instance->local_pd_list, 0, 5053 MEGASAS_MAX_PD * sizeof(struct megasas_pd_list)); 5054 memset(instance->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT); 5055 for (i = 0; i < count; i++) { 5056 target_id = le16_to_cpu(ci->host_device_list[i].target_id); 5057 if (ci->host_device_list[i].flags.u.bits.is_sys_pd) { 5058 instance->local_pd_list[target_id].tid = target_id; 5059 instance->local_pd_list[target_id].driveType = 5060 ci->host_device_list[i].scsi_type; 5061 instance->local_pd_list[target_id].driveState = 5062 MR_PD_STATE_SYSTEM; 5063 if (megasas_dbg_lvl & LD_PD_DEBUG) 5064 dev_info(&instance->pdev->dev, 5065 "Device %d: PD targetID: 0x%03x deviceType:0x%x\n", 5066 i, target_id, ci->host_device_list[i].scsi_type); 5067 } else { 5068 instance->ld_ids[target_id] = target_id; 5069 if (megasas_dbg_lvl & LD_PD_DEBUG) 5070 dev_info(&instance->pdev->dev, 5071 "Device %d: LD targetID: 0x%03x\n", 5072 i, target_id); 5073 } 5074 } 5075 5076 memcpy(instance->pd_list, instance->local_pd_list, 5077 sizeof(instance->pd_list)); 5078 break; 5079 5080 case DCMD_TIMEOUT: 5081 switch (dcmd_timeout_ocr_possible(instance)) { 5082 case INITIATE_OCR: 5083 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 5084 mutex_unlock(&instance->reset_mutex); 5085 megasas_reset_fusion(instance->host, 5086 MFI_IO_TIMEOUT_OCR); 5087 mutex_lock(&instance->reset_mutex); 5088 break; 5089 case KILL_ADAPTER: 5090 megaraid_sas_kill_hba(instance); 5091 break; 5092 case IGNORE_TIMEOUT: 5093 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n", 5094 __func__, __LINE__); 5095 break; 5096 } 5097 break; 5098 case DCMD_FAILED: 5099 dev_err(&instance->pdev->dev, 5100 "%s: MR_DCMD_CTRL_DEVICE_LIST_GET failed\n", 5101 __func__); 5102 break; 5103 } 5104 5105 if (ret != DCMD_TIMEOUT) 5106 megasas_return_cmd(instance, cmd); 5107 5108 return ret; 5109 } 5110 5111 /* 5112 * megasas_update_ext_vd_details : Update details w.r.t Extended VD 5113 * instance : Controller's instance 5114 */ 5115 static void megasas_update_ext_vd_details(struct megasas_instance *instance) 5116 { 5117 struct fusion_context *fusion; 5118 u32 ventura_map_sz = 0; 5119 5120 fusion = instance->ctrl_context; 5121 /* For MFI based controllers return dummy success */ 5122 if (!fusion) 5123 return; 5124 5125 instance->supportmax256vd = 5126 instance->ctrl_info_buf->adapterOperations3.supportMaxExtLDs; 5127 /* Below is additional check to address future FW enhancement */ 5128 if (instance->ctrl_info_buf->max_lds > 64) 5129 instance->supportmax256vd = 1; 5130 5131 
instance->drv_supported_vd_count = MEGASAS_MAX_LD_CHANNELS 5132 * MEGASAS_MAX_DEV_PER_CHANNEL; 5133 instance->drv_supported_pd_count = MEGASAS_MAX_PD_CHANNELS 5134 * MEGASAS_MAX_DEV_PER_CHANNEL; 5135 if (instance->supportmax256vd) { 5136 instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES_EXT; 5137 instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES; 5138 } else { 5139 instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES; 5140 instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES; 5141 } 5142 5143 dev_info(&instance->pdev->dev, 5144 "FW provided supportMaxExtLDs: %d\tmax_lds: %d\n", 5145 instance->ctrl_info_buf->adapterOperations3.supportMaxExtLDs ? 1 : 0, 5146 instance->ctrl_info_buf->max_lds); 5147 5148 if (instance->max_raid_mapsize) { 5149 ventura_map_sz = instance->max_raid_mapsize * 5150 MR_MIN_MAP_SIZE; /* 64k */ 5151 fusion->current_map_sz = ventura_map_sz; 5152 fusion->max_map_sz = ventura_map_sz; 5153 } else { 5154 fusion->old_map_sz = 5155 struct_size_t(struct MR_FW_RAID_MAP, ldSpanMap, 5156 instance->fw_supported_vd_count); 5157 fusion->new_map_sz = sizeof(struct MR_FW_RAID_MAP_EXT); 5158 5159 fusion->max_map_sz = 5160 max(fusion->old_map_sz, fusion->new_map_sz); 5161 5162 if (instance->supportmax256vd) 5163 fusion->current_map_sz = fusion->new_map_sz; 5164 else 5165 fusion->current_map_sz = fusion->old_map_sz; 5166 } 5167 /* irrespective of FW raid maps, driver raid map is constant */ 5168 fusion->drv_map_sz = sizeof(struct MR_DRV_RAID_MAP_ALL); 5169 } 5170 5171 /* 5172 * dcmd.opcode - MR_DCMD_CTRL_SNAPDUMP_GET_PROPERTIES 5173 * dcmd.hdr.length - number of bytes to read 5174 * dcmd.sge - Ptr to MR_SNAPDUMP_PROPERTIES 5175 * Desc: Fill in snapdump properties 5176 * Status: MFI_STAT_OK- Command successful 5177 */ 5178 void megasas_get_snapdump_properties(struct megasas_instance *instance) 5179 { 5180 int ret = 0; 5181 struct megasas_cmd *cmd; 5182 struct megasas_dcmd_frame *dcmd; 5183 struct MR_SNAPDUMP_PROPERTIES *ci; 5184 dma_addr_t ci_h = 0; 5185 5186 ci = instance->snapdump_prop; 5187 ci_h = instance->snapdump_prop_h; 5188 5189 if (!ci) 5190 return; 5191 5192 cmd = megasas_get_cmd(instance); 5193 5194 if (!cmd) { 5195 dev_dbg(&instance->pdev->dev, "Failed to get a free cmd\n"); 5196 return; 5197 } 5198 5199 dcmd = &cmd->frame->dcmd; 5200 5201 memset(ci, 0, sizeof(*ci)); 5202 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 5203 5204 dcmd->cmd = MFI_CMD_DCMD; 5205 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 5206 dcmd->sge_count = 1; 5207 dcmd->flags = MFI_FRAME_DIR_READ; 5208 dcmd->timeout = 0; 5209 dcmd->pad_0 = 0; 5210 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_SNAPDUMP_PROPERTIES)); 5211 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SNAPDUMP_GET_PROPERTIES); 5212 5213 megasas_set_dma_settings(instance, dcmd, ci_h, 5214 sizeof(struct MR_SNAPDUMP_PROPERTIES)); 5215 5216 if (!instance->mask_interrupts) { 5217 ret = megasas_issue_blocked_cmd(instance, cmd, 5218 MFI_IO_TIMEOUT_SECS); 5219 } else { 5220 ret = megasas_issue_polled(instance, cmd); 5221 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 5222 } 5223 5224 switch (ret) { 5225 case DCMD_SUCCESS: 5226 instance->snapdump_wait_time = 5227 min_t(u8, ci->trigger_min_num_sec_before_ocr, 5228 MEGASAS_MAX_SNAP_DUMP_WAIT_TIME); 5229 break; 5230 5231 case DCMD_TIMEOUT: 5232 switch (dcmd_timeout_ocr_possible(instance)) { 5233 case INITIATE_OCR: 5234 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 5235 mutex_unlock(&instance->reset_mutex); 5236 megasas_reset_fusion(instance->host, 5237 MFI_IO_TIMEOUT_OCR); 5238 mutex_lock(&instance->reset_mutex); 5239 break; 
5240 case KILL_ADAPTER: 5241 megaraid_sas_kill_hba(instance); 5242 break; 5243 case IGNORE_TIMEOUT: 5244 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n", 5245 __func__, __LINE__); 5246 break; 5247 } 5248 } 5249 5250 if (ret != DCMD_TIMEOUT) 5251 megasas_return_cmd(instance, cmd); 5252 } 5253 5254 /** 5255 * megasas_get_ctrl_info - Returns FW's controller structure 5256 * @instance: Adapter soft state 5257 * 5258 * Issues an internal command (DCMD) to get the FW's controller structure. 5259 * This information is mainly used to find out the maximum IO transfer per 5260 * command supported by the FW. 5261 */ 5262 int 5263 megasas_get_ctrl_info(struct megasas_instance *instance) 5264 { 5265 int ret = 0; 5266 struct megasas_cmd *cmd; 5267 struct megasas_dcmd_frame *dcmd; 5268 struct megasas_ctrl_info *ci; 5269 dma_addr_t ci_h = 0; 5270 5271 ci = instance->ctrl_info_buf; 5272 ci_h = instance->ctrl_info_buf_h; 5273 5274 cmd = megasas_get_cmd(instance); 5275 5276 if (!cmd) { 5277 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get a free cmd\n"); 5278 return -ENOMEM; 5279 } 5280 5281 dcmd = &cmd->frame->dcmd; 5282 5283 memset(ci, 0, sizeof(*ci)); 5284 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 5285 5286 dcmd->cmd = MFI_CMD_DCMD; 5287 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 5288 dcmd->sge_count = 1; 5289 dcmd->flags = MFI_FRAME_DIR_READ; 5290 dcmd->timeout = 0; 5291 dcmd->pad_0 = 0; 5292 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_ctrl_info)); 5293 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_GET_INFO); 5294 dcmd->mbox.b[0] = 1; 5295 5296 megasas_set_dma_settings(instance, dcmd, ci_h, 5297 sizeof(struct megasas_ctrl_info)); 5298 5299 if ((instance->adapter_type != MFI_SERIES) && 5300 !instance->mask_interrupts) { 5301 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS); 5302 } else { 5303 ret = megasas_issue_polled(instance, cmd); 5304 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 5305 } 5306 5307 switch (ret) { 5308 case DCMD_SUCCESS: 5309 /* Save required controller information in 5310 * CPU endianness format. 5311 */ 5312 le32_to_cpus((u32 *)&ci->properties.OnOffProperties); 5313 le16_to_cpus((u16 *)&ci->properties.on_off_properties2); 5314 le32_to_cpus((u32 *)&ci->adapterOperations2); 5315 le32_to_cpus((u32 *)&ci->adapterOperations3); 5316 le16_to_cpus((u16 *)&ci->adapter_operations4); 5317 le32_to_cpus((u32 *)&ci->adapter_operations5); 5318 5319 /* Update the latest Ext VD info. 5320 * From Init path, store current firmware details. 5321 * From OCR path, detect any firmware properties changes. 5322 * in case of Firmware upgrade without system reboot. 5323 */ 5324 megasas_update_ext_vd_details(instance); 5325 instance->support_seqnum_jbod_fp = 5326 ci->adapterOperations3.useSeqNumJbodFP; 5327 instance->support_morethan256jbod = 5328 ci->adapter_operations4.support_pd_map_target_id; 5329 instance->support_nvme_passthru = 5330 ci->adapter_operations4.support_nvme_passthru; 5331 instance->support_pci_lane_margining = 5332 ci->adapter_operations5.support_pci_lane_margining; 5333 instance->task_abort_tmo = ci->TaskAbortTO; 5334 instance->max_reset_tmo = ci->MaxResetTO; 5335 5336 /*Check whether controller is iMR or MR */ 5337 instance->is_imr = (ci->memory_size ? 0 : 1); 5338 5339 instance->snapdump_wait_time = 5340 (ci->properties.on_off_properties2.enable_snap_dump ? 
5341 MEGASAS_DEFAULT_SNAP_DUMP_WAIT_TIME : 0); 5342 5343 instance->enable_fw_dev_list = 5344 ci->properties.on_off_properties2.enable_fw_dev_list; 5345 5346 dev_info(&instance->pdev->dev, 5347 "controller type\t: %s(%dMB)\n", 5348 instance->is_imr ? "iMR" : "MR", 5349 le16_to_cpu(ci->memory_size)); 5350 5351 instance->disableOnlineCtrlReset = 5352 ci->properties.OnOffProperties.disableOnlineCtrlReset; 5353 instance->secure_jbod_support = 5354 ci->adapterOperations3.supportSecurityonJBOD; 5355 dev_info(&instance->pdev->dev, "Online Controller Reset(OCR)\t: %s\n", 5356 instance->disableOnlineCtrlReset ? "Disabled" : "Enabled"); 5357 dev_info(&instance->pdev->dev, "Secure JBOD support\t: %s\n", 5358 instance->secure_jbod_support ? "Yes" : "No"); 5359 dev_info(&instance->pdev->dev, "NVMe passthru support\t: %s\n", 5360 instance->support_nvme_passthru ? "Yes" : "No"); 5361 dev_info(&instance->pdev->dev, 5362 "FW provided TM TaskAbort/Reset timeout\t: %d secs/%d secs\n", 5363 instance->task_abort_tmo, instance->max_reset_tmo); 5364 dev_info(&instance->pdev->dev, "JBOD sequence map support\t: %s\n", 5365 instance->support_seqnum_jbod_fp ? "Yes" : "No"); 5366 dev_info(&instance->pdev->dev, "PCI Lane Margining support\t: %s\n", 5367 instance->support_pci_lane_margining ? "Yes" : "No"); 5368 5369 break; 5370 5371 case DCMD_TIMEOUT: 5372 switch (dcmd_timeout_ocr_possible(instance)) { 5373 case INITIATE_OCR: 5374 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 5375 mutex_unlock(&instance->reset_mutex); 5376 megasas_reset_fusion(instance->host, 5377 MFI_IO_TIMEOUT_OCR); 5378 mutex_lock(&instance->reset_mutex); 5379 break; 5380 case KILL_ADAPTER: 5381 megaraid_sas_kill_hba(instance); 5382 break; 5383 case IGNORE_TIMEOUT: 5384 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n", 5385 __func__, __LINE__); 5386 break; 5387 } 5388 break; 5389 case DCMD_FAILED: 5390 megaraid_sas_kill_hba(instance); 5391 break; 5392 5393 } 5394 5395 if (ret != DCMD_TIMEOUT) 5396 megasas_return_cmd(instance, cmd); 5397 5398 return ret; 5399 } 5400 5401 /* 5402 * megasas_set_crash_dump_params - Sends address of crash dump DMA buffer 5403 * to firmware 5404 * 5405 * @instance: Adapter soft state 5406 * @crash_buf_state - tell FW to turn ON/OFF crash dump feature 5407 MR_CRASH_BUF_TURN_OFF = 0 5408 MR_CRASH_BUF_TURN_ON = 1 5409 * @return 0 on success non-zero on failure. 5410 * Issues an internal command (DCMD) to set parameters for crash dump feature. 5411 * Driver will send address of crash dump DMA buffer and set mbox to tell FW 5412 * that driver supports crash dump feature. This DCMD will be sent only if 5413 * crash dump feature is supported by the FW. 
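 *
 * Typical call pattern (illustrative only, not additional driver code):
 *
 *	megasas_set_crash_dump_params(instance, MR_CRASH_BUF_TURN_ON);
 *	...
 *	megasas_set_crash_dump_params(instance, MR_CRASH_BUF_TURN_OFF);
 *
 * The value returned is the DCMD status from the issue path
 * (DCMD_SUCCESS, DCMD_FAILED or DCMD_TIMEOUT).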
5414 * 5415 */ 5416 int megasas_set_crash_dump_params(struct megasas_instance *instance, 5417 u8 crash_buf_state) 5418 { 5419 int ret = 0; 5420 struct megasas_cmd *cmd; 5421 struct megasas_dcmd_frame *dcmd; 5422 5423 cmd = megasas_get_cmd(instance); 5424 5425 if (!cmd) { 5426 dev_err(&instance->pdev->dev, "Failed to get a free cmd\n"); 5427 return -ENOMEM; 5428 } 5429 5430 5431 dcmd = &cmd->frame->dcmd; 5432 5433 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 5434 dcmd->mbox.b[0] = crash_buf_state; 5435 dcmd->cmd = MFI_CMD_DCMD; 5436 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 5437 dcmd->sge_count = 1; 5438 dcmd->flags = MFI_FRAME_DIR_NONE; 5439 dcmd->timeout = 0; 5440 dcmd->pad_0 = 0; 5441 dcmd->data_xfer_len = cpu_to_le32(CRASH_DMA_BUF_SIZE); 5442 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SET_CRASH_DUMP_PARAMS); 5443 5444 megasas_set_dma_settings(instance, dcmd, instance->crash_dump_h, 5445 CRASH_DMA_BUF_SIZE); 5446 5447 if ((instance->adapter_type != MFI_SERIES) && 5448 !instance->mask_interrupts) 5449 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS); 5450 else 5451 ret = megasas_issue_polled(instance, cmd); 5452 5453 if (ret == DCMD_TIMEOUT) { 5454 switch (dcmd_timeout_ocr_possible(instance)) { 5455 case INITIATE_OCR: 5456 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 5457 megasas_reset_fusion(instance->host, 5458 MFI_IO_TIMEOUT_OCR); 5459 break; 5460 case KILL_ADAPTER: 5461 megaraid_sas_kill_hba(instance); 5462 break; 5463 case IGNORE_TIMEOUT: 5464 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n", 5465 __func__, __LINE__); 5466 break; 5467 } 5468 } else 5469 megasas_return_cmd(instance, cmd); 5470 5471 return ret; 5472 } 5473 5474 /** 5475 * megasas_issue_init_mfi - Initializes the FW 5476 * @instance: Adapter soft state 5477 * 5478 * Issues the INIT MFI cmd 5479 */ 5480 static int 5481 megasas_issue_init_mfi(struct megasas_instance *instance) 5482 { 5483 __le32 context; 5484 struct megasas_cmd *cmd; 5485 struct megasas_init_frame *init_frame; 5486 struct megasas_init_queue_info *initq_info; 5487 dma_addr_t init_frame_h; 5488 dma_addr_t initq_info_h; 5489 5490 /* 5491 * Prepare a init frame. Note the init frame points to queue info 5492 * structure. Each frame has SGL allocated after first 64 bytes. For 5493 * this frame - since we don't need any SGL - we use SGL's space as 5494 * queue info structure 5495 * 5496 * We will not get a NULL command below. We just created the pool. 
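 *
 * The single-frame layout relied on here, restated (offsets as used by
 * the code below):
 *
 *	cmd->frame / cmd->frame_phys_addr
 *	+----------------------------------+  offset 0
 *	| struct megasas_init_frame        |  first 64 bytes
 *	+----------------------------------+  offset 64
 *	| struct megasas_init_queue_info   |  placed in the unused SGL space
 *	+----------------------------------+
 *
 *	initq_info   = init_frame + 64 bytes
 *	initq_info_h = cmd->frame_phys_addr + 64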
5497 */ 5498 cmd = megasas_get_cmd(instance); 5499 5500 init_frame = (struct megasas_init_frame *)cmd->frame; 5501 initq_info = (struct megasas_init_queue_info *) 5502 ((unsigned long)init_frame + 64); 5503 5504 init_frame_h = cmd->frame_phys_addr; 5505 initq_info_h = init_frame_h + 64; 5506 5507 context = init_frame->context; 5508 memset(init_frame, 0, MEGAMFI_FRAME_SIZE); 5509 memset(initq_info, 0, sizeof(struct megasas_init_queue_info)); 5510 init_frame->context = context; 5511 5512 initq_info->reply_queue_entries = cpu_to_le32(instance->max_fw_cmds + 1); 5513 initq_info->reply_queue_start_phys_addr_lo = cpu_to_le32(instance->reply_queue_h); 5514 5515 initq_info->producer_index_phys_addr_lo = cpu_to_le32(instance->producer_h); 5516 initq_info->consumer_index_phys_addr_lo = cpu_to_le32(instance->consumer_h); 5517 5518 init_frame->cmd = MFI_CMD_INIT; 5519 init_frame->cmd_status = MFI_STAT_INVALID_STATUS; 5520 init_frame->queue_info_new_phys_addr_lo = 5521 cpu_to_le32(lower_32_bits(initq_info_h)); 5522 init_frame->queue_info_new_phys_addr_hi = 5523 cpu_to_le32(upper_32_bits(initq_info_h)); 5524 5525 init_frame->data_xfer_len = cpu_to_le32(sizeof(struct megasas_init_queue_info)); 5526 5527 /* 5528 * disable the intr before firing the init frame to FW 5529 */ 5530 instance->instancet->disable_intr(instance); 5531 5532 /* 5533 * Issue the init frame in polled mode 5534 */ 5535 5536 if (megasas_issue_polled(instance, cmd)) { 5537 dev_err(&instance->pdev->dev, "Failed to init firmware\n"); 5538 megasas_return_cmd(instance, cmd); 5539 goto fail_fw_init; 5540 } 5541 5542 megasas_return_cmd(instance, cmd); 5543 5544 return 0; 5545 5546 fail_fw_init: 5547 return -EINVAL; 5548 } 5549 5550 static u32 5551 megasas_init_adapter_mfi(struct megasas_instance *instance) 5552 { 5553 u32 context_sz; 5554 u32 reply_q_sz; 5555 5556 /* 5557 * Get various operational parameters from status register 5558 */ 5559 instance->max_fw_cmds = instance->instancet->read_fw_status_reg(instance) & 0x00FFFF; 5560 /* 5561 * Reduce the max supported cmds by 1. This is to ensure that the 5562 * reply_q_sz (1 more than the max cmd that driver may send) 5563 * does not exceed max cmds that the FW can support 5564 */ 5565 instance->max_fw_cmds = instance->max_fw_cmds-1; 5566 instance->max_mfi_cmds = instance->max_fw_cmds; 5567 instance->max_num_sge = (instance->instancet->read_fw_status_reg(instance) & 0xFF0000) >> 5568 0x10; 5569 /* 5570 * For MFI skinny adapters, MEGASAS_SKINNY_INT_CMDS commands 5571 * are reserved for IOCTL + driver's internal DCMDs. 5572 */ 5573 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) || 5574 (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) { 5575 instance->max_scsi_cmds = (instance->max_fw_cmds - 5576 MEGASAS_SKINNY_INT_CMDS); 5577 sema_init(&instance->ioctl_sem, MEGASAS_SKINNY_INT_CMDS); 5578 } else { 5579 instance->max_scsi_cmds = (instance->max_fw_cmds - 5580 MEGASAS_INT_CMDS); 5581 sema_init(&instance->ioctl_sem, (MEGASAS_MFI_IOCTL_CMDS)); 5582 } 5583 5584 instance->cur_can_queue = instance->max_scsi_cmds; 5585 /* 5586 * Create a pool of commands 5587 */ 5588 if (megasas_alloc_cmds(instance)) 5589 goto fail_alloc_cmds; 5590 5591 /* 5592 * Allocate memory for reply queue. Length of reply queue should 5593 * be _one_ more than the maximum commands handled by the firmware. 5594 * 5595 * Note: When FW completes commands, it places corresponding contex 5596 * values in this circular reply queue. This circular queue is a fairly 5597 * typical producer-consumer queue. 
FW is the producer (of completed 5598 * commands) and the driver is the consumer. 5599 */ 5600 context_sz = sizeof(u32); 5601 reply_q_sz = context_sz * (instance->max_fw_cmds + 1); 5602 5603 instance->reply_queue = dma_alloc_coherent(&instance->pdev->dev, 5604 reply_q_sz, &instance->reply_queue_h, GFP_KERNEL); 5605 5606 if (!instance->reply_queue) { 5607 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Out of DMA mem for reply queue\n"); 5608 goto fail_reply_queue; 5609 } 5610 5611 if (megasas_issue_init_mfi(instance)) 5612 goto fail_fw_init; 5613 5614 if (megasas_get_ctrl_info(instance)) { 5615 dev_err(&instance->pdev->dev, "(%d): Could get controller info " 5616 "Fail from %s %d\n", instance->unique_id, 5617 __func__, __LINE__); 5618 goto fail_fw_init; 5619 } 5620 5621 instance->fw_support_ieee = 0; 5622 instance->fw_support_ieee = 5623 (instance->instancet->read_fw_status_reg(instance) & 5624 0x04000000); 5625 5626 dev_notice(&instance->pdev->dev, "megasas_init_mfi: fw_support_ieee=%d", 5627 instance->fw_support_ieee); 5628 5629 if (instance->fw_support_ieee) 5630 instance->flag_ieee = 1; 5631 5632 return 0; 5633 5634 fail_fw_init: 5635 5636 dma_free_coherent(&instance->pdev->dev, reply_q_sz, 5637 instance->reply_queue, instance->reply_queue_h); 5638 fail_reply_queue: 5639 megasas_free_cmds(instance); 5640 5641 fail_alloc_cmds: 5642 return 1; 5643 } 5644 5645 static 5646 void megasas_setup_irq_poll(struct megasas_instance *instance) 5647 { 5648 struct megasas_irq_context *irq_ctx; 5649 u32 count, i; 5650 5651 count = instance->msix_vectors > 0 ? instance->msix_vectors : 1; 5652 5653 /* Initialize IRQ poll */ 5654 for (i = 0; i < count; i++) { 5655 irq_ctx = &instance->irq_context[i]; 5656 irq_ctx->os_irq = pci_irq_vector(instance->pdev, i); 5657 irq_ctx->irq_poll_scheduled = false; 5658 irq_poll_init(&irq_ctx->irqpoll, 5659 instance->threshold_reply_count, 5660 megasas_irqpoll); 5661 } 5662 } 5663 5664 /* 5665 * megasas_setup_irqs_ioapic - register legacy interrupts. 5666 * @instance: Adapter soft state 5667 * 5668 * Do not enable interrupt, only setup ISRs. 5669 * 5670 * Return 0 on success. 5671 */ 5672 static int 5673 megasas_setup_irqs_ioapic(struct megasas_instance *instance) 5674 { 5675 struct pci_dev *pdev; 5676 5677 pdev = instance->pdev; 5678 instance->irq_context[0].instance = instance; 5679 instance->irq_context[0].MSIxIndex = 0; 5680 snprintf(instance->irq_context->name, MEGASAS_MSIX_NAME_LEN, "%s%u", 5681 "megasas", instance->host->host_no); 5682 if (request_irq(pci_irq_vector(pdev, 0), 5683 instance->instancet->service_isr, IRQF_SHARED, 5684 instance->irq_context->name, &instance->irq_context[0])) { 5685 dev_err(&instance->pdev->dev, 5686 "Failed to register IRQ from %s %d\n", 5687 __func__, __LINE__); 5688 return -1; 5689 } 5690 instance->perf_mode = MR_LATENCY_PERF_MODE; 5691 instance->low_latency_index_start = 0; 5692 return 0; 5693 } 5694 5695 /** 5696 * megasas_setup_irqs_msix - register MSI-x interrupts. 5697 * @instance: Adapter soft state 5698 * @is_probe: Driver probe check 5699 * 5700 * Do not enable interrupt, only setup ISRs. 5701 * 5702 * Return 0 on success. 
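 *
 * As a rough sketch (simplified; handler name abbreviated, error
 * handling omitted) the loop below does the equivalent of:
 *
 *   for (i = 0; i < instance->msix_vectors; i++)
 *           request_irq(pci_irq_vector(pdev, i), service_isr, 0,
 *                       "megasas<host>-msix<i>", &instance->irq_context[i]);
 *
 * If any request_irq() fails, the vectors registered so far are freed
 * and, when called from probe, the driver falls back to the legacy
 * io_apic path via megasas_setup_irqs_ioapic().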
5703 */ 5704 static int 5705 megasas_setup_irqs_msix(struct megasas_instance *instance, u8 is_probe) 5706 { 5707 int i, j; 5708 struct pci_dev *pdev; 5709 5710 pdev = instance->pdev; 5711 5712 /* Try MSI-x */ 5713 for (i = 0; i < instance->msix_vectors; i++) { 5714 instance->irq_context[i].instance = instance; 5715 instance->irq_context[i].MSIxIndex = i; 5716 snprintf(instance->irq_context[i].name, MEGASAS_MSIX_NAME_LEN, "%s%u-msix%u", 5717 "megasas", instance->host->host_no, i); 5718 if (request_irq(pci_irq_vector(pdev, i), 5719 instance->instancet->service_isr, 0, instance->irq_context[i].name, 5720 &instance->irq_context[i])) { 5721 dev_err(&instance->pdev->dev, 5722 "Failed to register IRQ for vector %d.\n", i); 5723 for (j = 0; j < i; j++) { 5724 if (j < instance->low_latency_index_start) 5725 irq_update_affinity_hint( 5726 pci_irq_vector(pdev, j), NULL); 5727 free_irq(pci_irq_vector(pdev, j), 5728 &instance->irq_context[j]); 5729 } 5730 /* Retry irq register for IO_APIC*/ 5731 instance->msix_vectors = 0; 5732 instance->msix_load_balance = false; 5733 if (is_probe) { 5734 pci_free_irq_vectors(instance->pdev); 5735 return megasas_setup_irqs_ioapic(instance); 5736 } else { 5737 return -1; 5738 } 5739 } 5740 } 5741 5742 return 0; 5743 } 5744 5745 /* 5746 * megasas_destroy_irqs- unregister interrupts. 5747 * @instance: Adapter soft state 5748 * return: void 5749 */ 5750 static void 5751 megasas_destroy_irqs(struct megasas_instance *instance) { 5752 5753 int i; 5754 int count; 5755 struct megasas_irq_context *irq_ctx; 5756 5757 count = instance->msix_vectors > 0 ? instance->msix_vectors : 1; 5758 if (instance->adapter_type != MFI_SERIES) { 5759 for (i = 0; i < count; i++) { 5760 irq_ctx = &instance->irq_context[i]; 5761 irq_poll_disable(&irq_ctx->irqpoll); 5762 } 5763 } 5764 5765 if (instance->msix_vectors) 5766 for (i = 0; i < instance->msix_vectors; i++) { 5767 if (i < instance->low_latency_index_start) 5768 irq_update_affinity_hint( 5769 pci_irq_vector(instance->pdev, i), NULL); 5770 free_irq(pci_irq_vector(instance->pdev, i), 5771 &instance->irq_context[i]); 5772 } 5773 else 5774 free_irq(pci_irq_vector(instance->pdev, 0), 5775 &instance->irq_context[0]); 5776 } 5777 5778 /** 5779 * megasas_setup_jbod_map - setup jbod map for FP seq_number. 5780 * @instance: Adapter soft state 5781 * 5782 * Return 0 on success. 
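 *
 * When the feature is supported, JBOD_MAPS_COUNT copies of the
 * sequence-number map are allocated, each sized via struct_size_t()
 * as roughly (illustrative expansion only):
 *
 *   offsetof(struct MR_PD_CFG_SEQ_NUM_SYNC, seq) +
 *           MAX_PHYSICAL_DEVICES * sizeof of one seq[] entry
 *
 * Fast-path JBOD I/O (use_seqnum_jbod_fp) is enabled only when both
 * initial megasas_sync_pd_seq_num() calls succeed.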
5783 */ 5784 void 5785 megasas_setup_jbod_map(struct megasas_instance *instance) 5786 { 5787 int i; 5788 struct fusion_context *fusion = instance->ctrl_context; 5789 size_t pd_seq_map_sz; 5790 5791 pd_seq_map_sz = struct_size_t(struct MR_PD_CFG_SEQ_NUM_SYNC, seq, 5792 MAX_PHYSICAL_DEVICES); 5793 5794 instance->use_seqnum_jbod_fp = 5795 instance->support_seqnum_jbod_fp; 5796 if (reset_devices || !fusion || 5797 !instance->support_seqnum_jbod_fp) { 5798 dev_info(&instance->pdev->dev, 5799 "JBOD sequence map is disabled %s %d\n", 5800 __func__, __LINE__); 5801 instance->use_seqnum_jbod_fp = false; 5802 return; 5803 } 5804 5805 if (fusion->pd_seq_sync[0]) 5806 goto skip_alloc; 5807 5808 for (i = 0; i < JBOD_MAPS_COUNT; i++) { 5809 fusion->pd_seq_sync[i] = dma_alloc_coherent 5810 (&instance->pdev->dev, pd_seq_map_sz, 5811 &fusion->pd_seq_phys[i], GFP_KERNEL); 5812 if (!fusion->pd_seq_sync[i]) { 5813 dev_err(&instance->pdev->dev, 5814 "Failed to allocate memory from %s %d\n", 5815 __func__, __LINE__); 5816 if (i == 1) { 5817 dma_free_coherent(&instance->pdev->dev, 5818 pd_seq_map_sz, fusion->pd_seq_sync[0], 5819 fusion->pd_seq_phys[0]); 5820 fusion->pd_seq_sync[0] = NULL; 5821 } 5822 instance->use_seqnum_jbod_fp = false; 5823 return; 5824 } 5825 } 5826 5827 skip_alloc: 5828 if (!megasas_sync_pd_seq_num(instance, false) && 5829 !megasas_sync_pd_seq_num(instance, true)) 5830 instance->use_seqnum_jbod_fp = true; 5831 else 5832 instance->use_seqnum_jbod_fp = false; 5833 } 5834 5835 static void megasas_setup_reply_map(struct megasas_instance *instance) 5836 { 5837 const struct cpumask *mask; 5838 unsigned int queue, cpu, low_latency_index_start; 5839 5840 low_latency_index_start = instance->low_latency_index_start; 5841 5842 for (queue = low_latency_index_start; queue < instance->msix_vectors; queue++) { 5843 mask = pci_irq_get_affinity(instance->pdev, queue); 5844 if (!mask) 5845 goto fallback; 5846 5847 for_each_cpu(cpu, mask) 5848 instance->reply_map[cpu] = queue; 5849 } 5850 return; 5851 5852 fallback: 5853 queue = low_latency_index_start; 5854 for_each_possible_cpu(cpu) { 5855 instance->reply_map[cpu] = queue; 5856 if (queue == (instance->msix_vectors - 1)) 5857 queue = low_latency_index_start; 5858 else 5859 queue++; 5860 } 5861 } 5862 5863 /** 5864 * megasas_get_device_list - Get the PD and LD device list from FW. 5865 * @instance: Adapter soft state 5866 * @return: Success or failure 5867 * 5868 * Issue DCMDs to Firmware to get the PD and LD list. 5869 * Based on the FW support, driver sends the HOST_DEVICE_LIST or combination 5870 * of PD_LIST/LD_LIST_QUERY DCMDs to get the device list. 
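 *
 * Note that this returns the SCSI-style SUCCESS/FAILED codes rather
 * than 0/-errno. A minimal illustration of the dispatch performed
 * below:
 *
 *   if (instance->enable_fw_dev_list)
 *           megasas_host_device_list_query(instance, true);
 *   else
 *           megasas_get_pd_list(instance); followed by
 *           megasas_ld_list_query(instance, MR_LD_QUERY_TYPE_EXPOSED_TO_HOST);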
5871 */
5872 static
5873 int megasas_get_device_list(struct megasas_instance *instance)
5874 {
5875 if (instance->enable_fw_dev_list) {
5876 if (megasas_host_device_list_query(instance, true))
5877 return FAILED;
5878 } else {
5879 if (megasas_get_pd_list(instance) < 0) {
5880 dev_err(&instance->pdev->dev, "failed to get PD list\n");
5881 return FAILED;
5882 }
5883 
5884 if (megasas_ld_list_query(instance,
5885 MR_LD_QUERY_TYPE_EXPOSED_TO_HOST)) {
5886 dev_err(&instance->pdev->dev, "failed to get LD list\n");
5887 return FAILED;
5888 }
5889 }
5890 
5891 return SUCCESS;
5892 }
5893 
5894 /**
5895 * megasas_set_high_iops_queue_affinity_and_hint - Set affinity and hint
5896 * for high IOPS queues
5897 * @instance: Adapter soft state
5898 * return: void
5899 */
5900 static inline void
5901 megasas_set_high_iops_queue_affinity_and_hint(struct megasas_instance *instance)
5902 {
5903 int i;
5904 unsigned int irq;
5905 const struct cpumask *mask;
5906 
5907 if (instance->perf_mode == MR_BALANCED_PERF_MODE) {
5908 mask = cpumask_of_node(dev_to_node(&instance->pdev->dev));
5909 
5910 for (i = 0; i < instance->low_latency_index_start; i++) {
5911 irq = pci_irq_vector(instance->pdev, i);
5912 irq_set_affinity_and_hint(irq, mask);
5913 }
5914 }
5915 }
5916 
5917 static int
5918 __megasas_alloc_irq_vectors(struct megasas_instance *instance)
5919 {
5920 int i, irq_flags;
5921 struct irq_affinity desc = { .pre_vectors = instance->low_latency_index_start };
5922 struct irq_affinity *descp = &desc;
5923 
5924 irq_flags = PCI_IRQ_MSIX;
5925 
5926 if (instance->smp_affinity_enable)
5927 irq_flags |= PCI_IRQ_AFFINITY | PCI_IRQ_ALL_TYPES;
5928 else
5929 descp = NULL;
5930 
5931 /* Do not allocate msix vectors for poll_queues.
5932 * msix_vectors is always within the range of FW-supported reply queues.
5933 */
5934 i = pci_alloc_irq_vectors_affinity(instance->pdev,
5935 instance->low_latency_index_start,
5936 instance->msix_vectors - instance->iopoll_q_count, irq_flags, descp);
5937 
5938 return i;
5939 }
5940 
5941 /**
5942 * megasas_alloc_irq_vectors - Allocate IRQ vectors/enable MSI-x vectors
5943 * @instance: Adapter soft state
5944 * return: void
5945 */
5946 static void
5947 megasas_alloc_irq_vectors(struct megasas_instance *instance)
5948 {
5949 int i;
5950 unsigned int num_msix_req;
5951 
5952 instance->iopoll_q_count = 0;
5953 if ((instance->adapter_type != MFI_SERIES) &&
5954 poll_queues) {
5955 
5956 instance->perf_mode = MR_LATENCY_PERF_MODE;
5957 instance->low_latency_index_start = 1;
5958 
5959 /* reserve for default and non-managed pre-vector.
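 * For example (illustrative numbers only): with poll_queues=2 and
 * 8 MSI-X vectors reported by FW, 8 > (2 + 2) so iopoll_q_count
 * becomes 2 and the remaining vectors stay interrupt driven; with
 * only 4 vectors the check fails and no poll queues are reserved.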
*/ 5960 if (instance->msix_vectors > (poll_queues + 2)) 5961 instance->iopoll_q_count = poll_queues; 5962 else 5963 instance->iopoll_q_count = 0; 5964 5965 num_msix_req = num_online_cpus() + instance->low_latency_index_start; 5966 instance->msix_vectors = min(num_msix_req, 5967 instance->msix_vectors); 5968 5969 } 5970 5971 i = __megasas_alloc_irq_vectors(instance); 5972 5973 if (((instance->perf_mode == MR_BALANCED_PERF_MODE) 5974 || instance->iopoll_q_count) && 5975 (i != (instance->msix_vectors - instance->iopoll_q_count))) { 5976 if (instance->msix_vectors) 5977 pci_free_irq_vectors(instance->pdev); 5978 /* Disable Balanced IOPS mode and try realloc vectors */ 5979 instance->perf_mode = MR_LATENCY_PERF_MODE; 5980 instance->low_latency_index_start = 1; 5981 num_msix_req = num_online_cpus() + instance->low_latency_index_start; 5982 5983 instance->msix_vectors = min(num_msix_req, 5984 instance->msix_vectors); 5985 5986 instance->iopoll_q_count = 0; 5987 i = __megasas_alloc_irq_vectors(instance); 5988 5989 } 5990 5991 dev_info(&instance->pdev->dev, 5992 "requested/available msix %d/%d poll_queue %d\n", 5993 instance->msix_vectors - instance->iopoll_q_count, 5994 i, instance->iopoll_q_count); 5995 5996 if (i > 0) 5997 instance->msix_vectors = i; 5998 else 5999 instance->msix_vectors = 0; 6000 6001 if (instance->smp_affinity_enable) 6002 megasas_set_high_iops_queue_affinity_and_hint(instance); 6003 } 6004 6005 /** 6006 * megasas_init_fw - Initializes the FW 6007 * @instance: Adapter soft state 6008 * 6009 * This is the main function for initializing firmware 6010 */ 6011 6012 static int megasas_init_fw(struct megasas_instance *instance) 6013 { 6014 u32 max_sectors_1; 6015 u32 max_sectors_2, tmp_sectors, msix_enable; 6016 u32 scratch_pad_1, scratch_pad_2, scratch_pad_3, status_reg; 6017 resource_size_t base_addr; 6018 void *base_addr_phys; 6019 struct megasas_ctrl_info *ctrl_info = NULL; 6020 unsigned long bar_list; 6021 int i, j, loop; 6022 struct IOV_111 *iovPtr; 6023 struct fusion_context *fusion; 6024 bool intr_coalescing; 6025 unsigned int num_msix_req; 6026 u16 lnksta, speed; 6027 6028 fusion = instance->ctrl_context; 6029 6030 /* Find first memory bar */ 6031 bar_list = pci_select_bars(instance->pdev, IORESOURCE_MEM); 6032 instance->bar = find_first_bit(&bar_list, BITS_PER_LONG); 6033 if (pci_request_selected_regions(instance->pdev, 1<<instance->bar, 6034 "megasas: LSI")) { 6035 dev_printk(KERN_DEBUG, &instance->pdev->dev, "IO memory region busy!\n"); 6036 return -EBUSY; 6037 } 6038 6039 base_addr = pci_resource_start(instance->pdev, instance->bar); 6040 instance->reg_set = ioremap(base_addr, 8192); 6041 6042 if (!instance->reg_set) { 6043 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to map IO mem\n"); 6044 goto fail_ioremap; 6045 } 6046 6047 base_addr_phys = &base_addr; 6048 dev_printk(KERN_DEBUG, &instance->pdev->dev, 6049 "BAR:0x%lx BAR's base_addr(phys):%pa mapped virt_addr:0x%p\n", 6050 instance->bar, base_addr_phys, instance->reg_set); 6051 6052 if (instance->adapter_type != MFI_SERIES) 6053 instance->instancet = &megasas_instance_template_fusion; 6054 else { 6055 switch (instance->pdev->device) { 6056 case PCI_DEVICE_ID_LSI_SAS1078R: 6057 case PCI_DEVICE_ID_LSI_SAS1078DE: 6058 instance->instancet = &megasas_instance_template_ppc; 6059 break; 6060 case PCI_DEVICE_ID_LSI_SAS1078GEN2: 6061 case PCI_DEVICE_ID_LSI_SAS0079GEN2: 6062 instance->instancet = &megasas_instance_template_gen2; 6063 break; 6064 case PCI_DEVICE_ID_LSI_SAS0073SKINNY: 6065 case 
PCI_DEVICE_ID_LSI_SAS0071SKINNY: 6066 instance->instancet = &megasas_instance_template_skinny; 6067 break; 6068 case PCI_DEVICE_ID_LSI_SAS1064R: 6069 case PCI_DEVICE_ID_DELL_PERC5: 6070 default: 6071 instance->instancet = &megasas_instance_template_xscale; 6072 instance->pd_list_not_supported = 1; 6073 break; 6074 } 6075 } 6076 6077 if (megasas_transition_to_ready(instance, 0)) { 6078 dev_info(&instance->pdev->dev, 6079 "Failed to transition controller to ready from %s!\n", 6080 __func__); 6081 if (instance->adapter_type != MFI_SERIES) { 6082 status_reg = instance->instancet->read_fw_status_reg( 6083 instance); 6084 if (status_reg & MFI_RESET_ADAPTER) { 6085 if (megasas_adp_reset_wait_for_ready 6086 (instance, true, 0) == FAILED) 6087 goto fail_ready_state; 6088 } else { 6089 goto fail_ready_state; 6090 } 6091 } else { 6092 atomic_set(&instance->fw_reset_no_pci_access, 1); 6093 instance->instancet->adp_reset 6094 (instance, instance->reg_set); 6095 atomic_set(&instance->fw_reset_no_pci_access, 0); 6096 6097 /*waiting for about 30 second before retry*/ 6098 ssleep(30); 6099 6100 if (megasas_transition_to_ready(instance, 0)) 6101 goto fail_ready_state; 6102 } 6103 6104 dev_info(&instance->pdev->dev, 6105 "FW restarted successfully from %s!\n", 6106 __func__); 6107 } 6108 6109 megasas_init_ctrl_params(instance); 6110 6111 if (megasas_set_dma_mask(instance)) 6112 goto fail_ready_state; 6113 6114 if (megasas_alloc_ctrl_mem(instance)) 6115 goto fail_alloc_dma_buf; 6116 6117 if (megasas_alloc_ctrl_dma_buffers(instance)) 6118 goto fail_alloc_dma_buf; 6119 6120 fusion = instance->ctrl_context; 6121 6122 if (instance->adapter_type >= VENTURA_SERIES) { 6123 scratch_pad_2 = 6124 megasas_readl(instance, 6125 &instance->reg_set->outbound_scratch_pad_2); 6126 instance->max_raid_mapsize = ((scratch_pad_2 >> 6127 MR_MAX_RAID_MAP_SIZE_OFFSET_SHIFT) & 6128 MR_MAX_RAID_MAP_SIZE_MASK); 6129 } 6130 6131 instance->enable_sdev_max_qd = enable_sdev_max_qd; 6132 6133 switch (instance->adapter_type) { 6134 case VENTURA_SERIES: 6135 fusion->pcie_bw_limitation = true; 6136 break; 6137 case AERO_SERIES: 6138 fusion->r56_div_offload = true; 6139 break; 6140 default: 6141 break; 6142 } 6143 6144 /* Check if MSI-X is supported while in ready state */ 6145 msix_enable = (instance->instancet->read_fw_status_reg(instance) & 6146 0x4000000) >> 0x1a; 6147 if (msix_enable && !msix_disable) { 6148 6149 scratch_pad_1 = megasas_readl 6150 (instance, &instance->reg_set->outbound_scratch_pad_1); 6151 /* Check max MSI-X vectors */ 6152 if (fusion) { 6153 if (instance->adapter_type == THUNDERBOLT_SERIES) { 6154 /* Thunderbolt Series*/ 6155 instance->msix_vectors = (scratch_pad_1 6156 & MR_MAX_REPLY_QUEUES_OFFSET) + 1; 6157 } else { 6158 instance->msix_vectors = ((scratch_pad_1 6159 & MR_MAX_REPLY_QUEUES_EXT_OFFSET) 6160 >> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1; 6161 6162 /* 6163 * For Invader series, > 8 MSI-x vectors 6164 * supported by FW/HW implies combined 6165 * reply queue mode is enabled. 6166 * For Ventura series, > 16 MSI-x vectors 6167 * supported by FW/HW implies combined 6168 * reply queue mode is enabled. 6169 */ 6170 switch (instance->adapter_type) { 6171 case INVADER_SERIES: 6172 if (instance->msix_vectors > 8) 6173 instance->msix_combined = true; 6174 break; 6175 case AERO_SERIES: 6176 case VENTURA_SERIES: 6177 if (instance->msix_vectors > 16) 6178 instance->msix_combined = true; 6179 break; 6180 } 6181 6182 if (rdpq_enable) 6183 instance->is_rdpq = (scratch_pad_1 & MR_RDPQ_MODE_OFFSET) ? 
6184 1 : 0;
6185 
6186 if (instance->adapter_type >= INVADER_SERIES &&
6187 !instance->msix_combined) {
6188 instance->msix_load_balance = true;
6189 instance->smp_affinity_enable = false;
6190 }
6191 
6192 /* Save 1-15 reply post index address to local memory
6193 * Index 0 is already saved from reg offset
6194 * MPI2_REPLY_POST_HOST_INDEX_OFFSET
6195 */
6196 for (loop = 1; loop < MR_MAX_MSIX_REG_ARRAY; loop++) {
6197 instance->reply_post_host_index_addr[loop] =
6198 (u32 __iomem *)
6199 ((u8 __iomem *)instance->reg_set +
6200 MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET
6201 + (loop * 0x10));
6202 }
6203 }
6204 
6205 dev_info(&instance->pdev->dev,
6206 "firmware supports msix\t: (%d)",
6207 instance->msix_vectors);
6208 if (msix_vectors)
6209 instance->msix_vectors = min(msix_vectors,
6210 instance->msix_vectors);
6211 } else /* MFI adapters */
6212 instance->msix_vectors = 1;
6213 
6214 
6215 /*
6216 * For Aero (if some conditions are met), driver will configure a
6217 * few additional reply queues with interrupt coalescing enabled.
6218 * These queues with interrupt coalescing enabled are called
6219 * High IOPS queues and the rest of the reply queues (based on the
6220 * number of logical CPUs) are termed Low latency queues.
6221 *
6222 * Total Number of reply queues = High IOPS queues + low latency queues
6223 *
6224 * For the rest of the fusion adapters, 1 additional reply queue will be
6225 * reserved for management commands, and the rest of the reply queues
6226 * (based on the number of logical CPUs) will be used for IOs and
6227 * referenced as IO queues.
6228 * Total Number of reply queues = 1 + IO queues
6229 *
6230 * MFI adapters support a single MSI-X vector, so a single reply queue
6231 * will be used for IO and management commands.
6232 */
6233 
6234 intr_coalescing = (scratch_pad_1 & MR_INTR_COALESCING_SUPPORT_OFFSET) ?
6235 true : false;
6236 if (intr_coalescing &&
6237 (num_online_cpus() >= MR_HIGH_IOPS_QUEUE_COUNT) &&
6238 (instance->msix_vectors == MEGASAS_MAX_MSIX_QUEUES))
6239 instance->perf_mode = MR_BALANCED_PERF_MODE;
6240 else
6241 instance->perf_mode = MR_LATENCY_PERF_MODE;
6242 
6243 
6244 if (instance->adapter_type == AERO_SERIES) {
6245 pcie_capability_read_word(instance->pdev, PCI_EXP_LNKSTA, &lnksta);
6246 speed = lnksta & PCI_EXP_LNKSTA_CLS;
6247 
6248 /*
6249 * For Aero, if PCIe link speed is <16 GT/s, then driver should operate
6250 * in latency perf mode and enable R1 PCI bandwidth algorithm
6251 */
6252 if (speed < 0x4) {
6253 instance->perf_mode = MR_LATENCY_PERF_MODE;
6254 fusion->pcie_bw_limitation = true;
6255 }
6256 
6257 /*
6258 * Performance mode settings provided through the module parameter perf_mode
6259 * take effect only for:
6260 * 1. Aero family of adapters.
6261 * 2. When the user sets the module parameter perf_mode in the range 0-2.
6262 */
6263 if ((perf_mode >= MR_BALANCED_PERF_MODE) &&
6264 (perf_mode <= MR_LATENCY_PERF_MODE))
6265 instance->perf_mode = perf_mode;
6266 /*
6267 * If intr coalescing is not supported by controller FW, then IOPS
6268 * and Balanced modes are not feasible.
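 *
 * Taken together with the checks above and below, the effective mode
 * is picked roughly as follows (informal summary, not additional logic):
 *
 *   MR_BALANCED_PERF_MODE : coalescing supported, enough online CPUs
 *                           and the maximum MSI-X vector count present
 *   MR_LATENCY_PERF_MODE  : everything else, e.g. Aero links running
 *                           below 16 GT/s
 *
 * For Aero, a valid perf_mode module parameter (0-2) overrides the
 * defaults above, but latency mode is still forced when the FW does
 * not support interrupt coalescing.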
6269 */ 6270 if (!intr_coalescing) 6271 instance->perf_mode = MR_LATENCY_PERF_MODE; 6272 6273 } 6274 6275 if (instance->perf_mode == MR_BALANCED_PERF_MODE) 6276 instance->low_latency_index_start = 6277 MR_HIGH_IOPS_QUEUE_COUNT; 6278 else 6279 instance->low_latency_index_start = 1; 6280 6281 num_msix_req = num_online_cpus() + instance->low_latency_index_start; 6282 6283 instance->msix_vectors = min(num_msix_req, 6284 instance->msix_vectors); 6285 6286 megasas_alloc_irq_vectors(instance); 6287 if (!instance->msix_vectors) 6288 instance->msix_load_balance = false; 6289 } 6290 /* 6291 * MSI-X host index 0 is common for all adapter. 6292 * It is used for all MPT based Adapters. 6293 */ 6294 if (instance->msix_combined) { 6295 instance->reply_post_host_index_addr[0] = 6296 (u32 *)((u8 *)instance->reg_set + 6297 MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET); 6298 } else { 6299 instance->reply_post_host_index_addr[0] = 6300 (u32 *)((u8 *)instance->reg_set + 6301 MPI2_REPLY_POST_HOST_INDEX_OFFSET); 6302 } 6303 6304 if (!instance->msix_vectors) { 6305 i = pci_alloc_irq_vectors(instance->pdev, 1, 1, PCI_IRQ_INTX); 6306 if (i < 0) 6307 goto fail_init_adapter; 6308 } 6309 6310 megasas_setup_reply_map(instance); 6311 6312 dev_info(&instance->pdev->dev, 6313 "current msix/online cpus\t: (%d/%d)\n", 6314 instance->msix_vectors, (unsigned int)num_online_cpus()); 6315 dev_info(&instance->pdev->dev, 6316 "RDPQ mode\t: (%s)\n", instance->is_rdpq ? "enabled" : "disabled"); 6317 6318 tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet, 6319 (unsigned long)instance); 6320 6321 /* 6322 * Below are default value for legacy Firmware. 6323 * non-fusion based controllers 6324 */ 6325 instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES; 6326 instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES; 6327 /* Get operational params, sge flags, send init cmd to controller */ 6328 if (instance->instancet->init_adapter(instance)) 6329 goto fail_init_adapter; 6330 6331 if (instance->adapter_type >= VENTURA_SERIES) { 6332 scratch_pad_3 = 6333 megasas_readl(instance, 6334 &instance->reg_set->outbound_scratch_pad_3); 6335 if ((scratch_pad_3 & MR_NVME_PAGE_SIZE_MASK) >= 6336 MR_DEFAULT_NVME_PAGE_SHIFT) 6337 instance->nvme_page_size = 6338 (1 << (scratch_pad_3 & MR_NVME_PAGE_SIZE_MASK)); 6339 6340 dev_info(&instance->pdev->dev, 6341 "NVME page size\t: (%d)\n", instance->nvme_page_size); 6342 } 6343 6344 if (instance->msix_vectors ? 
6345 megasas_setup_irqs_msix(instance, 1) : 6346 megasas_setup_irqs_ioapic(instance)) 6347 goto fail_init_adapter; 6348 6349 if (instance->adapter_type != MFI_SERIES) 6350 megasas_setup_irq_poll(instance); 6351 6352 instance->instancet->enable_intr(instance); 6353 6354 dev_info(&instance->pdev->dev, "INIT adapter done\n"); 6355 6356 megasas_setup_jbod_map(instance); 6357 6358 if (megasas_get_device_list(instance) != SUCCESS) { 6359 dev_err(&instance->pdev->dev, 6360 "%s: megasas_get_device_list failed\n", 6361 __func__); 6362 goto fail_get_ld_pd_list; 6363 } 6364 6365 /* stream detection initialization */ 6366 if (instance->adapter_type >= VENTURA_SERIES) { 6367 fusion->stream_detect_by_ld = 6368 kcalloc(MAX_LOGICAL_DRIVES_EXT, 6369 sizeof(struct LD_STREAM_DETECT *), 6370 GFP_KERNEL); 6371 if (!fusion->stream_detect_by_ld) { 6372 dev_err(&instance->pdev->dev, 6373 "unable to allocate stream detection for pool of LDs\n"); 6374 goto fail_get_ld_pd_list; 6375 } 6376 for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i) { 6377 fusion->stream_detect_by_ld[i] = 6378 kzalloc(sizeof(struct LD_STREAM_DETECT), 6379 GFP_KERNEL); 6380 if (!fusion->stream_detect_by_ld[i]) { 6381 dev_err(&instance->pdev->dev, 6382 "unable to allocate stream detect by LD\n"); 6383 for (j = 0; j < i; ++j) 6384 kfree(fusion->stream_detect_by_ld[j]); 6385 kfree(fusion->stream_detect_by_ld); 6386 fusion->stream_detect_by_ld = NULL; 6387 goto fail_get_ld_pd_list; 6388 } 6389 fusion->stream_detect_by_ld[i]->mru_bit_map 6390 = MR_STREAM_BITMAP; 6391 } 6392 } 6393 6394 /* 6395 * Compute the max allowed sectors per IO: The controller info has two 6396 * limits on max sectors. Driver should use the minimum of these two. 6397 * 6398 * 1 << stripe_sz_ops.min = max sectors per strip 6399 * 6400 * Note that older firmwares ( < FW ver 30) didn't report information 6401 * to calculate max_sectors_1. So the number ended up as zero always. 6402 */ 6403 tmp_sectors = 0; 6404 ctrl_info = instance->ctrl_info_buf; 6405 6406 max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) * 6407 le16_to_cpu(ctrl_info->max_strips_per_io); 6408 max_sectors_2 = le32_to_cpu(ctrl_info->max_request_size); 6409 6410 tmp_sectors = min_t(u32, max_sectors_1, max_sectors_2); 6411 6412 instance->peerIsPresent = ctrl_info->cluster.peerIsPresent; 6413 instance->passive = ctrl_info->cluster.passive; 6414 memcpy(instance->clusterId, ctrl_info->clusterId, sizeof(instance->clusterId)); 6415 instance->UnevenSpanSupport = 6416 ctrl_info->adapterOperations2.supportUnevenSpans; 6417 if (instance->UnevenSpanSupport) { 6418 struct fusion_context *fusion = instance->ctrl_context; 6419 if (MR_ValidateMapInfo(instance, instance->map_id)) 6420 fusion->fast_path_io = 1; 6421 else 6422 fusion->fast_path_io = 0; 6423 6424 } 6425 if (ctrl_info->host_interface.SRIOV) { 6426 instance->requestorId = ctrl_info->iov.requestorId; 6427 if (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) { 6428 if (!ctrl_info->adapterOperations2.activePassive) 6429 instance->PlasmaFW111 = 1; 6430 6431 dev_info(&instance->pdev->dev, "SR-IOV: firmware type: %s\n", 6432 instance->PlasmaFW111 ? 
"1.11" : "new"); 6433 6434 if (instance->PlasmaFW111) { 6435 iovPtr = (struct IOV_111 *) 6436 ((unsigned char *)ctrl_info + IOV_111_OFFSET); 6437 instance->requestorId = iovPtr->requestorId; 6438 } 6439 } 6440 dev_info(&instance->pdev->dev, "SRIOV: VF requestorId %d\n", 6441 instance->requestorId); 6442 } 6443 6444 instance->crash_dump_fw_support = 6445 ctrl_info->adapterOperations3.supportCrashDump; 6446 instance->crash_dump_drv_support = 6447 (instance->crash_dump_fw_support && 6448 instance->crash_dump_buf); 6449 if (instance->crash_dump_drv_support) 6450 megasas_set_crash_dump_params(instance, 6451 MR_CRASH_BUF_TURN_OFF); 6452 6453 else { 6454 if (instance->crash_dump_buf) 6455 dma_free_coherent(&instance->pdev->dev, 6456 CRASH_DMA_BUF_SIZE, 6457 instance->crash_dump_buf, 6458 instance->crash_dump_h); 6459 instance->crash_dump_buf = NULL; 6460 } 6461 6462 if (instance->snapdump_wait_time) { 6463 megasas_get_snapdump_properties(instance); 6464 dev_info(&instance->pdev->dev, "Snap dump wait time\t: %d\n", 6465 instance->snapdump_wait_time); 6466 } 6467 6468 dev_info(&instance->pdev->dev, 6469 "pci id\t\t: (0x%04x)/(0x%04x)/(0x%04x)/(0x%04x)\n", 6470 le16_to_cpu(ctrl_info->pci.vendor_id), 6471 le16_to_cpu(ctrl_info->pci.device_id), 6472 le16_to_cpu(ctrl_info->pci.sub_vendor_id), 6473 le16_to_cpu(ctrl_info->pci.sub_device_id)); 6474 dev_info(&instance->pdev->dev, "unevenspan support : %s\n", 6475 instance->UnevenSpanSupport ? "yes" : "no"); 6476 dev_info(&instance->pdev->dev, "firmware crash dump : %s\n", 6477 instance->crash_dump_drv_support ? "yes" : "no"); 6478 dev_info(&instance->pdev->dev, "JBOD sequence map : %s\n", 6479 instance->use_seqnum_jbod_fp ? "enabled" : "disabled"); 6480 6481 instance->max_sectors_per_req = instance->max_num_sge * 6482 SGE_BUFFER_SIZE / 512; 6483 if (tmp_sectors && (instance->max_sectors_per_req > tmp_sectors)) 6484 instance->max_sectors_per_req = tmp_sectors; 6485 6486 /* Check for valid throttlequeuedepth module parameter */ 6487 if (throttlequeuedepth && 6488 throttlequeuedepth <= instance->max_scsi_cmds) 6489 instance->throttlequeuedepth = throttlequeuedepth; 6490 else 6491 instance->throttlequeuedepth = 6492 MEGASAS_THROTTLE_QUEUE_DEPTH; 6493 6494 if ((resetwaittime < 1) || 6495 (resetwaittime > MEGASAS_RESET_WAIT_TIME)) 6496 resetwaittime = MEGASAS_RESET_WAIT_TIME; 6497 6498 if ((scmd_timeout < 10) || (scmd_timeout > MEGASAS_DEFAULT_CMD_TIMEOUT)) 6499 scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT; 6500 6501 /* Launch SR-IOV heartbeat timer */ 6502 if (instance->requestorId) { 6503 if (!megasas_sriov_start_heartbeat(instance, 1)) { 6504 megasas_start_timer(instance); 6505 } else { 6506 instance->skip_heartbeat_timer_del = 1; 6507 goto fail_get_ld_pd_list; 6508 } 6509 } 6510 6511 /* 6512 * Create and start watchdog thread which will monitor 6513 * controller state every 1 sec and trigger OCR when 6514 * it enters fault state 6515 */ 6516 if (instance->adapter_type != MFI_SERIES) 6517 if (megasas_fusion_start_watchdog(instance) != SUCCESS) 6518 goto fail_start_watchdog; 6519 6520 return 0; 6521 6522 fail_start_watchdog: 6523 if (instance->requestorId && !instance->skip_heartbeat_timer_del) 6524 del_timer_sync(&instance->sriov_heartbeat_timer); 6525 fail_get_ld_pd_list: 6526 instance->instancet->disable_intr(instance); 6527 megasas_destroy_irqs(instance); 6528 fail_init_adapter: 6529 if (instance->msix_vectors) 6530 pci_free_irq_vectors(instance->pdev); 6531 instance->msix_vectors = 0; 6532 fail_alloc_dma_buf: 6533 megasas_free_ctrl_dma_buffers(instance); 
6534 megasas_free_ctrl_mem(instance); 6535 fail_ready_state: 6536 iounmap(instance->reg_set); 6537 6538 fail_ioremap: 6539 pci_release_selected_regions(instance->pdev, 1<<instance->bar); 6540 6541 dev_err(&instance->pdev->dev, "Failed from %s %d\n", 6542 __func__, __LINE__); 6543 return -EINVAL; 6544 } 6545 6546 /** 6547 * megasas_release_mfi - Reverses the FW initialization 6548 * @instance: Adapter soft state 6549 */ 6550 static void megasas_release_mfi(struct megasas_instance *instance) 6551 { 6552 u32 reply_q_sz = sizeof(u32) *(instance->max_mfi_cmds + 1); 6553 6554 if (instance->reply_queue) 6555 dma_free_coherent(&instance->pdev->dev, reply_q_sz, 6556 instance->reply_queue, instance->reply_queue_h); 6557 6558 megasas_free_cmds(instance); 6559 6560 iounmap(instance->reg_set); 6561 6562 pci_release_selected_regions(instance->pdev, 1<<instance->bar); 6563 } 6564 6565 /** 6566 * megasas_get_seq_num - Gets latest event sequence numbers 6567 * @instance: Adapter soft state 6568 * @eli: FW event log sequence numbers information 6569 * 6570 * FW maintains a log of all events in a non-volatile area. Upper layers would 6571 * usually find out the latest sequence number of the events, the seq number at 6572 * the boot etc. They would "read" all the events below the latest seq number 6573 * by issuing a direct fw cmd (DCMD). For the future events (beyond latest seq 6574 * number), they would subsribe to AEN (asynchronous event notification) and 6575 * wait for the events to happen. 6576 */ 6577 static int 6578 megasas_get_seq_num(struct megasas_instance *instance, 6579 struct megasas_evt_log_info *eli) 6580 { 6581 struct megasas_cmd *cmd; 6582 struct megasas_dcmd_frame *dcmd; 6583 struct megasas_evt_log_info *el_info; 6584 dma_addr_t el_info_h = 0; 6585 int ret; 6586 6587 cmd = megasas_get_cmd(instance); 6588 6589 if (!cmd) { 6590 return -ENOMEM; 6591 } 6592 6593 dcmd = &cmd->frame->dcmd; 6594 el_info = dma_alloc_coherent(&instance->pdev->dev, 6595 sizeof(struct megasas_evt_log_info), 6596 &el_info_h, GFP_KERNEL); 6597 if (!el_info) { 6598 megasas_return_cmd(instance, cmd); 6599 return -ENOMEM; 6600 } 6601 6602 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 6603 6604 dcmd->cmd = MFI_CMD_DCMD; 6605 dcmd->cmd_status = 0x0; 6606 dcmd->sge_count = 1; 6607 dcmd->flags = MFI_FRAME_DIR_READ; 6608 dcmd->timeout = 0; 6609 dcmd->pad_0 = 0; 6610 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_log_info)); 6611 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_GET_INFO); 6612 6613 megasas_set_dma_settings(instance, dcmd, el_info_h, 6614 sizeof(struct megasas_evt_log_info)); 6615 6616 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS); 6617 if (ret != DCMD_SUCCESS) { 6618 dev_err(&instance->pdev->dev, "Failed from %s %d\n", 6619 __func__, __LINE__); 6620 goto dcmd_failed; 6621 } 6622 6623 /* 6624 * Copy the data back into callers buffer 6625 */ 6626 eli->newest_seq_num = el_info->newest_seq_num; 6627 eli->oldest_seq_num = el_info->oldest_seq_num; 6628 eli->clear_seq_num = el_info->clear_seq_num; 6629 eli->shutdown_seq_num = el_info->shutdown_seq_num; 6630 eli->boot_seq_num = el_info->boot_seq_num; 6631 6632 dcmd_failed: 6633 dma_free_coherent(&instance->pdev->dev, 6634 sizeof(struct megasas_evt_log_info), 6635 el_info, el_info_h); 6636 6637 megasas_return_cmd(instance, cmd); 6638 6639 return ret; 6640 } 6641 6642 /** 6643 * megasas_register_aen - Registers for asynchronous event notification 6644 * @instance: Adapter soft state 6645 * @seq_num: The starting sequence number 6646 * 
@class_locale_word: Class of the event
6647 *
6648 * This function subscribes for AEN for events beyond the @seq_num. It requests
6649 * to be notified if and only if the event is of type @class_locale
6650 */
6651 static int
6652 megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
6653 u32 class_locale_word)
6654 {
6655 int ret_val;
6656 struct megasas_cmd *cmd;
6657 struct megasas_dcmd_frame *dcmd;
6658 union megasas_evt_class_locale curr_aen;
6659 union megasas_evt_class_locale prev_aen;
6660 
6661 /*
6662 * If there is an AEN pending already (aen_cmd), check if the
6663 * class_locale of that pending AEN is inclusive of the new
6664 * AEN request we currently have. If it is, then we don't have
6665 * to do anything. In other words, whichever events the current
6666 * AEN request is subscribing to, have already been subscribed
6667 * to.
6668 *
6669 * If the old_cmd is _not_ inclusive, then we have to abort
6670 * that command, form a class_locale that is a superset of both
6671 * the old and the current one, and re-issue it to the FW
6672 */
6673 
6674 curr_aen.word = class_locale_word;
6675 
6676 if (instance->aen_cmd) {
6677 
6678 prev_aen.word =
6679 le32_to_cpu(instance->aen_cmd->frame->dcmd.mbox.w[1]);
6680 
6681 if ((curr_aen.members.class < MFI_EVT_CLASS_DEBUG) ||
6682 (curr_aen.members.class > MFI_EVT_CLASS_DEAD)) {
6683 dev_info(&instance->pdev->dev,
6684 "%s %d out of range class %d sent by application\n",
6685 __func__, __LINE__, curr_aen.members.class);
6686 return 0;
6687 }
6688 
6689 /*
6690 * A class whose enum value is smaller is inclusive of all
6691 * higher values. If a PROGRESS (= -1) was previously
6692 * registered, then new registration requests for higher
6693 * classes need not be sent to FW. They are automatically
6694 * included.
6695 *
6696 * Locale numbers don't have such hierarchy. They are bitmap
6697 * values.
6698 */
6699 if ((prev_aen.members.class <= curr_aen.members.class) &&
6700 !((prev_aen.members.locale & curr_aen.members.locale) ^
6701 curr_aen.members.locale)) {
6702 /*
6703 * Previously issued event registration includes
6704 * current request. Nothing to do.
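 *
 * Worked example (values for illustration only): if the pending AEN
 * was registered with class PROGRESS (-1) and locale bits 0x1, a new
 * request for class CRITICAL with locale 0x1 is already covered and
 * nothing is re-issued. If the new request adds locale bit 0x2, the
 * pending command is aborted and re-registered below with the merged
 * locale (0x3) and the lower (more inclusive) of the two classes.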
6705 */
6706 return 0;
6707 } else {
6708 curr_aen.members.locale |= prev_aen.members.locale;
6709 
6710 if (prev_aen.members.class < curr_aen.members.class)
6711 curr_aen.members.class = prev_aen.members.class;
6712 
6713 instance->aen_cmd->abort_aen = 1;
6714 ret_val = megasas_issue_blocked_abort_cmd(instance,
6715 instance->
6716 aen_cmd, 30);
6717 
6718 if (ret_val) {
6719 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to abort "
6720 "previous AEN command\n");
6721 return ret_val;
6722 }
6723 }
6724 }
6725 
6726 cmd = megasas_get_cmd(instance);
6727 
6728 if (!cmd)
6729 return -ENOMEM;
6730 
6731 dcmd = &cmd->frame->dcmd;
6732 
6733 memset(instance->evt_detail, 0, sizeof(struct megasas_evt_detail));
6734 
6735 /*
6736 * Prepare DCMD for aen registration
6737 */
6738 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
6739 
6740 dcmd->cmd = MFI_CMD_DCMD;
6741 dcmd->cmd_status = 0x0;
6742 dcmd->sge_count = 1;
6743 dcmd->flags = MFI_FRAME_DIR_READ;
6744 dcmd->timeout = 0;
6745 dcmd->pad_0 = 0;
6746 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_detail));
6747 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_WAIT);
6748 dcmd->mbox.w[0] = cpu_to_le32(seq_num);
6749 instance->last_seq_num = seq_num;
6750 dcmd->mbox.w[1] = cpu_to_le32(curr_aen.word);
6751 
6752 megasas_set_dma_settings(instance, dcmd, instance->evt_detail_h,
6753 sizeof(struct megasas_evt_detail));
6754 
6755 if (instance->aen_cmd != NULL) {
6756 megasas_return_cmd(instance, cmd);
6757 return 0;
6758 }
6759 
6760 /*
6761 * Store reference to the cmd used to register for AEN. When an
6762 * application wants us to register for AEN, we have to abort this
6763 * cmd and re-register with a new EVENT LOCALE supplied by that app
6764 */
6765 instance->aen_cmd = cmd;
6766 
6767 /*
6768 * Issue the aen registration frame
6769 */
6770 instance->instancet->issue_dcmd(instance, cmd);
6771 
6772 return 0;
6773 }
6774 
6775 /* megasas_get_target_prop - Send DCMD with below details to firmware.
6776 *
6777 * This DCMD will fetch a few properties of the LD/system PD defined
6778 * in MR_TARGET_DEV_PROPERTIES, e.g. Queue Depth, MDTS value.
6779 *
6780 * The DCMD is sent by the driver whenever a new target is added to the OS.
6781 *
6782 * dcmd.opcode - MR_DCMD_DEV_GET_TARGET_PROP
6783 * dcmd.mbox.b[0] - DCMD is to be fired for LD or system PD.
6784 * 0 = system PD, 1 = LD.
6785 * dcmd.mbox.s[1] - TargetID for LD/system PD.
6786 * dcmd.sge IN - Pointer to return MR_TARGET_DEV_PROPERTIES.
6787 *
6788 * @instance: Adapter soft state
6789 * @sdev: OS provided scsi device
6790 *
6791 * Returns 0 on success, non-zero on failure.
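 *
 * Illustrative mbox encoding (example values only):
 *
 *   logical drive -> dcmd.mbox.b[0] = 1, dcmd.mbox.s[1] = target id
 *   system PD     -> dcmd.mbox.b[0] = 0, dcmd.mbox.s[1] = target id
 *
 * where the target id is derived below from the scsi device's
 * channel/id pair and b[0] comes from MEGASAS_IS_LOGICAL(sdev).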
6792 */ 6793 int 6794 megasas_get_target_prop(struct megasas_instance *instance, 6795 struct scsi_device *sdev) 6796 { 6797 int ret; 6798 struct megasas_cmd *cmd; 6799 struct megasas_dcmd_frame *dcmd; 6800 u16 targetId = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) + 6801 sdev->id; 6802 6803 cmd = megasas_get_cmd(instance); 6804 6805 if (!cmd) { 6806 dev_err(&instance->pdev->dev, 6807 "Failed to get cmd %s\n", __func__); 6808 return -ENOMEM; 6809 } 6810 6811 dcmd = &cmd->frame->dcmd; 6812 6813 memset(instance->tgt_prop, 0, sizeof(*instance->tgt_prop)); 6814 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 6815 dcmd->mbox.b[0] = MEGASAS_IS_LOGICAL(sdev); 6816 6817 dcmd->mbox.s[1] = cpu_to_le16(targetId); 6818 dcmd->cmd = MFI_CMD_DCMD; 6819 dcmd->cmd_status = 0xFF; 6820 dcmd->sge_count = 1; 6821 dcmd->flags = MFI_FRAME_DIR_READ; 6822 dcmd->timeout = 0; 6823 dcmd->pad_0 = 0; 6824 dcmd->data_xfer_len = 6825 cpu_to_le32(sizeof(struct MR_TARGET_PROPERTIES)); 6826 dcmd->opcode = cpu_to_le32(MR_DCMD_DRV_GET_TARGET_PROP); 6827 6828 megasas_set_dma_settings(instance, dcmd, instance->tgt_prop_h, 6829 sizeof(struct MR_TARGET_PROPERTIES)); 6830 6831 if ((instance->adapter_type != MFI_SERIES) && 6832 !instance->mask_interrupts) 6833 ret = megasas_issue_blocked_cmd(instance, 6834 cmd, MFI_IO_TIMEOUT_SECS); 6835 else 6836 ret = megasas_issue_polled(instance, cmd); 6837 6838 switch (ret) { 6839 case DCMD_TIMEOUT: 6840 switch (dcmd_timeout_ocr_possible(instance)) { 6841 case INITIATE_OCR: 6842 cmd->flags |= DRV_DCMD_SKIP_REFIRE; 6843 mutex_unlock(&instance->reset_mutex); 6844 megasas_reset_fusion(instance->host, 6845 MFI_IO_TIMEOUT_OCR); 6846 mutex_lock(&instance->reset_mutex); 6847 break; 6848 case KILL_ADAPTER: 6849 megaraid_sas_kill_hba(instance); 6850 break; 6851 case IGNORE_TIMEOUT: 6852 dev_info(&instance->pdev->dev, 6853 "Ignore DCMD timeout: %s %d\n", 6854 __func__, __LINE__); 6855 break; 6856 } 6857 break; 6858 6859 default: 6860 megasas_return_cmd(instance, cmd); 6861 } 6862 if (ret != DCMD_SUCCESS) 6863 dev_err(&instance->pdev->dev, 6864 "return from %s %d return value %d\n", 6865 __func__, __LINE__, ret); 6866 6867 return ret; 6868 } 6869 6870 /** 6871 * megasas_start_aen - Subscribes to AEN during driver load time 6872 * @instance: Adapter soft state 6873 */ 6874 static int megasas_start_aen(struct megasas_instance *instance) 6875 { 6876 struct megasas_evt_log_info eli; 6877 union megasas_evt_class_locale class_locale; 6878 6879 /* 6880 * Get the latest sequence number from FW 6881 */ 6882 memset(&eli, 0, sizeof(eli)); 6883 6884 if (megasas_get_seq_num(instance, &eli)) 6885 return -1; 6886 6887 /* 6888 * Register AEN with FW for latest sequence number plus 1 6889 */ 6890 class_locale.members.reserved = 0; 6891 class_locale.members.locale = MR_EVT_LOCALE_ALL; 6892 class_locale.members.class = MR_EVT_CLASS_DEBUG; 6893 6894 return megasas_register_aen(instance, 6895 le32_to_cpu(eli.newest_seq_num) + 1, 6896 class_locale.word); 6897 } 6898 6899 /** 6900 * megasas_io_attach - Attaches this driver to SCSI mid-layer 6901 * @instance: Adapter soft state 6902 */ 6903 static int megasas_io_attach(struct megasas_instance *instance) 6904 { 6905 struct Scsi_Host *host = instance->host; 6906 6907 /* 6908 * Export parameters required by SCSI mid-layer 6909 */ 6910 host->unique_id = instance->unique_id; 6911 host->can_queue = instance->max_scsi_cmds; 6912 host->this_id = instance->init_id; 6913 host->sg_tablesize = instance->max_num_sge; 6914 6915 if (instance->fw_support_ieee) 6916 instance->max_sectors_per_req 
= MEGASAS_MAX_SECTORS_IEEE; 6917 6918 /* 6919 * Check if the module parameter value for max_sectors can be used 6920 */ 6921 if (max_sectors && max_sectors < instance->max_sectors_per_req) 6922 instance->max_sectors_per_req = max_sectors; 6923 else { 6924 if (max_sectors) { 6925 if (((instance->pdev->device == 6926 PCI_DEVICE_ID_LSI_SAS1078GEN2) || 6927 (instance->pdev->device == 6928 PCI_DEVICE_ID_LSI_SAS0079GEN2)) && 6929 (max_sectors <= MEGASAS_MAX_SECTORS)) { 6930 instance->max_sectors_per_req = max_sectors; 6931 } else { 6932 dev_info(&instance->pdev->dev, "max_sectors should be > 0" 6933 "and <= %d (or < 1MB for GEN2 controller)\n", 6934 instance->max_sectors_per_req); 6935 } 6936 } 6937 } 6938 6939 host->max_sectors = instance->max_sectors_per_req; 6940 host->cmd_per_lun = MEGASAS_DEFAULT_CMD_PER_LUN; 6941 host->max_channel = MEGASAS_MAX_CHANNELS - 1; 6942 host->max_id = MEGASAS_MAX_DEV_PER_CHANNEL; 6943 host->max_lun = MEGASAS_MAX_LUN; 6944 host->max_cmd_len = 16; 6945 6946 /* Use shared host tagset only for fusion adaptors 6947 * if there are managed interrupts (smp affinity enabled case). 6948 * Single msix_vectors in kdump, so shared host tag is also disabled. 6949 */ 6950 6951 host->host_tagset = 0; 6952 host->nr_hw_queues = 1; 6953 6954 if ((instance->adapter_type != MFI_SERIES) && 6955 (instance->msix_vectors > instance->low_latency_index_start) && 6956 host_tagset_enable && 6957 instance->smp_affinity_enable) { 6958 host->host_tagset = 1; 6959 host->nr_hw_queues = instance->msix_vectors - 6960 instance->low_latency_index_start + instance->iopoll_q_count; 6961 if (instance->iopoll_q_count) 6962 host->nr_maps = 3; 6963 } else { 6964 instance->iopoll_q_count = 0; 6965 } 6966 6967 dev_info(&instance->pdev->dev, 6968 "Max firmware commands: %d shared with default " 6969 "hw_queues = %d poll_queues %d\n", instance->max_fw_cmds, 6970 host->nr_hw_queues - instance->iopoll_q_count, 6971 instance->iopoll_q_count); 6972 /* 6973 * Notify the mid-layer about the new controller 6974 */ 6975 if (scsi_add_host(host, &instance->pdev->dev)) { 6976 dev_err(&instance->pdev->dev, 6977 "Failed to add host from %s %d\n", 6978 __func__, __LINE__); 6979 return -ENODEV; 6980 } 6981 6982 return 0; 6983 } 6984 6985 /** 6986 * megasas_set_dma_mask - Set DMA mask for supported controllers 6987 * 6988 * @instance: Adapter soft state 6989 * Description: 6990 * 6991 * For Ventura, driver/FW will operate in 63bit DMA addresses. 6992 * 6993 * For invader- 6994 * By default, driver/FW will operate in 32bit DMA addresses 6995 * for consistent DMA mapping but if 32 bit consistent 6996 * DMA mask fails, driver will try with 63 bit consistent 6997 * mask provided FW is true 63bit DMA capable 6998 * 6999 * For older controllers(Thunderbolt and MFI based adapters)- 7000 * driver/FW will operate in 32 bit consistent DMA addresses. 7001 */ 7002 static int 7003 megasas_set_dma_mask(struct megasas_instance *instance) 7004 { 7005 u64 consistent_mask; 7006 struct pci_dev *pdev; 7007 u32 scratch_pad_1; 7008 7009 pdev = instance->pdev; 7010 consistent_mask = (instance->adapter_type >= VENTURA_SERIES) ? 
7011 DMA_BIT_MASK(63) : DMA_BIT_MASK(32); 7012 7013 if (IS_DMA64) { 7014 if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(63)) && 7015 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) 7016 goto fail_set_dma_mask; 7017 7018 if ((*pdev->dev.dma_mask == DMA_BIT_MASK(63)) && 7019 (dma_set_coherent_mask(&pdev->dev, consistent_mask) && 7020 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))) { 7021 /* 7022 * If 32 bit DMA mask fails, then try for 64 bit mask 7023 * for FW capable of handling 64 bit DMA. 7024 */ 7025 scratch_pad_1 = megasas_readl 7026 (instance, &instance->reg_set->outbound_scratch_pad_1); 7027 7028 if (!(scratch_pad_1 & MR_CAN_HANDLE_64_BIT_DMA_OFFSET)) 7029 goto fail_set_dma_mask; 7030 else if (dma_set_mask_and_coherent(&pdev->dev, 7031 DMA_BIT_MASK(63))) 7032 goto fail_set_dma_mask; 7033 } 7034 } else if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) 7035 goto fail_set_dma_mask; 7036 7037 if (pdev->dev.coherent_dma_mask == DMA_BIT_MASK(32)) 7038 instance->consistent_mask_64bit = false; 7039 else 7040 instance->consistent_mask_64bit = true; 7041 7042 dev_info(&pdev->dev, "%s bit DMA mask and %s bit consistent mask\n", 7043 ((*pdev->dev.dma_mask == DMA_BIT_MASK(63)) ? "63" : "32"), 7044 (instance->consistent_mask_64bit ? "63" : "32")); 7045 7046 return 0; 7047 7048 fail_set_dma_mask: 7049 dev_err(&pdev->dev, "Failed to set DMA mask\n"); 7050 return -1; 7051 7052 } 7053 7054 /* 7055 * megasas_set_adapter_type - Set adapter type. 7056 * Supported controllers can be divided in 7057 * different categories- 7058 * enum MR_ADAPTER_TYPE { 7059 * MFI_SERIES = 1, 7060 * THUNDERBOLT_SERIES = 2, 7061 * INVADER_SERIES = 3, 7062 * VENTURA_SERIES = 4, 7063 * AERO_SERIES = 5, 7064 * }; 7065 * @instance: Adapter soft state 7066 * return: void 7067 */ 7068 static inline void megasas_set_adapter_type(struct megasas_instance *instance) 7069 { 7070 if ((instance->pdev->vendor == PCI_VENDOR_ID_DELL) && 7071 (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5)) { 7072 instance->adapter_type = MFI_SERIES; 7073 } else { 7074 switch (instance->pdev->device) { 7075 case PCI_DEVICE_ID_LSI_AERO_10E1: 7076 case PCI_DEVICE_ID_LSI_AERO_10E2: 7077 case PCI_DEVICE_ID_LSI_AERO_10E5: 7078 case PCI_DEVICE_ID_LSI_AERO_10E6: 7079 instance->adapter_type = AERO_SERIES; 7080 break; 7081 case PCI_DEVICE_ID_LSI_VENTURA: 7082 case PCI_DEVICE_ID_LSI_CRUSADER: 7083 case PCI_DEVICE_ID_LSI_HARPOON: 7084 case PCI_DEVICE_ID_LSI_TOMCAT: 7085 case PCI_DEVICE_ID_LSI_VENTURA_4PORT: 7086 case PCI_DEVICE_ID_LSI_CRUSADER_4PORT: 7087 instance->adapter_type = VENTURA_SERIES; 7088 break; 7089 case PCI_DEVICE_ID_LSI_FUSION: 7090 case PCI_DEVICE_ID_LSI_PLASMA: 7091 instance->adapter_type = THUNDERBOLT_SERIES; 7092 break; 7093 case PCI_DEVICE_ID_LSI_INVADER: 7094 case PCI_DEVICE_ID_LSI_INTRUDER: 7095 case PCI_DEVICE_ID_LSI_INTRUDER_24: 7096 case PCI_DEVICE_ID_LSI_CUTLASS_52: 7097 case PCI_DEVICE_ID_LSI_CUTLASS_53: 7098 case PCI_DEVICE_ID_LSI_FURY: 7099 instance->adapter_type = INVADER_SERIES; 7100 break; 7101 default: /* For all other supported controllers */ 7102 instance->adapter_type = MFI_SERIES; 7103 break; 7104 } 7105 } 7106 } 7107 7108 static inline int megasas_alloc_mfi_ctrl_mem(struct megasas_instance *instance) 7109 { 7110 instance->producer = dma_alloc_coherent(&instance->pdev->dev, 7111 sizeof(u32), &instance->producer_h, GFP_KERNEL); 7112 instance->consumer = dma_alloc_coherent(&instance->pdev->dev, 7113 sizeof(u32), &instance->consumer_h, GFP_KERNEL); 7114 7115 if (!instance->producer || !instance->consumer) 
{ 7116 dev_err(&instance->pdev->dev, 7117 "Failed to allocate memory for producer, consumer\n"); 7118 return -1; 7119 } 7120 7121 *instance->producer = 0; 7122 *instance->consumer = 0; 7123 return 0; 7124 } 7125 7126 /** 7127 * megasas_alloc_ctrl_mem - Allocate per controller memory for core data 7128 * structures which are not common across MFI 7129 * adapters and fusion adapters. 7130 * For MFI based adapters, allocate producer and 7131 * consumer buffers. For fusion adapters, allocate 7132 * memory for fusion context. 7133 * @instance: Adapter soft state 7134 * return: 0 for SUCCESS 7135 */ 7136 static int megasas_alloc_ctrl_mem(struct megasas_instance *instance) 7137 { 7138 instance->reply_map = kcalloc(nr_cpu_ids, sizeof(unsigned int), 7139 GFP_KERNEL); 7140 if (!instance->reply_map) 7141 return -ENOMEM; 7142 7143 switch (instance->adapter_type) { 7144 case MFI_SERIES: 7145 if (megasas_alloc_mfi_ctrl_mem(instance)) 7146 return -ENOMEM; 7147 break; 7148 case AERO_SERIES: 7149 case VENTURA_SERIES: 7150 case THUNDERBOLT_SERIES: 7151 case INVADER_SERIES: 7152 if (megasas_alloc_fusion_context(instance)) 7153 return -ENOMEM; 7154 break; 7155 } 7156 7157 return 0; 7158 } 7159 7160 /* 7161 * megasas_free_ctrl_mem - Free fusion context for fusion adapters and 7162 * producer, consumer buffers for MFI adapters 7163 * 7164 * @instance - Adapter soft instance 7165 * 7166 */ 7167 static inline void megasas_free_ctrl_mem(struct megasas_instance *instance) 7168 { 7169 kfree(instance->reply_map); 7170 if (instance->adapter_type == MFI_SERIES) { 7171 if (instance->producer) 7172 dma_free_coherent(&instance->pdev->dev, sizeof(u32), 7173 instance->producer, 7174 instance->producer_h); 7175 if (instance->consumer) 7176 dma_free_coherent(&instance->pdev->dev, sizeof(u32), 7177 instance->consumer, 7178 instance->consumer_h); 7179 } else { 7180 megasas_free_fusion_context(instance); 7181 } 7182 } 7183 7184 /** 7185 * megasas_alloc_ctrl_dma_buffers - Allocate consistent DMA buffers during 7186 * driver load time 7187 * 7188 * @instance: Adapter soft instance 7189 * 7190 * @return: O for SUCCESS 7191 */ 7192 static inline 7193 int megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance) 7194 { 7195 struct pci_dev *pdev = instance->pdev; 7196 struct fusion_context *fusion = instance->ctrl_context; 7197 7198 instance->evt_detail = dma_alloc_coherent(&pdev->dev, 7199 sizeof(struct megasas_evt_detail), 7200 &instance->evt_detail_h, GFP_KERNEL); 7201 7202 if (!instance->evt_detail) { 7203 dev_err(&instance->pdev->dev, 7204 "Failed to allocate event detail buffer\n"); 7205 return -ENOMEM; 7206 } 7207 7208 if (fusion) { 7209 fusion->ioc_init_request = 7210 dma_alloc_coherent(&pdev->dev, 7211 sizeof(struct MPI2_IOC_INIT_REQUEST), 7212 &fusion->ioc_init_request_phys, 7213 GFP_KERNEL); 7214 7215 if (!fusion->ioc_init_request) { 7216 dev_err(&pdev->dev, 7217 "Failed to allocate ioc init request\n"); 7218 return -ENOMEM; 7219 } 7220 7221 instance->snapdump_prop = dma_alloc_coherent(&pdev->dev, 7222 sizeof(struct MR_SNAPDUMP_PROPERTIES), 7223 &instance->snapdump_prop_h, GFP_KERNEL); 7224 7225 if (!instance->snapdump_prop) 7226 dev_err(&pdev->dev, 7227 "Failed to allocate snapdump properties buffer\n"); 7228 7229 instance->host_device_list_buf = dma_alloc_coherent(&pdev->dev, 7230 HOST_DEVICE_LIST_SZ, 7231 &instance->host_device_list_buf_h, 7232 GFP_KERNEL); 7233 7234 if (!instance->host_device_list_buf) { 7235 dev_err(&pdev->dev, 7236 "Failed to allocate targetid list buffer\n"); 7237 return -ENOMEM; 7238 } 7239 
7240 } 7241 7242 instance->pd_list_buf = 7243 dma_alloc_coherent(&pdev->dev, 7244 MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST), 7245 &instance->pd_list_buf_h, GFP_KERNEL); 7246 7247 if (!instance->pd_list_buf) { 7248 dev_err(&pdev->dev, "Failed to allocate PD list buffer\n"); 7249 return -ENOMEM; 7250 } 7251 7252 instance->ctrl_info_buf = 7253 dma_alloc_coherent(&pdev->dev, 7254 sizeof(struct megasas_ctrl_info), 7255 &instance->ctrl_info_buf_h, GFP_KERNEL); 7256 7257 if (!instance->ctrl_info_buf) { 7258 dev_err(&pdev->dev, 7259 "Failed to allocate controller info buffer\n"); 7260 return -ENOMEM; 7261 } 7262 7263 instance->ld_list_buf = 7264 dma_alloc_coherent(&pdev->dev, 7265 sizeof(struct MR_LD_LIST), 7266 &instance->ld_list_buf_h, GFP_KERNEL); 7267 7268 if (!instance->ld_list_buf) { 7269 dev_err(&pdev->dev, "Failed to allocate LD list buffer\n"); 7270 return -ENOMEM; 7271 } 7272 7273 instance->ld_targetid_list_buf = 7274 dma_alloc_coherent(&pdev->dev, 7275 sizeof(struct MR_LD_TARGETID_LIST), 7276 &instance->ld_targetid_list_buf_h, GFP_KERNEL); 7277 7278 if (!instance->ld_targetid_list_buf) { 7279 dev_err(&pdev->dev, 7280 "Failed to allocate LD targetid list buffer\n"); 7281 return -ENOMEM; 7282 } 7283 7284 if (!reset_devices) { 7285 instance->system_info_buf = 7286 dma_alloc_coherent(&pdev->dev, 7287 sizeof(struct MR_DRV_SYSTEM_INFO), 7288 &instance->system_info_h, GFP_KERNEL); 7289 instance->pd_info = 7290 dma_alloc_coherent(&pdev->dev, 7291 sizeof(struct MR_PD_INFO), 7292 &instance->pd_info_h, GFP_KERNEL); 7293 instance->tgt_prop = 7294 dma_alloc_coherent(&pdev->dev, 7295 sizeof(struct MR_TARGET_PROPERTIES), 7296 &instance->tgt_prop_h, GFP_KERNEL); 7297 instance->crash_dump_buf = 7298 dma_alloc_coherent(&pdev->dev, CRASH_DMA_BUF_SIZE, 7299 &instance->crash_dump_h, GFP_KERNEL); 7300 7301 if (!instance->system_info_buf) 7302 dev_err(&instance->pdev->dev, 7303 "Failed to allocate system info buffer\n"); 7304 7305 if (!instance->pd_info) 7306 dev_err(&instance->pdev->dev, 7307 "Failed to allocate pd_info buffer\n"); 7308 7309 if (!instance->tgt_prop) 7310 dev_err(&instance->pdev->dev, 7311 "Failed to allocate tgt_prop buffer\n"); 7312 7313 if (!instance->crash_dump_buf) 7314 dev_err(&instance->pdev->dev, 7315 "Failed to allocate crash dump buffer\n"); 7316 } 7317 7318 return 0; 7319 } 7320 7321 /* 7322 * megasas_free_ctrl_dma_buffers - Free consistent DMA buffers allocated 7323 * during driver load time 7324 * 7325 * @instance- Adapter soft instance 7326 * 7327 */ 7328 static inline 7329 void megasas_free_ctrl_dma_buffers(struct megasas_instance *instance) 7330 { 7331 struct pci_dev *pdev = instance->pdev; 7332 struct fusion_context *fusion = instance->ctrl_context; 7333 7334 if (instance->evt_detail) 7335 dma_free_coherent(&pdev->dev, sizeof(struct megasas_evt_detail), 7336 instance->evt_detail, 7337 instance->evt_detail_h); 7338 7339 if (fusion && fusion->ioc_init_request) 7340 dma_free_coherent(&pdev->dev, 7341 sizeof(struct MPI2_IOC_INIT_REQUEST), 7342 fusion->ioc_init_request, 7343 fusion->ioc_init_request_phys); 7344 7345 if (instance->pd_list_buf) 7346 dma_free_coherent(&pdev->dev, 7347 MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST), 7348 instance->pd_list_buf, 7349 instance->pd_list_buf_h); 7350 7351 if (instance->ld_list_buf) 7352 dma_free_coherent(&pdev->dev, sizeof(struct MR_LD_LIST), 7353 instance->ld_list_buf, 7354 instance->ld_list_buf_h); 7355 7356 if (instance->ld_targetid_list_buf) 7357 dma_free_coherent(&pdev->dev, sizeof(struct MR_LD_TARGETID_LIST), 7358 
instance->ld_targetid_list_buf, 7359 instance->ld_targetid_list_buf_h); 7360 7361 if (instance->ctrl_info_buf) 7362 dma_free_coherent(&pdev->dev, sizeof(struct megasas_ctrl_info), 7363 instance->ctrl_info_buf, 7364 instance->ctrl_info_buf_h); 7365 7366 if (instance->system_info_buf) 7367 dma_free_coherent(&pdev->dev, sizeof(struct MR_DRV_SYSTEM_INFO), 7368 instance->system_info_buf, 7369 instance->system_info_h); 7370 7371 if (instance->pd_info) 7372 dma_free_coherent(&pdev->dev, sizeof(struct MR_PD_INFO), 7373 instance->pd_info, instance->pd_info_h); 7374 7375 if (instance->tgt_prop) 7376 dma_free_coherent(&pdev->dev, sizeof(struct MR_TARGET_PROPERTIES), 7377 instance->tgt_prop, instance->tgt_prop_h); 7378 7379 if (instance->crash_dump_buf) 7380 dma_free_coherent(&pdev->dev, CRASH_DMA_BUF_SIZE, 7381 instance->crash_dump_buf, 7382 instance->crash_dump_h); 7383 7384 if (instance->snapdump_prop) 7385 dma_free_coherent(&pdev->dev, 7386 sizeof(struct MR_SNAPDUMP_PROPERTIES), 7387 instance->snapdump_prop, 7388 instance->snapdump_prop_h); 7389 7390 if (instance->host_device_list_buf) 7391 dma_free_coherent(&pdev->dev, 7392 HOST_DEVICE_LIST_SZ, 7393 instance->host_device_list_buf, 7394 instance->host_device_list_buf_h); 7395 7396 } 7397 7398 /* 7399 * megasas_init_ctrl_params - Initialize controller's instance 7400 * parameters before FW init 7401 * @instance - Adapter soft instance 7402 * @return - void 7403 */ 7404 static inline void megasas_init_ctrl_params(struct megasas_instance *instance) 7405 { 7406 instance->fw_crash_state = UNAVAILABLE; 7407 7408 megasas_poll_wait_aen = 0; 7409 instance->issuepend_done = 1; 7410 atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL); 7411 7412 /* 7413 * Initialize locks and queues 7414 */ 7415 INIT_LIST_HEAD(&instance->cmd_pool); 7416 INIT_LIST_HEAD(&instance->internal_reset_pending_q); 7417 7418 atomic_set(&instance->fw_outstanding, 0); 7419 atomic64_set(&instance->total_io_count, 0); 7420 7421 init_waitqueue_head(&instance->int_cmd_wait_q); 7422 init_waitqueue_head(&instance->abort_cmd_wait_q); 7423 7424 mutex_init(&instance->crashdump_lock); 7425 spin_lock_init(&instance->mfi_pool_lock); 7426 spin_lock_init(&instance->hba_lock); 7427 spin_lock_init(&instance->stream_lock); 7428 spin_lock_init(&instance->completion_lock); 7429 7430 mutex_init(&instance->reset_mutex); 7431 7432 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) || 7433 (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) 7434 instance->flag_ieee = 1; 7435 7436 instance->flag = 0; 7437 instance->unload = 1; 7438 instance->last_time = 0; 7439 instance->disableOnlineCtrlReset = 1; 7440 instance->UnevenSpanSupport = 0; 7441 instance->smp_affinity_enable = smp_affinity_enable ? 
true : false; 7442 instance->msix_load_balance = false; 7443 7444 if (instance->adapter_type != MFI_SERIES) 7445 INIT_WORK(&instance->work_init, megasas_fusion_ocr_wq); 7446 else 7447 INIT_WORK(&instance->work_init, process_fw_state_change_wq); 7448 } 7449 7450 /** 7451 * megasas_probe_one - PCI hotplug entry point 7452 * @pdev: PCI device structure 7453 * @id: PCI ids of supported hotplugged adapter 7454 */ 7455 static int megasas_probe_one(struct pci_dev *pdev, 7456 const struct pci_device_id *id) 7457 { 7458 int rval, pos; 7459 struct Scsi_Host *host; 7460 struct megasas_instance *instance; 7461 u16 control = 0; 7462 7463 switch (pdev->device) { 7464 case PCI_DEVICE_ID_LSI_AERO_10E0: 7465 case PCI_DEVICE_ID_LSI_AERO_10E3: 7466 case PCI_DEVICE_ID_LSI_AERO_10E4: 7467 case PCI_DEVICE_ID_LSI_AERO_10E7: 7468 dev_err(&pdev->dev, "Adapter is in non secure mode\n"); 7469 return 1; 7470 case PCI_DEVICE_ID_LSI_AERO_10E1: 7471 case PCI_DEVICE_ID_LSI_AERO_10E5: 7472 dev_info(&pdev->dev, "Adapter is in configurable secure mode\n"); 7473 break; 7474 } 7475 7476 /* Reset MSI-X in the kdump kernel */ 7477 if (reset_devices) { 7478 pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX); 7479 if (pos) { 7480 pci_read_config_word(pdev, pos + PCI_MSIX_FLAGS, 7481 &control); 7482 if (control & PCI_MSIX_FLAGS_ENABLE) { 7483 dev_info(&pdev->dev, "resetting MSI-X\n"); 7484 pci_write_config_word(pdev, 7485 pos + PCI_MSIX_FLAGS, 7486 control & 7487 ~PCI_MSIX_FLAGS_ENABLE); 7488 } 7489 } 7490 } 7491 7492 /* 7493 * PCI prepping: enable device set bus mastering and dma mask 7494 */ 7495 rval = pci_enable_device_mem(pdev); 7496 7497 if (rval) { 7498 return rval; 7499 } 7500 7501 pci_set_master(pdev); 7502 7503 host = scsi_host_alloc(&megasas_template, 7504 sizeof(struct megasas_instance)); 7505 7506 if (!host) { 7507 dev_printk(KERN_DEBUG, &pdev->dev, "scsi_host_alloc failed\n"); 7508 goto fail_alloc_instance; 7509 } 7510 7511 instance = (struct megasas_instance *)host->hostdata; 7512 memset(instance, 0, sizeof(*instance)); 7513 atomic_set(&instance->fw_reset_no_pci_access, 0); 7514 7515 /* 7516 * Initialize PCI related and misc parameters 7517 */ 7518 instance->pdev = pdev; 7519 instance->host = host; 7520 instance->unique_id = pci_dev_id(pdev); 7521 instance->init_id = MEGASAS_DEFAULT_INIT_ID; 7522 7523 megasas_set_adapter_type(instance); 7524 7525 /* 7526 * Initialize MFI Firmware 7527 */ 7528 if (megasas_init_fw(instance)) 7529 goto fail_init_mfi; 7530 7531 if (instance->requestorId) { 7532 if (instance->PlasmaFW111) { 7533 instance->vf_affiliation_111 = 7534 dma_alloc_coherent(&pdev->dev, 7535 sizeof(struct MR_LD_VF_AFFILIATION_111), 7536 &instance->vf_affiliation_111_h, 7537 GFP_KERNEL); 7538 if (!instance->vf_affiliation_111) 7539 dev_warn(&pdev->dev, "Can't allocate " 7540 "memory for VF affiliation buffer\n"); 7541 } else { 7542 instance->vf_affiliation = 7543 dma_alloc_coherent(&pdev->dev, 7544 (MAX_LOGICAL_DRIVES + 1) * 7545 sizeof(struct MR_LD_VF_AFFILIATION), 7546 &instance->vf_affiliation_h, 7547 GFP_KERNEL); 7548 if (!instance->vf_affiliation) 7549 dev_warn(&pdev->dev, "Can't allocate " 7550 "memory for VF affiliation buffer\n"); 7551 } 7552 } 7553 7554 /* 7555 * Store instance in PCI softstate 7556 */ 7557 pci_set_drvdata(pdev, instance); 7558 7559 /* 7560 * Add this controller to megasas_mgmt_info structure so that it 7561 * can be exported to management applications 7562 */ 7563 megasas_mgmt_info.count++; 7564 megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = instance; 7565 
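	/*
	 * megasas_mgmt_info is the global table consulted by the management
	 * character device so that an ioctl's host_no can be mapped back to an
	 * adapter instance (see megasas_lookup_instance()).  On hot removal the
	 * slot is only set to NULL and max_index is not decremented, so the
	 * array may be sparse.
	 */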
megasas_mgmt_info.max_index++; 7566 7567 /* 7568 * Register with SCSI mid-layer 7569 */ 7570 if (megasas_io_attach(instance)) 7571 goto fail_io_attach; 7572 7573 instance->unload = 0; 7574 /* 7575 * Trigger SCSI to scan our drives 7576 */ 7577 if (!instance->enable_fw_dev_list || 7578 (instance->host_device_list_buf->count > 0)) 7579 scsi_scan_host(host); 7580 7581 /* 7582 * Initiate AEN (Asynchronous Event Notification) 7583 */ 7584 if (megasas_start_aen(instance)) { 7585 dev_printk(KERN_DEBUG, &pdev->dev, "start aen failed\n"); 7586 goto fail_start_aen; 7587 } 7588 7589 megasas_setup_debugfs(instance); 7590 7591 /* Get current SR-IOV LD/VF affiliation */ 7592 if (instance->requestorId) 7593 megasas_get_ld_vf_affiliation(instance, 1); 7594 7595 return 0; 7596 7597 fail_start_aen: 7598 instance->unload = 1; 7599 scsi_remove_host(instance->host); 7600 fail_io_attach: 7601 megasas_mgmt_info.count--; 7602 megasas_mgmt_info.max_index--; 7603 megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = NULL; 7604 7605 if (instance->requestorId && !instance->skip_heartbeat_timer_del) 7606 del_timer_sync(&instance->sriov_heartbeat_timer); 7607 7608 instance->instancet->disable_intr(instance); 7609 megasas_destroy_irqs(instance); 7610 7611 if (instance->adapter_type != MFI_SERIES) 7612 megasas_release_fusion(instance); 7613 else 7614 megasas_release_mfi(instance); 7615 7616 if (instance->msix_vectors) 7617 pci_free_irq_vectors(instance->pdev); 7618 instance->msix_vectors = 0; 7619 7620 if (instance->fw_crash_state != UNAVAILABLE) 7621 megasas_free_host_crash_buffer(instance); 7622 7623 if (instance->adapter_type != MFI_SERIES) 7624 megasas_fusion_stop_watchdog(instance); 7625 fail_init_mfi: 7626 scsi_host_put(host); 7627 fail_alloc_instance: 7628 pci_disable_device(pdev); 7629 7630 return -ENODEV; 7631 } 7632 7633 /** 7634 * megasas_flush_cache - Requests FW to flush all its caches 7635 * @instance: Adapter soft state 7636 */ 7637 static void megasas_flush_cache(struct megasas_instance *instance) 7638 { 7639 struct megasas_cmd *cmd; 7640 struct megasas_dcmd_frame *dcmd; 7641 7642 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) 7643 return; 7644 7645 cmd = megasas_get_cmd(instance); 7646 7647 if (!cmd) 7648 return; 7649 7650 dcmd = &cmd->frame->dcmd; 7651 7652 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 7653 7654 dcmd->cmd = MFI_CMD_DCMD; 7655 dcmd->cmd_status = 0x0; 7656 dcmd->sge_count = 0; 7657 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE); 7658 dcmd->timeout = 0; 7659 dcmd->pad_0 = 0; 7660 dcmd->data_xfer_len = 0; 7661 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_CACHE_FLUSH); 7662 dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE; 7663 7664 if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS) 7665 != DCMD_SUCCESS) { 7666 dev_err(&instance->pdev->dev, 7667 "return from %s %d\n", __func__, __LINE__); 7668 return; 7669 } 7670 7671 megasas_return_cmd(instance, cmd); 7672 } 7673 7674 /** 7675 * megasas_shutdown_controller - Instructs FW to shutdown the controller 7676 * @instance: Adapter soft state 7677 * @opcode: Shutdown/Hibernate 7678 */ 7679 static void megasas_shutdown_controller(struct megasas_instance *instance, 7680 u32 opcode) 7681 { 7682 struct megasas_cmd *cmd; 7683 struct megasas_dcmd_frame *dcmd; 7684 7685 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) 7686 return; 7687 7688 cmd = megasas_get_cmd(instance); 7689 7690 if (!cmd) 7691 return; 7692 7693 if (instance->aen_cmd) 7694 megasas_issue_blocked_abort_cmd(instance, 7695 
instance->aen_cmd, MFI_IO_TIMEOUT_SECS); 7696 if (instance->map_update_cmd) 7697 megasas_issue_blocked_abort_cmd(instance, 7698 instance->map_update_cmd, MFI_IO_TIMEOUT_SECS); 7699 if (instance->jbod_seq_cmd) 7700 megasas_issue_blocked_abort_cmd(instance, 7701 instance->jbod_seq_cmd, MFI_IO_TIMEOUT_SECS); 7702 7703 dcmd = &cmd->frame->dcmd; 7704 7705 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 7706 7707 dcmd->cmd = MFI_CMD_DCMD; 7708 dcmd->cmd_status = 0x0; 7709 dcmd->sge_count = 0; 7710 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE); 7711 dcmd->timeout = 0; 7712 dcmd->pad_0 = 0; 7713 dcmd->data_xfer_len = 0; 7714 dcmd->opcode = cpu_to_le32(opcode); 7715 7716 if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS) 7717 != DCMD_SUCCESS) { 7718 dev_err(&instance->pdev->dev, 7719 "return from %s %d\n", __func__, __LINE__); 7720 return; 7721 } 7722 7723 megasas_return_cmd(instance, cmd); 7724 } 7725 7726 /** 7727 * megasas_suspend - driver suspend entry point 7728 * @dev: Device structure 7729 */ 7730 static int __maybe_unused 7731 megasas_suspend(struct device *dev) 7732 { 7733 struct megasas_instance *instance; 7734 7735 instance = dev_get_drvdata(dev); 7736 7737 if (!instance) 7738 return 0; 7739 7740 instance->unload = 1; 7741 7742 dev_info(dev, "%s is called\n", __func__); 7743 7744 /* Shutdown SR-IOV heartbeat timer */ 7745 if (instance->requestorId && !instance->skip_heartbeat_timer_del) 7746 del_timer_sync(&instance->sriov_heartbeat_timer); 7747 7748 /* Stop the FW fault detection watchdog */ 7749 if (instance->adapter_type != MFI_SERIES) 7750 megasas_fusion_stop_watchdog(instance); 7751 7752 megasas_flush_cache(instance); 7753 megasas_shutdown_controller(instance, MR_DCMD_HIBERNATE_SHUTDOWN); 7754 7755 /* cancel the delayed work if this work still in queue */ 7756 if (instance->ev != NULL) { 7757 struct megasas_aen_event *ev = instance->ev; 7758 cancel_delayed_work_sync(&ev->hotplug_work); 7759 instance->ev = NULL; 7760 } 7761 7762 tasklet_kill(&instance->isr_tasklet); 7763 7764 pci_set_drvdata(instance->pdev, instance); 7765 instance->instancet->disable_intr(instance); 7766 7767 megasas_destroy_irqs(instance); 7768 7769 if (instance->msix_vectors) 7770 pci_free_irq_vectors(instance->pdev); 7771 7772 return 0; 7773 } 7774 7775 /** 7776 * megasas_resume- driver resume entry point 7777 * @dev: Device structure 7778 */ 7779 static int __maybe_unused 7780 megasas_resume(struct device *dev) 7781 { 7782 int rval; 7783 struct Scsi_Host *host; 7784 struct megasas_instance *instance; 7785 u32 status_reg; 7786 7787 instance = dev_get_drvdata(dev); 7788 7789 if (!instance) 7790 return 0; 7791 7792 host = instance->host; 7793 7794 dev_info(dev, "%s is called\n", __func__); 7795 7796 /* 7797 * We expect the FW state to be READY 7798 */ 7799 7800 if (megasas_transition_to_ready(instance, 0)) { 7801 dev_info(&instance->pdev->dev, 7802 "Failed to transition controller to ready from %s!\n", 7803 __func__); 7804 if (instance->adapter_type != MFI_SERIES) { 7805 status_reg = 7806 instance->instancet->read_fw_status_reg(instance); 7807 if (!(status_reg & MFI_RESET_ADAPTER) || 7808 ((megasas_adp_reset_wait_for_ready 7809 (instance, true, 0)) == FAILED)) 7810 goto fail_ready_state; 7811 } else { 7812 atomic_set(&instance->fw_reset_no_pci_access, 1); 7813 instance->instancet->adp_reset 7814 (instance, instance->reg_set); 7815 atomic_set(&instance->fw_reset_no_pci_access, 0); 7816 7817 /* waiting for about 30 seconds before retry */ 7818 ssleep(30); 7819 7820 if (megasas_transition_to_ready(instance, 0)) 
7821 goto fail_ready_state; 7822 } 7823 7824 dev_info(&instance->pdev->dev, 7825 "FW restarted successfully from %s!\n", 7826 __func__); 7827 } 7828 if (megasas_set_dma_mask(instance)) 7829 goto fail_set_dma_mask; 7830 7831 /* 7832 * Initialize MFI Firmware 7833 */ 7834 7835 atomic_set(&instance->fw_outstanding, 0); 7836 atomic_set(&instance->ldio_outstanding, 0); 7837 7838 /* Now re-enable MSI-X */ 7839 if (instance->msix_vectors) 7840 megasas_alloc_irq_vectors(instance); 7841 7842 if (!instance->msix_vectors) { 7843 rval = pci_alloc_irq_vectors(instance->pdev, 1, 1, 7844 PCI_IRQ_INTX); 7845 if (rval < 0) 7846 goto fail_reenable_msix; 7847 } 7848 7849 megasas_setup_reply_map(instance); 7850 7851 if (instance->adapter_type != MFI_SERIES) { 7852 megasas_reset_reply_desc(instance); 7853 if (megasas_ioc_init_fusion(instance)) { 7854 megasas_free_cmds(instance); 7855 megasas_free_cmds_fusion(instance); 7856 goto fail_init_mfi; 7857 } 7858 if (!megasas_get_map_info(instance)) 7859 megasas_sync_map_info(instance); 7860 } else { 7861 *instance->producer = 0; 7862 *instance->consumer = 0; 7863 if (megasas_issue_init_mfi(instance)) 7864 goto fail_init_mfi; 7865 } 7866 7867 if (megasas_get_ctrl_info(instance) != DCMD_SUCCESS) 7868 goto fail_init_mfi; 7869 7870 tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet, 7871 (unsigned long)instance); 7872 7873 if (instance->msix_vectors ? 7874 megasas_setup_irqs_msix(instance, 0) : 7875 megasas_setup_irqs_ioapic(instance)) 7876 goto fail_init_mfi; 7877 7878 if (instance->adapter_type != MFI_SERIES) 7879 megasas_setup_irq_poll(instance); 7880 7881 /* Re-launch SR-IOV heartbeat timer */ 7882 if (instance->requestorId) { 7883 if (!megasas_sriov_start_heartbeat(instance, 0)) 7884 megasas_start_timer(instance); 7885 else { 7886 instance->skip_heartbeat_timer_del = 1; 7887 goto fail_init_mfi; 7888 } 7889 } 7890 7891 instance->instancet->enable_intr(instance); 7892 megasas_setup_jbod_map(instance); 7893 instance->unload = 0; 7894 7895 /* 7896 * Initiate AEN (Asynchronous Event Notification) 7897 */ 7898 if (megasas_start_aen(instance)) 7899 dev_err(&instance->pdev->dev, "Start AEN failed\n"); 7900 7901 /* Re-launch FW fault watchdog */ 7902 if (instance->adapter_type != MFI_SERIES) 7903 if (megasas_fusion_start_watchdog(instance) != SUCCESS) 7904 goto fail_start_watchdog; 7905 7906 return 0; 7907 7908 fail_start_watchdog: 7909 if (instance->requestorId && !instance->skip_heartbeat_timer_del) 7910 del_timer_sync(&instance->sriov_heartbeat_timer); 7911 fail_init_mfi: 7912 megasas_free_ctrl_dma_buffers(instance); 7913 megasas_free_ctrl_mem(instance); 7914 scsi_host_put(host); 7915 7916 fail_reenable_msix: 7917 fail_set_dma_mask: 7918 fail_ready_state: 7919 7920 return -ENODEV; 7921 } 7922 7923 static inline int 7924 megasas_wait_for_adapter_operational(struct megasas_instance *instance) 7925 { 7926 int wait_time = MEGASAS_RESET_WAIT_TIME * 2; 7927 int i; 7928 u8 adp_state; 7929 7930 for (i = 0; i < wait_time; i++) { 7931 adp_state = atomic_read(&instance->adprecovery); 7932 if ((adp_state == MEGASAS_HBA_OPERATIONAL) || 7933 (adp_state == MEGASAS_HW_CRITICAL_ERROR)) 7934 break; 7935 7936 if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) 7937 dev_notice(&instance->pdev->dev, "waiting for controller reset to finish\n"); 7938 7939 msleep(1000); 7940 } 7941 7942 if (adp_state != MEGASAS_HBA_OPERATIONAL) { 7943 dev_info(&instance->pdev->dev, 7944 "%s HBA failed to become operational, adp_state %d\n", 7945 __func__, adp_state); 7946 return 1; 7947 } 7948 7949 return 
0; 7950 } 7951 7952 /** 7953 * megasas_detach_one - PCI hot"un"plug entry point 7954 * @pdev: PCI device structure 7955 */ 7956 static void megasas_detach_one(struct pci_dev *pdev) 7957 { 7958 int i; 7959 struct Scsi_Host *host; 7960 struct megasas_instance *instance; 7961 struct fusion_context *fusion; 7962 size_t pd_seq_map_sz; 7963 7964 instance = pci_get_drvdata(pdev); 7965 7966 if (!instance) 7967 return; 7968 7969 host = instance->host; 7970 fusion = instance->ctrl_context; 7971 7972 /* Shutdown SR-IOV heartbeat timer */ 7973 if (instance->requestorId && !instance->skip_heartbeat_timer_del) 7974 del_timer_sync(&instance->sriov_heartbeat_timer); 7975 7976 /* Stop the FW fault detection watchdog */ 7977 if (instance->adapter_type != MFI_SERIES) 7978 megasas_fusion_stop_watchdog(instance); 7979 7980 if (instance->fw_crash_state != UNAVAILABLE) 7981 megasas_free_host_crash_buffer(instance); 7982 scsi_remove_host(instance->host); 7983 instance->unload = 1; 7984 7985 if (megasas_wait_for_adapter_operational(instance)) 7986 goto skip_firing_dcmds; 7987 7988 megasas_flush_cache(instance); 7989 megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN); 7990 7991 skip_firing_dcmds: 7992 /* cancel the delayed work if this work still in queue*/ 7993 if (instance->ev != NULL) { 7994 struct megasas_aen_event *ev = instance->ev; 7995 cancel_delayed_work_sync(&ev->hotplug_work); 7996 instance->ev = NULL; 7997 } 7998 7999 /* cancel all wait events */ 8000 wake_up_all(&instance->int_cmd_wait_q); 8001 8002 tasklet_kill(&instance->isr_tasklet); 8003 8004 /* 8005 * Take the instance off the instance array. Note that we will not 8006 * decrement the max_index. We let this array be sparse array 8007 */ 8008 for (i = 0; i < megasas_mgmt_info.max_index; i++) { 8009 if (megasas_mgmt_info.instance[i] == instance) { 8010 megasas_mgmt_info.count--; 8011 megasas_mgmt_info.instance[i] = NULL; 8012 8013 break; 8014 } 8015 } 8016 8017 instance->instancet->disable_intr(instance); 8018 8019 megasas_destroy_irqs(instance); 8020 8021 if (instance->msix_vectors) 8022 pci_free_irq_vectors(instance->pdev); 8023 8024 if (instance->adapter_type >= VENTURA_SERIES) { 8025 for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i) 8026 kfree(fusion->stream_detect_by_ld[i]); 8027 kfree(fusion->stream_detect_by_ld); 8028 fusion->stream_detect_by_ld = NULL; 8029 } 8030 8031 8032 if (instance->adapter_type != MFI_SERIES) { 8033 megasas_release_fusion(instance); 8034 pd_seq_map_sz = 8035 struct_size_t(struct MR_PD_CFG_SEQ_NUM_SYNC, 8036 seq, MAX_PHYSICAL_DEVICES); 8037 for (i = 0; i < 2 ; i++) { 8038 if (fusion->ld_map[i]) 8039 dma_free_coherent(&instance->pdev->dev, 8040 fusion->max_map_sz, 8041 fusion->ld_map[i], 8042 fusion->ld_map_phys[i]); 8043 if (fusion->ld_drv_map[i]) { 8044 if (is_vmalloc_addr(fusion->ld_drv_map[i])) 8045 vfree(fusion->ld_drv_map[i]); 8046 else 8047 free_pages((ulong)fusion->ld_drv_map[i], 8048 fusion->drv_map_pages); 8049 } 8050 8051 if (fusion->pd_seq_sync[i]) 8052 dma_free_coherent(&instance->pdev->dev, 8053 pd_seq_map_sz, 8054 fusion->pd_seq_sync[i], 8055 fusion->pd_seq_phys[i]); 8056 } 8057 } else { 8058 megasas_release_mfi(instance); 8059 } 8060 8061 if (instance->vf_affiliation) 8062 dma_free_coherent(&pdev->dev, (MAX_LOGICAL_DRIVES + 1) * 8063 sizeof(struct MR_LD_VF_AFFILIATION), 8064 instance->vf_affiliation, 8065 instance->vf_affiliation_h); 8066 8067 if (instance->vf_affiliation_111) 8068 dma_free_coherent(&pdev->dev, 8069 sizeof(struct MR_LD_VF_AFFILIATION_111), 8070 instance->vf_affiliation_111, 8071 
instance->vf_affiliation_111_h); 8072 8073 if (instance->hb_host_mem) 8074 dma_free_coherent(&pdev->dev, sizeof(struct MR_CTRL_HB_HOST_MEM), 8075 instance->hb_host_mem, 8076 instance->hb_host_mem_h); 8077 8078 megasas_free_ctrl_dma_buffers(instance); 8079 8080 megasas_free_ctrl_mem(instance); 8081 8082 megasas_destroy_debugfs(instance); 8083 8084 scsi_host_put(host); 8085 8086 pci_disable_device(pdev); 8087 } 8088 8089 /** 8090 * megasas_shutdown - Shutdown entry point 8091 * @pdev: PCI device structure 8092 */ 8093 static void megasas_shutdown(struct pci_dev *pdev) 8094 { 8095 struct megasas_instance *instance = pci_get_drvdata(pdev); 8096 8097 if (!instance) 8098 return; 8099 8100 instance->unload = 1; 8101 8102 if (megasas_wait_for_adapter_operational(instance)) 8103 goto skip_firing_dcmds; 8104 8105 megasas_flush_cache(instance); 8106 megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN); 8107 8108 skip_firing_dcmds: 8109 instance->instancet->disable_intr(instance); 8110 megasas_destroy_irqs(instance); 8111 8112 if (instance->msix_vectors) 8113 pci_free_irq_vectors(instance->pdev); 8114 } 8115 8116 /* 8117 * megasas_mgmt_open - char node "open" entry point 8118 * @inode: char node inode 8119 * @filep: char node file 8120 */ 8121 static int megasas_mgmt_open(struct inode *inode, struct file *filep) 8122 { 8123 /* 8124 * Allow only those users with admin rights 8125 */ 8126 if (!capable(CAP_SYS_ADMIN)) 8127 return -EACCES; 8128 8129 return 0; 8130 } 8131 8132 /* 8133 * megasas_mgmt_fasync - Async notifier registration from applications 8134 * @fd: char node file descriptor number 8135 * @filep: char node file 8136 * @mode: notifier on/off 8137 * 8138 * This function adds the calling process to a driver global queue. When an 8139 * event occurs, SIGIO will be sent to all processes in this queue. 
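 *
 * Registration on the async queue head (megasas_async_queue) is serialized
 * by megasas_async_queue_mutex; the poll(2) path below uses the separate
 * megasas_poll_wait waitqueue together with the megasas_poll_wait_aen flag
 * under poll_aen_lock.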
8140 */ 8141 static int megasas_mgmt_fasync(int fd, struct file *filep, int mode) 8142 { 8143 int rc; 8144 8145 mutex_lock(&megasas_async_queue_mutex); 8146 8147 rc = fasync_helper(fd, filep, mode, &megasas_async_queue); 8148 8149 mutex_unlock(&megasas_async_queue_mutex); 8150 8151 if (rc >= 0) { 8152 /* For sanity check when we get ioctl */ 8153 filep->private_data = filep; 8154 return 0; 8155 } 8156 8157 printk(KERN_DEBUG "megasas: fasync_helper failed [%d]\n", rc); 8158 8159 return rc; 8160 } 8161 8162 /* 8163 * megasas_mgmt_poll - char node "poll" entry point 8164 * @filep: char node file 8165 * @wait: Events to poll for 8166 */ 8167 static __poll_t megasas_mgmt_poll(struct file *file, poll_table *wait) 8168 { 8169 __poll_t mask; 8170 unsigned long flags; 8171 8172 poll_wait(file, &megasas_poll_wait, wait); 8173 spin_lock_irqsave(&poll_aen_lock, flags); 8174 if (megasas_poll_wait_aen) 8175 mask = (EPOLLIN | EPOLLRDNORM); 8176 else 8177 mask = 0; 8178 megasas_poll_wait_aen = 0; 8179 spin_unlock_irqrestore(&poll_aen_lock, flags); 8180 return mask; 8181 } 8182 8183 /* 8184 * megasas_set_crash_dump_params_ioctl: 8185 * Send CRASH_DUMP_MODE DCMD to all controllers 8186 * @cmd: MFI command frame 8187 */ 8188 8189 static int megasas_set_crash_dump_params_ioctl(struct megasas_cmd *cmd) 8190 { 8191 struct megasas_instance *local_instance; 8192 int i, error = 0; 8193 int crash_support; 8194 8195 crash_support = cmd->frame->dcmd.mbox.w[0]; 8196 8197 for (i = 0; i < megasas_mgmt_info.max_index; i++) { 8198 local_instance = megasas_mgmt_info.instance[i]; 8199 if (local_instance && local_instance->crash_dump_drv_support) { 8200 if ((atomic_read(&local_instance->adprecovery) == 8201 MEGASAS_HBA_OPERATIONAL) && 8202 !megasas_set_crash_dump_params(local_instance, 8203 crash_support)) { 8204 local_instance->crash_dump_app_support = 8205 crash_support; 8206 dev_info(&local_instance->pdev->dev, 8207 "Application firmware crash " 8208 "dump mode set success\n"); 8209 error = 0; 8210 } else { 8211 dev_info(&local_instance->pdev->dev, 8212 "Application firmware crash " 8213 "dump mode set failed\n"); 8214 error = -1; 8215 } 8216 } 8217 } 8218 return error; 8219 } 8220 8221 /** 8222 * megasas_mgmt_fw_ioctl - Issues management ioctls to FW 8223 * @instance: Adapter soft state 8224 * @user_ioc: User's ioctl packet 8225 * @ioc: ioctl packet 8226 */ 8227 static int 8228 megasas_mgmt_fw_ioctl(struct megasas_instance *instance, 8229 struct megasas_iocpacket __user * user_ioc, 8230 struct megasas_iocpacket *ioc) 8231 { 8232 struct megasas_sge64 *kern_sge64 = NULL; 8233 struct megasas_sge32 *kern_sge32 = NULL; 8234 struct megasas_cmd *cmd; 8235 void *kbuff_arr[MAX_IOCTL_SGE]; 8236 dma_addr_t buf_handle = 0; 8237 int error = 0, i; 8238 void *sense = NULL; 8239 dma_addr_t sense_handle; 8240 void *sense_ptr; 8241 u32 opcode = 0; 8242 int ret = DCMD_SUCCESS; 8243 8244 memset(kbuff_arr, 0, sizeof(kbuff_arr)); 8245 8246 if (ioc->sge_count > MAX_IOCTL_SGE) { 8247 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SGE count [%d] > max limit [%d]\n", 8248 ioc->sge_count, MAX_IOCTL_SGE); 8249 return -EINVAL; 8250 } 8251 8252 if ((ioc->frame.hdr.cmd >= MFI_CMD_OP_COUNT) || 8253 ((ioc->frame.hdr.cmd == MFI_CMD_NVME) && 8254 !instance->support_nvme_passthru) || 8255 ((ioc->frame.hdr.cmd == MFI_CMD_TOOLBOX) && 8256 !instance->support_pci_lane_margining)) { 8257 dev_err(&instance->pdev->dev, 8258 "Received invalid ioctl command 0x%x\n", 8259 ioc->frame.hdr.cmd); 8260 return -ENOTSUPP; 8261 } 8262 8263 cmd = megasas_get_cmd(instance); 
8264 if (!cmd) { 8265 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get a cmd packet\n"); 8266 return -ENOMEM; 8267 } 8268 8269 /* 8270 * User's IOCTL packet has 2 frames (maximum). Copy those two 8271 * frames into our cmd's frames. cmd->frame's context will get 8272 * overwritten when we copy from user's frames. So set that value 8273 * alone separately 8274 */ 8275 memcpy(cmd->frame, ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE); 8276 cmd->frame->hdr.context = cpu_to_le32(cmd->index); 8277 cmd->frame->hdr.pad_0 = 0; 8278 8279 cmd->frame->hdr.flags &= (~MFI_FRAME_IEEE); 8280 8281 if (instance->consistent_mask_64bit) 8282 cmd->frame->hdr.flags |= cpu_to_le16((MFI_FRAME_SGL64 | 8283 MFI_FRAME_SENSE64)); 8284 else 8285 cmd->frame->hdr.flags &= cpu_to_le16(~(MFI_FRAME_SGL64 | 8286 MFI_FRAME_SENSE64)); 8287 8288 if (cmd->frame->hdr.cmd == MFI_CMD_DCMD) 8289 opcode = le32_to_cpu(cmd->frame->dcmd.opcode); 8290 8291 if (opcode == MR_DCMD_CTRL_SHUTDOWN) { 8292 mutex_lock(&instance->reset_mutex); 8293 if (megasas_get_ctrl_info(instance) != DCMD_SUCCESS) { 8294 megasas_return_cmd(instance, cmd); 8295 mutex_unlock(&instance->reset_mutex); 8296 return -1; 8297 } 8298 mutex_unlock(&instance->reset_mutex); 8299 } 8300 8301 if (opcode == MR_DRIVER_SET_APP_CRASHDUMP_MODE) { 8302 error = megasas_set_crash_dump_params_ioctl(cmd); 8303 megasas_return_cmd(instance, cmd); 8304 return error; 8305 } 8306 8307 /* 8308 * The management interface between applications and the fw uses 8309 * MFI frames. E.g, RAID configuration changes, LD property changes 8310 * etc are accomplishes through different kinds of MFI frames. The 8311 * driver needs to care only about substituting user buffers with 8312 * kernel buffers in SGLs. The location of SGL is embedded in the 8313 * struct iocpacket itself. 8314 */ 8315 if (instance->consistent_mask_64bit) 8316 kern_sge64 = (struct megasas_sge64 *) 8317 ((unsigned long)cmd->frame + ioc->sgl_off); 8318 else 8319 kern_sge32 = (struct megasas_sge32 *) 8320 ((unsigned long)cmd->frame + ioc->sgl_off); 8321 8322 /* 8323 * For each user buffer, create a mirror buffer and copy in 8324 */ 8325 for (i = 0; i < ioc->sge_count; i++) { 8326 if (!ioc->sgl[i].iov_len) 8327 continue; 8328 8329 kbuff_arr[i] = dma_alloc_coherent(&instance->pdev->dev, 8330 ioc->sgl[i].iov_len, 8331 &buf_handle, GFP_KERNEL); 8332 if (!kbuff_arr[i]) { 8333 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc " 8334 "kernel SGL buffer for IOCTL\n"); 8335 error = -ENOMEM; 8336 goto out; 8337 } 8338 8339 /* 8340 * We don't change the dma_coherent_mask, so 8341 * dma_alloc_coherent only returns 32bit addresses 8342 */ 8343 if (instance->consistent_mask_64bit) { 8344 kern_sge64[i].phys_addr = cpu_to_le64(buf_handle); 8345 kern_sge64[i].length = cpu_to_le32(ioc->sgl[i].iov_len); 8346 } else { 8347 kern_sge32[i].phys_addr = cpu_to_le32(buf_handle); 8348 kern_sge32[i].length = cpu_to_le32(ioc->sgl[i].iov_len); 8349 } 8350 8351 /* 8352 * We created a kernel buffer corresponding to the 8353 * user buffer. 
Now copy in from the user buffer 8354 */ 8355 if (copy_from_user(kbuff_arr[i], ioc->sgl[i].iov_base, 8356 (u32) (ioc->sgl[i].iov_len))) { 8357 error = -EFAULT; 8358 goto out; 8359 } 8360 } 8361 8362 if (ioc->sense_len) { 8363 /* make sure the pointer is part of the frame */ 8364 if (ioc->sense_off > 8365 (sizeof(union megasas_frame) - sizeof(__le64))) { 8366 error = -EINVAL; 8367 goto out; 8368 } 8369 8370 sense = dma_alloc_coherent(&instance->pdev->dev, ioc->sense_len, 8371 &sense_handle, GFP_KERNEL); 8372 if (!sense) { 8373 error = -ENOMEM; 8374 goto out; 8375 } 8376 8377 /* always store 64 bits regardless of addressing */ 8378 sense_ptr = (void *)cmd->frame + ioc->sense_off; 8379 put_unaligned_le64(sense_handle, sense_ptr); 8380 } 8381 8382 /* 8383 * Set the sync_cmd flag so that the ISR knows not to complete this 8384 * cmd to the SCSI mid-layer 8385 */ 8386 cmd->sync_cmd = 1; 8387 8388 ret = megasas_issue_blocked_cmd(instance, cmd, 0); 8389 switch (ret) { 8390 case DCMD_INIT: 8391 case DCMD_BUSY: 8392 cmd->sync_cmd = 0; 8393 dev_err(&instance->pdev->dev, 8394 "return -EBUSY from %s %d cmd 0x%x opcode 0x%x cmd->cmd_status_drv 0x%x\n", 8395 __func__, __LINE__, cmd->frame->hdr.cmd, opcode, 8396 cmd->cmd_status_drv); 8397 error = -EBUSY; 8398 goto out; 8399 } 8400 8401 cmd->sync_cmd = 0; 8402 8403 if (instance->unload == 1) { 8404 dev_info(&instance->pdev->dev, "Driver unload is in progress " 8405 "don't submit data to application\n"); 8406 goto out; 8407 } 8408 /* 8409 * copy out the kernel buffers to user buffers 8410 */ 8411 for (i = 0; i < ioc->sge_count; i++) { 8412 if (copy_to_user(ioc->sgl[i].iov_base, kbuff_arr[i], 8413 ioc->sgl[i].iov_len)) { 8414 error = -EFAULT; 8415 goto out; 8416 } 8417 } 8418 8419 /* 8420 * copy out the sense 8421 */ 8422 if (ioc->sense_len) { 8423 void __user *uptr; 8424 /* 8425 * sense_ptr points to the location that has the user 8426 * sense buffer address 8427 */ 8428 sense_ptr = (void *)ioc->frame.raw + ioc->sense_off; 8429 if (in_compat_syscall()) 8430 uptr = compat_ptr(get_unaligned((compat_uptr_t *) 8431 sense_ptr)); 8432 else 8433 uptr = get_unaligned((void __user **)sense_ptr); 8434 8435 if (copy_to_user(uptr, sense, ioc->sense_len)) { 8436 dev_err(&instance->pdev->dev, "Failed to copy out to user " 8437 "sense data\n"); 8438 error = -EFAULT; 8439 goto out; 8440 } 8441 } 8442 8443 /* 8444 * copy the status codes returned by the fw 8445 */ 8446 if (copy_to_user(&user_ioc->frame.hdr.cmd_status, 8447 &cmd->frame->hdr.cmd_status, sizeof(u8))) { 8448 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error copying out cmd_status\n"); 8449 error = -EFAULT; 8450 } 8451 8452 out: 8453 if (sense) { 8454 dma_free_coherent(&instance->pdev->dev, ioc->sense_len, 8455 sense, sense_handle); 8456 } 8457 8458 for (i = 0; i < ioc->sge_count; i++) { 8459 if (kbuff_arr[i]) { 8460 if (instance->consistent_mask_64bit) 8461 dma_free_coherent(&instance->pdev->dev, 8462 le32_to_cpu(kern_sge64[i].length), 8463 kbuff_arr[i], 8464 le64_to_cpu(kern_sge64[i].phys_addr)); 8465 else 8466 dma_free_coherent(&instance->pdev->dev, 8467 le32_to_cpu(kern_sge32[i].length), 8468 kbuff_arr[i], 8469 le32_to_cpu(kern_sge32[i].phys_addr)); 8470 kbuff_arr[i] = NULL; 8471 } 8472 } 8473 8474 megasas_return_cmd(instance, cmd); 8475 return error; 8476 } 8477 8478 static struct megasas_iocpacket * 8479 megasas_compat_iocpacket_get_user(void __user *arg) 8480 { 8481 struct megasas_iocpacket *ioc; 8482 struct compat_megasas_iocpacket __user *cioc = arg; 8483 size_t size; 8484 int err = -EFAULT; 8485 int 
i; 8486 8487 ioc = kzalloc(sizeof(*ioc), GFP_KERNEL); 8488 if (!ioc) 8489 return ERR_PTR(-ENOMEM); 8490 size = offsetof(struct megasas_iocpacket, frame) + sizeof(ioc->frame); 8491 if (copy_from_user(ioc, arg, size)) 8492 goto out; 8493 8494 for (i = 0; i < MAX_IOCTL_SGE; i++) { 8495 compat_uptr_t iov_base; 8496 8497 if (get_user(iov_base, &cioc->sgl[i].iov_base) || 8498 get_user(ioc->sgl[i].iov_len, &cioc->sgl[i].iov_len)) 8499 goto out; 8500 8501 ioc->sgl[i].iov_base = compat_ptr(iov_base); 8502 } 8503 8504 return ioc; 8505 out: 8506 kfree(ioc); 8507 return ERR_PTR(err); 8508 } 8509 8510 static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg) 8511 { 8512 struct megasas_iocpacket __user *user_ioc = 8513 (struct megasas_iocpacket __user *)arg; 8514 struct megasas_iocpacket *ioc; 8515 struct megasas_instance *instance; 8516 int error; 8517 8518 if (in_compat_syscall()) 8519 ioc = megasas_compat_iocpacket_get_user(user_ioc); 8520 else 8521 ioc = memdup_user(user_ioc, sizeof(struct megasas_iocpacket)); 8522 8523 if (IS_ERR(ioc)) 8524 return PTR_ERR(ioc); 8525 8526 instance = megasas_lookup_instance(ioc->host_no); 8527 if (!instance) { 8528 error = -ENODEV; 8529 goto out_kfree_ioc; 8530 } 8531 8532 /* Block ioctls in VF mode */ 8533 if (instance->requestorId && !allow_vf_ioctls) { 8534 error = -ENODEV; 8535 goto out_kfree_ioc; 8536 } 8537 8538 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { 8539 dev_err(&instance->pdev->dev, "Controller in crit error\n"); 8540 error = -ENODEV; 8541 goto out_kfree_ioc; 8542 } 8543 8544 if (instance->unload == 1) { 8545 error = -ENODEV; 8546 goto out_kfree_ioc; 8547 } 8548 8549 if (down_interruptible(&instance->ioctl_sem)) { 8550 error = -ERESTARTSYS; 8551 goto out_kfree_ioc; 8552 } 8553 8554 if (megasas_wait_for_adapter_operational(instance)) { 8555 error = -ENODEV; 8556 goto out_up; 8557 } 8558 8559 error = megasas_mgmt_fw_ioctl(instance, user_ioc, ioc); 8560 out_up: 8561 up(&instance->ioctl_sem); 8562 8563 out_kfree_ioc: 8564 kfree(ioc); 8565 return error; 8566 } 8567 8568 static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg) 8569 { 8570 struct megasas_instance *instance; 8571 struct megasas_aen aen; 8572 int error; 8573 8574 if (file->private_data != file) { 8575 printk(KERN_DEBUG "megasas: fasync_helper was not " 8576 "called first\n"); 8577 return -EINVAL; 8578 } 8579 8580 if (copy_from_user(&aen, (void __user *)arg, sizeof(aen))) 8581 return -EFAULT; 8582 8583 instance = megasas_lookup_instance(aen.host_no); 8584 8585 if (!instance) 8586 return -ENODEV; 8587 8588 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { 8589 return -ENODEV; 8590 } 8591 8592 if (instance->unload == 1) { 8593 return -ENODEV; 8594 } 8595 8596 if (megasas_wait_for_adapter_operational(instance)) 8597 return -ENODEV; 8598 8599 mutex_lock(&instance->reset_mutex); 8600 error = megasas_register_aen(instance, aen.seq_num, 8601 aen.class_locale_word); 8602 mutex_unlock(&instance->reset_mutex); 8603 return error; 8604 } 8605 8606 /** 8607 * megasas_mgmt_ioctl - char node ioctl entry point 8608 * @file: char device file pointer 8609 * @cmd: ioctl command 8610 * @arg: ioctl command arguments address 8611 */ 8612 static long 8613 megasas_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 8614 { 8615 switch (cmd) { 8616 case MEGASAS_IOC_FIRMWARE: 8617 return megasas_mgmt_ioctl_fw(file, arg); 8618 8619 case MEGASAS_IOC_GET_AEN: 8620 return megasas_mgmt_ioctl_aen(file, arg); 8621 } 8622 8623 return 
-ENOTTY; 8624 } 8625 8626 #ifdef CONFIG_COMPAT 8627 static long 8628 megasas_mgmt_compat_ioctl(struct file *file, unsigned int cmd, 8629 unsigned long arg) 8630 { 8631 switch (cmd) { 8632 case MEGASAS_IOC_FIRMWARE32: 8633 return megasas_mgmt_ioctl_fw(file, arg); 8634 case MEGASAS_IOC_GET_AEN: 8635 return megasas_mgmt_ioctl_aen(file, arg); 8636 } 8637 8638 return -ENOTTY; 8639 } 8640 #endif 8641 8642 /* 8643 * File operations structure for management interface 8644 */ 8645 static const struct file_operations megasas_mgmt_fops = { 8646 .owner = THIS_MODULE, 8647 .open = megasas_mgmt_open, 8648 .fasync = megasas_mgmt_fasync, 8649 .unlocked_ioctl = megasas_mgmt_ioctl, 8650 .poll = megasas_mgmt_poll, 8651 #ifdef CONFIG_COMPAT 8652 .compat_ioctl = megasas_mgmt_compat_ioctl, 8653 #endif 8654 .llseek = noop_llseek, 8655 }; 8656 8657 static SIMPLE_DEV_PM_OPS(megasas_pm_ops, megasas_suspend, megasas_resume); 8658 8659 /* 8660 * PCI hotplug support registration structure 8661 */ 8662 static struct pci_driver megasas_pci_driver = { 8663 8664 .name = "megaraid_sas", 8665 .id_table = megasas_pci_table, 8666 .probe = megasas_probe_one, 8667 .remove = megasas_detach_one, 8668 .driver.pm = &megasas_pm_ops, 8669 .shutdown = megasas_shutdown, 8670 }; 8671 8672 /* 8673 * Sysfs driver attributes 8674 */ 8675 static ssize_t version_show(struct device_driver *dd, char *buf) 8676 { 8677 return snprintf(buf, strlen(MEGASAS_VERSION) + 2, "%s\n", 8678 MEGASAS_VERSION); 8679 } 8680 static DRIVER_ATTR_RO(version); 8681 8682 static ssize_t release_date_show(struct device_driver *dd, char *buf) 8683 { 8684 return snprintf(buf, strlen(MEGASAS_RELDATE) + 2, "%s\n", 8685 MEGASAS_RELDATE); 8686 } 8687 static DRIVER_ATTR_RO(release_date); 8688 8689 static ssize_t support_poll_for_event_show(struct device_driver *dd, char *buf) 8690 { 8691 return sprintf(buf, "%u\n", support_poll_for_event); 8692 } 8693 static DRIVER_ATTR_RO(support_poll_for_event); 8694 8695 static ssize_t support_device_change_show(struct device_driver *dd, char *buf) 8696 { 8697 return sprintf(buf, "%u\n", support_device_change); 8698 } 8699 static DRIVER_ATTR_RO(support_device_change); 8700 8701 static ssize_t dbg_lvl_show(struct device_driver *dd, char *buf) 8702 { 8703 return sprintf(buf, "%u\n", megasas_dbg_lvl); 8704 } 8705 8706 static ssize_t dbg_lvl_store(struct device_driver *dd, const char *buf, 8707 size_t count) 8708 { 8709 int retval = count; 8710 8711 if (sscanf(buf, "%u", &megasas_dbg_lvl) < 1) { 8712 printk(KERN_ERR "megasas: could not set dbg_lvl\n"); 8713 retval = -EINVAL; 8714 } 8715 return retval; 8716 } 8717 static DRIVER_ATTR_RW(dbg_lvl); 8718 8719 static ssize_t 8720 support_nvme_encapsulation_show(struct device_driver *dd, char *buf) 8721 { 8722 return sprintf(buf, "%u\n", support_nvme_encapsulation); 8723 } 8724 8725 static DRIVER_ATTR_RO(support_nvme_encapsulation); 8726 8727 static ssize_t 8728 support_pci_lane_margining_show(struct device_driver *dd, char *buf) 8729 { 8730 return sprintf(buf, "%u\n", support_pci_lane_margining); 8731 } 8732 8733 static DRIVER_ATTR_RO(support_pci_lane_margining); 8734 8735 static inline void megasas_remove_scsi_device(struct scsi_device *sdev) 8736 { 8737 sdev_printk(KERN_INFO, sdev, "SCSI device is removed\n"); 8738 scsi_remove_device(sdev); 8739 scsi_device_put(sdev); 8740 } 8741 8742 /** 8743 * megasas_update_device_list - Update the PD and LD device list from FW 8744 * after an AEN event notification 8745 * @instance: Adapter soft state 8746 * @event_type: Indicates type of event (PD or LD 
event) 8747 * 8748 * @return: Success or failure 8749 * 8750 * Issue DCMDs to Firmware to update the internal device list in driver. 8751 * Based on the FW support, driver sends the HOST_DEVICE_LIST or combination 8752 * of PD_LIST/LD_LIST_QUERY DCMDs to get the device list. 8753 */ 8754 static 8755 int megasas_update_device_list(struct megasas_instance *instance, 8756 int event_type) 8757 { 8758 int dcmd_ret; 8759 8760 if (instance->enable_fw_dev_list) { 8761 return megasas_host_device_list_query(instance, false); 8762 } else { 8763 if (event_type & SCAN_PD_CHANNEL) { 8764 dcmd_ret = megasas_get_pd_list(instance); 8765 if (dcmd_ret != DCMD_SUCCESS) 8766 return dcmd_ret; 8767 } 8768 8769 if (event_type & SCAN_VD_CHANNEL) { 8770 if (!instance->requestorId || 8771 megasas_get_ld_vf_affiliation(instance, 0)) { 8772 return megasas_ld_list_query(instance, 8773 MR_LD_QUERY_TYPE_EXPOSED_TO_HOST); 8774 } 8775 } 8776 } 8777 return DCMD_SUCCESS; 8778 } 8779 8780 /** 8781 * megasas_add_remove_devices - Add/remove devices to SCSI mid-layer 8782 * after an AEN event notification 8783 * @instance: Adapter soft state 8784 * @scan_type: Indicates type of devices (PD/LD) to add 8785 * @return void 8786 */ 8787 static 8788 void megasas_add_remove_devices(struct megasas_instance *instance, 8789 int scan_type) 8790 { 8791 int i, j; 8792 u16 pd_index = 0; 8793 u16 ld_index = 0; 8794 u16 channel = 0, id = 0; 8795 struct Scsi_Host *host; 8796 struct scsi_device *sdev1; 8797 struct MR_HOST_DEVICE_LIST *targetid_list = NULL; 8798 struct MR_HOST_DEVICE_LIST_ENTRY *targetid_entry = NULL; 8799 8800 host = instance->host; 8801 8802 if (instance->enable_fw_dev_list) { 8803 targetid_list = instance->host_device_list_buf; 8804 for (i = 0; i < targetid_list->count; i++) { 8805 targetid_entry = &targetid_list->host_device_list[i]; 8806 if (targetid_entry->flags.u.bits.is_sys_pd) { 8807 channel = le16_to_cpu(targetid_entry->target_id) / 8808 MEGASAS_MAX_DEV_PER_CHANNEL; 8809 id = le16_to_cpu(targetid_entry->target_id) % 8810 MEGASAS_MAX_DEV_PER_CHANNEL; 8811 } else { 8812 channel = MEGASAS_MAX_PD_CHANNELS + 8813 (le16_to_cpu(targetid_entry->target_id) / 8814 MEGASAS_MAX_DEV_PER_CHANNEL); 8815 id = le16_to_cpu(targetid_entry->target_id) % 8816 MEGASAS_MAX_DEV_PER_CHANNEL; 8817 } 8818 sdev1 = scsi_device_lookup(host, channel, id, 0); 8819 if (!sdev1) { 8820 scsi_add_device(host, channel, id, 0); 8821 } else { 8822 scsi_device_put(sdev1); 8823 } 8824 } 8825 } 8826 8827 if (scan_type & SCAN_PD_CHANNEL) { 8828 for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) { 8829 for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) { 8830 pd_index = i * MEGASAS_MAX_DEV_PER_CHANNEL + j; 8831 sdev1 = scsi_device_lookup(host, i, j, 0); 8832 if (instance->pd_list[pd_index].driveState == 8833 MR_PD_STATE_SYSTEM) { 8834 if (!sdev1) 8835 scsi_add_device(host, i, j, 0); 8836 else 8837 scsi_device_put(sdev1); 8838 } else { 8839 if (sdev1) 8840 megasas_remove_scsi_device(sdev1); 8841 } 8842 } 8843 } 8844 } 8845 8846 if (scan_type & SCAN_VD_CHANNEL) { 8847 for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) { 8848 for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) { 8849 ld_index = (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j; 8850 sdev1 = scsi_device_lookup(host, 8851 MEGASAS_MAX_PD_CHANNELS + i, j, 0); 8852 if (instance->ld_ids[ld_index] != 0xff) { 8853 if (!sdev1) 8854 scsi_add_device(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0); 8855 else 8856 scsi_device_put(sdev1); 8857 } else { 8858 if (sdev1) 8859 megasas_remove_scsi_device(sdev1); 8860 } 8861 } 8862 } 8863 } 8864 8865 
} 8866 8867 static void 8868 megasas_aen_polling(struct work_struct *work) 8869 { 8870 struct megasas_aen_event *ev = 8871 container_of(work, struct megasas_aen_event, hotplug_work.work); 8872 struct megasas_instance *instance = ev->instance; 8873 union megasas_evt_class_locale class_locale; 8874 int event_type = 0; 8875 u32 seq_num; 8876 u16 ld_target_id; 8877 int error; 8878 u8 dcmd_ret = DCMD_SUCCESS; 8879 struct scsi_device *sdev1; 8880 8881 if (!instance) { 8882 printk(KERN_ERR "invalid instance!\n"); 8883 kfree(ev); 8884 return; 8885 } 8886 8887 /* Don't run the event workqueue thread if OCR is running */ 8888 mutex_lock(&instance->reset_mutex); 8889 8890 instance->ev = NULL; 8891 if (instance->evt_detail) { 8892 megasas_decode_evt(instance); 8893 8894 switch (le32_to_cpu(instance->evt_detail->code)) { 8895 8896 case MR_EVT_PD_INSERTED: 8897 case MR_EVT_PD_REMOVED: 8898 event_type = SCAN_PD_CHANNEL; 8899 break; 8900 8901 case MR_EVT_LD_OFFLINE: 8902 case MR_EVT_LD_DELETED: 8903 ld_target_id = instance->evt_detail->args.ld.target_id; 8904 sdev1 = scsi_device_lookup(instance->host, 8905 MEGASAS_MAX_PD_CHANNELS + 8906 (ld_target_id / MEGASAS_MAX_DEV_PER_CHANNEL), 8907 (ld_target_id % MEGASAS_MAX_DEV_PER_CHANNEL), 8908 0); 8909 if (sdev1) { 8910 mutex_unlock(&instance->reset_mutex); 8911 megasas_remove_scsi_device(sdev1); 8912 mutex_lock(&instance->reset_mutex); 8913 } 8914 8915 event_type = SCAN_VD_CHANNEL; 8916 break; 8917 case MR_EVT_LD_CREATED: 8918 event_type = SCAN_VD_CHANNEL; 8919 break; 8920 8921 case MR_EVT_CFG_CLEARED: 8922 case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED: 8923 case MR_EVT_FOREIGN_CFG_IMPORTED: 8924 case MR_EVT_LD_STATE_CHANGE: 8925 event_type = SCAN_PD_CHANNEL | SCAN_VD_CHANNEL; 8926 dev_info(&instance->pdev->dev, "scanning for scsi%d...\n", 8927 instance->host->host_no); 8928 break; 8929 8930 case MR_EVT_CTRL_PROP_CHANGED: 8931 dcmd_ret = megasas_get_ctrl_info(instance); 8932 if (dcmd_ret == DCMD_SUCCESS && 8933 instance->snapdump_wait_time) { 8934 megasas_get_snapdump_properties(instance); 8935 dev_info(&instance->pdev->dev, 8936 "Snap dump wait time\t: %d\n", 8937 instance->snapdump_wait_time); 8938 } 8939 break; 8940 default: 8941 event_type = 0; 8942 break; 8943 } 8944 } else { 8945 dev_err(&instance->pdev->dev, "invalid evt_detail!\n"); 8946 mutex_unlock(&instance->reset_mutex); 8947 kfree(ev); 8948 return; 8949 } 8950 8951 if (event_type) 8952 dcmd_ret = megasas_update_device_list(instance, event_type); 8953 8954 mutex_unlock(&instance->reset_mutex); 8955 8956 if (event_type && dcmd_ret == DCMD_SUCCESS) 8957 megasas_add_remove_devices(instance, event_type); 8958 8959 if (dcmd_ret == DCMD_SUCCESS) 8960 seq_num = le32_to_cpu(instance->evt_detail->seq_num) + 1; 8961 else 8962 seq_num = instance->last_seq_num; 8963 8964 /* Register AEN with FW for latest sequence number plus 1 */ 8965 class_locale.members.reserved = 0; 8966 class_locale.members.locale = MR_EVT_LOCALE_ALL; 8967 class_locale.members.class = MR_EVT_CLASS_DEBUG; 8968 8969 if (instance->aen_cmd != NULL) { 8970 kfree(ev); 8971 return; 8972 } 8973 8974 mutex_lock(&instance->reset_mutex); 8975 error = megasas_register_aen(instance, seq_num, 8976 class_locale.word); 8977 if (error) 8978 dev_err(&instance->pdev->dev, 8979 "register aen failed error %x\n", error); 8980 8981 mutex_unlock(&instance->reset_mutex); 8982 kfree(ev); 8983 } 8984 8985 /** 8986 * megasas_init - Driver load entry point 8987 */ 8988 static int __init megasas_init(void) 8989 { 8990 int rval; 8991 8992 /* 8993 * Booted in kdump kernel, 
minimize memory footprints by 8994 * disabling few features 8995 */ 8996 if (reset_devices) { 8997 msix_vectors = 1; 8998 rdpq_enable = 0; 8999 dual_qdepth_disable = 1; 9000 poll_queues = 0; 9001 } 9002 9003 /* 9004 * Announce driver version and other information 9005 */ 9006 pr_info("megasas: %s\n", MEGASAS_VERSION); 9007 9008 megasas_dbg_lvl = 0; 9009 support_poll_for_event = 2; 9010 support_device_change = 1; 9011 support_nvme_encapsulation = true; 9012 support_pci_lane_margining = true; 9013 9014 memset(&megasas_mgmt_info, 0, sizeof(megasas_mgmt_info)); 9015 9016 /* 9017 * Register character device node 9018 */ 9019 rval = register_chrdev(0, "megaraid_sas_ioctl", &megasas_mgmt_fops); 9020 9021 if (rval < 0) { 9022 printk(KERN_DEBUG "megasas: failed to open device node\n"); 9023 return rval; 9024 } 9025 9026 megasas_mgmt_majorno = rval; 9027 9028 megasas_init_debugfs(); 9029 9030 /* 9031 * Register ourselves as PCI hotplug module 9032 */ 9033 rval = pci_register_driver(&megasas_pci_driver); 9034 9035 if (rval) { 9036 printk(KERN_DEBUG "megasas: PCI hotplug registration failed \n"); 9037 goto err_pcidrv; 9038 } 9039 9040 if ((event_log_level < MFI_EVT_CLASS_DEBUG) || 9041 (event_log_level > MFI_EVT_CLASS_DEAD)) { 9042 pr_warn("megaraid_sas: provided event log level is out of range, setting it to default 2(CLASS_CRITICAL), permissible range is: -2 to 4\n"); 9043 event_log_level = MFI_EVT_CLASS_CRITICAL; 9044 } 9045 9046 rval = driver_create_file(&megasas_pci_driver.driver, 9047 &driver_attr_version); 9048 if (rval) 9049 goto err_dcf_attr_ver; 9050 9051 rval = driver_create_file(&megasas_pci_driver.driver, 9052 &driver_attr_release_date); 9053 if (rval) 9054 goto err_dcf_rel_date; 9055 9056 rval = driver_create_file(&megasas_pci_driver.driver, 9057 &driver_attr_support_poll_for_event); 9058 if (rval) 9059 goto err_dcf_support_poll_for_event; 9060 9061 rval = driver_create_file(&megasas_pci_driver.driver, 9062 &driver_attr_dbg_lvl); 9063 if (rval) 9064 goto err_dcf_dbg_lvl; 9065 rval = driver_create_file(&megasas_pci_driver.driver, 9066 &driver_attr_support_device_change); 9067 if (rval) 9068 goto err_dcf_support_device_change; 9069 9070 rval = driver_create_file(&megasas_pci_driver.driver, 9071 &driver_attr_support_nvme_encapsulation); 9072 if (rval) 9073 goto err_dcf_support_nvme_encapsulation; 9074 9075 rval = driver_create_file(&megasas_pci_driver.driver, 9076 &driver_attr_support_pci_lane_margining); 9077 if (rval) 9078 goto err_dcf_support_pci_lane_margining; 9079 9080 return rval; 9081 9082 err_dcf_support_pci_lane_margining: 9083 driver_remove_file(&megasas_pci_driver.driver, 9084 &driver_attr_support_nvme_encapsulation); 9085 9086 err_dcf_support_nvme_encapsulation: 9087 driver_remove_file(&megasas_pci_driver.driver, 9088 &driver_attr_support_device_change); 9089 9090 err_dcf_support_device_change: 9091 driver_remove_file(&megasas_pci_driver.driver, 9092 &driver_attr_dbg_lvl); 9093 err_dcf_dbg_lvl: 9094 driver_remove_file(&megasas_pci_driver.driver, 9095 &driver_attr_support_poll_for_event); 9096 err_dcf_support_poll_for_event: 9097 driver_remove_file(&megasas_pci_driver.driver, 9098 &driver_attr_release_date); 9099 err_dcf_rel_date: 9100 driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version); 9101 err_dcf_attr_ver: 9102 pci_unregister_driver(&megasas_pci_driver); 9103 err_pcidrv: 9104 megasas_exit_debugfs(); 9105 unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl"); 9106 return rval; 9107 } 9108 9109 /** 9110 * megasas_exit - Driver unload entry point 9111 
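 *
 * Reverses megasas_init(): removes the driver sysfs attributes, unregisters
 * the PCI driver, tears down debugfs and releases the "megaraid_sas_ioctl"
 * character device major.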
*/ 9112 static void __exit megasas_exit(void) 9113 { 9114 driver_remove_file(&megasas_pci_driver.driver, 9115 &driver_attr_dbg_lvl); 9116 driver_remove_file(&megasas_pci_driver.driver, 9117 &driver_attr_support_poll_for_event); 9118 driver_remove_file(&megasas_pci_driver.driver, 9119 &driver_attr_support_device_change); 9120 driver_remove_file(&megasas_pci_driver.driver, 9121 &driver_attr_release_date); 9122 driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version); 9123 driver_remove_file(&megasas_pci_driver.driver, 9124 &driver_attr_support_nvme_encapsulation); 9125 driver_remove_file(&megasas_pci_driver.driver, 9126 &driver_attr_support_pci_lane_margining); 9127 9128 pci_unregister_driver(&megasas_pci_driver); 9129 megasas_exit_debugfs(); 9130 unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl"); 9131 } 9132 9133 module_init(megasas_init); 9134 module_exit(megasas_exit); 9135
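/*
 * Illustrative user-space sketch (not part of the driver and not compiled
 * with it): a minimal caller of the MEGASAS_IOC_FIRMWARE path serviced above
 * by megasas_mgmt_ioctl_fw()/megasas_mgmt_fw_ioctl().  The device node path
 * is an assumption - a node must exist for the dynamically allocated major
 * registered in megasas_init() - and the driver's ioctl structure
 * definitions are assumed to be available to the application.  The caller
 * needs CAP_SYS_ADMIN (see megasas_mgmt_open()), only fields that the driver
 * itself dereferences are shown, and building the actual MFI frame contents
 * is left to the caller.
 *
 *	struct megasas_iocpacket ioc = {0};
 *	int fd = open("/dev/megaraid_sas_ioctl_node", O_RDWR); // assumed node
 *
 *	ioc.host_no = 0;	// SCSI host number of the target adapter
 *	ioc.sge_count = 0;	// no data buffers for this simple request
 *	ioc.sense_len = 0;	// no sense buffer requested
 *	// Fill ioc.frame.raw with an MFI frame (cmd, opcode, flags, ...);
 *	// sgl_off/sense_off must point into that frame when SGEs or sense
 *	// data are used.
 *	if (ioctl(fd, MEGASAS_IOC_FIRMWARE, &ioc) < 0)
 *		perror("MEGASAS_IOC_FIRMWARE");
 *	// On success the firmware status is copied back into
 *	// ioc.frame.hdr.cmd_status by megasas_mgmt_fw_ioctl().
 */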