xref: /linux/drivers/scsi/megaraid/megaraid_sas_fusion.c (revision 0883c2c06fb5bcf5b9e008270827e63c09a88c1e)
1 /*
2  *  Linux MegaRAID driver for SAS based RAID controllers
3  *
4  *  Copyright (c) 2009-2013  LSI Corporation
5  *  Copyright (c) 2013-2014  Avago Technologies
6  *
7  *  This program is free software; you can redistribute it and/or
8  *  modify it under the terms of the GNU General Public License
9  *  as published by the Free Software Foundation; either version 2
10  *  of the License, or (at your option) any later version.
11  *
12  *  This program is distributed in the hope that it will be useful,
13  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
14  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15  *  GNU General Public License for more details.
16  *
17  *  You should have received a copy of the GNU General Public License
18  *  along with this program.  If not, see <http://www.gnu.org/licenses/>.
19  *
20  *  FILE: megaraid_sas_fusion.c
21  *
22  *  Authors: Avago Technologies
23  *           Sumant Patro
24  *           Adam Radford
25  *           Kashyap Desai <kashyap.desai@avagotech.com>
26  *           Sumit Saxena <sumit.saxena@avagotech.com>
27  *
28  *  Send feedback to: megaraidlinux.pdl@avagotech.com
29  *
30  *  Mail to: Avago Technologies, 350 West Trimble Road, Building 90,
31  *  San Jose, California 95131
32  */
33 
34 #include <linux/kernel.h>
35 #include <linux/types.h>
36 #include <linux/pci.h>
37 #include <linux/list.h>
38 #include <linux/moduleparam.h>
39 #include <linux/module.h>
40 #include <linux/spinlock.h>
41 #include <linux/interrupt.h>
42 #include <linux/delay.h>
43 #include <linux/uio.h>
44 #include <linux/uaccess.h>
45 #include <linux/fs.h>
46 #include <linux/compat.h>
47 #include <linux/blkdev.h>
48 #include <linux/mutex.h>
49 #include <linux/poll.h>
50 
51 #include <scsi/scsi.h>
52 #include <scsi/scsi_cmnd.h>
53 #include <scsi/scsi_device.h>
54 #include <scsi/scsi_host.h>
55 #include <scsi/scsi_dbg.h>
56 #include <linux/dmi.h>
57 
58 #include "megaraid_sas_fusion.h"
59 #include "megaraid_sas.h"
60 
61 
62 extern void megasas_free_cmds(struct megasas_instance *instance);
63 extern struct megasas_cmd *megasas_get_cmd(struct megasas_instance
64 					   *instance);
65 extern void
66 megasas_complete_cmd(struct megasas_instance *instance,
67 		     struct megasas_cmd *cmd, u8 alt_status);
68 int
69 wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
70 	      int seconds);
71 
72 void
73 megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd);
74 int megasas_alloc_cmds(struct megasas_instance *instance);
75 int
76 megasas_clear_intr_fusion(struct megasas_register_set __iomem *regs);
77 int
78 megasas_issue_polled(struct megasas_instance *instance,
79 		     struct megasas_cmd *cmd);
80 void
81 megasas_check_and_restore_queue_depth(struct megasas_instance *instance);
82 
83 int megasas_transition_to_ready(struct megasas_instance *instance, int ocr);
84 void megaraid_sas_kill_hba(struct megasas_instance *instance);
85 
86 extern u32 megasas_dbg_lvl;
87 void megasas_sriov_heartbeat_handler(unsigned long instance_addr);
88 int megasas_sriov_start_heartbeat(struct megasas_instance *instance,
89 				  int initial);
90 void megasas_start_timer(struct megasas_instance *instance,
91 			struct timer_list *timer,
92 			 void *fn, unsigned long interval);
93 extern struct megasas_mgmt_info megasas_mgmt_info;
94 extern unsigned int resetwaittime;
95 extern unsigned int dual_qdepth_disable;
96 static void megasas_free_rdpq_fusion(struct megasas_instance *instance);
97 static void megasas_free_reply_fusion(struct megasas_instance *instance);
98 
99 
100 
101 /**
102  * megasas_enable_intr_fusion -	Enables interrupts
103  * @instance:			Adapter soft state
104  */
105 void
106 megasas_enable_intr_fusion(struct megasas_instance *instance)
107 {
108 	struct megasas_register_set __iomem *regs;
109 	regs = instance->reg_set;
110 
111 	instance->mask_interrupts = 0;
112 	/* For Thunderbolt/Invader also clear intr on enable */
113 	writel(~0, &regs->outbound_intr_status);
114 	readl(&regs->outbound_intr_status);
115 
116 	writel(~MFI_FUSION_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);
117 
118 	/* Dummy readl to force pci flush */
119 	readl(&regs->outbound_intr_mask);
120 }
121 
122 /**
123  * megasas_disable_intr_fusion - Disables interrupts
124  * @instance:			 Adapter soft state
125  */
126 void
127 megasas_disable_intr_fusion(struct megasas_instance *instance)
128 {
129 	u32 mask = 0xFFFFFFFF;
130 	u32 status;
131 	struct megasas_register_set __iomem *regs;
132 	regs = instance->reg_set;
133 	instance->mask_interrupts = 1;
134 
135 	writel(mask, &regs->outbound_intr_mask);
136 	/* Dummy readl to force pci flush */
137 	status = readl(&regs->outbound_intr_mask);
138 }
139 
140 int
141 megasas_clear_intr_fusion(struct megasas_register_set __iomem *regs)
142 {
143 	u32 status;
144 	/*
145 	 * Check if it is our interrupt
146 	 */
147 	status = readl(&regs->outbound_intr_status);
148 
149 	if (status & 1) {
150 		writel(status, &regs->outbound_intr_status);
151 		readl(&regs->outbound_intr_status);
152 		return 1;
153 	}
154 	if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK))
155 		return 0;
156 
157 	return 1;
158 }
159 
160 /**
161  * megasas_get_cmd_fusion -	Get a command from the free pool
162  * @instance:		Adapter soft state
163  * @blk_tag:		Block layer tag, used as the index into cmd_list
164  * Returns the blk_tag indexed MPT frame (megasas_cmd_fusion)
165  */
166 inline struct megasas_cmd_fusion *megasas_get_cmd_fusion(struct megasas_instance
167 						  *instance, u32 blk_tag)
168 {
169 	struct fusion_context *fusion;
170 
171 	fusion = instance->ctrl_context;
172 	return fusion->cmd_list[blk_tag];
173 }
174 
175 /**
176  * megasas_return_cmd_fusion -	Return a cmd to free command pool
177  * @instance:		Adapter soft state
178  * @cmd:		Command packet to be returned to free command pool
179  */
180 inline void megasas_return_cmd_fusion(struct megasas_instance *instance,
181 	struct megasas_cmd_fusion *cmd)
182 {
183 	cmd->scmd = NULL;
184 	memset(cmd->io_request, 0, sizeof(struct MPI2_RAID_SCSI_IO_REQUEST));
185 }
186 
187 /**
188  * megasas_fire_cmd_fusion -	Sends command to the FW
189  */
190 static void
191 megasas_fire_cmd_fusion(struct megasas_instance *instance,
192 		union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc)
193 {
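	/*
	 * The 64-bit request descriptor should reach the controller as a single
	 * write.  When writeq() is available, the low and high halves are
	 * combined and posted in one 64-bit MMIO access; otherwise the two
	 * 32-bit writes are serialized under hba_lock so descriptors from
	 * different CPUs cannot interleave.
	 */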
194 #if defined(writeq) && defined(CONFIG_64BIT)
195 	u64 req_data = (((u64)le32_to_cpu(req_desc->u.high) << 32) |
196 			le32_to_cpu(req_desc->u.low));
197 
198 	writeq(req_data, &instance->reg_set->inbound_low_queue_port);
199 #else
200 	unsigned long flags;
201 
202 	spin_lock_irqsave(&instance->hba_lock, flags);
203 	writel(le32_to_cpu(req_desc->u.low),
204 		&instance->reg_set->inbound_low_queue_port);
205 	writel(le32_to_cpu(req_desc->u.high),
206 		&instance->reg_set->inbound_high_queue_port);
207 	mmiowb();
208 	spin_unlock_irqrestore(&instance->hba_lock, flags);
209 #endif
210 }
211 
212 /**
213  * megasas_fusion_update_can_queue -	Do all Adapter Queue depth related calculations here
214  * @instance:							Adapter soft state
215  * @fw_boot_context:					Whether this function is called during probe or after OCR
216  *
217  * This function is only for fusion controllers.
218  * Update the host can_queue if the firmware has downgraded its maximum supported commands.
219  * The firmware upgrade case is skipped because the underlying firmware has
220  * more resources than are exposed to the OS.
221  *
222  */
223 static void
224 megasas_fusion_update_can_queue(struct megasas_instance *instance, int fw_boot_context)
225 {
226 	u16 cur_max_fw_cmds = 0;
227 	u16 ldio_threshold = 0;
228 	struct megasas_register_set __iomem *reg_set;
229 
230 	reg_set = instance->reg_set;
231 
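	/*
	 * scratch_pad_3 advertises the extended (dual) queue depth.  If it is
	 * zero or disabled via the dual_qdepth_disable module parameter, the
	 * legacy command count from the FW status register is used instead and
	 * the LDIO throttling threshold is left at zero.
	 */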
232 	cur_max_fw_cmds = readl(&instance->reg_set->outbound_scratch_pad_3) & 0x00FFFF;
233 
234 	if (dual_qdepth_disable || !cur_max_fw_cmds)
235 		cur_max_fw_cmds = instance->instancet->read_fw_status_reg(reg_set) & 0x00FFFF;
236 	else
237 		ldio_threshold =
238 			(instance->instancet->read_fw_status_reg(reg_set) & 0x00FFFF) - MEGASAS_FUSION_IOCTL_CMDS;
239 
240 	dev_info(&instance->pdev->dev,
241 			"Current firmware maximum commands: %d\t LDIO threshold: %d\n",
242 			cur_max_fw_cmds, ldio_threshold);
243 
244 	if (fw_boot_context == OCR_CONTEXT) {
245 		cur_max_fw_cmds = cur_max_fw_cmds - 1;
246 		if (cur_max_fw_cmds <= instance->max_fw_cmds) {
247 			instance->cur_can_queue =
248 				cur_max_fw_cmds - (MEGASAS_FUSION_INTERNAL_CMDS +
249 						MEGASAS_FUSION_IOCTL_CMDS);
250 			instance->host->can_queue = instance->cur_can_queue;
251 			instance->ldio_threshold = ldio_threshold;
252 		}
253 	} else {
254 		instance->max_fw_cmds = cur_max_fw_cmds;
255 		instance->ldio_threshold = ldio_threshold;
256 
257 		if (!instance->is_rdpq)
258 			instance->max_fw_cmds = min_t(u16, instance->max_fw_cmds, 1024);
259 
260 		if (reset_devices)
261 			instance->max_fw_cmds = min(instance->max_fw_cmds,
262 						(u16)MEGASAS_KDUMP_QUEUE_DEPTH);
263 		/*
264 		* Reduce the max supported cmds by 1. This is to ensure that the
265 		* reply_q_sz (1 more than the max cmd that driver may send)
266 		* does not exceed max cmds that the FW can support
267 		*/
268 		instance->max_fw_cmds = instance->max_fw_cmds-1;
269 
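		/*
		 * Worked example (hypothetical numbers): if the FW reports 1008
		 * commands, max_fw_cmds becomes 1007 after the decrement above,
		 * and with MEGASAS_FUSION_INTERNAL_CMDS + MEGASAS_FUSION_IOCTL_CMDS
		 * (5 + 3 = 8) reserved below, max_scsi_cmds/cur_can_queue end up
		 * at 999.
		 */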
270 		instance->max_scsi_cmds = instance->max_fw_cmds -
271 				(MEGASAS_FUSION_INTERNAL_CMDS +
272 				MEGASAS_FUSION_IOCTL_CMDS);
273 		instance->cur_can_queue = instance->max_scsi_cmds;
274 	}
275 }
276 /**
277  * megasas_free_cmds_fusion -	Free all the cmds in the free cmd pool
278  * @instance:		Adapter soft state
279  */
280 void
281 megasas_free_cmds_fusion(struct megasas_instance *instance)
282 {
283 	int i;
284 	struct fusion_context *fusion = instance->ctrl_context;
285 	struct megasas_cmd_fusion *cmd;
286 
287 	/* SG, Sense */
288 	for (i = 0; i < instance->max_fw_cmds; i++) {
289 		cmd = fusion->cmd_list[i];
290 		if (cmd) {
291 			if (cmd->sg_frame)
292 				pci_pool_free(fusion->sg_dma_pool, cmd->sg_frame,
293 				      cmd->sg_frame_phys_addr);
294 			if (cmd->sense)
295 				pci_pool_free(fusion->sense_dma_pool, cmd->sense,
296 				      cmd->sense_phys_addr);
297 		}
298 	}
299 
300 	if (fusion->sg_dma_pool) {
301 		pci_pool_destroy(fusion->sg_dma_pool);
302 		fusion->sg_dma_pool = NULL;
303 	}
304 	if (fusion->sense_dma_pool) {
305 		pci_pool_destroy(fusion->sense_dma_pool);
306 		fusion->sense_dma_pool = NULL;
307 	}
308 
309 
310 	/* Reply Frame, Desc*/
311 	if (instance->is_rdpq)
312 		megasas_free_rdpq_fusion(instance);
313 	else
314 		megasas_free_reply_fusion(instance);
315 
316 	/* Request Frame, Desc*/
317 	if (fusion->req_frames_desc)
318 		dma_free_coherent(&instance->pdev->dev,
319 			fusion->request_alloc_sz, fusion->req_frames_desc,
320 			fusion->req_frames_desc_phys);
321 	if (fusion->io_request_frames)
322 		pci_pool_free(fusion->io_request_frames_pool,
323 			fusion->io_request_frames,
324 			fusion->io_request_frames_phys);
325 	if (fusion->io_request_frames_pool) {
326 		pci_pool_destroy(fusion->io_request_frames_pool);
327 		fusion->io_request_frames_pool = NULL;
328 	}
329 
330 
331 	/* cmd_list */
332 	for (i = 0; i < instance->max_fw_cmds; i++)
333 		kfree(fusion->cmd_list[i]);
334 
335 	kfree(fusion->cmd_list);
336 }
337 
338 /**
339  * megasas_create_sg_sense_fusion -	Creates DMA pools for SG frames and sense buffers
340  * @instance:			Adapter soft state
341  *
342  */
343 static int megasas_create_sg_sense_fusion(struct megasas_instance *instance)
344 {
345 	int i;
346 	u32 max_cmd;
347 	struct fusion_context *fusion;
348 	struct megasas_cmd_fusion *cmd;
349 
350 	fusion = instance->ctrl_context;
351 	max_cmd = instance->max_fw_cmds;
352 
353 
354 	fusion->sg_dma_pool =
355 			pci_pool_create("mr_sg", instance->pdev,
356 				instance->max_chain_frame_sz, 4, 0);
357 	/* SCSI_SENSE_BUFFERSIZE  = 96 bytes */
358 	fusion->sense_dma_pool =
359 			pci_pool_create("mr_sense", instance->pdev,
360 				SCSI_SENSE_BUFFERSIZE, 64, 0);
361 
362 	if (!fusion->sense_dma_pool || !fusion->sg_dma_pool) {
363 		dev_err(&instance->pdev->dev,
364 			"Failed from %s %d\n",  __func__, __LINE__);
365 		return -ENOMEM;
366 	}
367 
368 	/*
369 	 * Allocate and attach a frame to each of the commands in cmd_list
370 	 */
371 	for (i = 0; i < max_cmd; i++) {
372 		cmd = fusion->cmd_list[i];
373 		cmd->sg_frame = pci_pool_alloc(fusion->sg_dma_pool,
374 					GFP_KERNEL, &cmd->sg_frame_phys_addr);
375 
376 		cmd->sense = pci_pool_alloc(fusion->sense_dma_pool,
377 					GFP_KERNEL, &cmd->sense_phys_addr);
378 		if (!cmd->sg_frame || !cmd->sense) {
379 			dev_err(&instance->pdev->dev,
380 				"Failed from %s %d\n",  __func__, __LINE__);
381 			return -ENOMEM;
382 		}
383 	}
384 	return 0;
385 }
386 
387 int
388 megasas_alloc_cmdlist_fusion(struct megasas_instance *instance)
389 {
390 	u32 max_cmd, i;
391 	struct fusion_context *fusion;
392 
393 	fusion = instance->ctrl_context;
394 
395 	max_cmd = instance->max_fw_cmds;
396 
397 	/*
398 	 * fusion->cmd_list is an array of struct megasas_cmd_fusion pointers.
399 	 * Allocate the dynamic array first and then allocate individual
400 	 * commands.
401 	 */
402 	fusion->cmd_list = kzalloc(sizeof(struct megasas_cmd_fusion *) * max_cmd,
403 						GFP_KERNEL);
404 	if (!fusion->cmd_list) {
405 		dev_err(&instance->pdev->dev,
406 			"Failed from %s %d\n",  __func__, __LINE__);
407 		return -ENOMEM;
408 	}
409 
410 	for (i = 0; i < max_cmd; i++) {
411 		fusion->cmd_list[i] = kzalloc(sizeof(struct megasas_cmd_fusion),
412 					      GFP_KERNEL);
413 		if (!fusion->cmd_list[i]) {
414 			dev_err(&instance->pdev->dev,
415 				"Failed from %s %d\n",  __func__, __LINE__);
416 			return -ENOMEM;
417 		}
418 	}
419 	return 0;
420 }
421 int
422 megasas_alloc_request_fusion(struct megasas_instance *instance)
423 {
424 	struct fusion_context *fusion;
425 
426 	fusion = instance->ctrl_context;
427 
428 	fusion->req_frames_desc =
429 		dma_alloc_coherent(&instance->pdev->dev,
430 			fusion->request_alloc_sz,
431 			&fusion->req_frames_desc_phys, GFP_KERNEL);
432 	if (!fusion->req_frames_desc) {
433 		dev_err(&instance->pdev->dev,
434 			"Failed from %s %d\n",  __func__, __LINE__);
435 		return -ENOMEM;
436 	}
437 
438 	fusion->io_request_frames_pool =
439 			pci_pool_create("mr_ioreq", instance->pdev,
440 				fusion->io_frames_alloc_sz, 16, 0);
441 
442 	if (!fusion->io_request_frames_pool) {
443 		dev_err(&instance->pdev->dev,
444 			"Failed from %s %d\n",  __func__, __LINE__);
445 		return -ENOMEM;
446 	}
447 
448 	fusion->io_request_frames =
449 			pci_pool_alloc(fusion->io_request_frames_pool,
450 				GFP_KERNEL, &fusion->io_request_frames_phys);
451 	if (!fusion->io_request_frames) {
452 		dev_err(&instance->pdev->dev,
453 			"Failed from %s %d\n",  __func__, __LINE__);
454 		return -ENOMEM;
455 	}
456 	return 0;
457 }
458 
459 int
460 megasas_alloc_reply_fusion(struct megasas_instance *instance)
461 {
462 	int i, count;
463 	struct fusion_context *fusion;
464 	union MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
465 	fusion = instance->ctrl_context;
466 
467 	count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
468 	fusion->reply_frames_desc_pool =
469 			pci_pool_create("mr_reply", instance->pdev,
470 				fusion->reply_alloc_sz * count, 16, 0);
471 
472 	if (!fusion->reply_frames_desc_pool) {
473 		dev_err(&instance->pdev->dev,
474 			"Failed from %s %d\n",  __func__, __LINE__);
475 		return -ENOMEM;
476 	}
477 
478 	fusion->reply_frames_desc[0] =
479 		pci_pool_alloc(fusion->reply_frames_desc_pool,
480 			GFP_KERNEL, &fusion->reply_frames_desc_phys[0]);
481 	if (!fusion->reply_frames_desc[0]) {
482 		dev_err(&instance->pdev->dev,
483 			"Failed from %s %d\n",  __func__, __LINE__);
484 		return -ENOMEM;
485 	}
486 	reply_desc = fusion->reply_frames_desc[0];
487 	for (i = 0; i < fusion->reply_q_depth * count; i++, reply_desc++)
488 		reply_desc->Words = cpu_to_le64(ULLONG_MAX);
489 
490 	/* This is not RDPQ mode, but the driver still populates the
491 	 * reply_frames_desc array so the same MSI-X index can be used in the ISR path.
492 	 */
493 	for (i = 0; i < (count - 1); i++)
494 		fusion->reply_frames_desc[i + 1] =
495 			fusion->reply_frames_desc[i] +
496 			(fusion->reply_alloc_sz)/sizeof(union MPI2_REPLY_DESCRIPTORS_UNION);
497 
498 	return 0;
499 }
500 
501 int
502 megasas_alloc_rdpq_fusion(struct megasas_instance *instance)
503 {
504 	int i, j, count;
505 	struct fusion_context *fusion;
506 	union MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
507 
508 	fusion = instance->ctrl_context;
509 
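	/*
	 * In RDPQ mode each MSI-X vector gets its own reply descriptor post
	 * queue.  The rdpq array allocated below records the base address of
	 * every per-vector queue and is handed to the FW in the IOC INIT
	 * message.
	 */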
510 	fusion->rdpq_virt = pci_alloc_consistent(instance->pdev,
511 				sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY) * MAX_MSIX_QUEUES_FUSION,
512 				&fusion->rdpq_phys);
513 	if (!fusion->rdpq_virt) {
514 		dev_err(&instance->pdev->dev,
515 			"Failed from %s %d\n",  __func__, __LINE__);
516 		return -ENOMEM;
517 	}
518 
519 	memset(fusion->rdpq_virt, 0,
520 			sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY) * MAX_MSIX_QUEUES_FUSION);
521 	count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
522 	fusion->reply_frames_desc_pool = pci_pool_create("mr_rdpq",
523 							 instance->pdev, fusion->reply_alloc_sz, 16, 0);
524 
525 	if (!fusion->reply_frames_desc_pool) {
526 		dev_err(&instance->pdev->dev,
527 			"Failed from %s %d\n",  __func__, __LINE__);
528 		return -ENOMEM;
529 	}
530 
531 	for (i = 0; i < count; i++) {
532 		fusion->reply_frames_desc[i] =
533 				pci_pool_alloc(fusion->reply_frames_desc_pool,
534 					GFP_KERNEL, &fusion->reply_frames_desc_phys[i]);
535 		if (!fusion->reply_frames_desc[i]) {
536 			dev_err(&instance->pdev->dev,
537 				"Failed from %s %d\n",  __func__, __LINE__);
538 			return -ENOMEM;
539 		}
540 
541 		fusion->rdpq_virt[i].RDPQBaseAddress =
542 			fusion->reply_frames_desc_phys[i];
543 
544 		reply_desc = fusion->reply_frames_desc[i];
545 		for (j = 0; j < fusion->reply_q_depth; j++, reply_desc++)
546 			reply_desc->Words = cpu_to_le64(ULLONG_MAX);
547 	}
548 	return 0;
549 }
550 
551 static void
552 megasas_free_rdpq_fusion(struct megasas_instance *instance) {
553 
554 	int i;
555 	struct fusion_context *fusion;
556 
557 	fusion = instance->ctrl_context;
558 
559 	for (i = 0; i < MAX_MSIX_QUEUES_FUSION; i++) {
560 		if (fusion->reply_frames_desc[i])
561 			pci_pool_free(fusion->reply_frames_desc_pool,
562 				fusion->reply_frames_desc[i],
563 				fusion->reply_frames_desc_phys[i]);
564 	}
565 
566 	if (fusion->reply_frames_desc_pool)
567 		pci_pool_destroy(fusion->reply_frames_desc_pool);
568 
569 	if (fusion->rdpq_virt)
570 		pci_free_consistent(instance->pdev,
571 			sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY) * MAX_MSIX_QUEUES_FUSION,
572 			fusion->rdpq_virt, fusion->rdpq_phys);
573 }
574 
575 static void
576 megasas_free_reply_fusion(struct megasas_instance *instance) {
577 
578 	struct fusion_context *fusion;
579 
580 	fusion = instance->ctrl_context;
581 
582 	if (fusion->reply_frames_desc[0])
583 		pci_pool_free(fusion->reply_frames_desc_pool,
584 			fusion->reply_frames_desc[0],
585 			fusion->reply_frames_desc_phys[0]);
586 
587 	if (fusion->reply_frames_desc_pool)
588 		pci_pool_destroy(fusion->reply_frames_desc_pool);
589 
590 }
591 
592 
593 /**
594  * megasas_alloc_cmds_fusion -	Allocates the command packets
595  * @instance:		Adapter soft state
596  *
597  *
598  * Each frame has a 32-bit field called context. This context is used to get
599  * back the megasas_cmd_fusion from the frame when the frame gets completed.
600  * In this driver, the 32-bit values are the indices into the cmd_list array.
601  * This array is used only to look up the megasas_cmd_fusion given the context.
602  * The free commands themselves are maintained in a linked list called cmd_pool.
603  *
604  * Commands are built in the io_request and sg_frame members of the
605  * megasas_cmd_fusion. The context field is used to get a request descriptor
606  * and is used as the SMID of the cmd.
607  * SMID values range from 1 to max_fw_cmds.
608  */
609 int
610 megasas_alloc_cmds_fusion(struct megasas_instance *instance)
611 {
612 	int i;
613 	struct fusion_context *fusion;
614 	struct megasas_cmd_fusion *cmd;
615 	u32 offset;
616 	dma_addr_t io_req_base_phys;
617 	u8 *io_req_base;
618 
619 
620 	fusion = instance->ctrl_context;
621 
622 	if (megasas_alloc_cmdlist_fusion(instance))
623 		goto fail_exit;
624 
625 	if (megasas_alloc_request_fusion(instance))
626 		goto fail_exit;
627 
628 	if (instance->is_rdpq) {
629 		if (megasas_alloc_rdpq_fusion(instance))
630 			goto fail_exit;
631 	} else
632 		if (megasas_alloc_reply_fusion(instance))
633 			goto fail_exit;
634 
635 
636 	/* The first 256 bytes (SMID 0) are not used. Don't add them to the cmd list */
637 	io_req_base = fusion->io_request_frames + MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
638 	io_req_base_phys = fusion->io_request_frames_phys + MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
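	/*
	 * io_req_base starts at the frame for SMID 1, so command i (whose SMID
	 * is i + 1) uses the 256-byte IO frame at offset
	 * i * MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE from io_req_base.
	 */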
639 
640 	/*
641 	 * Add all the commands to command pool (fusion->cmd_pool)
642 	 */
643 
644 	/* SMID 0 is reserved. Set SMID/index from 1 */
645 	for (i = 0; i < instance->max_fw_cmds; i++) {
646 		cmd = fusion->cmd_list[i];
647 		offset = MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * i;
648 		memset(cmd, 0, sizeof(struct megasas_cmd_fusion));
649 		cmd->index = i + 1;
650 		cmd->scmd = NULL;
651 		cmd->sync_cmd_idx = (i >= instance->max_scsi_cmds) ?
652 				(i - instance->max_scsi_cmds) :
653 				(u32)ULONG_MAX; /* Set to Invalid */
654 		cmd->instance = instance;
655 		cmd->io_request =
656 			(struct MPI2_RAID_SCSI_IO_REQUEST *)
657 		  (io_req_base + offset);
658 		memset(cmd->io_request, 0,
659 		       sizeof(struct MPI2_RAID_SCSI_IO_REQUEST));
660 		cmd->io_request_phys_addr = io_req_base_phys + offset;
661 	}
662 
663 	if (megasas_create_sg_sense_fusion(instance))
664 		goto fail_exit;
665 
666 	return 0;
667 
668 fail_exit:
669 	megasas_free_cmds_fusion(instance);
670 	return -ENOMEM;
671 }
672 
673 /**
674  * wait_and_poll -	Polls for the completion of a command
675  * @instance:			Adapter soft state
676  * @cmd:			Command packet whose completion is polled for
677  * @seconds:			Maximum time to poll, in seconds
678  * For polling, MFI requires the cmd_status to be set to 0xFF before posting.
679  */
680 int
681 wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
682 	int seconds)
683 {
684 	int i;
685 	struct megasas_header *frame_hdr = &cmd->frame->hdr;
686 	struct fusion_context *fusion;
687 
688 	u32 msecs = seconds * 1000;
689 
690 	fusion = instance->ctrl_context;
691 	/*
692 	 * Wait for cmd_status to change
693 	 */
694 	for (i = 0; (i < msecs) && (frame_hdr->cmd_status == 0xff); i += 20) {
695 		rmb();
696 		msleep(20);
697 	}
698 
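	/*
	 * cmd_status still at MFI_STAT_INVALID_STATUS (0xFF) here means the FW
	 * never updated the frame within the timeout.
	 */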
699 	if (frame_hdr->cmd_status == MFI_STAT_INVALID_STATUS)
700 		return DCMD_TIMEOUT;
701 	else if (frame_hdr->cmd_status == MFI_STAT_OK)
702 		return DCMD_SUCCESS;
703 	else
704 		return DCMD_FAILED;
705 }
706 
707 /**
708  * megasas_ioc_init_fusion -	Initializes the FW
709  * @instance:		Adapter soft state
710  *
711  * Issues the IOC Init cmd
712  */
713 int
714 megasas_ioc_init_fusion(struct megasas_instance *instance)
715 {
716 	struct megasas_init_frame *init_frame;
717 	struct MPI2_IOC_INIT_REQUEST *IOCInitMessage = NULL;
718 	dma_addr_t	ioc_init_handle;
719 	struct megasas_cmd *cmd;
720 	u8 ret, cur_rdpq_mode;
721 	struct fusion_context *fusion;
722 	union MEGASAS_REQUEST_DESCRIPTOR_UNION req_desc;
723 	int i;
724 	struct megasas_header *frame_hdr;
725 	const char *sys_info;
726 	MFI_CAPABILITIES *drv_ops;
727 	u32 scratch_pad_2;
728 
729 	fusion = instance->ctrl_context;
730 
731 	cmd = megasas_get_cmd(instance);
732 
733 	if (!cmd) {
734 		dev_err(&instance->pdev->dev, "Could not allocate cmd for INIT Frame\n");
735 		ret = 1;
736 		goto fail_get_cmd;
737 	}
738 
739 	scratch_pad_2 = readl
740 		(&instance->reg_set->outbound_scratch_pad_2);
741 
742 	cur_rdpq_mode = (scratch_pad_2 & MR_RDPQ_MODE_OFFSET) ? 1 : 0;
743 
744 	if (instance->is_rdpq && !cur_rdpq_mode) {
745 		dev_err(&instance->pdev->dev, "Firmware downgrade *NOT SUPPORTED*"
746 			" from RDPQ mode to non RDPQ mode\n");
747 		ret = 1;
748 		goto fail_fw_init;
749 	}
750 
751 	IOCInitMessage =
752 	  dma_alloc_coherent(&instance->pdev->dev,
753 			     sizeof(struct MPI2_IOC_INIT_REQUEST),
754 			     &ioc_init_handle, GFP_KERNEL);
755 
756 	if (!IOCInitMessage) {
757 		dev_err(&instance->pdev->dev, "Could not allocate memory for "
758 		       "IOCInitMessage\n");
759 		ret = 1;
760 		goto fail_fw_init;
761 	}
762 
763 	memset(IOCInitMessage, 0, sizeof(struct MPI2_IOC_INIT_REQUEST));
764 
765 	IOCInitMessage->Function = MPI2_FUNCTION_IOC_INIT;
766 	IOCInitMessage->WhoInit	= MPI2_WHOINIT_HOST_DRIVER;
767 	IOCInitMessage->MsgVersion = cpu_to_le16(MPI2_VERSION);
768 	IOCInitMessage->HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION);
769 	IOCInitMessage->SystemRequestFrameSize = cpu_to_le16(MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4);
770 
771 	IOCInitMessage->ReplyDescriptorPostQueueDepth = cpu_to_le16(fusion->reply_q_depth);
772 	IOCInitMessage->ReplyDescriptorPostQueueAddress = instance->is_rdpq ?
773 			cpu_to_le64(fusion->rdpq_phys) :
774 			cpu_to_le64(fusion->reply_frames_desc_phys[0]);
775 	IOCInitMessage->MsgFlags = instance->is_rdpq ?
776 			MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE : 0;
777 	IOCInitMessage->SystemRequestFrameBaseAddress = cpu_to_le64(fusion->io_request_frames_phys);
778 	IOCInitMessage->HostMSIxVectors = instance->msix_vectors;
779 	init_frame = (struct megasas_init_frame *)cmd->frame;
780 	memset(init_frame, 0, MEGAMFI_FRAME_SIZE);
781 
782 	frame_hdr = &cmd->frame->hdr;
783 	frame_hdr->cmd_status = 0xFF;
784 	frame_hdr->flags = cpu_to_le16(
785 		le16_to_cpu(frame_hdr->flags) |
786 		MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);
787 
788 	init_frame->cmd	= MFI_CMD_INIT;
789 	init_frame->cmd_status = 0xFF;
790 
791 	drv_ops = (MFI_CAPABILITIES *) &(init_frame->driver_operations);
792 
793 	/* driver support Extended MSIX */
794 	if (fusion->adapter_type == INVADER_SERIES)
795 		drv_ops->mfi_capabilities.support_additional_msix = 1;
796 	/* driver supports HA / Remote LUN over Fast Path interface */
797 	drv_ops->mfi_capabilities.support_fp_remote_lun = 1;
798 
799 	drv_ops->mfi_capabilities.support_max_255lds = 1;
800 	drv_ops->mfi_capabilities.support_ndrive_r1_lb = 1;
801 	drv_ops->mfi_capabilities.security_protocol_cmds_fw = 1;
802 
803 	if (instance->max_chain_frame_sz > MEGASAS_CHAIN_FRAME_SZ_MIN)
804 		drv_ops->mfi_capabilities.support_ext_io_size = 1;
805 
806 	drv_ops->mfi_capabilities.support_fp_rlbypass = 1;
807 	if (!dual_qdepth_disable)
808 		drv_ops->mfi_capabilities.support_ext_queue_depth = 1;
809 
810 	drv_ops->mfi_capabilities.support_qd_throttling = 1;
811 	/* Convert capability to LE32 */
812 	cpu_to_le32s((u32 *)&init_frame->driver_operations.mfi_capabilities);
813 
814 	sys_info = dmi_get_system_info(DMI_PRODUCT_UUID);
815 	if (instance->system_info_buf && sys_info) {
816 		memcpy(instance->system_info_buf->systemId, sys_info,
817 			strlen(sys_info) > 64 ? 64 : strlen(sys_info));
818 		instance->system_info_buf->systemIdLength =
819 			strlen(sys_info) > 64 ? 64 : strlen(sys_info);
820 		init_frame->system_info_lo = instance->system_info_h;
821 		init_frame->system_info_hi = 0;
822 	}
823 
824 	init_frame->queue_info_new_phys_addr_hi =
825 		cpu_to_le32(upper_32_bits(ioc_init_handle));
826 	init_frame->queue_info_new_phys_addr_lo =
827 		cpu_to_le32(lower_32_bits(ioc_init_handle));
828 	init_frame->data_xfer_len = cpu_to_le32(sizeof(struct MPI2_IOC_INIT_REQUEST));
829 
830 	req_desc.u.low = cpu_to_le32(lower_32_bits(cmd->frame_phys_addr));
831 	req_desc.u.high = cpu_to_le32(upper_32_bits(cmd->frame_phys_addr));
832 	req_desc.MFAIo.RequestFlags =
833 		(MEGASAS_REQ_DESCRIPT_FLAGS_MFA <<
834 		MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
835 
836 	/*
837 	 * disable the intr before firing the init frame
838 	 */
839 	instance->instancet->disable_intr(instance);
840 
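	/*
	 * Wait up to 10 seconds, polling every 20 ms, for the FW to clear the
	 * doorbell before posting the IOC INIT request descriptor.
	 */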
841 	for (i = 0; i < (10 * 1000); i += 20) {
842 		if (readl(&instance->reg_set->doorbell) & 1)
843 			msleep(20);
844 		else
845 			break;
846 	}
847 
848 	megasas_fire_cmd_fusion(instance, &req_desc);
849 
850 	wait_and_poll(instance, cmd, MFI_POLL_TIMEOUT_SECS);
851 
852 	frame_hdr = &cmd->frame->hdr;
853 	if (frame_hdr->cmd_status != 0) {
854 		ret = 1;
855 		goto fail_fw_init;
856 	}
857 	dev_info(&instance->pdev->dev, "Init cmd success\n");
858 
859 	ret = 0;
860 
861 fail_fw_init:
862 	megasas_return_cmd(instance, cmd);
863 	if (IOCInitMessage)
864 		dma_free_coherent(&instance->pdev->dev,
865 				  sizeof(struct MPI2_IOC_INIT_REQUEST),
866 				  IOCInitMessage, ioc_init_handle);
867 fail_get_cmd:
868 	return ret;
869 }
870 
871 /**
872  * megasas_sync_pd_seq_num -	JBOD SEQ MAP
873  * @instance:		Adapter soft state
874  * @pend:		set to 1 for a pended JBOD map request.
875  *
876  * Issue the JBOD sequence map command to the firmware. For a pended
877  * command, issue it and return immediately. For the first JBOD map
878  * request, issue the command and wait for its completion.
879  */
880 int
881 megasas_sync_pd_seq_num(struct megasas_instance *instance, bool pend) {
882 	int ret = 0;
883 	u32 pd_seq_map_sz;
884 	struct megasas_cmd *cmd;
885 	struct megasas_dcmd_frame *dcmd;
886 	struct fusion_context *fusion = instance->ctrl_context;
887 	struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
888 	dma_addr_t pd_seq_h;
889 
890 	pd_sync = (void *)fusion->pd_seq_sync[(instance->pd_seq_map_id & 1)];
891 	pd_seq_h = fusion->pd_seq_phys[(instance->pd_seq_map_id & 1)];
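	/*
	 * Two sequence-number buffers are used in ping-pong fashion; the low
	 * bit of pd_seq_map_id selects the current one, and pd_seq_map_id is
	 * only incremented when the DCMD completes successfully.
	 */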
892 	pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
893 			(sizeof(struct MR_PD_CFG_SEQ) *
894 			(MAX_PHYSICAL_DEVICES - 1));
895 
896 	cmd = megasas_get_cmd(instance);
897 	if (!cmd) {
898 		dev_err(&instance->pdev->dev,
899 			"Could not get mfi cmd. Fail from %s %d\n",
900 			__func__, __LINE__);
901 		return -ENOMEM;
902 	}
903 
904 	dcmd = &cmd->frame->dcmd;
905 
906 	memset(pd_sync, 0, pd_seq_map_sz);
907 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
908 	dcmd->cmd = MFI_CMD_DCMD;
909 	dcmd->cmd_status = 0xFF;
910 	dcmd->sge_count = 1;
911 	dcmd->timeout = 0;
912 	dcmd->pad_0 = 0;
913 	dcmd->data_xfer_len = cpu_to_le32(pd_seq_map_sz);
914 	dcmd->opcode = cpu_to_le32(MR_DCMD_SYSTEM_PD_MAP_GET_INFO);
915 	dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(pd_seq_h);
916 	dcmd->sgl.sge32[0].length = cpu_to_le32(pd_seq_map_sz);
917 
918 	if (pend) {
919 		dcmd->mbox.b[0] = MEGASAS_DCMD_MBOX_PEND_FLAG;
920 		dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_WRITE);
921 		instance->jbod_seq_cmd = cmd;
922 		instance->instancet->issue_dcmd(instance, cmd);
923 		return 0;
924 	}
925 
926 	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
927 
928 	/* The code below is only for a non-pended DCMD */
929 	if (instance->ctrl_context && !instance->mask_interrupts)
930 		ret = megasas_issue_blocked_cmd(instance, cmd,
931 			MFI_IO_TIMEOUT_SECS);
932 	else
933 		ret = megasas_issue_polled(instance, cmd);
934 
935 	if (le32_to_cpu(pd_sync->count) > MAX_PHYSICAL_DEVICES) {
936 		dev_warn(&instance->pdev->dev,
937 			"driver supports max %d JBOD, but FW reports %d\n",
938 			MAX_PHYSICAL_DEVICES, le32_to_cpu(pd_sync->count));
939 		ret = -EINVAL;
940 	}
941 
942 	if (ret == DCMD_TIMEOUT && instance->ctrl_context)
943 		megaraid_sas_kill_hba(instance);
944 
945 	if (ret == DCMD_SUCCESS)
946 		instance->pd_seq_map_id++;
947 
948 	megasas_return_cmd(instance, cmd);
949 	return ret;
950 }
951 
952 /*
953  * megasas_get_ld_map_info -	Returns FW's ld_map structure
954  * @instance:				Adapter soft state
955  *
956  * Issues an internal command (DCMD) to get the FW's LD map (RAID map)
957  * structure.  The driver validates this map and uses it to decide
958  * whether fast-path I/O can be used.
959  * dcmd.mbox value setting for MR_DCMD_LD_MAP_GET_INFO
960  * dcmd.mbox.b[0]	- number of LDs being sync'd
961  * dcmd.mbox.b[1]	- 0 - complete command immediately.
962  *			- 1 - pend till config change
963  * dcmd.mbox.b[2]	- 0 - supports max 64 lds and uses legacy MR_FW_RAID_MAP
964  *			- 1 - supports max MAX_LOGICAL_DRIVES_EXT lds and
965  *				uses extended struct MR_FW_RAID_MAP_EXT
966  */
967 static int
968 megasas_get_ld_map_info(struct megasas_instance *instance)
969 {
970 	int ret = 0;
971 	struct megasas_cmd *cmd;
972 	struct megasas_dcmd_frame *dcmd;
973 	void *ci;
974 	dma_addr_t ci_h = 0;
975 	u32 size_map_info;
976 	struct fusion_context *fusion;
977 
978 	cmd = megasas_get_cmd(instance);
979 
980 	if (!cmd) {
981 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get cmd for map info\n");
982 		return -ENOMEM;
983 	}
984 
985 	fusion = instance->ctrl_context;
986 
987 	if (!fusion) {
988 		megasas_return_cmd(instance, cmd);
989 		return -ENXIO;
990 	}
991 
992 	dcmd = &cmd->frame->dcmd;
993 
994 	size_map_info = fusion->current_map_sz;
995 
996 	ci = (void *) fusion->ld_map[(instance->map_id & 1)];
997 	ci_h = fusion->ld_map_phys[(instance->map_id & 1)];
998 
999 	if (!ci) {
1000 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc mem for ld_map_info\n");
1001 		megasas_return_cmd(instance, cmd);
1002 		return -ENOMEM;
1003 	}
1004 
1005 	memset(ci, 0, fusion->max_map_sz);
1006 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
1007 #if VD_EXT_DEBUG
1008 	dev_dbg(&instance->pdev->dev,
1009 		"%s sending MR_DCMD_LD_MAP_GET_INFO with size %d\n",
1010 		__func__, cpu_to_le32(size_map_info));
1011 #endif
1012 	dcmd->cmd = MFI_CMD_DCMD;
1013 	dcmd->cmd_status = 0xFF;
1014 	dcmd->sge_count = 1;
1015 	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
1016 	dcmd->timeout = 0;
1017 	dcmd->pad_0 = 0;
1018 	dcmd->data_xfer_len = cpu_to_le32(size_map_info);
1019 	dcmd->opcode = cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO);
1020 	dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
1021 	dcmd->sgl.sge32[0].length = cpu_to_le32(size_map_info);
1022 
1023 	if (instance->ctrl_context && !instance->mask_interrupts)
1024 		ret = megasas_issue_blocked_cmd(instance, cmd,
1025 			MFI_IO_TIMEOUT_SECS);
1026 	else
1027 		ret = megasas_issue_polled(instance, cmd);
1028 
1029 	if (ret == DCMD_TIMEOUT && instance->ctrl_context)
1030 		megaraid_sas_kill_hba(instance);
1031 
1032 	megasas_return_cmd(instance, cmd);
1033 
1034 	return ret;
1035 }
1036 
1037 u8
1038 megasas_get_map_info(struct megasas_instance *instance)
1039 {
1040 	struct fusion_context *fusion = instance->ctrl_context;
1041 
1042 	fusion->fast_path_io = 0;
1043 	if (!megasas_get_ld_map_info(instance)) {
1044 		if (MR_ValidateMapInfo(instance)) {
1045 			fusion->fast_path_io = 1;
1046 			return 0;
1047 		}
1048 	}
1049 	return 1;
1050 }
1051 
1052 /*
1053  * megasas_sync_map_info -	Syncs the LD target map with the FW
1054  * @instance:				Adapter soft state
1055  *
1056  * Issues a pended internal command (DCMD) that writes the per-LD target id
1057  * and sequence number sync data to the FW.  The FW completes this command
1058  * when the RAID map changes, which lets the driver refresh its map.
1059  */
1060 int
1061 megasas_sync_map_info(struct megasas_instance *instance)
1062 {
1063 	int ret = 0, i;
1064 	struct megasas_cmd *cmd;
1065 	struct megasas_dcmd_frame *dcmd;
1066 	u32 size_sync_info, num_lds;
1067 	struct fusion_context *fusion;
1068 	struct MR_LD_TARGET_SYNC *ci = NULL;
1069 	struct MR_DRV_RAID_MAP_ALL *map;
1070 	struct MR_LD_RAID  *raid;
1071 	struct MR_LD_TARGET_SYNC *ld_sync;
1072 	dma_addr_t ci_h = 0;
1073 	u32 size_map_info;
1074 
1075 	cmd = megasas_get_cmd(instance);
1076 
1077 	if (!cmd) {
1078 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get cmd for sync info\n");
1079 		return -ENOMEM;
1080 	}
1081 
1082 	fusion = instance->ctrl_context;
1083 
1084 	if (!fusion) {
1085 		megasas_return_cmd(instance, cmd);
1086 		return 1;
1087 	}
1088 
1089 	map = fusion->ld_drv_map[instance->map_id & 1];
1090 
1091 	num_lds = le16_to_cpu(map->raidMap.ldCount);
1092 
1093 	dcmd = &cmd->frame->dcmd;
1094 
1095 	size_sync_info = sizeof(struct MR_LD_TARGET_SYNC) *num_lds;
1096 
1097 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
1098 
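	/*
	 * The ld_map buffers ping-pong on map_id; the sync data is built in
	 * the buffer selected by (map_id - 1) & 1, i.e. the one not currently
	 * holding the active FW map.
	 */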
1099 	ci = (struct MR_LD_TARGET_SYNC *)
1100 	  fusion->ld_map[(instance->map_id - 1) & 1];
1101 	memset(ci, 0, fusion->max_map_sz);
1102 
1103 	ci_h = fusion->ld_map_phys[(instance->map_id - 1) & 1];
1104 
1105 	ld_sync = (struct MR_LD_TARGET_SYNC *)ci;
1106 
1107 	for (i = 0; i < num_lds; i++, ld_sync++) {
1108 		raid = MR_LdRaidGet(i, map);
1109 		ld_sync->targetId = MR_GetLDTgtId(i, map);
1110 		ld_sync->seqNum = raid->seqNum;
1111 	}
1112 
1113 	size_map_info = fusion->current_map_sz;
1114 
1115 	dcmd->cmd = MFI_CMD_DCMD;
1116 	dcmd->cmd_status = 0xFF;
1117 	dcmd->sge_count = 1;
1118 	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_WRITE);
1119 	dcmd->timeout = 0;
1120 	dcmd->pad_0 = 0;
1121 	dcmd->data_xfer_len = cpu_to_le32(size_map_info);
1122 	dcmd->mbox.b[0] = num_lds;
1123 	dcmd->mbox.b[1] = MEGASAS_DCMD_MBOX_PEND_FLAG;
1124 	dcmd->opcode = cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO);
1125 	dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
1126 	dcmd->sgl.sge32[0].length = cpu_to_le32(size_map_info);
1127 
1128 	instance->map_update_cmd = cmd;
1129 
1130 	instance->instancet->issue_dcmd(instance, cmd);
1131 
1132 	return ret;
1133 }
1134 
1135 /*
1136  * megasas_display_intel_branding - Display branding string
1137  * @instance: per adapter object
1138  *
1139  * Return nothing.
1140  */
1141 static void
1142 megasas_display_intel_branding(struct megasas_instance *instance)
1143 {
1144 	if (instance->pdev->subsystem_vendor != PCI_VENDOR_ID_INTEL)
1145 		return;
1146 
1147 	switch (instance->pdev->device) {
1148 	case PCI_DEVICE_ID_LSI_INVADER:
1149 		switch (instance->pdev->subsystem_device) {
1150 		case MEGARAID_INTEL_RS3DC080_SSDID:
1151 			dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
1152 				instance->host->host_no,
1153 				MEGARAID_INTEL_RS3DC080_BRANDING);
1154 			break;
1155 		case MEGARAID_INTEL_RS3DC040_SSDID:
1156 			dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
1157 				instance->host->host_no,
1158 				MEGARAID_INTEL_RS3DC040_BRANDING);
1159 			break;
1160 		case MEGARAID_INTEL_RS3SC008_SSDID:
1161 			dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
1162 				instance->host->host_no,
1163 				MEGARAID_INTEL_RS3SC008_BRANDING);
1164 			break;
1165 		case MEGARAID_INTEL_RS3MC044_SSDID:
1166 			dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
1167 				instance->host->host_no,
1168 				MEGARAID_INTEL_RS3MC044_BRANDING);
1169 			break;
1170 		default:
1171 			break;
1172 		}
1173 		break;
1174 	case PCI_DEVICE_ID_LSI_FURY:
1175 		switch (instance->pdev->subsystem_device) {
1176 		case MEGARAID_INTEL_RS3WC080_SSDID:
1177 			dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
1178 				instance->host->host_no,
1179 				MEGARAID_INTEL_RS3WC080_BRANDING);
1180 			break;
1181 		case MEGARAID_INTEL_RS3WC040_SSDID:
1182 			dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
1183 				instance->host->host_no,
1184 				MEGARAID_INTEL_RS3WC040_BRANDING);
1185 			break;
1186 		default:
1187 			break;
1188 		}
1189 		break;
1190 	case PCI_DEVICE_ID_LSI_CUTLASS_52:
1191 	case PCI_DEVICE_ID_LSI_CUTLASS_53:
1192 		switch (instance->pdev->subsystem_device) {
1193 		case MEGARAID_INTEL_RMS3BC160_SSDID:
1194 			dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
1195 				instance->host->host_no,
1196 				MEGARAID_INTEL_RMS3BC160_BRANDING);
1197 			break;
1198 		default:
1199 			break;
1200 		}
1201 		break;
1202 	default:
1203 		break;
1204 	}
1205 }
1206 
1207 /**
1208  * megasas_init_adapter_fusion -	Initializes the FW
1209  * @instance:		Adapter soft state
1210  *
1211  * This is the main function for initializing firmware.
1212  */
1213 u32
1214 megasas_init_adapter_fusion(struct megasas_instance *instance)
1215 {
1216 	struct megasas_register_set __iomem *reg_set;
1217 	struct fusion_context *fusion;
1218 	u32 max_cmd, scratch_pad_2;
1219 	int i = 0, count;
1220 
1221 	fusion = instance->ctrl_context;
1222 
1223 	reg_set = instance->reg_set;
1224 
1225 	megasas_fusion_update_can_queue(instance, PROBE_CONTEXT);
1226 
1227 	/*
1228 	 * Reduce the max supported cmds by 1. This is to ensure that the
1229 	 * reply_q_sz (1 more than the max cmd that driver may send)
1230 	 * does not exceed max cmds that the FW can support
1231 	 */
1232 	instance->max_fw_cmds = instance->max_fw_cmds-1;
1233 
1234 	/*
1235 	 * Only the driver's internal DCMDs and IOCTL DCMDs need to have MFI frames
1236 	 */
1237 	instance->max_mfi_cmds =
1238 		MEGASAS_FUSION_INTERNAL_CMDS + MEGASAS_FUSION_IOCTL_CMDS;
1239 
1240 	max_cmd = instance->max_fw_cmds;
1241 
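	/*
	 * reply_q_depth is (max_cmd + 1) rounded up to a multiple of 16 and
	 * then doubled.  For example (hypothetical count), max_cmd = 1007
	 * gives ((1008 + 15)/16) * 16 = 1008, so reply_q_depth = 2016.
	 */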
1242 	fusion->reply_q_depth = 2 * (((max_cmd + 1 + 15)/16)*16);
1243 
1244 	fusion->request_alloc_sz =
1245 		sizeof(union MEGASAS_REQUEST_DESCRIPTOR_UNION) *max_cmd;
1246 	fusion->reply_alloc_sz = sizeof(union MPI2_REPLY_DESCRIPTORS_UNION)
1247 		*(fusion->reply_q_depth);
1248 	fusion->io_frames_alloc_sz = MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE +
1249 		(MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE *
1250 		 (max_cmd + 1)); /* Extra 1 for SMID 0 */
1251 
1252 	scratch_pad_2 = readl(&instance->reg_set->outbound_scratch_pad_2);
1253 	/* If scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK is set, the
1254 	 * firmware supports an extended IO chain frame which is 4 times larger
1255 	 * than the legacy firmware's.
1256 	 * Legacy Firmware - Frame size is (8 * 128) = 1K
1257 	 * 1M IO Firmware  - Frame size is (8 * 128 * 4)  = 4K
1258 	 */
1259 	if (scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK)
1260 		instance->max_chain_frame_sz =
1261 			((scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_MASK) >>
1262 			MEGASAS_MAX_CHAIN_SHIFT) * MEGASAS_1MB_IO;
1263 	else
1264 		instance->max_chain_frame_sz =
1265 			((scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_MASK) >>
1266 			MEGASAS_MAX_CHAIN_SHIFT) * MEGASAS_256K_IO;
1267 
1268 	if (instance->max_chain_frame_sz < MEGASAS_CHAIN_FRAME_SZ_MIN) {
1269 		dev_warn(&instance->pdev->dev, "frame size %d invalid, fall back to legacy max frame size %d\n",
1270 			instance->max_chain_frame_sz,
1271 			MEGASAS_CHAIN_FRAME_SZ_MIN);
1272 		instance->max_chain_frame_sz = MEGASAS_CHAIN_FRAME_SZ_MIN;
1273 	}
1274 
1275 	fusion->max_sge_in_main_msg =
1276 		(MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE
1277 			- offsetof(struct MPI2_RAID_SCSI_IO_REQUEST, SGL))/16;
1278 
1279 	fusion->max_sge_in_chain =
1280 		instance->max_chain_frame_sz
1281 			/ sizeof(union MPI2_SGE_IO_UNION);
1282 
1283 	instance->max_num_sge =
1284 		rounddown_pow_of_two(fusion->max_sge_in_main_msg
1285 			+ fusion->max_sge_in_chain - 2);
1286 
1287 	/* Used for pass thru MFI frame (DCMD) */
1288 	fusion->chain_offset_mfi_pthru =
1289 		offsetof(struct MPI2_RAID_SCSI_IO_REQUEST, SGL)/16;
1290 
1291 	fusion->chain_offset_io_request =
1292 		(MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
1293 		 sizeof(union MPI2_SGE_IO_UNION))/16;
1294 
1295 	count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
1296 	for (i = 0 ; i < count; i++)
1297 		fusion->last_reply_idx[i] = 0;
1298 
1299 	/*
1300 	 * For fusion adapters, 3 commands for IOCTL and 5 commands
1301 	 * for driver's internal DCMDs.
1302 	 */
1303 	instance->max_scsi_cmds = instance->max_fw_cmds -
1304 				(MEGASAS_FUSION_INTERNAL_CMDS +
1305 				MEGASAS_FUSION_IOCTL_CMDS);
1306 	sema_init(&instance->ioctl_sem, MEGASAS_FUSION_IOCTL_CMDS);
1307 
1308 	/*
1309 	 * Allocate memory for descriptors
1310 	 * Create a pool of commands
1311 	 */
1312 	if (megasas_alloc_cmds(instance))
1313 		goto fail_alloc_mfi_cmds;
1314 	if (megasas_alloc_cmds_fusion(instance))
1315 		goto fail_alloc_cmds;
1316 
1317 	if (megasas_ioc_init_fusion(instance))
1318 		goto fail_ioc_init;
1319 
1320 	megasas_display_intel_branding(instance);
1321 	if (megasas_get_ctrl_info(instance)) {
1322 		dev_err(&instance->pdev->dev,
1323 			"Could not get controller info. Fail from %s %d\n",
1324 			__func__, __LINE__);
1325 		goto fail_ioc_init;
1326 	}
1327 
1328 	instance->flag_ieee = 1;
1329 	fusion->fast_path_io = 0;
1330 
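	/*
	 * Two copies of each RAID map are kept and ping-ponged on map_id:
	 * ld_drv_map[] holds the driver's working copy, while ld_map[] is the
	 * DMA-coherent buffer the FW map is read into.
	 */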
1331 	fusion->drv_map_pages = get_order(fusion->drv_map_sz);
1332 	for (i = 0; i < 2; i++) {
1333 		fusion->ld_map[i] = NULL;
1334 		fusion->ld_drv_map[i] = (void *)__get_free_pages(GFP_KERNEL,
1335 			fusion->drv_map_pages);
1336 		if (!fusion->ld_drv_map[i]) {
1337 			dev_err(&instance->pdev->dev, "Could not allocate "
1338 				"memory for local map info for %d pages\n",
1339 				fusion->drv_map_pages);
1340 			if (i == 1)
1341 				free_pages((ulong)fusion->ld_drv_map[0],
1342 					fusion->drv_map_pages);
1343 			goto fail_ioc_init;
1344 		}
1345 		memset(fusion->ld_drv_map[i], 0,
1346 			((1 << PAGE_SHIFT) << fusion->drv_map_pages));
1347 	}
1348 
1349 	for (i = 0; i < 2; i++) {
1350 		fusion->ld_map[i] = dma_alloc_coherent(&instance->pdev->dev,
1351 						       fusion->max_map_sz,
1352 						       &fusion->ld_map_phys[i],
1353 						       GFP_KERNEL);
1354 		if (!fusion->ld_map[i]) {
1355 			dev_err(&instance->pdev->dev, "Could not allocate memory "
1356 			       "for map info\n");
1357 			goto fail_map_info;
1358 		}
1359 	}
1360 
1361 	if (!megasas_get_map_info(instance))
1362 		megasas_sync_map_info(instance);
1363 
1364 	return 0;
1365 
1366 fail_map_info:
1367 	if (i == 1)
1368 		dma_free_coherent(&instance->pdev->dev, fusion->max_map_sz,
1369 				  fusion->ld_map[0], fusion->ld_map_phys[0]);
1370 fail_ioc_init:
1371 	megasas_free_cmds_fusion(instance);
1372 fail_alloc_cmds:
1373 	megasas_free_cmds(instance);
1374 fail_alloc_mfi_cmds:
1375 	return 1;
1376 }
1377 
1378 /**
1379  * map_cmd_status -	Maps FW cmd status to OS cmd status
1380  * @cmd :		Pointer to cmd
1381  * @status :		status of cmd returned by FW
1382  * @ext_status :	ext status of cmd returned by FW
1383  */
1384 
1385 void
1386 map_cmd_status(struct megasas_cmd_fusion *cmd, u8 status, u8 ext_status)
1387 {
1388 
1389 	switch (status) {
1390 
1391 	case MFI_STAT_OK:
1392 		cmd->scmd->result = DID_OK << 16;
1393 		break;
1394 
1395 	case MFI_STAT_SCSI_IO_FAILED:
1396 	case MFI_STAT_LD_INIT_IN_PROGRESS:
1397 		cmd->scmd->result = (DID_ERROR << 16) | ext_status;
1398 		break;
1399 
1400 	case MFI_STAT_SCSI_DONE_WITH_ERROR:
1401 
1402 		cmd->scmd->result = (DID_OK << 16) | ext_status;
1403 		if (ext_status == SAM_STAT_CHECK_CONDITION) {
1404 			memset(cmd->scmd->sense_buffer, 0,
1405 			       SCSI_SENSE_BUFFERSIZE);
1406 			memcpy(cmd->scmd->sense_buffer, cmd->sense,
1407 			       SCSI_SENSE_BUFFERSIZE);
1408 			cmd->scmd->result |= DRIVER_SENSE << 24;
1409 		}
1410 		break;
1411 
1412 	case MFI_STAT_LD_OFFLINE:
1413 	case MFI_STAT_DEVICE_NOT_FOUND:
1414 		cmd->scmd->result = DID_BAD_TARGET << 16;
1415 		break;
1416 	case MFI_STAT_CONFIG_SEQ_MISMATCH:
1417 		cmd->scmd->result = DID_IMM_RETRY << 16;
1418 		break;
1419 	default:
1420 		dev_printk(KERN_DEBUG, &cmd->instance->pdev->dev, "FW status %#x\n", status);
1421 		cmd->scmd->result = DID_ERROR << 16;
1422 		break;
1423 	}
1424 }
1425 
1426 /**
1427  * megasas_make_sgl_fusion -	Prepares the IEEE scatter-gather list (SGL)
1428  * @instance:		Adapter soft state
1429  * @scp:		SCSI command from the mid-layer
1430  * @sgl_ptr:		SGL to be filled in
1431  * @cmd:		cmd we are working on
1432  *
1433  * If successful, this function returns the number of SG elements.
1434  */
1435 static int
1436 megasas_make_sgl_fusion(struct megasas_instance *instance,
1437 			struct scsi_cmnd *scp,
1438 			struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr,
1439 			struct megasas_cmd_fusion *cmd)
1440 {
1441 	int i, sg_processed, sge_count;
1442 	struct scatterlist *os_sgl;
1443 	struct fusion_context *fusion;
1444 
1445 	fusion = instance->ctrl_context;
1446 
1447 	if (fusion->adapter_type == INVADER_SERIES) {
1448 		struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr_end = sgl_ptr;
1449 		sgl_ptr_end += fusion->max_sge_in_main_msg - 1;
1450 		sgl_ptr_end->Flags = 0;
1451 	}
1452 
1453 	sge_count = scsi_dma_map(scp);
1454 
1455 	BUG_ON(sge_count < 0);
1456 
1457 	if (sge_count > instance->max_num_sge || !sge_count)
1458 		return sge_count;
1459 
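	/*
	 * The main message holds up to max_sge_in_main_msg - 1 data SGEs.  If
	 * more remain, the last main-message slot is turned into a chain
	 * element pointing at the command's sg_frame, and the remaining SGEs
	 * continue there.
	 */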
1460 	scsi_for_each_sg(scp, os_sgl, sge_count, i) {
1461 		sgl_ptr->Length = cpu_to_le32(sg_dma_len(os_sgl));
1462 		sgl_ptr->Address = cpu_to_le64(sg_dma_address(os_sgl));
1463 		sgl_ptr->Flags = 0;
1464 		if (fusion->adapter_type == INVADER_SERIES)
1465 			if (i == sge_count - 1)
1466 				sgl_ptr->Flags = IEEE_SGE_FLAGS_END_OF_LIST;
1467 		sgl_ptr++;
1468 
1469 		sg_processed = i + 1;
1470 
1471 		if ((sg_processed ==  (fusion->max_sge_in_main_msg - 1)) &&
1472 		    (sge_count > fusion->max_sge_in_main_msg)) {
1473 
1474 			struct MPI25_IEEE_SGE_CHAIN64 *sg_chain;
1475 			if (fusion->adapter_type == INVADER_SERIES) {
1476 				if ((le16_to_cpu(cmd->io_request->IoFlags) &
1477 					MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) !=
1478 					MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH)
1479 					cmd->io_request->ChainOffset =
1480 						fusion->
1481 						chain_offset_io_request;
1482 				else
1483 					cmd->io_request->ChainOffset = 0;
1484 			} else
1485 				cmd->io_request->ChainOffset =
1486 					fusion->chain_offset_io_request;
1487 
1488 			sg_chain = sgl_ptr;
1489 			/* Prepare chain element */
1490 			sg_chain->NextChainOffset = 0;
1491 			if (fusion->adapter_type == INVADER_SERIES)
1492 				sg_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT;
1493 			else
1494 				sg_chain->Flags =
1495 					(IEEE_SGE_FLAGS_CHAIN_ELEMENT |
1496 					 MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR);
1497 			sg_chain->Length =  cpu_to_le32((sizeof(union MPI2_SGE_IO_UNION) * (sge_count - sg_processed)));
1498 			sg_chain->Address = cpu_to_le64(cmd->sg_frame_phys_addr);
1499 
1500 			sgl_ptr =
1501 			  (struct MPI25_IEEE_SGE_CHAIN64 *)cmd->sg_frame;
1502 			memset(sgl_ptr, 0, instance->max_chain_frame_sz);
1503 		}
1504 	}
1505 
1506 	return sge_count;
1507 }
1508 
1509 /**
1510  * megasas_set_pd_lba -	Sets PD LBA
1511  * @io_request:		MPT IO request frame whose CDB is populated
1512  * @cdb_len:		CDB length
1513  * @io_info:		IO request info (start block, number of blocks, direction)
1514  *
1515  * Used to set the PD LBA in CDB for FP IOs
1516  */
1517 void
1518 megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len,
1519 		   struct IO_REQUEST_INFO *io_info, struct scsi_cmnd *scp,
1520 		   struct MR_DRV_RAID_MAP_ALL *local_map_ptr, u32 ref_tag)
1521 {
1522 	struct MR_LD_RAID *raid;
1523 	u32 ld;
1524 	u64 start_blk = io_info->pdBlock;
1525 	u8 *cdb = io_request->CDB.CDB32;
1526 	u32 num_blocks = io_info->numBlocks;
1527 	u8 opcode = 0, flagvals = 0, groupnum = 0, control = 0;
1528 
1529 	/* Check if T10 PI (DIF) is enabled for this LD */
1530 	ld = MR_TargetIdToLdGet(io_info->ldTgtId, local_map_ptr);
1531 	raid = MR_LdRaidGet(ld, local_map_ptr);
1532 	if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER) {
1533 		memset(cdb, 0, sizeof(io_request->CDB.CDB32));
1534 		cdb[0] =  MEGASAS_SCSI_VARIABLE_LENGTH_CMD;
1535 		cdb[7] =  MEGASAS_SCSI_ADDL_CDB_LEN;
1536 
1537 		if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
1538 			cdb[9] = MEGASAS_SCSI_SERVICE_ACTION_READ32;
1539 		else
1540 			cdb[9] = MEGASAS_SCSI_SERVICE_ACTION_WRITE32;
1541 		cdb[10] = MEGASAS_RD_WR_PROTECT_CHECK_ALL;
1542 
1543 		/* LBA */
1544 		cdb[12] = (u8)((start_blk >> 56) & 0xff);
1545 		cdb[13] = (u8)((start_blk >> 48) & 0xff);
1546 		cdb[14] = (u8)((start_blk >> 40) & 0xff);
1547 		cdb[15] = (u8)((start_blk >> 32) & 0xff);
1548 		cdb[16] = (u8)((start_blk >> 24) & 0xff);
1549 		cdb[17] = (u8)((start_blk >> 16) & 0xff);
1550 		cdb[18] = (u8)((start_blk >> 8) & 0xff);
1551 		cdb[19] = (u8)(start_blk & 0xff);
1552 
1553 		/* Logical block reference tag */
1554 		io_request->CDB.EEDP32.PrimaryReferenceTag =
1555 			cpu_to_be32(ref_tag);
1556 		io_request->CDB.EEDP32.PrimaryApplicationTagMask = cpu_to_be16(0xffff);
1557 		io_request->IoFlags = cpu_to_le16(32); /* Specify 32-byte cdb */
1558 
1559 		/* Transfer length */
1560 		cdb[28] = (u8)((num_blocks >> 24) & 0xff);
1561 		cdb[29] = (u8)((num_blocks >> 16) & 0xff);
1562 		cdb[30] = (u8)((num_blocks >> 8) & 0xff);
1563 		cdb[31] = (u8)(num_blocks & 0xff);
1564 
1565 		/* set SCSI IO EEDPFlags */
1566 		if (scp->sc_data_direction == PCI_DMA_FROMDEVICE) {
1567 			io_request->EEDPFlags = cpu_to_le16(
1568 				MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG  |
1569 				MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
1570 				MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP |
1571 				MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG |
1572 				MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
1573 		} else {
1574 			io_request->EEDPFlags = cpu_to_le16(
1575 				MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
1576 				MPI2_SCSIIO_EEDPFLAGS_INSERT_OP);
1577 		}
1578 		io_request->Control |= cpu_to_le32((0x4 << 26));
1579 		io_request->EEDPBlockSize = cpu_to_le32(scp->device->sector_size);
1580 	} else {
1581 		/* Some drives don't support 16/12 byte CDB's, convert to 10 */
1582 		if (((cdb_len == 12) || (cdb_len == 16)) &&
1583 		    (start_blk <= 0xffffffff)) {
1584 			if (cdb_len == 16) {
1585 				opcode = cdb[0] == READ_16 ? READ_10 : WRITE_10;
1586 				flagvals = cdb[1];
1587 				groupnum = cdb[14];
1588 				control = cdb[15];
1589 			} else {
1590 				opcode = cdb[0] == READ_12 ? READ_10 : WRITE_10;
1591 				flagvals = cdb[1];
1592 				groupnum = cdb[10];
1593 				control = cdb[11];
1594 			}
1595 
1596 			memset(cdb, 0, sizeof(io_request->CDB.CDB32));
1597 
1598 			cdb[0] = opcode;
1599 			cdb[1] = flagvals;
1600 			cdb[6] = groupnum;
1601 			cdb[9] = control;
1602 
1603 			/* Transfer length */
1604 			cdb[8] = (u8)(num_blocks & 0xff);
1605 			cdb[7] = (u8)((num_blocks >> 8) & 0xff);
1606 
1607 			io_request->IoFlags = cpu_to_le16(10); /* Specify 10-byte cdb */
1608 			cdb_len = 10;
1609 		} else if ((cdb_len < 16) && (start_blk > 0xffffffff)) {
1610 			/* Convert to 16 byte CDB for large LBA's */
1611 			switch (cdb_len) {
1612 			case 6:
1613 				opcode = cdb[0] == READ_6 ? READ_16 : WRITE_16;
1614 				control = cdb[5];
1615 				break;
1616 			case 10:
1617 				opcode =
1618 					cdb[0] == READ_10 ? READ_16 : WRITE_16;
1619 				flagvals = cdb[1];
1620 				groupnum = cdb[6];
1621 				control = cdb[9];
1622 				break;
1623 			case 12:
1624 				opcode =
1625 					cdb[0] == READ_12 ? READ_16 : WRITE_16;
1626 				flagvals = cdb[1];
1627 				groupnum = cdb[10];
1628 				control = cdb[11];
1629 				break;
1630 			}
1631 
1632 			memset(cdb, 0, sizeof(io_request->CDB.CDB32));
1633 
1634 			cdb[0] = opcode;
1635 			cdb[1] = flagvals;
1636 			cdb[14] = groupnum;
1637 			cdb[15] = control;
1638 
1639 			/* Transfer length */
1640 			cdb[13] = (u8)(num_blocks & 0xff);
1641 			cdb[12] = (u8)((num_blocks >> 8) & 0xff);
1642 			cdb[11] = (u8)((num_blocks >> 16) & 0xff);
1643 			cdb[10] = (u8)((num_blocks >> 24) & 0xff);
1644 
1645 			io_request->IoFlags = cpu_to_le16(16); /* Specify 16-byte cdb */
1646 			cdb_len = 16;
1647 		}
1648 
1649 		/* Normal case, just load LBA here */
1650 		switch (cdb_len) {
1651 		case 6:
1652 		{
1653 			u8 val = cdb[1] & 0xE0;
1654 			cdb[3] = (u8)(start_blk & 0xff);
1655 			cdb[2] = (u8)((start_blk >> 8) & 0xff);
1656 			cdb[1] = val | ((u8)(start_blk >> 16) & 0x1f);
1657 			break;
1658 		}
1659 		case 10:
1660 			cdb[5] = (u8)(start_blk & 0xff);
1661 			cdb[4] = (u8)((start_blk >> 8) & 0xff);
1662 			cdb[3] = (u8)((start_blk >> 16) & 0xff);
1663 			cdb[2] = (u8)((start_blk >> 24) & 0xff);
1664 			break;
1665 		case 12:
1666 			cdb[5]    = (u8)(start_blk & 0xff);
1667 			cdb[4]    = (u8)((start_blk >> 8) & 0xff);
1668 			cdb[3]    = (u8)((start_blk >> 16) & 0xff);
1669 			cdb[2]    = (u8)((start_blk >> 24) & 0xff);
1670 			break;
1671 		case 16:
1672 			cdb[9]    = (u8)(start_blk & 0xff);
1673 			cdb[8]    = (u8)((start_blk >> 8) & 0xff);
1674 			cdb[7]    = (u8)((start_blk >> 16) & 0xff);
1675 			cdb[6]    = (u8)((start_blk >> 24) & 0xff);
1676 			cdb[5]    = (u8)((start_blk >> 32) & 0xff);
1677 			cdb[4]    = (u8)((start_blk >> 40) & 0xff);
1678 			cdb[3]    = (u8)((start_blk >> 48) & 0xff);
1679 			cdb[2]    = (u8)((start_blk >> 56) & 0xff);
1680 			break;
1681 		}
1682 	}
1683 }
1684 
1685 /**
1686  * megasas_build_ldio_fusion -	Prepares IOs to devices
1687  * @instance:		Adapter soft state
1688  * @scp:		SCSI command
1689  * @cmd:		Command to be prepared
1690  *
1691  * Prepares the io_request and chain elements (sg_frame) for IO
1692  * The IO can be for PD (Fast Path) or LD
1693  */
1694 void
1695 megasas_build_ldio_fusion(struct megasas_instance *instance,
1696 			  struct scsi_cmnd *scp,
1697 			  struct megasas_cmd_fusion *cmd)
1698 {
1699 	u8 fp_possible;
1700 	u32 start_lba_lo, start_lba_hi, device_id, datalength = 0;
1701 	struct MPI2_RAID_SCSI_IO_REQUEST *io_request;
1702 	union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
1703 	struct IO_REQUEST_INFO io_info;
1704 	struct fusion_context *fusion;
1705 	struct MR_DRV_RAID_MAP_ALL *local_map_ptr;
1706 	u8 *raidLUN;
1707 
1708 	device_id = MEGASAS_DEV_INDEX(scp);
1709 
1710 	fusion = instance->ctrl_context;
1711 
1712 	io_request = cmd->io_request;
1713 	io_request->RaidContext.VirtualDiskTgtId = cpu_to_le16(device_id);
1714 	io_request->RaidContext.status = 0;
1715 	io_request->RaidContext.exStatus = 0;
1716 
1717 	req_desc = (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)cmd->request_desc;
1718 
1719 	start_lba_lo = 0;
1720 	start_lba_hi = 0;
1721 	fp_possible = 0;
1722 
1723 	/*
1724 	 * 6-byte READ(0x08) or WRITE(0x0A) cdb
1725 	 */
1726 	if (scp->cmd_len == 6) {
1727 		datalength = (u32) scp->cmnd[4];
1728 		start_lba_lo = ((u32) scp->cmnd[1] << 16) |
1729 			((u32) scp->cmnd[2] << 8) | (u32) scp->cmnd[3];
1730 
1731 		start_lba_lo &= 0x1FFFFF;
1732 	}
1733 
1734 	/*
1735 	 * 10-byte READ(0x28) or WRITE(0x2A) cdb
1736 	 */
1737 	else if (scp->cmd_len == 10) {
1738 		datalength = (u32) scp->cmnd[8] |
1739 			((u32) scp->cmnd[7] << 8);
1740 		start_lba_lo = ((u32) scp->cmnd[2] << 24) |
1741 			((u32) scp->cmnd[3] << 16) |
1742 			((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5];
1743 	}
1744 
1745 	/*
1746 	 * 12-byte READ(0xA8) or WRITE(0xAA) cdb
1747 	 */
1748 	else if (scp->cmd_len == 12) {
1749 		datalength = ((u32) scp->cmnd[6] << 24) |
1750 			((u32) scp->cmnd[7] << 16) |
1751 			((u32) scp->cmnd[8] << 8) | (u32) scp->cmnd[9];
1752 		start_lba_lo = ((u32) scp->cmnd[2] << 24) |
1753 			((u32) scp->cmnd[3] << 16) |
1754 			((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5];
1755 	}
1756 
1757 	/*
1758 	 * 16-byte READ(0x88) or WRITE(0x8A) cdb
1759 	 */
1760 	else if (scp->cmd_len == 16) {
1761 		datalength = ((u32) scp->cmnd[10] << 24) |
1762 			((u32) scp->cmnd[11] << 16) |
1763 			((u32) scp->cmnd[12] << 8) | (u32) scp->cmnd[13];
1764 		start_lba_lo = ((u32) scp->cmnd[6] << 24) |
1765 			((u32) scp->cmnd[7] << 16) |
1766 			((u32) scp->cmnd[8] << 8) | (u32) scp->cmnd[9];
1767 
1768 		start_lba_hi = ((u32) scp->cmnd[2] << 24) |
1769 			((u32) scp->cmnd[3] << 16) |
1770 			((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5];
1771 	}
1772 
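	/* Describe this IO to the RAID map layer, which decides whether the
	 * fast path can be used and which device handle to target.
	 */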
1773 	memset(&io_info, 0, sizeof(struct IO_REQUEST_INFO));
1774 	io_info.ldStartBlock = ((u64)start_lba_hi << 32) | start_lba_lo;
1775 	io_info.numBlocks = datalength;
1776 	io_info.ldTgtId = device_id;
1777 	io_request->DataLength = cpu_to_le32(scsi_bufflen(scp));
1778 
1779 	if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
1780 		io_info.isRead = 1;
1781 
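	/* Two copies of the RAID map are maintained; the low bit of map_id
	 * selects the copy that is currently valid.
	 */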
1782 	local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
1783 
1784 	if ((MR_TargetIdToLdGet(device_id, local_map_ptr) >=
1785 		instance->fw_supported_vd_count) || (!fusion->fast_path_io)) {
1786 		io_request->RaidContext.regLockFlags  = 0;
1787 		fp_possible = 0;
1788 	} else {
1789 		if (MR_BuildRaidContext(instance, &io_info,
1790 					&io_request->RaidContext,
1791 					local_map_ptr, &raidLUN))
1792 			fp_possible = io_info.fpOkForIo;
1793 	}
1794 
1795 	/* Use raw_smp_processor_id() for now, until cmd->request->cpu is a CPU
1796 	   id by default rather than a CPU group id; otherwise not all MSI-X
1797 	   queues will be utilized */
1798 	cmd->request_desc->SCSIIO.MSIxIndex = instance->msix_vectors ?
1799 		raw_smp_processor_id() % instance->msix_vectors : 0;
1800 
1801 	if (fp_possible) {
1802 		megasas_set_pd_lba(io_request, scp->cmd_len, &io_info, scp,
1803 				   local_map_ptr, start_lba_lo);
1804 		io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
1805 		cmd->request_desc->SCSIIO.RequestFlags =
1806 			(MPI2_REQ_DESCRIPT_FLAGS_FP_IO
1807 			 << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1808 		if (fusion->adapter_type == INVADER_SERIES) {
1809 			if (io_request->RaidContext.regLockFlags ==
1810 			    REGION_TYPE_UNUSED)
1811 				cmd->request_desc->SCSIIO.RequestFlags =
1812 					(MEGASAS_REQ_DESCRIPT_FLAGS_NO_LOCK <<
1813 					MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1814 			io_request->RaidContext.Type = MPI2_TYPE_CUDA;
1815 			io_request->RaidContext.nseg = 0x1;
1816 			io_request->IoFlags |= cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH);
1817 			io_request->RaidContext.regLockFlags |=
1818 			  (MR_RL_FLAGS_GRANT_DESTINATION_CUDA |
1819 			   MR_RL_FLAGS_SEQ_NUM_ENABLE);
1820 		}
1821 		if ((fusion->load_balance_info[device_id].loadBalanceFlag) &&
1822 		    (io_info.isRead)) {
1823 			io_info.devHandle =
1824 				get_updated_dev_handle(instance,
1825 					&fusion->load_balance_info[device_id],
1826 					&io_info);
1827 			scp->SCp.Status |= MEGASAS_LOAD_BALANCE_FLAG;
1828 			cmd->pd_r1_lb = io_info.pd_after_lb;
1829 		} else
1830 			scp->SCp.Status &= ~MEGASAS_LOAD_BALANCE_FLAG;
1831 
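		/* When raidLUN[0] is set and the drive exposes more than one
		 * valid device handle, toggle instance->dev_handle so
		 * successive fast-path IOs alternate between the two paths.
		 */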
1832 		if ((raidLUN[0] == 1) &&
1833 			(local_map_ptr->raidMap.devHndlInfo[io_info.pd_after_lb].validHandles > 1)) {
1834 			instance->dev_handle = !(instance->dev_handle);
1835 			io_info.devHandle =
1836 				local_map_ptr->raidMap.devHndlInfo[io_info.pd_after_lb].devHandle[instance->dev_handle];
1837 		}
1838 
1839 		cmd->request_desc->SCSIIO.DevHandle = io_info.devHandle;
1840 		io_request->DevHandle = io_info.devHandle;
1841 		/* populate the LUN field */
1842 		memcpy(io_request->LUN, raidLUN, 8);
1843 	} else {
1844 		io_request->RaidContext.timeoutValue =
1845 			cpu_to_le16(local_map_ptr->raidMap.fpPdIoTimeoutSec);
1846 		cmd->request_desc->SCSIIO.RequestFlags =
1847 			(MEGASAS_REQ_DESCRIPT_FLAGS_LD_IO
1848 			 << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1849 		if (fusion->adapter_type == INVADER_SERIES) {
1850 			if (io_info.do_fp_rlbypass ||
1851 				(io_request->RaidContext.regLockFlags == REGION_TYPE_UNUSED))
1852 				cmd->request_desc->SCSIIO.RequestFlags =
1853 					(MEGASAS_REQ_DESCRIPT_FLAGS_NO_LOCK <<
1854 					MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1855 			io_request->RaidContext.Type = MPI2_TYPE_CUDA;
1856 			io_request->RaidContext.regLockFlags |=
1857 				(MR_RL_FLAGS_GRANT_DESTINATION_CPU0 |
1858 				 MR_RL_FLAGS_SEQ_NUM_ENABLE);
1859 			io_request->RaidContext.nseg = 0x1;
1860 		}
1861 		io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST;
1862 		io_request->DevHandle = cpu_to_le16(device_id);
1863 	} /* Not FP */
1864 }
1865 
1866 /**
1867  * megasas_build_ld_nonrw_fusion - prepares non-RW IOs for a virtual disk
1868  * @instance:		Adapter soft state
1869  * @scmd:		SCSI command
1870  * @cmd:		Command to be prepared
1871  *
1872  * Prepares the io_request frame for non-RW IO cmds for a VD.
1873  */
1874 static void megasas_build_ld_nonrw_fusion(struct megasas_instance *instance,
1875 			  struct scsi_cmnd *scmd, struct megasas_cmd_fusion *cmd)
1876 {
1877 	u32 device_id;
1878 	struct MPI2_RAID_SCSI_IO_REQUEST *io_request;
1879 	u16 pd_index = 0;
1880 	struct MR_DRV_RAID_MAP_ALL *local_map_ptr;
1881 	struct fusion_context *fusion = instance->ctrl_context;
1882 	u8                          span, physArm;
1883 	__le16                      devHandle;
1884 	u32                         ld, arRef, pd;
1885 	struct MR_LD_RAID                  *raid;
1886 	struct RAID_CONTEXT                *pRAID_Context;
1887 	u8 fp_possible = 1;
1888 
1889 	io_request = cmd->io_request;
1890 	device_id = MEGASAS_DEV_INDEX(scmd);
1891 	pd_index = MEGASAS_PD_INDEX(scmd);
1892 	local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
1893 	io_request->DataLength = cpu_to_le32(scsi_bufflen(scmd));
1894 	/* get RAID_Context pointer */
1895 	pRAID_Context = &io_request->RaidContext;
1896 	/* Check with FW team */
1897 	pRAID_Context->VirtualDiskTgtId = cpu_to_le16(device_id);
1898 	pRAID_Context->regLockRowLBA    = 0;
1899 	pRAID_Context->regLockLength    = 0;
1900 
1901 	if (fusion->fast_path_io &&
1902 		(device_id < instance->fw_supported_vd_count)) {
1903 
1904 		ld = MR_TargetIdToLdGet(device_id, local_map_ptr);
1905 		if (ld >= instance->fw_supported_vd_count)
1906 			fp_possible = 0;
1907 
1908 		raid = MR_LdRaidGet(ld, local_map_ptr);
1909 		if (!(raid->capability.fpNonRWCapable))
1910 			fp_possible = 0;
1911 	} else
1912 		fp_possible = 0;
1913 
1914 	if (!fp_possible) {
1915 		io_request->Function  = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST;
1916 		io_request->DevHandle = cpu_to_le16(device_id);
1917 		io_request->LUN[1] = scmd->device->lun;
1918 		pRAID_Context->timeoutValue =
1919 			cpu_to_le16 (scmd->request->timeout / HZ);
1920 		cmd->request_desc->SCSIIO.RequestFlags =
1921 			(MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
1922 			MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1923 	} else {
1924 
1925 		/* set RAID context values */
1926 		pRAID_Context->configSeqNum = raid->seqNum;
1927 		pRAID_Context->regLockFlags = REGION_TYPE_SHARED_READ;
1928 		pRAID_Context->timeoutValue = cpu_to_le16(raid->fpIoTimeoutForLd);
1929 
1930 		/* get the DevHandle for the PD (since this is
1931 		   fpNonRWCapable, this is a single disk RAID0) */
1932 		span = physArm = 0;
1933 		arRef = MR_LdSpanArrayGet(ld, span, local_map_ptr);
1934 		pd = MR_ArPdGet(arRef, physArm, local_map_ptr);
1935 		devHandle = MR_PdDevHandleGet(pd, local_map_ptr);
1936 
1937 		/* build request descriptor */
1938 		cmd->request_desc->SCSIIO.RequestFlags =
1939 			(MPI2_REQ_DESCRIPT_FLAGS_FP_IO <<
1940 			MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1941 		cmd->request_desc->SCSIIO.DevHandle = devHandle;
1942 
1943 		/* populate the LUN field */
1944 		memcpy(io_request->LUN, raid->LUN, 8);
1945 
1946 		/* build the raidScsiIO structure */
1947 		io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
1948 		io_request->DevHandle = devHandle;
1949 	}
1950 }
1951 
1952 /**
1953  * megasas_build_syspd_fusion - prepares RW/non-RW IOs for a system PD
1954  * @instance:		Adapter soft state
1955  * @scmd:		SCSI command
1956  * @cmd:		Command to be prepared
1957  * @fp_possible:	whether the IO takes the fast path or the firmware path
1958  *
1959  * Prepares the io_request frame for RW/non-RW IO cmds for system PDs.
1960  */
1961 static void
1962 megasas_build_syspd_fusion(struct megasas_instance *instance,
1963 	struct scsi_cmnd *scmd, struct megasas_cmd_fusion *cmd, u8 fp_possible)
1964 {
1965 	u32 device_id;
1966 	struct MPI2_RAID_SCSI_IO_REQUEST *io_request;
1967 	u16 pd_index = 0;
1968 	u16 os_timeout_value;
1969 	u16 timeout_limit;
1970 	struct MR_DRV_RAID_MAP_ALL *local_map_ptr;
1971 	struct RAID_CONTEXT	*pRAID_Context;
1972 	struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
1973 	struct fusion_context *fusion = instance->ctrl_context;
1974 	pd_sync = (void *)fusion->pd_seq_sync[(instance->pd_seq_map_id - 1) & 1];
1975 
1976 	device_id = MEGASAS_DEV_INDEX(scmd);
1977 	pd_index = MEGASAS_PD_INDEX(scmd);
1978 	os_timeout_value = scmd->request->timeout / HZ;
1979 
1980 	io_request = cmd->io_request;
1981 	/* get RAID_Context pointer */
1982 	pRAID_Context = &io_request->RaidContext;
1983 	pRAID_Context->regLockFlags = 0;
1984 	pRAID_Context->regLockRowLBA = 0;
1985 	pRAID_Context->regLockLength = 0;
1986 	io_request->DataLength = cpu_to_le32(scsi_bufflen(scmd));
1987 	io_request->LUN[1] = scmd->device->lun;
1988 	pRAID_Context->RAIDFlags = MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD
1989 		<< MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT;
1990 
1991 	/* If FW supports PD sequence number */
1992 	if (instance->use_seqnum_jbod_fp &&
1993 		instance->pd_list[pd_index].driveType == TYPE_DISK) {
1994 		/* TgtId must be offset by 255 (MAX_PHYSICAL_DEVICES - 1) as the
1995 		 * JBOD sequence number map is indexed below the RAID map
1996 		 */
1997 		pRAID_Context->VirtualDiskTgtId =
1998 			cpu_to_le16(device_id + (MAX_PHYSICAL_DEVICES - 1));
1999 		pRAID_Context->configSeqNum = pd_sync->seq[pd_index].seqNum;
2000 		io_request->DevHandle = pd_sync->seq[pd_index].devHandle;
2001 		pRAID_Context->regLockFlags |=
2002 			(MR_RL_FLAGS_SEQ_NUM_ENABLE|MR_RL_FLAGS_GRANT_DESTINATION_CUDA);
2003 	} else if (fusion->fast_path_io) {
2004 		pRAID_Context->VirtualDiskTgtId = cpu_to_le16(device_id);
2005 		pRAID_Context->configSeqNum = 0;
2006 		local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
2007 		io_request->DevHandle =
2008 			local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl;
2009 	} else {
2010 		/* Want to send all IO via FW path */
2011 		pRAID_Context->VirtualDiskTgtId = cpu_to_le16(device_id);
2012 		pRAID_Context->configSeqNum = 0;
2013 		io_request->DevHandle = cpu_to_le16(0xFFFF);
2014 	}
2015 
2016 	cmd->request_desc->SCSIIO.DevHandle = io_request->DevHandle;
2017 	cmd->request_desc->SCSIIO.MSIxIndex =
2018 		instance->msix_vectors ?
2019 		(raw_smp_processor_id() % instance->msix_vectors) : 0;
2020 
2021 
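	/* fp_possible is decided by the caller: when the controller supports
	 * secure JBOD, non-RW system PD IO is routed through the firmware
	 * path; all other system PD IO may use the fast path.
	 */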
2022 	if (!fp_possible) {
2023 		/* system pd firmware path */
2024 		io_request->Function  = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST;
2025 		cmd->request_desc->SCSIIO.RequestFlags =
2026 			(MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
2027 				MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
2028 		pRAID_Context->timeoutValue = cpu_to_le16(os_timeout_value);
2029 		pRAID_Context->VirtualDiskTgtId = cpu_to_le16(device_id);
2030 	} else {
2031 		/* system pd Fast Path */
2032 		io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
2033 		timeout_limit = (scmd->device->type == TYPE_DISK) ?
2034 				255 : 0xFFFF;
2035 		pRAID_Context->timeoutValue =
2036 			cpu_to_le16((os_timeout_value > timeout_limit) ?
2037 			timeout_limit : os_timeout_value);
2038 		if (fusion->adapter_type == INVADER_SERIES) {
2039 			pRAID_Context->Type = MPI2_TYPE_CUDA;
2040 			pRAID_Context->nseg = 0x1;
2041 			io_request->IoFlags |=
2042 				cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH);
2043 		}
2044 		cmd->request_desc->SCSIIO.RequestFlags =
2045 			(MPI2_REQ_DESCRIPT_FLAGS_FP_IO <<
2046 				MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
2047 	}
2048 }
2049 
2050 /**
2051  * megasas_build_io_fusion -	Prepares IOs to devices
2052  * @instance:		Adapter soft state
2053  * @scp:		SCSI command
2054  * @cmd:		Command to be prepared
2055  *
2056  * Invokes helper functions to prepare request frames
2057  * and sets flags appropriate for IO/Non-IO cmd
2058  */
2059 int
2060 megasas_build_io_fusion(struct megasas_instance *instance,
2061 			struct scsi_cmnd *scp,
2062 			struct megasas_cmd_fusion *cmd)
2063 {
2064 	u16 sge_count;
2065 	u8  cmd_type;
2066 	struct MPI2_RAID_SCSI_IO_REQUEST *io_request = cmd->io_request;
2067 
2068 	/* Zero out some fields so they don't get reused */
2069 	memset(io_request->LUN, 0x0, 8);
2070 	io_request->CDB.EEDP32.PrimaryReferenceTag = 0;
2071 	io_request->CDB.EEDP32.PrimaryApplicationTagMask = 0;
2072 	io_request->EEDPFlags = 0;
2073 	io_request->Control = 0;
2074 	io_request->EEDPBlockSize = 0;
2075 	io_request->ChainOffset = 0;
2076 	io_request->RaidContext.RAIDFlags = 0;
2077 	io_request->RaidContext.Type = 0;
2078 	io_request->RaidContext.nseg = 0;
2079 
2080 	memcpy(io_request->CDB.CDB32, scp->cmnd, scp->cmd_len);
2081 	/*
2082 	 * Just the CDB length; the rest of the IoFlags are zero.
2083 	 * This will be modified for FP in build_ldio_fusion.
2084 	 */
2085 	io_request->IoFlags = cpu_to_le16(scp->cmd_len);
2086 
2087 	switch (cmd_type = megasas_cmd_type(scp)) {
2088 	case READ_WRITE_LDIO:
2089 		megasas_build_ldio_fusion(instance, scp, cmd);
2090 		break;
2091 	case NON_READ_WRITE_LDIO:
2092 		megasas_build_ld_nonrw_fusion(instance, scp, cmd);
2093 		break;
2094 	case READ_WRITE_SYSPDIO:
2095 	case NON_READ_WRITE_SYSPDIO:
2096 		if (instance->secure_jbod_support &&
2097 			(cmd_type == NON_READ_WRITE_SYSPDIO))
2098 			megasas_build_syspd_fusion(instance, scp, cmd, 0);
2099 		else
2100 			megasas_build_syspd_fusion(instance, scp, cmd, 1);
2101 		break;
2102 	default:
2103 		break;
2104 	}
2105 
2106 	/*
2107 	 * Construct SGL
2108 	 */
2109 
2110 	sge_count =
2111 		megasas_make_sgl_fusion(instance, scp,
2112 					(struct MPI25_IEEE_SGE_CHAIN64 *)
2113 					&io_request->SGL, cmd);
2114 
2115 	if (sge_count > instance->max_num_sge) {
2116 		dev_err(&instance->pdev->dev, "Error. sge_count (0x%x) exceeds "
2117 		       "max (0x%x) allowed\n", sge_count,
2118 		       instance->max_num_sge);
2119 		return 1;
2120 	}
2121 
2122 	/* numSGE stores the lower 8 bits of sge_count;
2123 	 * numSGEExt stores the upper 8 bits of sge_count.
2124 	 */
2125 	io_request->RaidContext.numSGE = sge_count;
2126 	io_request->RaidContext.numSGEExt = (u8)(sge_count >> 8);
2127 
2128 	io_request->SGLFlags = cpu_to_le16(MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
2129 
2130 	if (scp->sc_data_direction == PCI_DMA_TODEVICE)
2131 		io_request->Control |= cpu_to_le32(MPI2_SCSIIO_CONTROL_WRITE);
2132 	else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
2133 		io_request->Control |= cpu_to_le32(MPI2_SCSIIO_CONTROL_READ);
2134 
2135 	io_request->SGLOffset0 =
2136 		offsetof(struct MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4;
2137 
2138 	io_request->SenseBufferLowAddress = cpu_to_le32(cmd->sense_phys_addr);
2139 	io_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;
2140 
2141 	cmd->scmd = scp;
2142 	scp->SCp.ptr = (char *)cmd;
2143 
2144 	return 0;
2145 }
2146 
2147 union MEGASAS_REQUEST_DESCRIPTOR_UNION *
2148 megasas_get_request_descriptor(struct megasas_instance *instance, u16 index)
2149 {
2150 	u8 *p;
2151 	struct fusion_context *fusion;
2152 
2153 	if (index >= instance->max_fw_cmds) {
2154 		dev_err(&instance->pdev->dev, "Invalid SMID (0x%x) request for "
2155 		       "descriptor for scsi%d\n", index,
2156 			instance->host->host_no);
2157 		return NULL;
2158 	}
2159 	fusion = instance->ctrl_context;
2160 	p = fusion->req_frames_desc +
2161 		sizeof(union MEGASAS_REQUEST_DESCRIPTOR_UNION) * index;
2162 
2163 	return (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)p;
2164 }
2165 
2166 /**
2167  * megasas_build_and_issue_cmd_fusion - Main routine for building and
2168  *                                      issuing a non-IOCTL cmd
2169  * @instance:			Adapter soft state
2170  * @scmd:			pointer to scsi cmd from OS
2171  */
2172 static u32
2173 megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance,
2174 				   struct scsi_cmnd *scmd)
2175 {
2176 	struct megasas_cmd_fusion *cmd;
2177 	union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
2178 	u32 index;
2179 	struct fusion_context *fusion;
2180 
2181 	fusion = instance->ctrl_context;
2182 
2183 	if ((megasas_cmd_type(scmd) == READ_WRITE_LDIO) &&
2184 		instance->ldio_threshold &&
2185 		(atomic_inc_return(&instance->ldio_outstanding) >
2186 		instance->ldio_threshold)) {
2187 		atomic_dec(&instance->ldio_outstanding);
2188 		return SCSI_MLQUEUE_DEVICE_BUSY;
2189 	}
2190 
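	/* Fusion commands are pre-allocated; look up the one reserved for
	 * this SCSI command's block layer tag.
	 */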
2191 	cmd = megasas_get_cmd_fusion(instance, scmd->request->tag);
2192 
2193 	index = cmd->index;
2194 
2195 	req_desc = megasas_get_request_descriptor(instance, index-1);
2196 	if (!req_desc)
2197 		return SCSI_MLQUEUE_HOST_BUSY;
2198 
2199 	req_desc->Words = 0;
2200 	cmd->request_desc = req_desc;
2201 
2202 	if (megasas_build_io_fusion(instance, scmd, cmd)) {
2203 		megasas_return_cmd_fusion(instance, cmd);
2204 		dev_err(&instance->pdev->dev, "Error building command\n");
2205 		cmd->request_desc = NULL;
2206 		return SCSI_MLQUEUE_HOST_BUSY;
2207 	}
2208 
2209 	req_desc = cmd->request_desc;
2210 	req_desc->SCSIIO.SMID = cpu_to_le16(index);
2211 
2212 	if (cmd->io_request->ChainOffset != 0 &&
2213 	    cmd->io_request->ChainOffset != 0xF)
2214 		dev_err(&instance->pdev->dev, "The chain offset value is not "
2215 		       "correct : %x\n", cmd->io_request->ChainOffset);
2216 
2217 	/*
2218 	 * Issue the command to the FW
2219 	 */
2220 	atomic_inc(&instance->fw_outstanding);
2221 
2222 	megasas_fire_cmd_fusion(instance, req_desc);
2223 
2224 	return 0;
2225 }
2226 
2227 /**
2228  * complete_cmd_fusion -	Completes command
2229  * @instance:			Adapter soft state
2230  * Completes all commands that are in the reply descriptor queue
2231  */
2232 int
2233 complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex)
2234 {
2235 	union MPI2_REPLY_DESCRIPTORS_UNION *desc;
2236 	struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *reply_desc;
2237 	struct MPI2_RAID_SCSI_IO_REQUEST *scsi_io_req;
2238 	struct fusion_context *fusion;
2239 	struct megasas_cmd *cmd_mfi;
2240 	struct megasas_cmd_fusion *cmd_fusion;
2241 	u16 smid, num_completed;
2242 	u8 reply_descript_type;
2243 	u32 status, extStatus, device_id;
2244 	union desc_value d_val;
2245 	struct LD_LOAD_BALANCE_INFO *lbinfo;
2246 	int threshold_reply_count = 0;
2247 	struct scsi_cmnd *scmd_local = NULL;
2248 	struct MR_TASK_MANAGE_REQUEST *mr_tm_req;
2249 	struct MPI2_SCSI_TASK_MANAGE_REQUEST *mpi_tm_req;
2250 
2251 	fusion = instance->ctrl_context;
2252 
2253 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
2254 		return IRQ_HANDLED;
2255 
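	/* Resume scanning this reply queue from where the previous pass
	 * left off.
	 */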
2256 	desc = fusion->reply_frames_desc[MSIxIndex] +
2257 				fusion->last_reply_idx[MSIxIndex];
2258 
2259 	reply_desc = (struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
2260 
2261 	d_val.word = desc->Words;
2262 
2263 	reply_descript_type = reply_desc->ReplyFlags &
2264 		MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
2265 
2266 	if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
2267 		return IRQ_NONE;
2268 
2269 	num_completed = 0;
2270 
2271 	while (d_val.u.low != cpu_to_le32(UINT_MAX) &&
2272 	       d_val.u.high != cpu_to_le32(UINT_MAX)) {
2273 		smid = le16_to_cpu(reply_desc->SMID);
2274 
2275 		cmd_fusion = fusion->cmd_list[smid - 1];
2276 
2277 		scsi_io_req =
2278 			(struct MPI2_RAID_SCSI_IO_REQUEST *)
2279 		  cmd_fusion->io_request;
2280 
2281 		if (cmd_fusion->scmd)
2282 			cmd_fusion->scmd->SCp.ptr = NULL;
2283 
2284 		scmd_local = cmd_fusion->scmd;
2285 		status = scsi_io_req->RaidContext.status;
2286 		extStatus = scsi_io_req->RaidContext.exStatus;
2287 
2288 		switch (scsi_io_req->Function) {
2289 		case MPI2_FUNCTION_SCSI_TASK_MGMT:
2290 			mr_tm_req = (struct MR_TASK_MANAGE_REQUEST *)
2291 						cmd_fusion->io_request;
2292 			mpi_tm_req = (struct MPI2_SCSI_TASK_MANAGE_REQUEST *)
2293 						&mr_tm_req->TmRequest;
2294 			dev_dbg(&instance->pdev->dev, "TM completion: "
2295 				"type: 0x%x TaskMID: 0x%x\n",
2296 				mpi_tm_req->TaskType, mpi_tm_req->TaskMID);
2297 			complete(&cmd_fusion->done);
2298 			break;
2299 		case MPI2_FUNCTION_SCSI_IO_REQUEST:  /*Fast Path IO.*/
2300 			/* Update load balancing info */
2301 			device_id = MEGASAS_DEV_INDEX(scmd_local);
2302 			lbinfo = &fusion->load_balance_info[device_id];
2303 			if (cmd_fusion->scmd->SCp.Status &
2304 			    MEGASAS_LOAD_BALANCE_FLAG) {
2305 				atomic_dec(&lbinfo->scsi_pending_cmds[cmd_fusion->pd_r1_lb]);
2306 				cmd_fusion->scmd->SCp.Status &=
2307 					~MEGASAS_LOAD_BALANCE_FLAG;
2308 			}
2309 			if (reply_descript_type ==
2310 			    MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS) {
2311 				if (megasas_dbg_lvl == 5)
2312 					dev_err(&instance->pdev->dev, "\nFAST Path "
2313 					       "IO Success\n");
2314 			}
2315 			/* Fall thru and complete IO */
2316 		case MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST: /* LD-IO Path */
2317 			/* Map the FW Cmd Status */
2318 			map_cmd_status(cmd_fusion, status, extStatus);
2319 			scsi_io_req->RaidContext.status = 0;
2320 			scsi_io_req->RaidContext.exStatus = 0;
2321 			if (megasas_cmd_type(scmd_local) == READ_WRITE_LDIO)
2322 				atomic_dec(&instance->ldio_outstanding);
2323 			megasas_return_cmd_fusion(instance, cmd_fusion);
2324 			scsi_dma_unmap(scmd_local);
2325 			scmd_local->scsi_done(scmd_local);
2326 			atomic_dec(&instance->fw_outstanding);
2327 
2328 			break;
2329 		case MEGASAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST: /*MFI command */
2330 			cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx];
2331 
2332 		/* Poll mode. Dummy free.
2333 		 * In interrupt mode, the caller performs the reverse check.
2334 			 */
2335 			if (cmd_mfi->flags & DRV_DCMD_POLLED_MODE) {
2336 				cmd_mfi->flags &= ~DRV_DCMD_POLLED_MODE;
2337 				megasas_return_cmd(instance, cmd_mfi);
2338 			} else
2339 				megasas_complete_cmd(instance, cmd_mfi, DID_OK);
2340 			break;
2341 		}
2342 
2343 		fusion->last_reply_idx[MSIxIndex]++;
2344 		if (fusion->last_reply_idx[MSIxIndex] >=
2345 		    fusion->reply_q_depth)
2346 			fusion->last_reply_idx[MSIxIndex] = 0;
2347 
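		/* Mark the descriptor as unused so it is not processed again
		 * before the firmware reposts it.
		 */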
2348 		desc->Words = cpu_to_le64(ULLONG_MAX);
2349 		num_completed++;
2350 		threshold_reply_count++;
2351 
2352 		/* Get the next reply descriptor */
2353 		if (!fusion->last_reply_idx[MSIxIndex])
2354 			desc = fusion->reply_frames_desc[MSIxIndex];
2355 		else
2356 			desc++;
2357 
2358 		reply_desc =
2359 		  (struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
2360 
2361 		d_val.word = desc->Words;
2362 
2363 		reply_descript_type = reply_desc->ReplyFlags &
2364 			MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
2365 
2366 		if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
2367 			break;
2368 		/*
2369 		 * Write to reply post host index register after completing threshold
2370 		 * number of reply counts and still there are more replies in reply queue
2371 		 * pending to be completed
2372 		 */
2373 		if (threshold_reply_count >= THRESHOLD_REPLY_COUNT) {
2374 			if (fusion->adapter_type == INVADER_SERIES)
2375 				writel(((MSIxIndex & 0x7) << 24) |
2376 					fusion->last_reply_idx[MSIxIndex],
2377 					instance->reply_post_host_index_addr[MSIxIndex/8]);
2378 			else
2379 				writel((MSIxIndex << 24) |
2380 					fusion->last_reply_idx[MSIxIndex],
2381 					instance->reply_post_host_index_addr[0]);
2382 			threshold_reply_count = 0;
2383 		}
2384 	}
2385 
2386 	if (!num_completed)
2387 		return IRQ_NONE;
2388 
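	/* Make sure all reply descriptor updates are visible before the
	 * reply post host index register is updated.
	 */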
2389 	wmb();
2390 	if (fusion->adapter_type == INVADER_SERIES)
2391 		writel(((MSIxIndex & 0x7) << 24) |
2392 			fusion->last_reply_idx[MSIxIndex],
2393 			instance->reply_post_host_index_addr[MSIxIndex/8]);
2394 	else
2395 		writel((MSIxIndex << 24) |
2396 			fusion->last_reply_idx[MSIxIndex],
2397 			instance->reply_post_host_index_addr[0]);
2398 	megasas_check_and_restore_queue_depth(instance);
2399 	return IRQ_HANDLED;
2400 }
2401 
2402 /**
2403  * megasas_complete_cmd_dpc_fusion -	Completes commands
2404  * @instance_addr:		Adapter soft state, cast to unsigned long
2405  *
2406  * Tasklet to complete cmds
2407  */
2408 void
2409 megasas_complete_cmd_dpc_fusion(unsigned long instance_addr)
2410 {
2411 	struct megasas_instance *instance =
2412 		(struct megasas_instance *)instance_addr;
2413 	unsigned long flags;
2414 	u32 count, MSIxIndex;
2415 
2416 	count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
2417 
2418 	/* If we have already declared the adapter dead, do not complete cmds */
2419 	spin_lock_irqsave(&instance->hba_lock, flags);
2420 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
2421 		spin_unlock_irqrestore(&instance->hba_lock, flags);
2422 		return;
2423 	}
2424 	spin_unlock_irqrestore(&instance->hba_lock, flags);
2425 
2426 	for (MSIxIndex = 0 ; MSIxIndex < count; MSIxIndex++)
2427 		complete_cmd_fusion(instance, MSIxIndex);
2428 }
2429 
2430 /**
2431  * megasas_isr_fusion - isr entry point
2432  */
2433 irqreturn_t megasas_isr_fusion(int irq, void *devp)
2434 {
2435 	struct megasas_irq_context *irq_context = devp;
2436 	struct megasas_instance *instance = irq_context->instance;
2437 	u32 mfiStatus, fw_state, dma_state;
2438 
2439 	if (instance->mask_interrupts)
2440 		return IRQ_NONE;
2441 
2442 	if (!instance->msix_vectors) {
2443 		mfiStatus = instance->instancet->clear_intr(instance->reg_set);
2444 		if (!mfiStatus)
2445 			return IRQ_NONE;
2446 	}
2447 
2448 	/* If we are resetting, bail */
2449 	if (test_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags)) {
2450 		instance->instancet->clear_intr(instance->reg_set);
2451 		return IRQ_HANDLED;
2452 	}
2453 
2454 	if (!complete_cmd_fusion(instance, irq_context->MSIxIndex)) {
2455 		instance->instancet->clear_intr(instance->reg_set);
2456 		/* If we didn't complete any commands, check for FW fault */
2457 		fw_state = instance->instancet->read_fw_status_reg(
2458 			instance->reg_set) & MFI_STATE_MASK;
2459 		dma_state = instance->instancet->read_fw_status_reg
2460 			(instance->reg_set) & MFI_STATE_DMADONE;
2461 		if (instance->crash_dump_drv_support &&
2462 			instance->crash_dump_app_support) {
2463 			/* Start collecting crash, if DMA bit is done */
2464 			if ((fw_state == MFI_STATE_FAULT) && dma_state)
2465 				schedule_work(&instance->crash_init);
2466 			else if (fw_state == MFI_STATE_FAULT)
2467 				schedule_work(&instance->work_init);
2468 		} else if (fw_state == MFI_STATE_FAULT) {
2469 			dev_warn(&instance->pdev->dev, "Iop2SysDoorbellInt "
2470 			       "for scsi%d\n", instance->host->host_no);
2471 			schedule_work(&instance->work_init);
2472 		}
2473 	}
2474 
2475 	return IRQ_HANDLED;
2476 }
2477 
2478 /**
2479  * build_mpt_mfi_pass_thru - builds a cmd for MFI pass-thru
2480  * @instance:			Adapter soft state
2481  * @mfi_cmd:			megasas_cmd pointer
2482  *
2483  */
2484 u8
2485 build_mpt_mfi_pass_thru(struct megasas_instance *instance,
2486 			struct megasas_cmd *mfi_cmd)
2487 {
2488 	struct MPI25_IEEE_SGE_CHAIN64 *mpi25_ieee_chain;
2489 	struct MPI2_RAID_SCSI_IO_REQUEST *io_req;
2490 	struct megasas_cmd_fusion *cmd;
2491 	struct fusion_context *fusion;
2492 	struct megasas_header *frame_hdr = &mfi_cmd->frame->hdr;
2493 
2494 	fusion = instance->ctrl_context;
2495 
2496 	cmd = megasas_get_cmd_fusion(instance,
2497 			instance->max_scsi_cmds + mfi_cmd->index);
2498 
2499 	/*  Save the smid. To be used for returning the cmd */
2500 	mfi_cmd->context.smid = cmd->index;
2501 
2502 	/*
2503 	 * For cmds where the flag is set, store the flag and check
2504 	 * on completion. For cmds with this flag, don't call
2505 	 * megasas_complete_cmd
2506 	 */
2507 
2508 	if (frame_hdr->flags & cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE))
2509 		mfi_cmd->flags |= DRV_DCMD_POLLED_MODE;
2510 
2511 	io_req = cmd->io_request;
2512 
2513 	if (fusion->adapter_type == INVADER_SERIES) {
2514 		struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr_end =
2515 			(struct MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL;
2516 		sgl_ptr_end += fusion->max_sge_in_main_msg - 1;
2517 		sgl_ptr_end->Flags = 0;
2518 	}
2519 
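	/* A single IEEE chain element points the firmware at the MFI frame
	 * carrying the pass-through command.
	 */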
2520 	mpi25_ieee_chain =
2521 	  (struct MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL.IeeeChain;
2522 
2523 	io_req->Function    = MEGASAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST;
2524 	io_req->SGLOffset0  = offsetof(struct MPI2_RAID_SCSI_IO_REQUEST,
2525 				       SGL) / 4;
2526 	io_req->ChainOffset = fusion->chain_offset_mfi_pthru;
2527 
2528 	mpi25_ieee_chain->Address = cpu_to_le64(mfi_cmd->frame_phys_addr);
2529 
2530 	mpi25_ieee_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT |
2531 		MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;
2532 
2533 	mpi25_ieee_chain->Length = cpu_to_le32(instance->max_chain_frame_sz);
2534 
2535 	return 0;
2536 }
2537 
2538 /**
2539  * build_mpt_cmd - Calls helper function to build an MFI pass-thru cmd
2540  * @instance:			Adapter soft state
2541  * @cmd:			mfi cmd to build
2542  *
2543  */
2544 union MEGASAS_REQUEST_DESCRIPTOR_UNION *
2545 build_mpt_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
2546 {
2547 	union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
2548 	u16 index;
2549 
2550 	if (build_mpt_mfi_pass_thru(instance, cmd)) {
2551 		dev_err(&instance->pdev->dev, "Couldn't build MFI pass thru cmd\n");
2552 		return NULL;
2553 	}
2554 
2555 	index = cmd->context.smid;
2556 
2557 	req_desc = megasas_get_request_descriptor(instance, index - 1);
2558 
2559 	if (!req_desc)
2560 		return NULL;
2561 
2562 	req_desc->Words = 0;
2563 	req_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
2564 					 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
2565 
2566 	req_desc->SCSIIO.SMID = cpu_to_le16(index);
2567 
2568 	return req_desc;
2569 }
2570 
2571 /**
2572  * megasas_issue_dcmd_fusion - Issues an MFI pass-thru cmd
2573  * @instance:			Adapter soft state
2574  * @cmd:			mfi cmd pointer
2575  *
2576  */
2577 int
2578 megasas_issue_dcmd_fusion(struct megasas_instance *instance,
2579 			  struct megasas_cmd *cmd)
2580 {
2581 	union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
2582 
2583 	req_desc = build_mpt_cmd(instance, cmd);
2584 	if (!req_desc) {
2585 		dev_info(&instance->pdev->dev, "Failed from %s %d\n",
2586 					__func__, __LINE__);
2587 		return DCMD_NOT_FIRED;
2588 	}
2589 
2590 	megasas_fire_cmd_fusion(instance, req_desc);
2591 	return DCMD_SUCCESS;
2592 }
2593 
2594 /**
2595  * megasas_release_fusion -	Reverses the FW initialization
2596  * @instance:			Adapter soft state
2597  */
2598 void
2599 megasas_release_fusion(struct megasas_instance *instance)
2600 {
2601 	megasas_free_cmds(instance);
2602 	megasas_free_cmds_fusion(instance);
2603 
2604 	iounmap(instance->reg_set);
2605 
2606 	pci_release_selected_regions(instance->pdev, instance->bar);
2607 }
2608 
2609 /**
2610  * megasas_read_fw_status_reg_fusion - returns the current FW status value
2611  * @regs:			MFI register set
2612  */
2613 static u32
2614 megasas_read_fw_status_reg_fusion(struct megasas_register_set __iomem *regs)
2615 {
2616 	return readl(&(regs)->outbound_scratch_pad);
2617 }
2618 
2619 /**
2620  * megasas_alloc_host_crash_buffer -	Allocate host buffers for crash dump collection from firmware
2621  * @instance:				Controller's soft instance
2622  * The number of buffers actually allocated is stored in instance->drv_buf_alloc.
2623  */
2624 static void
2625 megasas_alloc_host_crash_buffer(struct megasas_instance *instance)
2626 {
2627 	unsigned int i;
2628 
2629 	instance->crash_buf_pages = get_order(CRASH_DMA_BUF_SIZE);
2630 	for (i = 0; i < MAX_CRASH_DUMP_SIZE; i++) {
2631 		instance->crash_buf[i] = (void	*)__get_free_pages(GFP_KERNEL,
2632 				instance->crash_buf_pages);
2633 		if (!instance->crash_buf[i]) {
2634 			dev_info(&instance->pdev->dev, "Firmware crash dump "
2635 				"memory allocation failed at index %d\n", i);
2636 			break;
2637 		}
2638 		memset(instance->crash_buf[i], 0,
2639 			((1 << PAGE_SHIFT) << instance->crash_buf_pages));
2640 	}
2641 	instance->drv_buf_alloc = i;
2642 }
2643 
2644 /**
2645  * megasas_free_host_crash_buffer -	Free host buffers used for crash dump collection from firmware
2646  * @instance:				Controller's soft instance
2647  */
2648 void
2649 megasas_free_host_crash_buffer(struct megasas_instance *instance)
2650 {
2651 	unsigned int i;
2652 
2653 	for (i = 0; i < instance->drv_buf_alloc; i++) {
2654 		if (instance->crash_buf[i])
2655 			free_pages((ulong)instance->crash_buf[i],
2656 					instance->crash_buf_pages);
2657 	}
2658 	instance->drv_buf_index = 0;
2659 	instance->drv_buf_alloc = 0;
2660 	instance->fw_crash_state = UNAVAILABLE;
2661 	instance->fw_crash_buffer_size = 0;
2662 }
2663 
2664 /**
2665  * megasas_adp_reset_fusion -	For controller reset
2666  * @regs:				MFI register set
2667  */
2668 static int
2669 megasas_adp_reset_fusion(struct megasas_instance *instance,
2670 			 struct megasas_register_set __iomem *regs)
2671 {
2672 	u32 host_diag, abs_state, retry;
2673 
2674 	/* Now try to reset the chip */
2675 	writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &instance->reg_set->fusion_seq_offset);
2676 	writel(MPI2_WRSEQ_1ST_KEY_VALUE, &instance->reg_set->fusion_seq_offset);
2677 	writel(MPI2_WRSEQ_2ND_KEY_VALUE, &instance->reg_set->fusion_seq_offset);
2678 	writel(MPI2_WRSEQ_3RD_KEY_VALUE, &instance->reg_set->fusion_seq_offset);
2679 	writel(MPI2_WRSEQ_4TH_KEY_VALUE, &instance->reg_set->fusion_seq_offset);
2680 	writel(MPI2_WRSEQ_5TH_KEY_VALUE, &instance->reg_set->fusion_seq_offset);
2681 	writel(MPI2_WRSEQ_6TH_KEY_VALUE, &instance->reg_set->fusion_seq_offset);
2682 
2683 	/* Check that the diag write enable (DRWE) bit is on */
2684 	host_diag = readl(&instance->reg_set->fusion_host_diag);
2685 	retry = 0;
2686 	while (!(host_diag & HOST_DIAG_WRITE_ENABLE)) {
2687 		msleep(100);
2688 		host_diag = readl(&instance->reg_set->fusion_host_diag);
2689 		if (retry++ == 100) {
2690 			dev_warn(&instance->pdev->dev,
2691 				"Host diag unlock failed from %s %d\n",
2692 				__func__, __LINE__);
2693 			break;
2694 		}
2695 	}
2696 	if (!(host_diag & HOST_DIAG_WRITE_ENABLE))
2697 		return -1;
2698 
2699 	/* Send chip reset command */
2700 	writel(host_diag | HOST_DIAG_RESET_ADAPTER,
2701 		&instance->reg_set->fusion_host_diag);
2702 	msleep(3000);
2703 
2704 	/* Make sure reset adapter bit is cleared */
2705 	host_diag = readl(&instance->reg_set->fusion_host_diag);
2706 	retry = 0;
2707 	while (host_diag & HOST_DIAG_RESET_ADAPTER) {
2708 		msleep(100);
2709 		host_diag = readl(&instance->reg_set->fusion_host_diag);
2710 		if (retry++ == 1000) {
2711 			dev_warn(&instance->pdev->dev,
2712 				"Diag reset adapter never cleared %s %d\n",
2713 				__func__, __LINE__);
2714 			break;
2715 		}
2716 	}
2717 	if (host_diag & HOST_DIAG_RESET_ADAPTER)
2718 		return -1;
2719 
2720 	abs_state = instance->instancet->read_fw_status_reg(instance->reg_set)
2721 			& MFI_STATE_MASK;
2722 	retry = 0;
2723 
2724 	while ((abs_state <= MFI_STATE_FW_INIT) && (retry++ < 1000)) {
2725 		msleep(100);
2726 		abs_state = instance->instancet->
2727 			read_fw_status_reg(instance->reg_set) & MFI_STATE_MASK;
2728 	}
2729 	if (abs_state <= MFI_STATE_FW_INIT) {
2730 		dev_warn(&instance->pdev->dev,
2731 			"fw state < MFI_STATE_FW_INIT, state = 0x%x %s %d\n",
2732 			abs_state, __func__, __LINE__);
2733 		return -1;
2734 	}
2735 
2736 	return 0;
2737 }
2738 
2739 /**
2740  * megasas_check_reset_fusion -	For controller reset check
2741  * @regs:				MFI register set
2742  */
2743 static int
2744 megasas_check_reset_fusion(struct megasas_instance *instance,
2745 			   struct megasas_register_set __iomem *regs)
2746 {
2747 	return 0;
2748 }
2749 
2750 /* This function waits for outstanding commands on fusion to complete */
2751 int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance,
2752 					int reason, int *convert)
2753 {
2754 	int i, outstanding, retval = 0, hb_seconds_missed = 0;
2755 	u32 fw_state;
2756 
2757 	for (i = 0; i < resetwaittime; i++) {
2758 		/* Check if firmware is in fault state */
2759 		fw_state = instance->instancet->read_fw_status_reg(
2760 			instance->reg_set) & MFI_STATE_MASK;
2761 		if (fw_state == MFI_STATE_FAULT) {
2762 			dev_warn(&instance->pdev->dev, "Found FW in FAULT state,"
2763 			       " will reset adapter scsi%d.\n",
2764 				instance->host->host_no);
2765 			megasas_complete_cmd_dpc_fusion((unsigned long)instance);
2766 			retval = 1;
2767 			goto out;
2768 		}
2769 
2770 		if (reason == MFI_IO_TIMEOUT_OCR) {
2771 			dev_info(&instance->pdev->dev,
2772 				"MFI IO is timed out, initiating OCR\n");
2773 			megasas_complete_cmd_dpc_fusion((unsigned long)instance);
2774 			retval = 1;
2775 			goto out;
2776 		}
2777 
2778 		/* If SR-IOV VF mode & heartbeat timeout, don't wait */
2779 		if (instance->requestorId && !reason) {
2780 			retval = 1;
2781 			goto out;
2782 		}
2783 
2784 		/* If SR-IOV VF mode & I/O timeout, check for HB timeout */
2785 		if (instance->requestorId && reason) {
2786 			if (instance->hb_host_mem->HB.fwCounter !=
2787 			    instance->hb_host_mem->HB.driverCounter) {
2788 				instance->hb_host_mem->HB.driverCounter =
2789 					instance->hb_host_mem->HB.fwCounter;
2790 				hb_seconds_missed = 0;
2791 			} else {
2792 				hb_seconds_missed++;
2793 				if (hb_seconds_missed ==
2794 				    (MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF/HZ)) {
2795 					dev_warn(&instance->pdev->dev, "SR-IOV:"
2796 					       " Heartbeat never completed"
2797 					       " while polling during I/O"
2798 					       " timeout handling for "
2799 					       "scsi%d.\n",
2800 					       instance->host->host_no);
2801 					       *convert = 1;
2802 					       retval = 1;
2803 					       goto out;
2804 				}
2805 			}
2806 		}
2807 
2808 		outstanding = atomic_read(&instance->fw_outstanding);
2809 		if (!outstanding)
2810 			goto out;
2811 
2812 		if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
2813 			dev_notice(&instance->pdev->dev, "[%2d] waiting for %d "
2814 			       "commands to complete for scsi%d\n", i,
2815 			       outstanding, instance->host->host_no);
2816 			megasas_complete_cmd_dpc_fusion(
2817 				(unsigned long)instance);
2818 		}
2819 		msleep(1000);
2820 	}
2821 
2822 	if (atomic_read(&instance->fw_outstanding)) {
2823 		dev_err(&instance->pdev->dev, "pending commands remain after waiting, "
2824 		       "will reset adapter scsi%d.\n",
2825 		       instance->host->host_no);
2826 		retval = 1;
2827 	}
2828 out:
2829 	return retval;
2830 }
2831 
2832 void  megasas_reset_reply_desc(struct megasas_instance *instance)
2833 {
2834 	int i, j, count;
2835 	struct fusion_context *fusion;
2836 	union MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
2837 
2838 	fusion = instance->ctrl_context;
2839 	count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
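	/* Mark every descriptor in every reply queue as unused and rewind
	 * each queue's read index.
	 */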
2840 	for (i = 0 ; i < count ; i++) {
2841 		fusion->last_reply_idx[i] = 0;
2842 		reply_desc = fusion->reply_frames_desc[i];
2843 		for (j = 0 ; j < fusion->reply_q_depth; j++, reply_desc++)
2844 			reply_desc->Words = cpu_to_le64(ULLONG_MAX);
2845 	}
2846 }
2847 
2848 /*
2849  * megasas_refire_mgmt_cmd :	Re-fire management commands
2850  * @instance:				Controller's soft instance
2851  */
2852 void megasas_refire_mgmt_cmd(struct megasas_instance *instance)
2853 {
2854 	int j;
2855 	struct megasas_cmd_fusion *cmd_fusion;
2856 	struct fusion_context *fusion;
2857 	struct megasas_cmd *cmd_mfi;
2858 	union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
2859 	u16 smid;
2860 	bool refire_cmd = 0;
2861 
2862 	fusion = instance->ctrl_context;
2863 
2864 	/* Re-fire management commands.
2865 	 * Do not traverse the complete MPT frame pool. Start from max_scsi_cmds.
2866 	 */
2867 	for (j = instance->max_scsi_cmds ; j < instance->max_fw_cmds; j++) {
2868 		cmd_fusion = fusion->cmd_list[j];
2869 		cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx];
2870 		smid = le16_to_cpu(cmd_mfi->context.smid);
2871 
2872 		if (!smid)
2873 			continue;
2874 		req_desc = megasas_get_request_descriptor
2875 					(instance, smid - 1);
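		/* RAID/PD map DCMDs and commands flagged to skip refire are
		 * returned to the pool instead of being re-issued.
		 */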
2876 		refire_cmd = req_desc && ((cmd_mfi->frame->dcmd.opcode !=
2877 				cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO)) &&
2878 				 (cmd_mfi->frame->dcmd.opcode !=
2879 				cpu_to_le32(MR_DCMD_SYSTEM_PD_MAP_GET_INFO)))
2880 				&& !(cmd_mfi->flags & DRV_DCMD_SKIP_REFIRE);
2881 		if (refire_cmd)
2882 			megasas_fire_cmd_fusion(instance, req_desc);
2883 		else
2884 			megasas_return_cmd(instance, cmd_mfi);
2885 	}
2886 }
2887 
2888 /*
2889  * megasas_track_scsiio : Track SCSI IOs outstanding to a SCSI device
2890  * @instance: per adapter struct
2891  * @id: the id assigned by the OS
2892  * @channel: the channel assigned by the OS
2893  *
2894  * Returns SUCCESS if no IOs are pending to the SCSI device, else returns FAILED
2895  */
2896 
2897 static int megasas_track_scsiio(struct megasas_instance *instance,
2898 		int id, int channel)
2899 {
2900 	int i, found = 0;
2901 	struct megasas_cmd_fusion *cmd_fusion;
2902 	struct fusion_context *fusion;
2903 	fusion = instance->ctrl_context;
2904 
2905 	for (i = 0 ; i < instance->max_scsi_cmds; i++) {
2906 		cmd_fusion = fusion->cmd_list[i];
2907 		if (cmd_fusion->scmd &&
2908 			(cmd_fusion->scmd->device->id == id &&
2909 			cmd_fusion->scmd->device->channel == channel)) {
2910 			dev_info(&instance->pdev->dev,
2911 				"SCSI commands pending to target "
2912 				"channel %d id %d \tSMID: 0x%x\n",
2913 				channel, id, cmd_fusion->index);
2914 			scsi_print_command(cmd_fusion->scmd);
2915 			found = 1;
2916 			break;
2917 		}
2918 	}
2919 
2920 	return found ? FAILED : SUCCESS;
2921 }
2922 
2923 /**
2924  * megasas_tm_response_code - translation of device response code
2925  * @instance: per adapter object
2926  * @mpi_reply: MPI reply returned by firmware
2927  *
2928  * Return nothing.
2929  */
2930 static void
2931 megasas_tm_response_code(struct megasas_instance *instance,
2932 		struct MPI2_SCSI_TASK_MANAGE_REPLY *mpi_reply)
2933 {
2934 	char *desc;
2935 
2936 	switch (mpi_reply->ResponseCode) {
2937 	case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
2938 		desc = "task management request completed";
2939 		break;
2940 	case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
2941 		desc = "invalid frame";
2942 		break;
2943 	case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
2944 		desc = "task management request not supported";
2945 		break;
2946 	case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
2947 		desc = "task management request failed";
2948 		break;
2949 	case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
2950 		desc = "task management request succeeded";
2951 		break;
2952 	case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
2953 		desc = "invalid lun";
2954 		break;
2955 	case 0xA:
2956 		desc = "overlapped tag attempted";
2957 		break;
2958 	case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
2959 		desc = "task queued, however not sent to target";
2960 		break;
2961 	default:
2962 		desc = "unknown";
2963 		break;
2964 	}
2965 	dev_dbg(&instance->pdev->dev, "response_code(%01x): %s\n",
2966 		mpi_reply->ResponseCode, desc);
2967 	dev_dbg(&instance->pdev->dev,
2968 		"TerminationCount/DevHandle/Function/TaskType/IOCStat/IOCLoginfo"
2969 		" 0x%x/0x%x/0x%x/0x%x/0x%x/0x%x\n",
2970 		mpi_reply->TerminationCount, mpi_reply->DevHandle,
2971 		mpi_reply->Function, mpi_reply->TaskType,
2972 		mpi_reply->IOCStatus, mpi_reply->IOCLogInfo);
2973 }
2974 
2975 /**
2976  * megasas_issue_tm - main routine for sending tm requests
2977  * @instance: per adapter struct
2978  * @device_handle: device handle
2979  * @channel: the channel assigned by the OS
2980  * @id: the id assigned by the OS
2981  * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in megaraid_sas_fusion.c)
2982  * @smid_task: smid assigned to the task
2984  * Context: user
2985  *
2986  * MegaRaid uses the MPT interface for task management requests.
2987  * A generic API for sending task management requests to firmware.
2988  *
2989  * Return SUCCESS or FAILED.
2990  */
2991 static int
2992 megasas_issue_tm(struct megasas_instance *instance, u16 device_handle,
2993 	uint channel, uint id, u16 smid_task, u8 type)
2994 {
2995 	struct MR_TASK_MANAGE_REQUEST *mr_request;
2996 	struct MPI2_SCSI_TASK_MANAGE_REQUEST *mpi_request;
2997 	unsigned long timeleft;
2998 	struct megasas_cmd_fusion *cmd_fusion;
2999 	struct megasas_cmd *cmd_mfi;
3000 	union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
3001 	struct fusion_context *fusion;
3002 	struct megasas_cmd_fusion *scsi_lookup;
3003 	int rc;
3004 	struct MPI2_SCSI_TASK_MANAGE_REPLY *mpi_reply;
3005 
3006 	fusion = instance->ctrl_context;
3007 
3008 	cmd_mfi = megasas_get_cmd(instance);
3009 
3010 	if (!cmd_mfi) {
3011 		dev_err(&instance->pdev->dev, "Failed from %s %d\n",
3012 			__func__, __LINE__);
3013 		return -ENOMEM;
3014 	}
3015 
3016 	cmd_fusion = megasas_get_cmd_fusion(instance,
3017 			instance->max_scsi_cmds + cmd_mfi->index);
3018 
3019 	/*  Save the smid. To be used for returning the cmd */
3020 	cmd_mfi->context.smid = cmd_fusion->index;
3021 
3022 	req_desc = megasas_get_request_descriptor(instance,
3023 			(cmd_fusion->index - 1));
3024 	if (!req_desc) {
3025 		dev_err(&instance->pdev->dev, "Failed from %s %d\n",
3026 			__func__, __LINE__);
3027 		megasas_return_cmd(instance, cmd_mfi);
3028 		return -ENOMEM;
3029 	}
3030 
3031 	cmd_fusion->request_desc = req_desc;
3032 	req_desc->Words = 0;
3033 
3034 	scsi_lookup = fusion->cmd_list[smid_task - 1];
3035 
3036 	mr_request = (struct MR_TASK_MANAGE_REQUEST *) cmd_fusion->io_request;
3037 	memset(mr_request, 0, sizeof(struct MR_TASK_MANAGE_REQUEST));
3038 	mpi_request = (struct MPI2_SCSI_TASK_MANAGE_REQUEST *) &mr_request->TmRequest;
3039 	mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
3040 	mpi_request->DevHandle = cpu_to_le16(device_handle);
3041 	mpi_request->TaskType = type;
3042 	mpi_request->TaskMID = cpu_to_le16(smid_task);
3043 	mpi_request->LUN[1] = 0;
3044 
3045 
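	/* Task management requests are posted with a high-priority request
	 * descriptor rather than a normal SCSI IO descriptor.
	 */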
3046 	req_desc = cmd_fusion->request_desc;
3047 	req_desc->HighPriority.SMID = cpu_to_le16(cmd_fusion->index);
3048 	req_desc->HighPriority.RequestFlags =
3049 		(MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
3050 		MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
3051 	req_desc->HighPriority.MSIxIndex =  0;
3052 	req_desc->HighPriority.LMID = 0;
3053 	req_desc->HighPriority.Reserved1 = 0;
3054 
3055 	if (channel < MEGASAS_MAX_PD_CHANNELS)
3056 		mr_request->tmReqFlags.isTMForPD = 1;
3057 	else
3058 		mr_request->tmReqFlags.isTMForLD = 1;
3059 
3060 	init_completion(&cmd_fusion->done);
3061 	megasas_fire_cmd_fusion(instance, req_desc);
3062 
3063 	timeleft = wait_for_completion_timeout(&cmd_fusion->done, 50 * HZ);
3064 
3065 	if (!timeleft) {
3066 		dev_err(&instance->pdev->dev,
3067 			"task mgmt type 0x%x timed out\n", type);
3068 		cmd_mfi->flags |= DRV_DCMD_SKIP_REFIRE;
3069 		mutex_unlock(&instance->reset_mutex);
3070 		rc = megasas_reset_fusion(instance->host, MFI_IO_TIMEOUT_OCR);
3071 		mutex_lock(&instance->reset_mutex);
3072 		return rc;
3073 	}
3074 
3075 	mpi_reply = (struct MPI2_SCSI_TASK_MANAGE_REPLY *) &mr_request->TMReply;
3076 	megasas_tm_response_code(instance, mpi_reply);
3077 
3078 	megasas_return_cmd(instance, cmd_mfi);
3079 	rc = SUCCESS;
3080 	switch (type) {
3081 	case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
3082 		if (scsi_lookup->scmd == NULL)
3083 			break;
3084 		else {
3085 			instance->instancet->disable_intr(instance);
3086 			msleep(1000);
3087 			megasas_complete_cmd_dpc_fusion
3088 					((unsigned long)instance);
3089 			instance->instancet->enable_intr(instance);
3090 			if (scsi_lookup->scmd == NULL)
3091 				break;
3092 		}
3093 		rc = FAILED;
3094 		break;
3095 
3096 	case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
3097 		if ((channel == 0xFFFFFFFF) && (id == 0xFFFFFFFF))
3098 			break;
3099 		instance->instancet->disable_intr(instance);
3100 		msleep(1000);
3101 		megasas_complete_cmd_dpc_fusion
3102 				((unsigned long)instance);
3103 		rc = megasas_track_scsiio(instance, id, channel);
3104 		instance->instancet->enable_intr(instance);
3105 
3106 		break;
3107 	case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
3108 	case MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK:
3109 		break;
3110 	default:
3111 		rc = FAILED;
3112 		break;
3113 	}
3114 
3115 	return rc;
3116 
3117 }
3118 
3119 /*
3120  * megasas_fusion_smid_lookup : Look for the fusion command corresponding to a SCSI command
3121  * @scmd: SCSI command whose SMID is being looked up
3122  *
3123  * Returns a non-zero index if the SMID is found among the outstanding commands
3124  */
3125 static u16 megasas_fusion_smid_lookup(struct scsi_cmnd *scmd)
3126 {
3127 	int i, ret = 0;
3128 	struct megasas_instance *instance;
3129 	struct megasas_cmd_fusion *cmd_fusion;
3130 	struct fusion_context *fusion;
3131 
3132 	instance = (struct megasas_instance *)scmd->device->host->hostdata;
3133 
3134 	fusion = instance->ctrl_context;
3135 
3136 	for (i = 0; i < instance->max_scsi_cmds; i++) {
3137 		cmd_fusion = fusion->cmd_list[i];
3138 		if (cmd_fusion->scmd && (cmd_fusion->scmd == scmd)) {
3139 			scmd_printk(KERN_NOTICE, scmd, "Abort request is for"
3140 				" SMID: %d\n", cmd_fusion->index);
3141 			ret = cmd_fusion->index;
3142 			break;
3143 		}
3144 	}
3145 
3146 	return ret;
3147 }
3148 
3149 /*
3150  * megasas_get_tm_devhandle - Get devhandle for TM request
3151  * @sdev:		     OS provided scsi device
3152  *
3153  * Returns:		     devhandle/targetID of SCSI device
3154  */
3155 static u16 megasas_get_tm_devhandle(struct scsi_device *sdev)
3156 {
3157 	u16 pd_index = 0;
3158 	u32 device_id;
3159 	struct megasas_instance *instance;
3160 	struct fusion_context *fusion;
3161 	struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
3162 	u16 devhandle = (u16)ULONG_MAX;
3163 
3164 	instance = (struct megasas_instance *)sdev->host->hostdata;
3165 	fusion = instance->ctrl_context;
3166 
3167 	if (sdev->channel < MEGASAS_MAX_PD_CHANNELS) {
3168 		if (instance->use_seqnum_jbod_fp) {
3169 				pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
3170 						sdev->id;
3171 				pd_sync = (void *)fusion->pd_seq_sync
3172 						[(instance->pd_seq_map_id - 1) & 1];
3173 				devhandle = pd_sync->seq[pd_index].devHandle;
3174 		} else
3175 			sdev_printk(KERN_ERR, sdev, "Firmware exposes tmCapable"
3176 				" without JBOD MAP support from %s %d\n", __func__, __LINE__);
3177 	} else {
3178 		device_id = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL)
3179 				+ sdev->id;
3180 		devhandle = device_id;
3181 	}
3182 
3183 	return devhandle;
3184 }
3185 
3186 /*
3187  * megasas_task_abort_fusion : SCSI task abort function for fusion adapters
3188  * @scmd : pointer to scsi command object
3189  *
3190  * Returns SUCCESS if the command was aborted, else FAILED
3191  */
3192 
3193 int megasas_task_abort_fusion(struct scsi_cmnd *scmd)
3194 {
3195 	struct megasas_instance *instance;
3196 	u16 smid, devhandle;
3197 	struct fusion_context *fusion;
3198 	int ret;
3199 	struct MR_PRIV_DEVICE *mr_device_priv_data;
3200 	mr_device_priv_data = scmd->device->hostdata;
3201 
3202 
3203 	instance = (struct megasas_instance *)scmd->device->host->hostdata;
3204 	fusion = instance->ctrl_context;
3205 
3206 	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
3207 		dev_err(&instance->pdev->dev, "Controller is not OPERATIONAL, "
3208 			"SCSI host:%d\n", instance->host->host_no);
3209 		ret = FAILED;
3210 		return ret;
3211 	}
3212 
3213 	if (!mr_device_priv_data) {
3214 		sdev_printk(KERN_INFO, scmd->device, "device has been deleted! "
3215 			"scmd(%p)\n", scmd);
3216 		scmd->result = DID_NO_CONNECT << 16;
3217 		ret = SUCCESS;
3218 		goto out;
3219 	}
3220 
3221 
3222 	if (!mr_device_priv_data->is_tm_capable) {
3223 		ret = FAILED;
3224 		goto out;
3225 	}
3226 
3227 	mutex_lock(&instance->reset_mutex);
3228 
3229 	smid = megasas_fusion_smid_lookup(scmd);
3230 
3231 	if (!smid) {
3232 		ret = SUCCESS;
3233 		scmd_printk(KERN_NOTICE, scmd, "Command for which abort is"
3234 			" issued is not found in outstanding commands\n");
3235 		mutex_unlock(&instance->reset_mutex);
3236 		goto out;
3237 	}
3238 
3239 	devhandle = megasas_get_tm_devhandle(scmd->device);
3240 
3241 	if (devhandle == (u16)ULONG_MAX) {
3242 		ret = SUCCESS;
3243 		sdev_printk(KERN_INFO, scmd->device,
3244 			"task abort issued for invalid devhandle\n");
3245 		mutex_unlock(&instance->reset_mutex);
3246 		goto out;
3247 	}
3248 	sdev_printk(KERN_INFO, scmd->device,
3249 		"attempting task abort! scmd(%p) tm_dev_handle 0x%x\n",
3250 		scmd, devhandle);
3251 
3252 	mr_device_priv_data->tm_busy = 1;
3253 	ret = megasas_issue_tm(instance, devhandle,
3254 			scmd->device->channel, scmd->device->id, smid,
3255 			MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK);
3256 	mr_device_priv_data->tm_busy = 0;
3257 
3258 	mutex_unlock(&instance->reset_mutex);
3259 out:
3260 	sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(%p)\n",
3261 			((ret == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
3262 
3263 	return ret;
3264 }
3265 
3266 /*
3267  * megasas_reset_target_fusion : target reset function for fusion adapters
3268  * @scmd: SCSI command pointer
3269  *
3270  * Returns SUCCESS if all commands associated with the target are aborted, else FAILED
3271  */
3272 
3273 int megasas_reset_target_fusion(struct scsi_cmnd *scmd)
3274 {
3275 
3276 	struct megasas_instance *instance;
3277 	int ret = FAILED;
3278 	u16 devhandle;
3279 	struct fusion_context *fusion;
3280 	struct MR_PRIV_DEVICE *mr_device_priv_data;
3281 	mr_device_priv_data = scmd->device->hostdata;
3282 
3283 	instance = (struct megasas_instance *)scmd->device->host->hostdata;
3284 	fusion = instance->ctrl_context;
3285 
3286 	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
3287 		dev_err(&instance->pdev->dev, "Controller is not OPERATIONAL, "
3288 			"SCSI host:%d\n", instance->host->host_no);
3289 		ret = FAILED;
3290 		return ret;
3291 	}
3292 
3293 	if (!mr_device_priv_data) {
3294 		sdev_printk(KERN_INFO, scmd->device, "device has been deleted! "
3295 			"scmd(%p)\n", scmd);
3296 		scmd->result = DID_NO_CONNECT << 16;
3297 		ret = SUCCESS;
3298 		goto out;
3299 	}
3300 
3301 
3302 	if (!mr_device_priv_data->is_tm_capable) {
3303 		ret = FAILED;
3304 		goto out;
3305 	}
3306 
3307 	mutex_lock(&instance->reset_mutex);
3308 	devhandle = megasas_get_tm_devhandle(scmd->device);
3309 
3310 	if (devhandle == (u16)ULONG_MAX) {
3311 		ret = SUCCESS;
3312 		sdev_printk(KERN_INFO, scmd->device,
3313 			"target reset issued for invalid devhandle\n");
3314 		mutex_unlock(&instance->reset_mutex);
3315 		goto out;
3316 	}
3317 
3318 	sdev_printk(KERN_INFO, scmd->device,
3319 		"attempting target reset! scmd(%p) tm_dev_handle 0x%x\n",
3320 		scmd, devhandle);
3321 	mr_device_priv_data->tm_busy = 1;
3322 	ret = megasas_issue_tm(instance, devhandle,
3323 			scmd->device->channel, scmd->device->id, 0,
3324 			MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
3325 	mr_device_priv_data->tm_busy = 0;
3326 	mutex_unlock(&instance->reset_mutex);
3327 out:
3328 	scmd_printk(KERN_NOTICE, scmd, "megasas: target reset %s!!\n",
3329 		(ret == SUCCESS) ? "SUCCESS" : "FAILED");
3330 
3331 	return ret;
3332 }
3333 
3334 /* SR-IOV: get the other instance in the cluster, if any */
3335 struct megasas_instance *megasas_get_peer_instance(struct megasas_instance *instance)
3336 {
3337 	int i;
3338 
3339 	for (i = 0; i < MAX_MGMT_ADAPTERS; i++) {
3340 		if (megasas_mgmt_info.instance[i] &&
3341 			(megasas_mgmt_info.instance[i] != instance) &&
3342 			 megasas_mgmt_info.instance[i]->requestorId &&
3343 			 megasas_mgmt_info.instance[i]->peerIsPresent &&
3344 			(memcmp((megasas_mgmt_info.instance[i]->clusterId),
3345 			instance->clusterId, MEGASAS_CLUSTER_ID_SIZE) == 0))
3346 			return megasas_mgmt_info.instance[i];
3347 	}
3348 	return NULL;
3349 }
3350 
3351 /* Check for a second path that is currently UP */
3352 int megasas_check_mpio_paths(struct megasas_instance *instance,
3353 	struct scsi_cmnd *scmd)
3354 {
3355 	struct megasas_instance *peer_instance = NULL;
3356 	int retval = (DID_RESET << 16);
3357 
3358 	if (instance->peerIsPresent) {
3359 		peer_instance = megasas_get_peer_instance(instance);
3360 		if ((peer_instance) &&
3361 			(atomic_read(&peer_instance->adprecovery) ==
3362 			MEGASAS_HBA_OPERATIONAL))
3363 			retval = (DID_NO_CONNECT << 16);
3364 	}
3365 	return retval;
3366 }
3367 
3368 /* Core fusion reset function */
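/*
 * OCR (online controller reset) flow implemented below:
 *  1. Bail out if the HBA is already in a hardware critical error state.
 *  2. If an IO/DCMD timeout triggered the reset and FW crash dump is
 *     supported, force the FW to FAULT via the doorbell and poll up to
 *     240 seconds for crash dump collection and OCR to finish.
 *  3. Otherwise quiesce: stop the SR-IOV heartbeat timer, flag the reset,
 *     disable interrupts and wait for outstanding commands to drain.
 *  4. If commands are still outstanding, fail them back to the SCSI layer
 *     (with MPIO-aware status), reset the chip up to
 *     MEGASAS_FUSION_MAX_RESET_TRIES times, redo IOC init, refire
 *     management commands and resync the RAID/JBOD maps.
 *  5. On failure kill the adapter; on success restart the heartbeat timer
 *     and re-enable interrupts.
 */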
3369 int megasas_reset_fusion(struct Scsi_Host *shost, int reason)
3370 {
3371 	int retval = SUCCESS, i, convert = 0;
3372 	struct megasas_instance *instance;
3373 	struct megasas_cmd_fusion *cmd_fusion;
3374 	struct fusion_context *fusion;
3375 	u32 abs_state, status_reg, reset_adapter;
3376 	u32 io_timeout_in_crash_mode = 0;
3377 	struct scsi_cmnd *scmd_local = NULL;
3378 	struct scsi_device *sdev;
3379 
3380 	instance = (struct megasas_instance *)shost->hostdata;
3381 	fusion = instance->ctrl_context;
3382 
3383 	mutex_lock(&instance->reset_mutex);
3384 
3385 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
3386 		dev_warn(&instance->pdev->dev, "Hardware critical error, "
3387 		       "returning FAILED for scsi%d.\n",
3388 			instance->host->host_no);
3389 		mutex_unlock(&instance->reset_mutex);
3390 		return FAILED;
3391 	}
3392 	status_reg = instance->instancet->read_fw_status_reg(instance->reg_set);
3393 	abs_state = status_reg & MFI_STATE_MASK;
3394 
3395 	/* IO timeout detected, forcibly put FW in FAULT state */
3396 	if (abs_state != MFI_STATE_FAULT && instance->crash_dump_buf &&
3397 		instance->crash_dump_app_support && reason) {
3398 		dev_info(&instance->pdev->dev, "IO/DCMD timeout is detected, "
3399 			"forcing FW into FAULT state\n");
3400 		atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT);
3401 		status_reg = readl(&instance->reg_set->doorbell);
3402 		writel(status_reg | MFI_STATE_FORCE_OCR,
3403 			&instance->reg_set->doorbell);
3404 		readl(&instance->reg_set->doorbell);
3405 		mutex_unlock(&instance->reset_mutex);
3406 		do {
3407 			ssleep(3);
3408 			io_timeout_in_crash_mode++;
3409 			dev_dbg(&instance->pdev->dev, "waiting for [%d] "
3410 				"seconds for crash dump collection and OCR "
3411 				"to be done\n", (io_timeout_in_crash_mode * 3));
3412 		} while ((atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) &&
3413 			(io_timeout_in_crash_mode < 80));
3414 
3415 		if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL) {
3416 			dev_info(&instance->pdev->dev, "OCR done for IO "
3417 				"timeout case\n");
3418 			retval = SUCCESS;
3419 		} else {
3420 			dev_info(&instance->pdev->dev, "Controller is not "
3421 				"operational after 240 seconds wait for IO "
3422 				"timeout case in FW crash dump mode, doing "
3423 				"OCR/kill adapter\n");
3424 			retval = megasas_reset_fusion(shost, 0);
3425 		}
3426 		return retval;
3427 	}
3428 
3429 	if (instance->requestorId && !instance->skip_heartbeat_timer_del)
3430 		del_timer_sync(&instance->sriov_heartbeat_timer);
3431 	set_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
3432 	atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_POLLING);
3433 	instance->instancet->disable_intr(instance);
3434 	msleep(1000);
3435 
3436 	/* First try waiting for commands to complete */
3437 	if (megasas_wait_for_outstanding_fusion(instance, reason,
3438 						&convert)) {
3439 		atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT);
3440 		dev_warn(&instance->pdev->dev, "resetting fusion "
3441 		       "adapter scsi%d.\n", instance->host->host_no);
3442 		if (convert)
3443 			reason = 0;
3444 
3445 		/* Now return commands back to the OS */
3446 		for (i = 0 ; i < instance->max_scsi_cmds; i++) {
3447 			cmd_fusion = fusion->cmd_list[i];
3448 			scmd_local = cmd_fusion->scmd;
3449 			if (cmd_fusion->scmd) {
3450 				scmd_local->result =
3451 					megasas_check_mpio_paths(instance,
3452 							scmd_local);
3453 				if (megasas_cmd_type(scmd_local) == READ_WRITE_LDIO)
3454 					atomic_dec(&instance->ldio_outstanding);
3455 				megasas_return_cmd_fusion(instance, cmd_fusion);
3456 				scsi_dma_unmap(scmd_local);
3457 				scmd_local->scsi_done(scmd_local);
3458 				atomic_dec(&instance->fw_outstanding);
3459 			}
3460 		}
3461 
3462 		status_reg = instance->instancet->read_fw_status_reg(
3463 			instance->reg_set);
3464 		abs_state = status_reg & MFI_STATE_MASK;
3465 		reset_adapter = status_reg & MFI_RESET_ADAPTER;
3466 		if (instance->disableOnlineCtrlReset ||
3467 		    (abs_state == MFI_STATE_FAULT && !reset_adapter)) {
3468 			/* Reset not supported, kill adapter */
3469 			dev_warn(&instance->pdev->dev, "Reset not supported"
3470 			       ", killing adapter scsi%d.\n",
3471 				instance->host->host_no);
3472 			megaraid_sas_kill_hba(instance);
3473 			instance->skip_heartbeat_timer_del = 1;
3474 			retval = FAILED;
3475 			goto out;
3476 		}
3477 
3478 		/* Let SR-IOV VF & PF sync up if there was a HB failure */
3479 		if (instance->requestorId && !reason) {
3480 			msleep(MEGASAS_OCR_SETTLE_TIME_VF);
3481 			goto transition_to_ready;
3482 		}
3483 
3484 		/* Now try to reset the chip */
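		/*
		 * For an SR-IOV VF handling a heartbeat failure (reason == 0),
		 * adp_reset is skipped: the code above jumps to the
		 * transition_to_ready label after waiting for the PF to
		 * perform the OCR.
		 */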
3485 		for (i = 0; i < MEGASAS_FUSION_MAX_RESET_TRIES; i++) {
3486 
3487 			if (instance->instancet->adp_reset
3488 				(instance, instance->reg_set))
3489 				continue;
3490 transition_to_ready:
3491 			/* Wait for FW to become ready */
3492 			if (megasas_transition_to_ready(instance, 1)) {
3493 				dev_warn(&instance->pdev->dev,
3494 					"Failed to transition controller to ready for "
3495 					"scsi%d.\n", instance->host->host_no);
3496 				if (instance->requestorId && !reason)
3497 					goto fail_kill_adapter;
3498 				else
3499 					continue;
3500 			}
3501 			megasas_reset_reply_desc(instance);
3502 			megasas_fusion_update_can_queue(instance, OCR_CONTEXT);
3503 
3504 			if (megasas_ioc_init_fusion(instance)) {
3505 				dev_warn(&instance->pdev->dev,
3506 				       "megasas_ioc_init_fusion() failed! for "
3507 				       "scsi%d\n", instance->host->host_no);
3508 				if (instance->requestorId && !reason)
3509 					goto fail_kill_adapter;
3510 				else
3511 					continue;
3512 			}
3513 
3514 			megasas_refire_mgmt_cmd(instance);
3515 
3516 			if (megasas_get_ctrl_info(instance)) {
3517 				dev_info(&instance->pdev->dev,
3518 					"Failed from %s %d\n",
3519 					__func__, __LINE__);
3520 				megaraid_sas_kill_hba(instance);
3521 				retval = FAILED;
				goto out;
3522 			}
3523 			/* Reset load balance info */
3524 			memset(fusion->load_balance_info, 0,
3525 			       sizeof(struct LD_LOAD_BALANCE_INFO)
3526 			       *MAX_LOGICAL_DRIVES_EXT);
3527 
3528 			if (!megasas_get_map_info(instance))
3529 				megasas_sync_map_info(instance);
3530 
3531 			megasas_setup_jbod_map(instance);
3532 
3533 			shost_for_each_device(sdev, shost)
3534 				megasas_update_sdev_properties(sdev);
3535 
3536 			clear_bit(MEGASAS_FUSION_IN_RESET,
3537 				  &instance->reset_flags);
3538 			instance->instancet->enable_intr(instance);
3539 			atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
3540 
3541 			/* Restart SR-IOV heartbeat */
3542 			if (instance->requestorId) {
3543 				if (!megasas_sriov_start_heartbeat(instance, 0))
3544 					megasas_start_timer(instance,
3545 							    &instance->sriov_heartbeat_timer,
3546 							    megasas_sriov_heartbeat_handler,
3547 							    MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
3548 				else
3549 					instance->skip_heartbeat_timer_del = 1;
3550 			}
3551 
3552 			/* Adapter reset completed successfully */
3553 			dev_warn(&instance->pdev->dev, "Reset "
3554 			       "successful for scsi%d.\n",
3555 				instance->host->host_no);
3556 
3557 			if (instance->crash_dump_drv_support &&
3558 				instance->crash_dump_app_support)
3559 				megasas_set_crash_dump_params(instance,
3560 					MR_CRASH_BUF_TURN_ON);
3561 			else
3562 				megasas_set_crash_dump_params(instance,
3563 					MR_CRASH_BUF_TURN_OFF);
3564 
3565 			retval = SUCCESS;
3566 			goto out;
3567 		}
3568 fail_kill_adapter:
3569 		/* Reset failed, kill the adapter */
3570 		dev_warn(&instance->pdev->dev, "Reset failed, killing "
3571 		       "adapter scsi%d.\n", instance->host->host_no);
3572 		megaraid_sas_kill_hba(instance);
3573 		instance->skip_heartbeat_timer_del = 1;
3574 		retval = FAILED;
3575 	} else {
3576 		/* For VF: Restart HB timer if we didn't OCR */
3577 		if (instance->requestorId) {
3578 			megasas_start_timer(instance,
3579 					    &instance->sriov_heartbeat_timer,
3580 					    megasas_sriov_heartbeat_handler,
3581 					    MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
3582 		}
3583 		clear_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
3584 		instance->instancet->enable_intr(instance);
3585 		atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
3586 	}
3587 out:
3588 	clear_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
3589 	mutex_unlock(&instance->reset_mutex);
3590 	return retval;
3591 }
3592 
3593 /* Fusion Crash dump collection work queue */
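/*
 * Crash dump handshake with FW, as implemented below: FW exposes the dump a
 * chunk at a time through the 1 MB DMA buffer (CRASH_DMA_BUF_SIZE).  Each
 * chunk is copied into a host crash buffer and acknowledged by clearing
 * MFI_STATE_DMADONE in the outbound scratch pad.  Once
 * MFI_STATE_CRASH_DUMP_DONE is set (or the driver runs out of host
 * buffers), the dump is marked AVAILABLE and, if the copy was complete,
 * an OCR is initiated.
 */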
3594 void megasas_fusion_crash_dump_wq(struct work_struct *work)
3595 {
3596 	struct megasas_instance *instance =
3597 		container_of(work, struct megasas_instance, crash_init);
3598 	u32 status_reg;
3599 	u8 partial_copy = 0;
3600 
3602 	status_reg = instance->instancet->read_fw_status_reg(instance->reg_set);
3603 
3604 	/*
3605 	 * Allocate host crash buffers into which the data from the 1 MB DMA
3606 	 * crash buffer will be copied
3607 	 */
3608 	if (instance->drv_buf_index == 0) {
3609 		/* Buffer is already allocated for old Crash dump.
3610 		 * Do OCR and do not wait for crash dump collection
3611 		 */
3612 		if (instance->drv_buf_alloc) {
3613 			dev_info(&instance->pdev->dev, "earlier crash dump is "
3614 				"not yet copied by application, ignoring this "
3615 				"crash dump and initiating OCR\n");
3616 			status_reg |= MFI_STATE_CRASH_DUMP_DONE;
3617 			writel(status_reg,
3618 				&instance->reg_set->outbound_scratch_pad);
3619 			readl(&instance->reg_set->outbound_scratch_pad);
3620 			return;
3621 		}
3622 		megasas_alloc_host_crash_buffer(instance);
3623 		dev_info(&instance->pdev->dev, "Number of host crash buffers "
3624 			"allocated: %d\n", instance->drv_buf_alloc);
3625 	}
3626 
3627 	/*
3628 	 * If the driver has already allocated the maximum number of host
3629 	 * buffers and FW has more crash dump data, then the driver will
3630 	 * ignore the remaining data.
3631 	 */
3632 	if (instance->drv_buf_index >= (instance->drv_buf_alloc)) {
3633 		dev_info(&instance->pdev->dev, "Driver is done copying "
3634 			"the buffer: %d\n", instance->drv_buf_alloc);
3635 		status_reg |= MFI_STATE_CRASH_DUMP_DONE;
3636 		partial_copy = 1;
3637 	} else {
3638 		memcpy(instance->crash_buf[instance->drv_buf_index],
3639 			instance->crash_dump_buf, CRASH_DMA_BUF_SIZE);
3640 		instance->drv_buf_index++;
3641 		status_reg &= ~MFI_STATE_DMADONE;
3642 	}
3643 
3644 	if (status_reg & MFI_STATE_CRASH_DUMP_DONE) {
3645 		dev_info(&instance->pdev->dev, "Crash Dump is available, number "
3646 			"of copied buffers: %d\n", instance->drv_buf_index);
3647 		instance->fw_crash_buffer_size = instance->drv_buf_index;
3648 		instance->fw_crash_state = AVAILABLE;
3649 		instance->drv_buf_index = 0;
3650 		writel(status_reg, &instance->reg_set->outbound_scratch_pad);
3651 		readl(&instance->reg_set->outbound_scratch_pad);
3652 		if (!partial_copy)
3653 			megasas_reset_fusion(instance->host, 0);
3654 	} else {
3655 		writel(status_reg, &instance->reg_set->outbound_scratch_pad);
3656 		readl(&instance->reg_set->outbound_scratch_pad);
3657 	}
3658 }
3659 
3660 
3661 /* Fusion OCR work queue */
3662 void megasas_fusion_ocr_wq(struct work_struct *work)
3663 {
3664 	struct megasas_instance *instance =
3665 		container_of(work, struct megasas_instance, work_init);
3666 
3667 	megasas_reset_fusion(instance->host, 0);
3668 }
3669 
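/*
 * Ops table binding the megaraid_sas core to the fusion-specific
 * implementations for interrupt control, FW status/reset access, ISR and
 * completion handling, adapter init, and command build/issue paths.
 */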
3670 struct megasas_instance_template megasas_instance_template_fusion = {
3671 	.enable_intr = megasas_enable_intr_fusion,
3672 	.disable_intr = megasas_disable_intr_fusion,
3673 	.clear_intr = megasas_clear_intr_fusion,
3674 	.read_fw_status_reg = megasas_read_fw_status_reg_fusion,
3675 	.adp_reset = megasas_adp_reset_fusion,
3676 	.check_reset = megasas_check_reset_fusion,
3677 	.service_isr = megasas_isr_fusion,
3678 	.tasklet = megasas_complete_cmd_dpc_fusion,
3679 	.init_adapter = megasas_init_adapter_fusion,
3680 	.build_and_issue_cmd = megasas_build_and_issue_cmd_fusion,
3681 	.issue_dcmd = megasas_issue_dcmd_fusion,
3682 };
3683