xref: /linux/drivers/scsi/mpi3mr/mpi3mr_fw.c (revision d4a379a52c3c2dc44366c4f6722c063a7d0de179)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Driver for Broadcom MPI3 Storage Controllers
4  *
5  * Copyright (C) 2017-2023 Broadcom Inc.
6  *  (mailto: mpi3mr-linuxdrv.pdl@broadcom.com)
7  *
8  */
9 
10 #include "mpi3mr.h"
11 #include <linux/io-64-nonatomic-lo-hi.h>
12 
/* Forward declarations for routines defined later in this file. */
static int
mpi3mr_issue_reset(struct mpi3mr_ioc *mrioc, u16 reset_type, u16 reset_reason);
static int mpi3mr_setup_admin_qpair(struct mpi3mr_ioc *mrioc);
static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc,
	struct mpi3_ioc_facts_data *facts_data);
static void mpi3mr_pel_wait_complete(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_drv_cmd *drv_cmd);
static int mpi3mr_check_op_admin_proc(struct mpi3mr_ioc *mrioc);
/* Number of io_uring poll queues requested on the module command line. */
static int poll_queues;
module_param(poll_queues, int, 0444);
MODULE_PARM_DESC(poll_queues, "Number of queues for io_uring poll mode. (Range 1 - 126)");
/* When true, long completion bursts are offloaded to a threaded ISR. */
static bool threaded_isr_poll = true;
module_param(threaded_isr_poll, bool, 0444);
MODULE_PARM_DESC(threaded_isr_poll,
			"Enablement of IRQ polling thread (default=true)");
28 
#if defined(writeq) && defined(CONFIG_64BIT)
/*
 * mpi3mr_writeq - post a 64-bit value to controller register space.
 *
 * On platforms with a native 64-bit MMIO write the value goes out in a
 * single writeq(); the lock parameter is unused here but kept so both
 * variants share one signature.
 */
static inline void mpi3mr_writeq(__u64 b, void __iomem *addr,
	spinlock_t *write_queue_lock)
{
	writeq(b, addr);
}
#else
/*
 * Fallback for platforms without writeq(): emit the 64-bit value as two
 * 32-bit writes, low dword first. The spinlock keeps the pair atomic
 * with respect to other CPUs writing the same register window.
 */
static inline void mpi3mr_writeq(__u64 b, void __iomem *addr,
	spinlock_t *write_queue_lock)
{
	__u64 data_out = b;
	unsigned long flags;

	spin_lock_irqsave(write_queue_lock, flags);
	writel((u32)(data_out), addr);
	writel((u32)(data_out >> 32), (addr + 4));
	spin_unlock_irqrestore(write_queue_lock, flags);
}
#endif
48 
49 static inline bool
50 mpi3mr_check_req_qfull(struct op_req_qinfo *op_req_q)
51 {
52 	u16 pi, ci, max_entries;
53 	bool is_qfull = false;
54 
55 	pi = op_req_q->pi;
56 	ci = READ_ONCE(op_req_q->ci);
57 	max_entries = op_req_q->num_requests;
58 
59 	if ((ci == (pi + 1)) || ((!ci) && (pi == (max_entries - 1))))
60 		is_qfull = true;
61 
62 	return is_qfull;
63 }
64 
65 static void mpi3mr_sync_irqs(struct mpi3mr_ioc *mrioc)
66 {
67 	u16 i, max_vectors;
68 
69 	max_vectors = mrioc->intr_info_count;
70 
71 	for (i = 0; i < max_vectors; i++)
72 		synchronize_irq(pci_irq_vector(mrioc->pdev, i));
73 }
74 
/*
 * mpi3mr_ioc_disable_intr - stop interrupt processing for the adapter.
 *
 * Clear the enable flag first so the ISR bails out early, then wait for
 * any handler already running on another CPU to complete.
 */
void mpi3mr_ioc_disable_intr(struct mpi3mr_ioc *mrioc)
{
	mrioc->intr_enabled = 0;
	mpi3mr_sync_irqs(mrioc);
}
80 
/* mpi3mr_ioc_enable_intr - allow the ISRs to process interrupts again. */
void mpi3mr_ioc_enable_intr(struct mpi3mr_ioc *mrioc)
{
	mrioc->intr_enabled = 1;
}
85 
/*
 * mpi3mr_cleanup_isr - tear down all registered interrupt handlers.
 *
 * Disables interrupt processing, frees every registered IRQ, releases
 * the intr_info array and finally returns the MSI-X vectors to the PCI
 * core. Safe to call when nothing was set up (intr_info == NULL).
 */
static void mpi3mr_cleanup_isr(struct mpi3mr_ioc *mrioc)
{
	u16 i;

	/* Quiesce the ISRs before tearing anything down */
	mpi3mr_ioc_disable_intr(mrioc);

	if (!mrioc->intr_info)
		return;

	/* Release each IRQ before freeing the intr_info it points at */
	for (i = 0; i < mrioc->intr_info_count; i++)
		free_irq(pci_irq_vector(mrioc->pdev, i),
		    (mrioc->intr_info + i));

	kfree(mrioc->intr_info);
	mrioc->intr_info = NULL;
	mrioc->intr_info_count = 0;
	mrioc->is_intr_info_set = false;
	/* Vectors go back last, after no handler can reference them */
	pci_free_irq_vectors(mrioc->pdev);
}
105 
106 void mpi3mr_add_sg_single(void *paddr, u8 flags, u32 length,
107 	dma_addr_t dma_addr)
108 {
109 	struct mpi3_sge_common *sgel = paddr;
110 
111 	sgel->flags = flags;
112 	sgel->length = cpu_to_le32(length);
113 	sgel->address = cpu_to_le64(dma_addr);
114 }
115 
116 void mpi3mr_build_zero_len_sge(void *paddr)
117 {
118 	u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
119 
120 	mpi3mr_add_sg_single(paddr, sgl_flags, 0, -1);
121 }
122 
123 void *mpi3mr_get_reply_virt_addr(struct mpi3mr_ioc *mrioc,
124 	dma_addr_t phys_addr)
125 {
126 	if (!phys_addr)
127 		return NULL;
128 
129 	if ((phys_addr < mrioc->reply_buf_dma) ||
130 	    (phys_addr > mrioc->reply_buf_dma_max_address))
131 		return NULL;
132 
133 	return mrioc->reply_buf + (phys_addr - mrioc->reply_buf_dma);
134 }
135 
136 void *mpi3mr_get_sensebuf_virt_addr(struct mpi3mr_ioc *mrioc,
137 	dma_addr_t phys_addr)
138 {
139 	if (!phys_addr)
140 		return NULL;
141 
142 	return mrioc->sense_buf + (phys_addr - mrioc->sense_buf_dma);
143 }
144 
/*
 * mpi3mr_repost_reply_buf - return a reply buffer to the firmware.
 *
 * Places @reply_dma back on the reply free queue and advances the host
 * index register so the controller can reuse the buffer.
 */
static void mpi3mr_repost_reply_buf(struct mpi3mr_ioc *mrioc,
	u64 reply_dma)
{
	u32 old_idx = 0;
	unsigned long flags;

	/* Lock serializes index update + doorbell write across CPUs */
	spin_lock_irqsave(&mrioc->reply_free_queue_lock, flags);
	old_idx  =  mrioc->reply_free_queue_host_index;
	/* Advance the host index with wrap-around at queue end */
	mrioc->reply_free_queue_host_index = (
	    (mrioc->reply_free_queue_host_index ==
	    (mrioc->reply_free_qsz - 1)) ? 0 :
	    (mrioc->reply_free_queue_host_index + 1));
	/* Publish the freed buffer before ringing the doorbell */
	mrioc->reply_free_q[old_idx] = cpu_to_le64(reply_dma);
	writel(mrioc->reply_free_queue_host_index,
	    &mrioc->sysif_regs->reply_free_host_index);
	spin_unlock_irqrestore(&mrioc->reply_free_queue_lock, flags);
}
162 
/*
 * mpi3mr_repost_sense_buf - return a sense buffer to the firmware.
 *
 * Mirrors mpi3mr_repost_reply_buf() for the sense buffer free queue:
 * queue the buffer, advance the host index and notify the controller.
 */
void mpi3mr_repost_sense_buf(struct mpi3mr_ioc *mrioc,
	u64 sense_buf_dma)
{
	u32 old_idx = 0;
	unsigned long flags;

	/* Lock serializes index update + doorbell write across CPUs */
	spin_lock_irqsave(&mrioc->sbq_lock, flags);
	old_idx  =  mrioc->sbq_host_index;
	/* Advance the host index with wrap-around at queue end */
	mrioc->sbq_host_index = ((mrioc->sbq_host_index ==
	    (mrioc->sense_buf_q_sz - 1)) ? 0 :
	    (mrioc->sbq_host_index + 1));
	/* Publish the freed buffer before ringing the doorbell */
	mrioc->sense_buf_q[old_idx] = cpu_to_le64(sense_buf_dma);
	writel(mrioc->sbq_host_index,
	    &mrioc->sysif_regs->sense_buffer_free_host_index);
	spin_unlock_irqrestore(&mrioc->sbq_lock, flags);
}
179 
/*
 * mpi3mr_print_event_data - log a short description of a firmware event.
 * @mrioc: Adapter instance reference
 * @event_reply: event notification reply frame
 *
 * Only active when MPI3_DEBUG_EVENT logging is enabled. Events that
 * carry interesting payload print their payload fields and return
 * early; the rest print a fixed description string.
 */
static void mpi3mr_print_event_data(struct mpi3mr_ioc *mrioc,
	struct mpi3_event_notification_reply *event_reply)
{
	char *desc = NULL;
	u16 event;

	/* Event prints are gated behind the event-debug logging level */
	if (!(mrioc->logging_level & MPI3_DEBUG_EVENT))
		return;

	event = event_reply->event;

	switch (event) {
	case MPI3_EVENT_LOG_DATA:
		desc = "Log Data";
		break;
	case MPI3_EVENT_CHANGE:
		desc = "Event Change";
		break;
	case MPI3_EVENT_GPIO_INTERRUPT:
		desc = "GPIO Interrupt";
		break;
	case MPI3_EVENT_CABLE_MGMT:
		desc = "Cable Management";
		break;
	case MPI3_EVENT_ENERGY_PACK_CHANGE:
		desc = "Energy Pack Change";
		break;
	case MPI3_EVENT_DEVICE_ADDED:
	{
		struct mpi3_device_page0 *event_data =
		    (struct mpi3_device_page0 *)event_reply->event_data;
		ioc_info(mrioc, "Device Added: dev=0x%04x Form=0x%x\n",
		    event_data->dev_handle, event_data->device_form);
		return;
	}
	case MPI3_EVENT_DEVICE_INFO_CHANGED:
	{
		struct mpi3_device_page0 *event_data =
		    (struct mpi3_device_page0 *)event_reply->event_data;
		ioc_info(mrioc, "Device Info Changed: dev=0x%04x Form=0x%x\n",
		    event_data->dev_handle, event_data->device_form);
		return;
	}
	case MPI3_EVENT_DEVICE_STATUS_CHANGE:
	{
		struct mpi3_event_data_device_status_change *event_data =
		    (struct mpi3_event_data_device_status_change *)event_reply->event_data;
		ioc_info(mrioc, "Device status Change: dev=0x%04x RC=0x%x\n",
		    event_data->dev_handle, event_data->reason_code);
		return;
	}
	case MPI3_EVENT_SAS_DISCOVERY:
	{
		struct mpi3_event_data_sas_discovery *event_data =
		    (struct mpi3_event_data_sas_discovery *)event_reply->event_data;
		ioc_info(mrioc, "SAS Discovery: (%s) status (0x%08x)\n",
		    (event_data->reason_code == MPI3_EVENT_SAS_DISC_RC_STARTED) ?
		    "start" : "stop",
		    le32_to_cpu(event_data->discovery_status));
		return;
	}
	case MPI3_EVENT_SAS_BROADCAST_PRIMITIVE:
		desc = "SAS Broadcast Primitive";
		break;
	case MPI3_EVENT_SAS_NOTIFY_PRIMITIVE:
		desc = "SAS Notify Primitive";
		break;
	case MPI3_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE:
		desc = "SAS Init Device Status Change";
		break;
	case MPI3_EVENT_SAS_INIT_TABLE_OVERFLOW:
		desc = "SAS Init Table Overflow";
		break;
	case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
		desc = "SAS Topology Change List";
		break;
	case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE:
		desc = "Enclosure Device Status Change";
		break;
	case MPI3_EVENT_ENCL_DEVICE_ADDED:
		desc = "Enclosure Added";
		break;
	case MPI3_EVENT_HARD_RESET_RECEIVED:
		desc = "Hard Reset Received";
		break;
	case MPI3_EVENT_SAS_PHY_COUNTER:
		desc = "SAS PHY Counter";
		break;
	case MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
		desc = "SAS Device Discovery Error";
		break;
	case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
		desc = "PCIE Topology Change List";
		break;
	case MPI3_EVENT_PCIE_ENUMERATION:
	{
		struct mpi3_event_data_pcie_enumeration *event_data =
		    (struct mpi3_event_data_pcie_enumeration *)event_reply->event_data;
		ioc_info(mrioc, "PCIE Enumeration: (%s)",
		    (event_data->reason_code ==
		    MPI3_EVENT_PCIE_ENUM_RC_STARTED) ? "start" : "stop");
		if (event_data->enumeration_status)
			ioc_info(mrioc, "enumeration_status(0x%08x)\n",
			    le32_to_cpu(event_data->enumeration_status));
		return;
	}
	case MPI3_EVENT_PREPARE_FOR_RESET:
		desc = "Prepare For Reset";
		break;
	case MPI3_EVENT_DIAGNOSTIC_BUFFER_STATUS_CHANGE:
		desc = "Diagnostic Buffer Status Change";
		break;
	}

	/* Unknown event codes leave desc NULL and are silently ignored */
	if (!desc)
		return;

	ioc_info(mrioc, "%s\n", desc);
}
299 
300 static void mpi3mr_handle_events(struct mpi3mr_ioc *mrioc,
301 	struct mpi3_default_reply *def_reply)
302 {
303 	struct mpi3_event_notification_reply *event_reply =
304 	    (struct mpi3_event_notification_reply *)def_reply;
305 
306 	mrioc->change_count = le16_to_cpu(event_reply->ioc_change_count);
307 	mpi3mr_print_event_data(mrioc, event_reply);
308 	mpi3mr_os_handle_events(mrioc, event_reply);
309 }
310 
311 static struct mpi3mr_drv_cmd *
312 mpi3mr_get_drv_cmd(struct mpi3mr_ioc *mrioc, u16 host_tag,
313 	struct mpi3_default_reply *def_reply)
314 {
315 	u16 idx;
316 
317 	switch (host_tag) {
318 	case MPI3MR_HOSTTAG_INITCMDS:
319 		return &mrioc->init_cmds;
320 	case MPI3MR_HOSTTAG_CFG_CMDS:
321 		return &mrioc->cfg_cmds;
322 	case MPI3MR_HOSTTAG_BSG_CMDS:
323 		return &mrioc->bsg_cmds;
324 	case MPI3MR_HOSTTAG_BLK_TMS:
325 		return &mrioc->host_tm_cmds;
326 	case MPI3MR_HOSTTAG_PEL_ABORT:
327 		return &mrioc->pel_abort_cmd;
328 	case MPI3MR_HOSTTAG_PEL_WAIT:
329 		return &mrioc->pel_cmds;
330 	case MPI3MR_HOSTTAG_TRANSPORT_CMDS:
331 		return &mrioc->transport_cmds;
332 	case MPI3MR_HOSTTAG_INVALID:
333 		if (def_reply && def_reply->function ==
334 		    MPI3_FUNCTION_EVENT_NOTIFICATION)
335 			mpi3mr_handle_events(mrioc, def_reply);
336 		return NULL;
337 	default:
338 		break;
339 	}
340 	if (host_tag >= MPI3MR_HOSTTAG_DEVRMCMD_MIN &&
341 	    host_tag <= MPI3MR_HOSTTAG_DEVRMCMD_MAX) {
342 		idx = host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
343 		return &mrioc->dev_rmhs_cmds[idx];
344 	}
345 
346 	if (host_tag >= MPI3MR_HOSTTAG_EVTACKCMD_MIN &&
347 	    host_tag <= MPI3MR_HOSTTAG_EVTACKCMD_MAX) {
348 		idx = host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN;
349 		return &mrioc->evtack_cmds[idx];
350 	}
351 
352 	return NULL;
353 }
354 
/*
 * mpi3mr_process_admin_reply_desc - process one admin reply descriptor.
 * @mrioc: Adapter instance reference
 * @reply_desc: reply descriptor to decode
 * @reply_dma: out parameter; set to the reply frame's DMA address for
 *	address-type replies (caller reposts it), 0 otherwise
 *
 * Decodes the descriptor by type, extracts host tag / IOC status /
 * log info (and sense data for SCSI IO replies), then completes or
 * calls back the matching driver command tracker.
 */
static void mpi3mr_process_admin_reply_desc(struct mpi3mr_ioc *mrioc,
	struct mpi3_default_reply_descriptor *reply_desc, u64 *reply_dma)
{
	u16 reply_desc_type, host_tag = 0;
	u16 ioc_status = MPI3_IOCSTATUS_SUCCESS;
	u16 masked_ioc_status = MPI3_IOCSTATUS_SUCCESS;
	u32 ioc_loginfo = 0, sense_count = 0;
	struct mpi3_status_reply_descriptor *status_desc;
	struct mpi3_address_reply_descriptor *addr_desc;
	struct mpi3_success_reply_descriptor *success_desc;
	struct mpi3_default_reply *def_reply = NULL;
	struct mpi3mr_drv_cmd *cmdptr = NULL;
	struct mpi3_scsi_io_reply *scsi_reply;
	struct scsi_sense_hdr sshdr;
	u8 *sense_buf = NULL;

	*reply_dma = 0;
	reply_desc_type = le16_to_cpu(reply_desc->reply_flags) &
	    MPI3_REPLY_DESCRIPT_FLAGS_TYPE_MASK;
	switch (reply_desc_type) {
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_STATUS:
		/* Inline status: everything is in the descriptor itself */
		status_desc = (struct mpi3_status_reply_descriptor *)reply_desc;
		host_tag = le16_to_cpu(status_desc->host_tag);
		ioc_status = le16_to_cpu(status_desc->ioc_status);
		if (ioc_status &
		    MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
			ioc_loginfo = le32_to_cpu(status_desc->ioc_log_info);
		masked_ioc_status = ioc_status & MPI3_IOCSTATUS_STATUS_MASK;
		mpi3mr_reply_trigger(mrioc, masked_ioc_status, ioc_loginfo);
		break;
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_ADDRESS_REPLY:
		/* Full reply frame lives in host memory at *reply_dma */
		addr_desc = (struct mpi3_address_reply_descriptor *)reply_desc;
		*reply_dma = le64_to_cpu(addr_desc->reply_frame_address);
		def_reply = mpi3mr_get_reply_virt_addr(mrioc, *reply_dma);
		if (!def_reply)
			goto out;
		host_tag = le16_to_cpu(def_reply->host_tag);
		ioc_status = le16_to_cpu(def_reply->ioc_status);
		if (ioc_status &
		    MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
			ioc_loginfo = le32_to_cpu(def_reply->ioc_log_info);
		masked_ioc_status = ioc_status & MPI3_IOCSTATUS_STATUS_MASK;
		if (def_reply->function == MPI3_FUNCTION_SCSI_IO) {
			/* SCSI IO replies may carry sense data to decode */
			scsi_reply = (struct mpi3_scsi_io_reply *)def_reply;
			sense_buf = mpi3mr_get_sensebuf_virt_addr(mrioc,
			    le64_to_cpu(scsi_reply->sense_data_buffer_address));
			sense_count = le32_to_cpu(scsi_reply->sense_count);
			if (sense_buf) {
				scsi_normalize_sense(sense_buf, sense_count,
				    &sshdr);
				mpi3mr_scsisense_trigger(mrioc, sshdr.sense_key,
				    sshdr.asc, sshdr.ascq);
			}
		}
		mpi3mr_reply_trigger(mrioc, masked_ioc_status, ioc_loginfo);
		break;
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SUCCESS:
		/* Success descriptor: defaults (SUCCESS status) stand */
		success_desc = (struct mpi3_success_reply_descriptor *)reply_desc;
		host_tag = le16_to_cpu(success_desc->host_tag);
		break;
	default:
		break;
	}

	cmdptr = mpi3mr_get_drv_cmd(mrioc, host_tag, def_reply);
	if (cmdptr) {
		if (cmdptr->state & MPI3MR_CMD_PENDING) {
			cmdptr->state |= MPI3MR_CMD_COMPLETE;
			cmdptr->ioc_loginfo = ioc_loginfo;
			/* BSG consumers expect the raw, unmasked status */
			if (host_tag == MPI3MR_HOSTTAG_BSG_CMDS)
				cmdptr->ioc_status = ioc_status;
			else
				cmdptr->ioc_status = masked_ioc_status;
			cmdptr->state &= ~MPI3MR_CMD_PENDING;
			if (def_reply) {
				cmdptr->state |= MPI3MR_CMD_REPLY_VALID;
				memcpy((u8 *)cmdptr->reply, (u8 *)def_reply,
				    mrioc->reply_sz);
			}
			if (sense_buf && cmdptr->sensebuf) {
				cmdptr->is_sense = 1;
				memcpy(cmdptr->sensebuf, sense_buf,
				       MPI3MR_SENSE_BUF_SZ);
			}
			/* Wake a synchronous waiter or fire the async callback */
			if (cmdptr->is_waiting) {
				cmdptr->is_waiting = 0;
				complete(&cmdptr->done);
			} else if (cmdptr->callback)
				cmdptr->callback(mrioc, cmdptr);
		}
	}
out:
	/* sense_buf non-NULL implies scsi_reply was set above */
	if (sense_buf)
		mpi3mr_repost_sense_buf(mrioc,
		    le64_to_cpu(scsi_reply->sense_data_buffer_address));
}
451 
/*
 * mpi3mr_process_admin_reply_q - drain the admin reply queue.
 * @mrioc: Adapter instance reference
 *
 * Walks the admin reply queue from the saved consumer index, processing
 * descriptors whose phase bit matches the expected phase, reposting
 * reply buffers and periodically updating the CI register.
 *
 * Return: 0 if the queue was empty or already being processed by
 * another context, otherwise the number of replies handled.
 */
int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc)
{
	u32 exp_phase = mrioc->admin_reply_ephase;
	u32 admin_reply_ci = mrioc->admin_reply_ci;
	u32 num_admin_replies = 0;
	u64 reply_dma = 0;
	u16 threshold_comps = 0;
	struct mpi3_default_reply_descriptor *reply_desc;

	/* Single-drainer guard; record that an ISR found us busy */
	if (!atomic_add_unless(&mrioc->admin_reply_q_in_use, 1, 1)) {
		atomic_inc(&mrioc->admin_pend_isr);
		return 0;
	}

	atomic_set(&mrioc->admin_pend_isr, 0);
	reply_desc = (struct mpi3_default_reply_descriptor *)mrioc->admin_reply_base +
	    admin_reply_ci;

	/* Phase bit mismatch at CI means no new descriptor is available */
	if ((le16_to_cpu(reply_desc->reply_flags) &
	    MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase) {
		atomic_dec(&mrioc->admin_reply_q_in_use);
		return 0;
	}

	do {
		if (mrioc->unrecoverable || mrioc->io_admin_reset_sync)
			break;

		mrioc->admin_req_ci = le16_to_cpu(reply_desc->request_queue_ci);
		mpi3mr_process_admin_reply_desc(mrioc, reply_desc, &reply_dma);
		if (reply_dma)
			mpi3mr_repost_reply_buf(mrioc, reply_dma);
		num_admin_replies++;
		threshold_comps++;
		/* Wrap CI and flip the expected phase at queue end */
		if (++admin_reply_ci == mrioc->num_admin_replies) {
			admin_reply_ci = 0;
			exp_phase ^= 1;
		}
		reply_desc =
		    (struct mpi3_default_reply_descriptor *)mrioc->admin_reply_base +
		    admin_reply_ci;
		if ((le16_to_cpu(reply_desc->reply_flags) &
		    MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
			break;
		/* Update CI periodically so firmware can reclaim slots */
		if (threshold_comps == MPI3MR_THRESHOLD_REPLY_COUNT) {
			writel(admin_reply_ci,
			    &mrioc->sysif_regs->admin_reply_queue_ci);
			threshold_comps = 0;
		}
	} while (1);

	writel(admin_reply_ci, &mrioc->sysif_regs->admin_reply_queue_ci);
	mrioc->admin_reply_ci = admin_reply_ci;
	mrioc->admin_reply_ephase = exp_phase;
	atomic_dec(&mrioc->admin_reply_q_in_use);

	return num_admin_replies;
}
510 
511 /**
512  * mpi3mr_get_reply_desc - get reply descriptor frame corresponding to
513  *	queue's consumer index from operational reply descriptor queue.
514  * @op_reply_q: op_reply_qinfo object
515  * @reply_ci: operational reply descriptor's queue consumer index
516  *
517  * Returns: reply descriptor frame address
518  */
519 static inline struct mpi3_default_reply_descriptor *
520 mpi3mr_get_reply_desc(struct op_reply_qinfo *op_reply_q, u32 reply_ci)
521 {
522 	void *segment_base_addr;
523 	struct segments *segments = op_reply_q->q_segments;
524 	struct mpi3_default_reply_descriptor *reply_desc = NULL;
525 
526 	segment_base_addr =
527 	    segments[reply_ci / op_reply_q->segment_qd].segment;
528 	reply_desc = (struct mpi3_default_reply_descriptor *)segment_base_addr +
529 	    (reply_ci % op_reply_q->segment_qd);
530 	return reply_desc;
531 }
532 
533 /**
534  * mpi3mr_process_op_reply_q - Operational reply queue handler
535  * @mrioc: Adapter instance reference
536  * @op_reply_q: Operational reply queue info
537  *
538  * Checks the specific operational reply queue and drains the
539  * reply queue entries until the queue is empty and process the
540  * individual reply descriptors.
541  *
542  * Return: 0 if queue is already processed,or number of reply
543  *	    descriptors processed.
544  */
545 int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc,
546 	struct op_reply_qinfo *op_reply_q)
547 {
548 	struct op_req_qinfo *op_req_q;
549 	u32 exp_phase;
550 	u32 reply_ci;
551 	u32 num_op_reply = 0;
552 	u64 reply_dma = 0;
553 	struct mpi3_default_reply_descriptor *reply_desc;
554 	u16 req_q_idx = 0, reply_qidx, threshold_comps = 0;
555 
556 	reply_qidx = op_reply_q->qid - 1;
557 
558 	if (!atomic_add_unless(&op_reply_q->in_use, 1, 1))
559 		return 0;
560 
561 	exp_phase = op_reply_q->ephase;
562 	reply_ci = op_reply_q->ci;
563 
564 	reply_desc = mpi3mr_get_reply_desc(op_reply_q, reply_ci);
565 	if ((le16_to_cpu(reply_desc->reply_flags) &
566 	    MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase) {
567 		atomic_dec(&op_reply_q->in_use);
568 		return 0;
569 	}
570 
571 	do {
572 		if (mrioc->unrecoverable || mrioc->io_admin_reset_sync)
573 			break;
574 
575 		req_q_idx = le16_to_cpu(reply_desc->request_queue_id) - 1;
576 		op_req_q = &mrioc->req_qinfo[req_q_idx];
577 
578 		WRITE_ONCE(op_req_q->ci, le16_to_cpu(reply_desc->request_queue_ci));
579 		mpi3mr_process_op_reply_desc(mrioc, reply_desc, &reply_dma,
580 		    reply_qidx);
581 
582 		if (reply_dma)
583 			mpi3mr_repost_reply_buf(mrioc, reply_dma);
584 		num_op_reply++;
585 		threshold_comps++;
586 
587 		if (++reply_ci == op_reply_q->num_replies) {
588 			reply_ci = 0;
589 			exp_phase ^= 1;
590 		}
591 
592 		reply_desc = mpi3mr_get_reply_desc(op_reply_q, reply_ci);
593 
594 		if ((le16_to_cpu(reply_desc->reply_flags) &
595 		    MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
596 			break;
597 #ifndef CONFIG_PREEMPT_RT
598 		/*
599 		 * Exit completion loop to avoid CPU lockup
600 		 * Ensure remaining completion happens from threaded ISR.
601 		 */
602 		if ((num_op_reply > mrioc->max_host_ios) &&
603 			(threaded_isr_poll == true)) {
604 			op_reply_q->enable_irq_poll = true;
605 			break;
606 		}
607 #endif
608 		if (threshold_comps == MPI3MR_THRESHOLD_REPLY_COUNT) {
609 			writel(reply_ci,
610 			    &mrioc->sysif_regs->oper_queue_indexes[reply_qidx].consumer_index);
611 			atomic_sub(threshold_comps, &op_reply_q->pend_ios);
612 			threshold_comps = 0;
613 		}
614 	} while (1);
615 
616 	writel(reply_ci,
617 	    &mrioc->sysif_regs->oper_queue_indexes[reply_qidx].consumer_index);
618 	op_reply_q->ci = reply_ci;
619 	op_reply_q->ephase = exp_phase;
620 	atomic_sub(threshold_comps, &op_reply_q->pend_ios);
621 	atomic_dec(&op_reply_q->in_use);
622 	return num_op_reply;
623 }
624 
625 /**
626  * mpi3mr_blk_mq_poll - Operational reply queue handler
627  * @shost: SCSI Host reference
628  * @queue_num: Request queue number (w.r.t OS it is hardware context number)
629  *
630  * Checks the specific operational reply queue and drains the
631  * reply queue entries until the queue is empty and process the
632  * individual reply descriptors.
633  *
634  * Return: 0 if queue is already processed,or number of reply
635  *	    descriptors processed.
636  */
637 int mpi3mr_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
638 {
639 	int num_entries = 0;
640 	struct mpi3mr_ioc *mrioc;
641 
642 	mrioc = (struct mpi3mr_ioc *)shost->hostdata;
643 
644 	if ((mrioc->reset_in_progress || mrioc->prepare_for_reset ||
645 	    mrioc->unrecoverable || mrioc->pci_err_recovery))
646 		return 0;
647 
648 	num_entries = mpi3mr_process_op_reply_q(mrioc,
649 			&mrioc->op_reply_qinfo[queue_num]);
650 
651 	return num_entries;
652 }
653 
654 static irqreturn_t mpi3mr_isr_primary(int irq, void *privdata)
655 {
656 	struct mpi3mr_intr_info *intr_info = privdata;
657 	struct mpi3mr_ioc *mrioc;
658 	u16 midx;
659 	u32 num_admin_replies = 0, num_op_reply = 0;
660 
661 	if (!intr_info)
662 		return IRQ_NONE;
663 
664 	mrioc = intr_info->mrioc;
665 
666 	if (!mrioc->intr_enabled)
667 		return IRQ_NONE;
668 
669 	midx = intr_info->msix_index;
670 
671 	if (!midx)
672 		num_admin_replies = mpi3mr_process_admin_reply_q(mrioc);
673 	if (intr_info->op_reply_q)
674 		num_op_reply = mpi3mr_process_op_reply_q(mrioc,
675 		    intr_info->op_reply_q);
676 
677 	if (num_admin_replies || num_op_reply)
678 		return IRQ_HANDLED;
679 	else
680 		return IRQ_NONE;
681 }
682 
683 #ifndef CONFIG_PREEMPT_RT
684 
685 static irqreturn_t mpi3mr_isr(int irq, void *privdata)
686 {
687 	struct mpi3mr_intr_info *intr_info = privdata;
688 	int ret;
689 
690 	if (!intr_info)
691 		return IRQ_NONE;
692 
693 	/* Call primary ISR routine */
694 	ret = mpi3mr_isr_primary(irq, privdata);
695 
696 	/*
697 	 * If more IOs are expected, schedule IRQ polling thread.
698 	 * Otherwise exit from ISR.
699 	 */
700 	if ((threaded_isr_poll == false) || !intr_info->op_reply_q)
701 		return ret;
702 
703 	if (!intr_info->op_reply_q->enable_irq_poll ||
704 	    !atomic_read(&intr_info->op_reply_q->pend_ios))
705 		return ret;
706 
707 	disable_irq_nosync(intr_info->os_irq);
708 
709 	return IRQ_WAKE_THREAD;
710 }
711 
712 /**
713  * mpi3mr_isr_poll - Reply queue polling routine
714  * @irq: IRQ
715  * @privdata: Interrupt info
716  *
717  * poll for pending I/O completions in a loop until pending I/Os
718  * present or controller queue depth I/Os are processed.
719  *
720  * Return: IRQ_NONE or IRQ_HANDLED
721  */
722 static irqreturn_t mpi3mr_isr_poll(int irq, void *privdata)
723 {
724 	struct mpi3mr_intr_info *intr_info = privdata;
725 	struct mpi3mr_ioc *mrioc;
726 	u16 midx;
727 	u32 num_op_reply = 0;
728 
729 	if (!intr_info || !intr_info->op_reply_q)
730 		return IRQ_NONE;
731 
732 	mrioc = intr_info->mrioc;
733 	midx = intr_info->msix_index;
734 
735 	/* Poll for pending IOs completions */
736 	do {
737 		if (!mrioc->intr_enabled || mrioc->unrecoverable)
738 			break;
739 
740 		if (!midx)
741 			mpi3mr_process_admin_reply_q(mrioc);
742 		if (intr_info->op_reply_q)
743 			num_op_reply +=
744 			    mpi3mr_process_op_reply_q(mrioc,
745 				intr_info->op_reply_q);
746 
747 		usleep_range(MPI3MR_IRQ_POLL_SLEEP, MPI3MR_IRQ_POLL_SLEEP + 1);
748 
749 	} while (atomic_read(&intr_info->op_reply_q->pend_ios) &&
750 	    (num_op_reply < mrioc->max_host_ios));
751 
752 	intr_info->op_reply_q->enable_irq_poll = false;
753 	enable_irq(intr_info->os_irq);
754 
755 	return IRQ_HANDLED;
756 }
757 
758 #endif
759 
760 /**
761  * mpi3mr_request_irq - Request IRQ and register ISR
762  * @mrioc: Adapter instance reference
763  * @index: IRQ vector index
764  *
765  * Request threaded ISR with primary ISR and secondary
766  *
767  * Return: 0 on success and non zero on failures.
768  */
769 static inline int mpi3mr_request_irq(struct mpi3mr_ioc *mrioc, u16 index)
770 {
771 	struct pci_dev *pdev = mrioc->pdev;
772 	struct mpi3mr_intr_info *intr_info = mrioc->intr_info + index;
773 	int retval = 0;
774 
775 	intr_info->mrioc = mrioc;
776 	intr_info->msix_index = index;
777 	intr_info->op_reply_q = NULL;
778 
779 	scnprintf(intr_info->name, MPI3MR_NAME_LENGTH,
780 	    "%.32s%d-msix%u", mrioc->driver_name, mrioc->id, index);
781 
782 #ifndef CONFIG_PREEMPT_RT
783 	retval = request_threaded_irq(pci_irq_vector(pdev, index), mpi3mr_isr,
784 	    mpi3mr_isr_poll, IRQF_SHARED, intr_info->name, intr_info);
785 #else
786 	retval = request_threaded_irq(pci_irq_vector(pdev, index), mpi3mr_isr_primary,
787 	    NULL, IRQF_SHARED, intr_info->name, intr_info);
788 #endif
789 	if (retval) {
790 		ioc_err(mrioc, "%s: Unable to allocate interrupt %d!\n",
791 		    intr_info->name, pci_irq_vector(pdev, index));
792 		return retval;
793 	}
794 
795 	intr_info->os_irq = pci_irq_vector(pdev, index);
796 	return retval;
797 }
798 
799 static void mpi3mr_calc_poll_queues(struct mpi3mr_ioc *mrioc, u16 max_vectors)
800 {
801 	if (!mrioc->requested_poll_qcount)
802 		return;
803 
804 	/* Reserved for Admin and Default Queue */
805 	if (max_vectors > 2 &&
806 		(mrioc->requested_poll_qcount < max_vectors - 2)) {
807 		ioc_info(mrioc,
808 		    "enabled polled queues (%d) msix (%d)\n",
809 		    mrioc->requested_poll_qcount, max_vectors);
810 	} else {
811 		ioc_info(mrioc,
812 		    "disabled polled queues (%d) msix (%d) because of no resources for default queue\n",
813 		    mrioc->requested_poll_qcount, max_vectors);
814 		mrioc->requested_poll_qcount = 0;
815 	}
816 }
817 
818 /**
819  * mpi3mr_setup_isr - Setup ISR for the controller
820  * @mrioc: Adapter instance reference
821  * @setup_one: Request one IRQ or more
822  *
823  * Allocate IRQ vectors and call mpi3mr_request_irq to setup ISR
824  *
825  * Return: 0 on success and non zero on failures.
826  */
827 static int mpi3mr_setup_isr(struct mpi3mr_ioc *mrioc, u8 setup_one)
828 {
829 	unsigned int irq_flags = PCI_IRQ_MSIX;
830 	int max_vectors, min_vec;
831 	int retval;
832 	int i;
833 	struct irq_affinity desc = { .pre_vectors =  1, .post_vectors = 1 };
834 
835 	if (mrioc->is_intr_info_set)
836 		return 0;
837 
838 	mpi3mr_cleanup_isr(mrioc);
839 
840 	if (setup_one || reset_devices) {
841 		max_vectors = 1;
842 		retval = pci_alloc_irq_vectors(mrioc->pdev,
843 		    1, max_vectors, irq_flags);
844 		if (retval < 0) {
845 			ioc_err(mrioc, "cannot allocate irq vectors, ret %d\n",
846 			    retval);
847 			goto out_failed;
848 		}
849 	} else {
850 		max_vectors =
851 		    min_t(int, mrioc->cpu_count + 1 +
852 			mrioc->requested_poll_qcount, mrioc->msix_count);
853 
854 		mpi3mr_calc_poll_queues(mrioc, max_vectors);
855 
856 		ioc_info(mrioc,
857 		    "MSI-X vectors supported: %d, no of cores: %d,",
858 		    mrioc->msix_count, mrioc->cpu_count);
859 		ioc_info(mrioc,
860 		    "MSI-x vectors requested: %d poll_queues %d\n",
861 		    max_vectors, mrioc->requested_poll_qcount);
862 
863 		desc.post_vectors = mrioc->requested_poll_qcount;
864 		min_vec = desc.pre_vectors + desc.post_vectors;
865 		irq_flags |= PCI_IRQ_AFFINITY | PCI_IRQ_ALL_TYPES;
866 
867 		retval = pci_alloc_irq_vectors_affinity(mrioc->pdev,
868 			min_vec, max_vectors, irq_flags, &desc);
869 
870 		if (retval < 0) {
871 			ioc_err(mrioc, "cannot allocate irq vectors, ret %d\n",
872 			    retval);
873 			goto out_failed;
874 		}
875 
876 
877 		/*
878 		 * If only one MSI-x is allocated, then MSI-x 0 will be shared
879 		 * between Admin queue and operational queue
880 		 */
881 		if (retval == min_vec)
882 			mrioc->op_reply_q_offset = 0;
883 		else if (retval != (max_vectors)) {
884 			ioc_info(mrioc,
885 			    "allocated vectors (%d) are less than configured (%d)\n",
886 			    retval, max_vectors);
887 		}
888 
889 		max_vectors = retval;
890 		mrioc->op_reply_q_offset = (max_vectors > 1) ? 1 : 0;
891 
892 		mpi3mr_calc_poll_queues(mrioc, max_vectors);
893 
894 	}
895 
896 	mrioc->intr_info = kzalloc(sizeof(struct mpi3mr_intr_info) * max_vectors,
897 	    GFP_KERNEL);
898 	if (!mrioc->intr_info) {
899 		retval = -ENOMEM;
900 		pci_free_irq_vectors(mrioc->pdev);
901 		goto out_failed;
902 	}
903 	for (i = 0; i < max_vectors; i++) {
904 		retval = mpi3mr_request_irq(mrioc, i);
905 		if (retval) {
906 			mrioc->intr_info_count = i;
907 			goto out_failed;
908 		}
909 	}
910 	if (reset_devices || !setup_one)
911 		mrioc->is_intr_info_set = true;
912 	mrioc->intr_info_count = max_vectors;
913 	mpi3mr_ioc_enable_intr(mrioc);
914 	return 0;
915 
916 out_failed:
917 	mpi3mr_cleanup_isr(mrioc);
918 
919 	return retval;
920 }
921 
/* Controller state to human-readable name mapper table */
static const struct {
	enum mpi3mr_iocstate value;
	char *name;
} mrioc_states[] = {
	{ MRIOC_STATE_READY, "ready" },
	{ MRIOC_STATE_FAULT, "fault" },
	{ MRIOC_STATE_RESET, "reset" },
	{ MRIOC_STATE_BECOMING_READY, "becoming ready" },
	{ MRIOC_STATE_RESET_REQUESTED, "reset requested" },
	{ MRIOC_STATE_UNRECOVERABLE, "unrecoverable error" },
};
933 
934 static const char *mpi3mr_iocstate_name(enum mpi3mr_iocstate mrioc_state)
935 {
936 	int i;
937 	char *name = NULL;
938 
939 	for (i = 0; i < ARRAY_SIZE(mrioc_states); i++) {
940 		if (mrioc_states[i].value == mrioc_state) {
941 			name = mrioc_states[i].name;
942 			break;
943 		}
944 	}
945 	return name;
946 }
947 
948 /* Reset reason to name mapper structure*/
949 static const struct {
950 	enum mpi3mr_reset_reason value;
951 	char *name;
952 } mpi3mr_reset_reason_codes[] = {
953 	{ MPI3MR_RESET_FROM_BRINGUP, "timeout in bringup" },
954 	{ MPI3MR_RESET_FROM_FAULT_WATCH, "fault" },
955 	{ MPI3MR_RESET_FROM_APP, "application invocation" },
956 	{ MPI3MR_RESET_FROM_EH_HOS, "error handling" },
957 	{ MPI3MR_RESET_FROM_TM_TIMEOUT, "TM timeout" },
958 	{ MPI3MR_RESET_FROM_APP_TIMEOUT, "application command timeout" },
959 	{ MPI3MR_RESET_FROM_MUR_FAILURE, "MUR failure" },
960 	{ MPI3MR_RESET_FROM_CTLR_CLEANUP, "timeout in controller cleanup" },
961 	{ MPI3MR_RESET_FROM_CIACTIV_FAULT, "component image activation fault" },
962 	{ MPI3MR_RESET_FROM_PE_TIMEOUT, "port enable timeout" },
963 	{ MPI3MR_RESET_FROM_TSU_TIMEOUT, "time stamp update timeout" },
964 	{ MPI3MR_RESET_FROM_DELREQQ_TIMEOUT, "delete request queue timeout" },
965 	{ MPI3MR_RESET_FROM_DELREPQ_TIMEOUT, "delete reply queue timeout" },
966 	{
967 		MPI3MR_RESET_FROM_CREATEREPQ_TIMEOUT,
968 		"create request queue timeout"
969 	},
970 	{
971 		MPI3MR_RESET_FROM_CREATEREQQ_TIMEOUT,
972 		"create reply queue timeout"
973 	},
974 	{ MPI3MR_RESET_FROM_IOCFACTS_TIMEOUT, "IOC facts timeout" },
975 	{ MPI3MR_RESET_FROM_IOCINIT_TIMEOUT, "IOC init timeout" },
976 	{ MPI3MR_RESET_FROM_EVTNOTIFY_TIMEOUT, "event notify timeout" },
977 	{ MPI3MR_RESET_FROM_EVTACK_TIMEOUT, "event acknowledgment timeout" },
978 	{
979 		MPI3MR_RESET_FROM_CIACTVRST_TIMER,
980 		"component image activation timeout"
981 	},
982 	{
983 		MPI3MR_RESET_FROM_GETPKGVER_TIMEOUT,
984 		"get package version timeout"
985 	},
986 	{ MPI3MR_RESET_FROM_SYSFS, "sysfs invocation" },
987 	{ MPI3MR_RESET_FROM_SYSFS_TIMEOUT, "sysfs TM timeout" },
988 	{
989 		MPI3MR_RESET_FROM_DIAG_BUFFER_POST_TIMEOUT,
990 		"diagnostic buffer post timeout"
991 	},
992 	{
993 		MPI3MR_RESET_FROM_DIAG_BUFFER_RELEASE_TIMEOUT,
994 		"diagnostic buffer release timeout"
995 	},
996 	{ MPI3MR_RESET_FROM_FIRMWARE, "firmware asynchronous reset" },
997 	{ MPI3MR_RESET_FROM_CFG_REQ_TIMEOUT, "configuration request timeout"},
998 	{ MPI3MR_RESET_FROM_SAS_TRANSPORT_TIMEOUT, "timeout of a SAS transport layer request" },
999 };
1000 
1001 /**
1002  * mpi3mr_reset_rc_name - get reset reason code name
1003  * @reason_code: reset reason code value
1004  *
1005  * Map reset reason to an NULL terminated ASCII string
1006  *
1007  * Return: name corresponding to reset reason value or NULL.
1008  */
1009 static const char *mpi3mr_reset_rc_name(enum mpi3mr_reset_reason reason_code)
1010 {
1011 	int i;
1012 	char *name = NULL;
1013 
1014 	for (i = 0; i < ARRAY_SIZE(mpi3mr_reset_reason_codes); i++) {
1015 		if (mpi3mr_reset_reason_codes[i].value == reason_code) {
1016 			name = mpi3mr_reset_reason_codes[i].name;
1017 			break;
1018 		}
1019 	}
1020 	return name;
1021 }
1022 
/*
 * Reset type to name mapper structure: maps host diagnostic register reset
 * action values to human readable names for logging.
 */
static const struct {
	u16 reset_type;
	char *name;
} mpi3mr_reset_types[] = {
	{ MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET, "soft" },
	{ MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, "diag fault" },
};
1031 
1032 /**
1033  * mpi3mr_reset_type_name - get reset type name
1034  * @reset_type: reset type value
1035  *
1036  * Map reset type to an NULL terminated ASCII string
1037  *
1038  * Return: name corresponding to reset type value or NULL.
1039  */
1040 static const char *mpi3mr_reset_type_name(u16 reset_type)
1041 {
1042 	int i;
1043 	char *name = NULL;
1044 
1045 	for (i = 0; i < ARRAY_SIZE(mpi3mr_reset_types); i++) {
1046 		if (mpi3mr_reset_types[i].reset_type == reset_type) {
1047 			name = mpi3mr_reset_types[i].name;
1048 			break;
1049 		}
1050 	}
1051 	return name;
1052 }
1053 
1054 /**
1055  * mpi3mr_is_fault_recoverable - Read fault code and decide
1056  * whether the controller can be recoverable
1057  * @mrioc: Adapter instance reference
1058  * Return: true if fault is recoverable, false otherwise.
1059  */
1060 static inline bool mpi3mr_is_fault_recoverable(struct mpi3mr_ioc *mrioc)
1061 {
1062 	u32 fault;
1063 
1064 	fault = (readl(&mrioc->sysif_regs->fault) &
1065 		      MPI3_SYSIF_FAULT_CODE_MASK);
1066 
1067 	switch (fault) {
1068 	case MPI3_SYSIF_FAULT_CODE_COMPLETE_RESET_NEEDED:
1069 	case MPI3_SYSIF_FAULT_CODE_POWER_CYCLE_REQUIRED:
1070 		ioc_warn(mrioc,
1071 		    "controller requires system power cycle, marking controller as unrecoverable\n");
1072 		return false;
1073 	case MPI3_SYSIF_FAULT_CODE_INSUFFICIENT_PCI_SLOT_POWER:
1074 		ioc_warn(mrioc,
1075 		    "controller faulted due to insufficient power,\n"
1076 		    " try by connecting it to a different slot\n");
1077 		return false;
1078 	default:
1079 		break;
1080 	}
1081 	return true;
1082 }
1083 
1084 /**
1085  * mpi3mr_print_fault_info - Display fault information
1086  * @mrioc: Adapter instance reference
1087  *
1088  * Display the controller fault information if there is a
1089  * controller fault.
1090  *
1091  * Return: Nothing.
1092  */
1093 void mpi3mr_print_fault_info(struct mpi3mr_ioc *mrioc)
1094 {
1095 	u32 ioc_status, code, code1, code2, code3;
1096 
1097 	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1098 
1099 	if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) {
1100 		code = readl(&mrioc->sysif_regs->fault);
1101 		code1 = readl(&mrioc->sysif_regs->fault_info[0]);
1102 		code2 = readl(&mrioc->sysif_regs->fault_info[1]);
1103 		code3 = readl(&mrioc->sysif_regs->fault_info[2]);
1104 
1105 		ioc_info(mrioc,
1106 		    "fault code(0x%08X): Additional code: (0x%08X:0x%08X:0x%08X)\n",
1107 		    code, code1, code2, code3);
1108 	}
1109 }
1110 
1111 /**
1112  * mpi3mr_save_fault_info - Save fault information
1113  * @mrioc: Adapter instance reference
1114  *
1115  * Save the controller fault information if there is a
1116  * controller fault.
1117  *
1118  * Return: Nothing.
1119  */
1120 static void mpi3mr_save_fault_info(struct mpi3mr_ioc *mrioc)
1121 {
1122 	u32 ioc_status, i;
1123 
1124 	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1125 
1126 	if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) {
1127 		mrioc->saved_fault_code = readl(&mrioc->sysif_regs->fault) &
1128 		    MPI3_SYSIF_FAULT_CODE_MASK;
1129 		for (i = 0; i < 3; i++) {
1130 			mrioc->saved_fault_info[i] =
1131 			readl(&mrioc->sysif_regs->fault_info[i]);
1132 		}
1133 	}
1134 }
1135 
1136 /**
1137  * mpi3mr_get_iocstate - Get IOC State
1138  * @mrioc: Adapter instance reference
1139  *
1140  * Return a proper IOC state enum based on the IOC status and
1141  * IOC configuration and unrcoverable state of the controller.
1142  *
1143  * Return: Current IOC state.
1144  */
1145 enum mpi3mr_iocstate mpi3mr_get_iocstate(struct mpi3mr_ioc *mrioc)
1146 {
1147 	u32 ioc_status, ioc_config;
1148 	u8 ready, enabled;
1149 
1150 	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1151 	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
1152 
1153 	if (mrioc->unrecoverable)
1154 		return MRIOC_STATE_UNRECOVERABLE;
1155 	if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)
1156 		return MRIOC_STATE_FAULT;
1157 
1158 	ready = (ioc_status & MPI3_SYSIF_IOC_STATUS_READY);
1159 	enabled = (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC);
1160 
1161 	if (ready && enabled)
1162 		return MRIOC_STATE_READY;
1163 	if ((!ready) && (!enabled))
1164 		return MRIOC_STATE_RESET;
1165 	if ((!ready) && (enabled))
1166 		return MRIOC_STATE_BECOMING_READY;
1167 
1168 	return MRIOC_STATE_RESET_REQUESTED;
1169 }
1170 
1171 /**
1172  * mpi3mr_free_ioctl_dma_memory - free memory for ioctl dma
1173  * @mrioc: Adapter instance reference
1174  *
1175  * Free the DMA memory allocated for IOCTL handling purpose.
1176  *
1177  * Return: None
1178  */
1179 static void mpi3mr_free_ioctl_dma_memory(struct mpi3mr_ioc *mrioc)
1180 {
1181 	struct dma_memory_desc *mem_desc;
1182 	u16 i;
1183 
1184 	if (!mrioc->ioctl_dma_pool)
1185 		return;
1186 
1187 	for (i = 0; i < MPI3MR_NUM_IOCTL_SGE; i++) {
1188 		mem_desc = &mrioc->ioctl_sge[i];
1189 		if (mem_desc->addr) {
1190 			dma_pool_free(mrioc->ioctl_dma_pool,
1191 				      mem_desc->addr,
1192 				      mem_desc->dma_addr);
1193 			mem_desc->addr = NULL;
1194 		}
1195 	}
1196 	dma_pool_destroy(mrioc->ioctl_dma_pool);
1197 	mrioc->ioctl_dma_pool = NULL;
1198 	mem_desc = &mrioc->ioctl_chain_sge;
1199 
1200 	if (mem_desc->addr) {
1201 		dma_free_coherent(&mrioc->pdev->dev, mem_desc->size,
1202 				  mem_desc->addr, mem_desc->dma_addr);
1203 		mem_desc->addr = NULL;
1204 	}
1205 	mem_desc = &mrioc->ioctl_resp_sge;
1206 	if (mem_desc->addr) {
1207 		dma_free_coherent(&mrioc->pdev->dev, mem_desc->size,
1208 				  mem_desc->addr, mem_desc->dma_addr);
1209 		mem_desc->addr = NULL;
1210 	}
1211 
1212 	mrioc->ioctl_sges_allocated = false;
1213 }
1214 
1215 /**
1216  * mpi3mr_alloc_ioctl_dma_memory - Alloc memory for ioctl dma
1217  * @mrioc: Adapter instance reference
1218  *
1219  * This function allocates dmaable memory required to handle the
1220  * application issued MPI3 IOCTL requests.
1221  *
1222  * Return: None
1223  */
1224 static void mpi3mr_alloc_ioctl_dma_memory(struct mpi3mr_ioc *mrioc)
1225 
1226 {
1227 	struct dma_memory_desc *mem_desc;
1228 	u16 i;
1229 
1230 	mrioc->ioctl_dma_pool = dma_pool_create("ioctl dma pool",
1231 						&mrioc->pdev->dev,
1232 						MPI3MR_IOCTL_SGE_SIZE,
1233 						MPI3MR_PAGE_SIZE_4K, 0);
1234 
1235 	if (!mrioc->ioctl_dma_pool) {
1236 		ioc_err(mrioc, "ioctl_dma_pool: dma_pool_create failed\n");
1237 		goto out_failed;
1238 	}
1239 
1240 	for (i = 0; i < MPI3MR_NUM_IOCTL_SGE; i++) {
1241 		mem_desc = &mrioc->ioctl_sge[i];
1242 		mem_desc->size = MPI3MR_IOCTL_SGE_SIZE;
1243 		mem_desc->addr = dma_pool_zalloc(mrioc->ioctl_dma_pool,
1244 						 GFP_KERNEL,
1245 						 &mem_desc->dma_addr);
1246 		if (!mem_desc->addr)
1247 			goto out_failed;
1248 	}
1249 
1250 	mem_desc = &mrioc->ioctl_chain_sge;
1251 	mem_desc->size = MPI3MR_PAGE_SIZE_4K;
1252 	mem_desc->addr = dma_alloc_coherent(&mrioc->pdev->dev,
1253 					    mem_desc->size,
1254 					    &mem_desc->dma_addr,
1255 					    GFP_KERNEL);
1256 	if (!mem_desc->addr)
1257 		goto out_failed;
1258 
1259 	mem_desc = &mrioc->ioctl_resp_sge;
1260 	mem_desc->size = MPI3MR_PAGE_SIZE_4K;
1261 	mem_desc->addr = dma_alloc_coherent(&mrioc->pdev->dev,
1262 					    mem_desc->size,
1263 					    &mem_desc->dma_addr,
1264 					    GFP_KERNEL);
1265 	if (!mem_desc->addr)
1266 		goto out_failed;
1267 
1268 	mrioc->ioctl_sges_allocated = true;
1269 
1270 	return;
1271 out_failed:
1272 	ioc_warn(mrioc, "cannot allocate DMA memory for the mpt commands\n"
1273 		 "from the applications, application interface for MPT command is disabled\n");
1274 	mpi3mr_free_ioctl_dma_memory(mrioc);
1275 }
1276 
1277 /**
1278  * mpi3mr_fault_uevent_emit - Emit uevent for any controller
1279  * fault
1280  * @mrioc: Pointer to the mpi3mr_ioc structure for the controller instance
1281  *
1282  * This function is invoked when the controller undergoes any
1283  * type of fault.
1284  */
1285 
1286 static void mpi3mr_fault_uevent_emit(struct mpi3mr_ioc *mrioc)
1287 {
1288 	struct kobj_uevent_env *env;
1289 	int ret;
1290 
1291 	env = kzalloc(sizeof(*env), GFP_KERNEL);
1292 	if (!env)
1293 		return;
1294 
1295 	ret = add_uevent_var(env, "DRIVER=%s", mrioc->driver_name);
1296 	if (ret)
1297 		goto out_free;
1298 
1299 	ret = add_uevent_var(env, "IOC_ID=%u", mrioc->id);
1300 	if (ret)
1301 		goto out_free;
1302 
1303 	ret = add_uevent_var(env, "FAULT_CODE=0x%08x",
1304 			    mrioc->saved_fault_code);
1305 	if (ret)
1306 		goto out_free;
1307 
1308 	ret = add_uevent_var(env, "FAULT_INFO0=0x%08x",
1309 			     mrioc->saved_fault_info[0]);
1310 	if (ret)
1311 		goto out_free;
1312 
1313 	ret = add_uevent_var(env, "FAULT_INFO1=0x%08x",
1314 			    mrioc->saved_fault_info[1]);
1315 	if (ret)
1316 		goto out_free;
1317 
1318 	ret = add_uevent_var(env, "FAULT_INFO2=0x%08x",
1319 			    mrioc->saved_fault_info[2]);
1320 	if (ret)
1321 		goto out_free;
1322 
1323 	kobject_uevent_env(&mrioc->shost->shost_gendev.kobj,
1324 			KOBJ_CHANGE, env->envp);
1325 
1326 out_free:
1327 	kfree(env);
1328 
1329 }
1330 
1331 /**
1332  * mpi3mr_clear_reset_history - clear reset history
1333  * @mrioc: Adapter instance reference
1334  *
1335  * Write the reset history bit in IOC status to clear the bit,
1336  * if it is already set.
1337  *
1338  * Return: Nothing.
1339  */
1340 static inline void mpi3mr_clear_reset_history(struct mpi3mr_ioc *mrioc)
1341 {
1342 	u32 ioc_status;
1343 
1344 	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1345 	if (ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)
1346 		writel(ioc_status, &mrioc->sysif_regs->ioc_status);
1347 }
1348 
1349 /**
1350  * mpi3mr_issue_and_process_mur - Message unit Reset handler
1351  * @mrioc: Adapter instance reference
1352  * @reset_reason: Reset reason code
1353  *
1354  * Issue Message unit Reset to the controller and wait for it to
1355  * be complete.
1356  *
1357  * Return: 0 on success, -1 on failure.
1358  */
1359 static int mpi3mr_issue_and_process_mur(struct mpi3mr_ioc *mrioc,
1360 	u32 reset_reason)
1361 {
1362 	u32 ioc_config, timeout, ioc_status, scratch_pad0;
1363 	int retval = -1;
1364 
1365 	ioc_info(mrioc, "Issuing Message unit Reset(MUR)\n");
1366 	if (mrioc->unrecoverable) {
1367 		ioc_info(mrioc, "IOC is unrecoverable MUR not issued\n");
1368 		return retval;
1369 	}
1370 	mpi3mr_clear_reset_history(mrioc);
1371 	scratch_pad0 = ((MPI3MR_RESET_REASON_OSTYPE_LINUX <<
1372 			 MPI3MR_RESET_REASON_OSTYPE_SHIFT) |
1373 			(mrioc->facts.ioc_num <<
1374 			 MPI3MR_RESET_REASON_IOCNUM_SHIFT) | reset_reason);
1375 	writel(scratch_pad0, &mrioc->sysif_regs->scratchpad[0]);
1376 	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
1377 	ioc_config &= ~MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC;
1378 	writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);
1379 
1380 	timeout = MPI3MR_MUR_TIMEOUT * 10;
1381 	do {
1382 		ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1383 		if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)) {
1384 			mpi3mr_clear_reset_history(mrioc);
1385 			break;
1386 		}
1387 		if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) {
1388 			mpi3mr_print_fault_info(mrioc);
1389 			break;
1390 		}
1391 		msleep(100);
1392 	} while (--timeout);
1393 
1394 	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
1395 	if (timeout && !((ioc_status & MPI3_SYSIF_IOC_STATUS_READY) ||
1396 	      (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) ||
1397 	      (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC)))
1398 		retval = 0;
1399 
1400 	ioc_info(mrioc, "Base IOC Sts/Config after %s MUR is (0x%08x)/(0x%08x)\n",
1401 	    (!retval) ? "successful" : "failed", ioc_status, ioc_config);
1402 	return retval;
1403 }
1404 
1405 /**
1406  * mpi3mr_revalidate_factsdata - validate IOCFacts parameters
1407  * during reset/resume
1408  * @mrioc: Adapter instance reference
1409  *
1410  * Return: zero if the new IOCFacts parameters value is compatible with
1411  * older values else return -EPERM
1412  */
1413 static int
1414 mpi3mr_revalidate_factsdata(struct mpi3mr_ioc *mrioc)
1415 {
1416 	unsigned long *removepend_bitmap;
1417 
1418 	if (mrioc->facts.reply_sz > mrioc->reply_sz) {
1419 		ioc_err(mrioc,
1420 		    "cannot increase reply size from %d to %d\n",
1421 		    mrioc->reply_sz, mrioc->facts.reply_sz);
1422 		return -EPERM;
1423 	}
1424 
1425 	if (mrioc->facts.max_op_reply_q < mrioc->num_op_reply_q) {
1426 		ioc_err(mrioc,
1427 		    "cannot reduce number of operational reply queues from %d to %d\n",
1428 		    mrioc->num_op_reply_q,
1429 		    mrioc->facts.max_op_reply_q);
1430 		return -EPERM;
1431 	}
1432 
1433 	if (mrioc->facts.max_op_req_q < mrioc->num_op_req_q) {
1434 		ioc_err(mrioc,
1435 		    "cannot reduce number of operational request queues from %d to %d\n",
1436 		    mrioc->num_op_req_q, mrioc->facts.max_op_req_q);
1437 		return -EPERM;
1438 	}
1439 
1440 	if (mrioc->shost->max_sectors != (mrioc->facts.max_data_length / 512))
1441 		ioc_err(mrioc, "Warning: The maximum data transfer length\n"
1442 			    "\tchanged after reset: previous(%d), new(%d),\n"
1443 			    "the driver cannot change this at run time\n",
1444 			    mrioc->shost->max_sectors * 512, mrioc->facts.max_data_length);
1445 
1446 	if ((mrioc->sas_transport_enabled) && (mrioc->facts.ioc_capabilities &
1447 	    MPI3_IOCFACTS_CAPABILITY_MULTIPATH_SUPPORTED))
1448 		ioc_err(mrioc,
1449 		    "critical error: multipath capability is enabled at the\n"
1450 		    "\tcontroller while sas transport support is enabled at the\n"
1451 		    "\tdriver, please reboot the system or reload the driver\n");
1452 
1453 	if (mrioc->seg_tb_support) {
1454 		if (!(mrioc->facts.ioc_capabilities &
1455 		     MPI3_IOCFACTS_CAPABILITY_SEG_DIAG_TRACE_SUPPORTED)) {
1456 			ioc_err(mrioc,
1457 			    "critical error: previously enabled segmented trace\n"
1458 			    " buffer capability is disabled after reset. Please\n"
1459 			    " update the firmware or reboot the system or\n"
1460 			    " reload the driver to enable trace diag buffer\n");
1461 			mrioc->diag_buffers[0].disabled_after_reset = true;
1462 		} else
1463 			mrioc->diag_buffers[0].disabled_after_reset = false;
1464 	}
1465 
1466 	if (mrioc->facts.max_devhandle > mrioc->dev_handle_bitmap_bits) {
1467 		removepend_bitmap = bitmap_zalloc(mrioc->facts.max_devhandle,
1468 						  GFP_KERNEL);
1469 		if (!removepend_bitmap) {
1470 			ioc_err(mrioc,
1471 				"failed to increase removepend_bitmap bits from %d to %d\n",
1472 				mrioc->dev_handle_bitmap_bits,
1473 				mrioc->facts.max_devhandle);
1474 			return -EPERM;
1475 		}
1476 		bitmap_free(mrioc->removepend_bitmap);
1477 		mrioc->removepend_bitmap = removepend_bitmap;
1478 		ioc_info(mrioc,
1479 			 "increased bits of dev_handle_bitmap from %d to %d\n",
1480 			 mrioc->dev_handle_bitmap_bits,
1481 			 mrioc->facts.max_devhandle);
1482 		mrioc->dev_handle_bitmap_bits = mrioc->facts.max_devhandle;
1483 	}
1484 
1485 	return 0;
1486 }
1487 
1488 /**
1489  * mpi3mr_bring_ioc_ready - Bring controller to ready state
1490  * @mrioc: Adapter instance reference
1491  *
1492  * Set Enable IOC bit in IOC configuration register and wait for
1493  * the controller to become ready.
1494  *
1495  * Return: 0 on success, appropriate error on failure.
1496  */
1497 static int mpi3mr_bring_ioc_ready(struct mpi3mr_ioc *mrioc)
1498 {
1499 	u32 ioc_config, ioc_status, timeout, host_diagnostic;
1500 	int retval = 0;
1501 	enum mpi3mr_iocstate ioc_state;
1502 	u64 base_info;
1503 	u8 retry = 0;
1504 	u64 start_time, elapsed_time_sec;
1505 
1506 retry_bring_ioc_ready:
1507 
1508 	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1509 	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
1510 	base_info = lo_hi_readq(&mrioc->sysif_regs->ioc_information);
1511 	ioc_info(mrioc, "ioc_status(0x%08x), ioc_config(0x%08x), ioc_info(0x%016llx) at the bringup\n",
1512 	    ioc_status, ioc_config, base_info);
1513 
1514 	if (!mpi3mr_is_fault_recoverable(mrioc)) {
1515 		mrioc->unrecoverable = 1;
1516 		goto out_device_not_present;
1517 	}
1518 
1519 	/*The timeout value is in 2sec unit, changing it to seconds*/
1520 	mrioc->ready_timeout =
1521 	    ((base_info & MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_MASK) >>
1522 	    MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_SHIFT) * 2;
1523 
1524 	ioc_info(mrioc, "ready timeout: %d seconds\n", mrioc->ready_timeout);
1525 
1526 	ioc_state = mpi3mr_get_iocstate(mrioc);
1527 	ioc_info(mrioc, "controller is in %s state during detection\n",
1528 	    mpi3mr_iocstate_name(ioc_state));
1529 
1530 	timeout = mrioc->ready_timeout * 10;
1531 
1532 	do {
1533 		ioc_state = mpi3mr_get_iocstate(mrioc);
1534 
1535 		if (ioc_state != MRIOC_STATE_BECOMING_READY &&
1536 		    ioc_state != MRIOC_STATE_RESET_REQUESTED)
1537 			break;
1538 
1539 		if (!pci_device_is_present(mrioc->pdev)) {
1540 			mrioc->unrecoverable = 1;
1541 			ioc_err(mrioc, "controller is not present while waiting to reset\n");
1542 			goto out_device_not_present;
1543 		}
1544 
1545 		msleep(100);
1546 	} while (--timeout);
1547 
1548 	if (ioc_state == MRIOC_STATE_READY) {
1549 		ioc_info(mrioc, "issuing message unit reset (MUR) to bring to reset state\n");
1550 		retval = mpi3mr_issue_and_process_mur(mrioc,
1551 		    MPI3MR_RESET_FROM_BRINGUP);
1552 		ioc_state = mpi3mr_get_iocstate(mrioc);
1553 		if (retval)
1554 			ioc_err(mrioc,
1555 			    "message unit reset failed with error %d current state %s\n",
1556 			    retval, mpi3mr_iocstate_name(ioc_state));
1557 	}
1558 	if (ioc_state != MRIOC_STATE_RESET) {
1559 		if (ioc_state == MRIOC_STATE_FAULT) {
1560 			timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10;
1561 			mpi3mr_print_fault_info(mrioc);
1562 			mpi3mr_save_fault_info(mrioc);
1563 			mrioc->fault_during_init = 1;
1564 			mrioc->fwfault_counter++;
1565 
1566 			do {
1567 				host_diagnostic =
1568 					readl(&mrioc->sysif_regs->host_diagnostic);
1569 				if (!(host_diagnostic &
1570 				      MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS))
1571 					break;
1572 				if (!pci_device_is_present(mrioc->pdev)) {
1573 					mrioc->unrecoverable = 1;
1574 					ioc_err(mrioc, "controller is not present at the bringup\n");
1575 					goto out_device_not_present;
1576 				}
1577 				msleep(100);
1578 			} while (--timeout);
1579 		}
1580 		mpi3mr_print_fault_info(mrioc);
1581 		ioc_info(mrioc, "issuing soft reset to bring to reset state\n");
1582 		retval = mpi3mr_issue_reset(mrioc,
1583 		    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET,
1584 		    MPI3MR_RESET_FROM_BRINGUP);
1585 		if (retval) {
1586 			ioc_err(mrioc,
1587 			    "soft reset failed with error %d\n", retval);
1588 			goto out_failed;
1589 		}
1590 	}
1591 	ioc_state = mpi3mr_get_iocstate(mrioc);
1592 	if (ioc_state != MRIOC_STATE_RESET) {
1593 		ioc_err(mrioc,
1594 		    "cannot bring controller to reset state, current state: %s\n",
1595 		    mpi3mr_iocstate_name(ioc_state));
1596 		goto out_failed;
1597 	}
1598 	mpi3mr_clear_reset_history(mrioc);
1599 	retval = mpi3mr_setup_admin_qpair(mrioc);
1600 	if (retval) {
1601 		ioc_err(mrioc, "failed to setup admin queues: error %d\n",
1602 		    retval);
1603 		goto out_failed;
1604 	}
1605 
1606 	ioc_info(mrioc, "bringing controller to ready state\n");
1607 	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
1608 	ioc_config |= MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC;
1609 	writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);
1610 
1611 	if (retry == 0)
1612 		start_time = jiffies;
1613 
1614 	timeout = mrioc->ready_timeout * 10;
1615 	do {
1616 		ioc_state = mpi3mr_get_iocstate(mrioc);
1617 		if (ioc_state == MRIOC_STATE_READY) {
1618 			ioc_info(mrioc,
1619 			    "successfully transitioned to %s state\n",
1620 			    mpi3mr_iocstate_name(ioc_state));
1621 			return 0;
1622 		}
1623 		ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1624 		if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) ||
1625 		    (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) {
1626 			mpi3mr_print_fault_info(mrioc);
1627 			goto out_failed;
1628 		}
1629 		if (!pci_device_is_present(mrioc->pdev)) {
1630 			mrioc->unrecoverable = 1;
1631 			ioc_err(mrioc,
1632 			    "controller is not present at the bringup\n");
1633 			retval = -1;
1634 			goto out_device_not_present;
1635 		}
1636 		msleep(100);
1637 		elapsed_time_sec = jiffies_to_msecs(jiffies - start_time)/1000;
1638 	} while (elapsed_time_sec < mrioc->ready_timeout);
1639 
1640 out_failed:
1641 	elapsed_time_sec = jiffies_to_msecs(jiffies - start_time)/1000;
1642 	if ((retry < 2) && (elapsed_time_sec < (mrioc->ready_timeout - 60))) {
1643 		retry++;
1644 
1645 		ioc_warn(mrioc, "retrying to bring IOC ready, retry_count:%d\n"
1646 				" elapsed time =%llu\n", retry, elapsed_time_sec);
1647 
1648 		goto retry_bring_ioc_ready;
1649 	}
1650 	ioc_state = mpi3mr_get_iocstate(mrioc);
1651 	ioc_err(mrioc,
1652 	    "failed to bring to ready state,  current state: %s\n",
1653 	    mpi3mr_iocstate_name(ioc_state));
1654 out_device_not_present:
1655 	return retval;
1656 }
1657 
1658 /**
1659  * mpi3mr_soft_reset_success - Check softreset is success or not
1660  * @ioc_status: IOC status register value
1661  * @ioc_config: IOC config register value
1662  *
1663  * Check whether the soft reset is successful or not based on
1664  * IOC status and IOC config register values.
1665  *
1666  * Return: True when the soft reset is success, false otherwise.
1667  */
1668 static inline bool
1669 mpi3mr_soft_reset_success(u32 ioc_status, u32 ioc_config)
1670 {
1671 	if (!((ioc_status & MPI3_SYSIF_IOC_STATUS_READY) ||
1672 	    (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC)))
1673 		return true;
1674 	return false;
1675 }
1676 
1677 /**
1678  * mpi3mr_diagfault_success - Check diag fault is success or not
1679  * @mrioc: Adapter reference
1680  * @ioc_status: IOC status register value
1681  *
1682  * Check whether the controller hit diag reset fault code.
1683  *
1684  * Return: True when there is diag fault, false otherwise.
1685  */
1686 static inline bool mpi3mr_diagfault_success(struct mpi3mr_ioc *mrioc,
1687 	u32 ioc_status)
1688 {
1689 	u32 fault;
1690 
1691 	if (!(ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT))
1692 		return false;
1693 	fault = readl(&mrioc->sysif_regs->fault) & MPI3_SYSIF_FAULT_CODE_MASK;
1694 	if (fault == MPI3_SYSIF_FAULT_CODE_DIAG_FAULT_RESET) {
1695 		mpi3mr_print_fault_info(mrioc);
1696 		return true;
1697 	}
1698 	return false;
1699 }
1700 
1701 /**
1702  * mpi3mr_set_diagsave - Set diag save bit for snapdump
1703  * @mrioc: Adapter reference
1704  *
1705  * Set diag save bit in IOC configuration register to enable
1706  * snapdump.
1707  *
1708  * Return: Nothing.
1709  */
1710 static inline void mpi3mr_set_diagsave(struct mpi3mr_ioc *mrioc)
1711 {
1712 	u32 ioc_config;
1713 
1714 	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
1715 	ioc_config |= MPI3_SYSIF_IOC_CONFIG_DIAG_SAVE;
1716 	writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);
1717 }
1718 
1719 /**
1720  * mpi3mr_issue_reset - Issue reset to the controller
1721  * @mrioc: Adapter reference
1722  * @reset_type: Reset type
1723  * @reset_reason: Reset reason code
1724  *
1725  * Unlock the host diagnostic registers and write the specific
1726  * reset type to that, wait for reset acknowledgment from the
1727  * controller, if the reset is not successful retry for the
1728  * predefined number of times.
1729  *
1730  * Return: 0 on success, non-zero on failure.
1731  */
1732 static int mpi3mr_issue_reset(struct mpi3mr_ioc *mrioc, u16 reset_type,
1733 	u16 reset_reason)
1734 {
1735 	int retval = -1;
1736 	u8 unlock_retry_count = 0;
1737 	u32 host_diagnostic, ioc_status, ioc_config, scratch_pad0;
1738 	u32 timeout = MPI3MR_RESET_ACK_TIMEOUT * 10;
1739 
1740 	if ((reset_type != MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET) &&
1741 	    (reset_type != MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT))
1742 		return retval;
1743 	if (mrioc->unrecoverable)
1744 		return retval;
1745 	if (reset_reason == MPI3MR_RESET_FROM_FIRMWARE) {
1746 		retval = 0;
1747 		return retval;
1748 	}
1749 
1750 	ioc_info(mrioc, "%s reset due to %s(0x%x)\n",
1751 	    mpi3mr_reset_type_name(reset_type),
1752 	    mpi3mr_reset_rc_name(reset_reason), reset_reason);
1753 
1754 	mpi3mr_clear_reset_history(mrioc);
1755 	do {
1756 		ioc_info(mrioc,
1757 		    "Write magic sequence to unlock host diag register (retry=%d)\n",
1758 		    ++unlock_retry_count);
1759 		if (unlock_retry_count >= MPI3MR_HOSTDIAG_UNLOCK_RETRY_COUNT) {
1760 			ioc_err(mrioc,
1761 			    "%s reset failed due to unlock failure, host_diagnostic(0x%08x)\n",
1762 			    mpi3mr_reset_type_name(reset_type),
1763 			    host_diagnostic);
1764 			mrioc->unrecoverable = 1;
1765 			return retval;
1766 		}
1767 
1768 		writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_FLUSH,
1769 		    &mrioc->sysif_regs->write_sequence);
1770 		writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_1ST,
1771 		    &mrioc->sysif_regs->write_sequence);
1772 		writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND,
1773 		    &mrioc->sysif_regs->write_sequence);
1774 		writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_3RD,
1775 		    &mrioc->sysif_regs->write_sequence);
1776 		writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_4TH,
1777 		    &mrioc->sysif_regs->write_sequence);
1778 		writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_5TH,
1779 		    &mrioc->sysif_regs->write_sequence);
1780 		writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_6TH,
1781 		    &mrioc->sysif_regs->write_sequence);
1782 		usleep_range(1000, 1100);
1783 		host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic);
1784 		ioc_info(mrioc,
1785 		    "wrote magic sequence: retry_count(%d), host_diagnostic(0x%08x)\n",
1786 		    unlock_retry_count, host_diagnostic);
1787 	} while (!(host_diagnostic & MPI3_SYSIF_HOST_DIAG_DIAG_WRITE_ENABLE));
1788 
1789 	scratch_pad0 = ((MPI3MR_RESET_REASON_OSTYPE_LINUX <<
1790 	    MPI3MR_RESET_REASON_OSTYPE_SHIFT) | (mrioc->facts.ioc_num <<
1791 	    MPI3MR_RESET_REASON_IOCNUM_SHIFT) | reset_reason);
1792 	writel(scratch_pad0, &mrioc->sysif_regs->scratchpad[0]);
1793 	if (reset_type == MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT)
1794 		mpi3mr_set_diagsave(mrioc);
1795 	writel(host_diagnostic | reset_type,
1796 	    &mrioc->sysif_regs->host_diagnostic);
1797 	switch (reset_type) {
1798 	case MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET:
1799 		do {
1800 			ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1801 			ioc_config =
1802 			    readl(&mrioc->sysif_regs->ioc_configuration);
1803 			if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)
1804 			    && mpi3mr_soft_reset_success(ioc_status, ioc_config)
1805 			    ) {
1806 				mpi3mr_clear_reset_history(mrioc);
1807 				retval = 0;
1808 				break;
1809 			}
1810 			msleep(100);
1811 		} while (--timeout);
1812 		mpi3mr_print_fault_info(mrioc);
1813 		break;
1814 	case MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT:
1815 		do {
1816 			ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1817 			if (mpi3mr_diagfault_success(mrioc, ioc_status)) {
1818 				retval = 0;
1819 				break;
1820 			}
1821 			msleep(100);
1822 		} while (--timeout);
1823 		break;
1824 	default:
1825 		break;
1826 	}
1827 
1828 	writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND,
1829 	    &mrioc->sysif_regs->write_sequence);
1830 
1831 	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
1832 	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1833 	ioc_info(mrioc,
1834 	    "ioc_status/ioc_config after %s reset is (0x%08x)/(0x%08x)\n",
1835 	    (!retval)?"successful":"failed", ioc_status,
1836 	    ioc_config);
1837 	if (retval)
1838 		mrioc->unrecoverable = 1;
1839 	return retval;
1840 }
1841 
1842 /**
1843  * mpi3mr_admin_request_post - Post request to admin queue
1844  * @mrioc: Adapter reference
1845  * @admin_req: MPI3 request
1846  * @admin_req_sz: Request size
1847  * @ignore_reset: Ignore reset in process
1848  *
1849  * Post the MPI3 request into admin request queue and
1850  * inform the controller, if the queue is full return
1851  * appropriate error.
1852  *
1853  * Return: 0 on success, non-zero on failure.
1854  */
1855 int mpi3mr_admin_request_post(struct mpi3mr_ioc *mrioc, void *admin_req,
1856 	u16 admin_req_sz, u8 ignore_reset)
1857 {
1858 	u16 areq_pi = 0, areq_ci = 0, max_entries = 0;
1859 	int retval = 0;
1860 	unsigned long flags;
1861 	u8 *areq_entry;
1862 
1863 	if (mrioc->unrecoverable) {
1864 		ioc_err(mrioc, "%s : Unrecoverable controller\n", __func__);
1865 		return -EFAULT;
1866 	}
1867 
1868 	spin_lock_irqsave(&mrioc->admin_req_lock, flags);
1869 	areq_pi = mrioc->admin_req_pi;
1870 	areq_ci = mrioc->admin_req_ci;
1871 	max_entries = mrioc->num_admin_req;
1872 	if ((areq_ci == (areq_pi + 1)) || ((!areq_ci) &&
1873 	    (areq_pi == (max_entries - 1)))) {
1874 		ioc_err(mrioc, "AdminReqQ full condition detected\n");
1875 		retval = -EAGAIN;
1876 		goto out;
1877 	}
1878 	if (!ignore_reset && mrioc->reset_in_progress) {
1879 		ioc_err(mrioc, "AdminReqQ submit reset in progress\n");
1880 		retval = -EAGAIN;
1881 		goto out;
1882 	}
1883 	if (mrioc->pci_err_recovery) {
1884 		ioc_err(mrioc, "admin request queue submission failed due to pci error recovery in progress\n");
1885 		retval = -EAGAIN;
1886 		goto out;
1887 	}
1888 
1889 	areq_entry = (u8 *)mrioc->admin_req_base +
1890 	    (areq_pi * MPI3MR_ADMIN_REQ_FRAME_SZ);
1891 	memset(areq_entry, 0, MPI3MR_ADMIN_REQ_FRAME_SZ);
1892 	memcpy(areq_entry, (u8 *)admin_req, admin_req_sz);
1893 
1894 	if (++areq_pi == max_entries)
1895 		areq_pi = 0;
1896 	mrioc->admin_req_pi = areq_pi;
1897 
1898 	writel(mrioc->admin_req_pi, &mrioc->sysif_regs->admin_request_queue_pi);
1899 
1900 out:
1901 	spin_unlock_irqrestore(&mrioc->admin_req_lock, flags);
1902 
1903 	return retval;
1904 }
1905 
1906 /**
1907  * mpi3mr_free_op_req_q_segments - free request memory segments
1908  * @mrioc: Adapter instance reference
1909  * @q_idx: operational request queue index
1910  *
1911  * Free memory segments allocated for operational request queue
1912  *
1913  * Return: Nothing.
1914  */
1915 static void mpi3mr_free_op_req_q_segments(struct mpi3mr_ioc *mrioc, u16 q_idx)
1916 {
1917 	u16 j;
1918 	int size;
1919 	struct segments *segments;
1920 
1921 	segments = mrioc->req_qinfo[q_idx].q_segments;
1922 	if (!segments)
1923 		return;
1924 
1925 	if (mrioc->enable_segqueue) {
1926 		size = MPI3MR_OP_REQ_Q_SEG_SIZE;
1927 		if (mrioc->req_qinfo[q_idx].q_segment_list) {
1928 			dma_free_coherent(&mrioc->pdev->dev,
1929 			    MPI3MR_MAX_SEG_LIST_SIZE,
1930 			    mrioc->req_qinfo[q_idx].q_segment_list,
1931 			    mrioc->req_qinfo[q_idx].q_segment_list_dma);
1932 			mrioc->req_qinfo[q_idx].q_segment_list = NULL;
1933 		}
1934 	} else
1935 		size = mrioc->req_qinfo[q_idx].segment_qd *
1936 		    mrioc->facts.op_req_sz;
1937 
1938 	for (j = 0; j < mrioc->req_qinfo[q_idx].num_segments; j++) {
1939 		if (!segments[j].segment)
1940 			continue;
1941 		dma_free_coherent(&mrioc->pdev->dev,
1942 		    size, segments[j].segment, segments[j].segment_dma);
1943 		segments[j].segment = NULL;
1944 	}
1945 	kfree(mrioc->req_qinfo[q_idx].q_segments);
1946 	mrioc->req_qinfo[q_idx].q_segments = NULL;
1947 	mrioc->req_qinfo[q_idx].qid = 0;
1948 }
1949 
1950 /**
1951  * mpi3mr_free_op_reply_q_segments - free reply memory segments
1952  * @mrioc: Adapter instance reference
1953  * @q_idx: operational reply queue index
1954  *
1955  * Free memory segments allocated for operational reply queue
1956  *
1957  * Return: Nothing.
1958  */
1959 static void mpi3mr_free_op_reply_q_segments(struct mpi3mr_ioc *mrioc, u16 q_idx)
1960 {
1961 	u16 j;
1962 	int size;
1963 	struct segments *segments;
1964 
1965 	segments = mrioc->op_reply_qinfo[q_idx].q_segments;
1966 	if (!segments)
1967 		return;
1968 
1969 	if (mrioc->enable_segqueue) {
1970 		size = MPI3MR_OP_REP_Q_SEG_SIZE;
1971 		if (mrioc->op_reply_qinfo[q_idx].q_segment_list) {
1972 			dma_free_coherent(&mrioc->pdev->dev,
1973 			    MPI3MR_MAX_SEG_LIST_SIZE,
1974 			    mrioc->op_reply_qinfo[q_idx].q_segment_list,
1975 			    mrioc->op_reply_qinfo[q_idx].q_segment_list_dma);
1976 			mrioc->op_reply_qinfo[q_idx].q_segment_list = NULL;
1977 		}
1978 	} else
1979 		size = mrioc->op_reply_qinfo[q_idx].segment_qd *
1980 		    mrioc->op_reply_desc_sz;
1981 
1982 	for (j = 0; j < mrioc->op_reply_qinfo[q_idx].num_segments; j++) {
1983 		if (!segments[j].segment)
1984 			continue;
1985 		dma_free_coherent(&mrioc->pdev->dev,
1986 		    size, segments[j].segment, segments[j].segment_dma);
1987 		segments[j].segment = NULL;
1988 	}
1989 
1990 	kfree(mrioc->op_reply_qinfo[q_idx].q_segments);
1991 	mrioc->op_reply_qinfo[q_idx].q_segments = NULL;
1992 	mrioc->op_reply_qinfo[q_idx].qid = 0;
1993 }
1994 
1995 /**
1996  * mpi3mr_delete_op_reply_q - delete operational reply queue
1997  * @mrioc: Adapter instance reference
1998  * @qidx: operational reply queue index
1999  *
2000  * Delete operatinal reply queue by issuing MPI request
2001  * through admin queue.
2002  *
2003  * Return:  0 on success, non-zero on failure.
2004  */
2005 static int mpi3mr_delete_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx)
2006 {
2007 	struct mpi3_delete_reply_queue_request delq_req;
2008 	struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx;
2009 	int retval = 0;
2010 	u16 reply_qid = 0, midx;
2011 
2012 	reply_qid = op_reply_q->qid;
2013 
2014 	midx = REPLY_QUEUE_IDX_TO_MSIX_IDX(qidx, mrioc->op_reply_q_offset);
2015 
2016 	if (!reply_qid)	{
2017 		retval = -1;
2018 		ioc_err(mrioc, "Issue DelRepQ: called with invalid ReqQID\n");
2019 		goto out;
2020 	}
2021 
2022 	(op_reply_q->qtype == MPI3MR_DEFAULT_QUEUE) ? mrioc->default_qcount-- :
2023 	    mrioc->active_poll_qcount--;
2024 
2025 	memset(&delq_req, 0, sizeof(delq_req));
2026 	mutex_lock(&mrioc->init_cmds.mutex);
2027 	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
2028 		retval = -1;
2029 		ioc_err(mrioc, "Issue DelRepQ: Init command is in use\n");
2030 		mutex_unlock(&mrioc->init_cmds.mutex);
2031 		goto out;
2032 	}
2033 	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
2034 	mrioc->init_cmds.is_waiting = 1;
2035 	mrioc->init_cmds.callback = NULL;
2036 	delq_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
2037 	delq_req.function = MPI3_FUNCTION_DELETE_REPLY_QUEUE;
2038 	delq_req.queue_id = cpu_to_le16(reply_qid);
2039 
2040 	init_completion(&mrioc->init_cmds.done);
2041 	retval = mpi3mr_admin_request_post(mrioc, &delq_req, sizeof(delq_req),
2042 	    1);
2043 	if (retval) {
2044 		ioc_err(mrioc, "Issue DelRepQ: Admin Post failed\n");
2045 		goto out_unlock;
2046 	}
2047 	wait_for_completion_timeout(&mrioc->init_cmds.done,
2048 	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
2049 	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
2050 		ioc_err(mrioc, "delete reply queue timed out\n");
2051 		mpi3mr_check_rh_fault_ioc(mrioc,
2052 		    MPI3MR_RESET_FROM_DELREPQ_TIMEOUT);
2053 		retval = -1;
2054 		goto out_unlock;
2055 	}
2056 	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
2057 	    != MPI3_IOCSTATUS_SUCCESS) {
2058 		ioc_err(mrioc,
2059 		    "Issue DelRepQ: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
2060 		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
2061 		    mrioc->init_cmds.ioc_loginfo);
2062 		retval = -1;
2063 		goto out_unlock;
2064 	}
2065 	mrioc->intr_info[midx].op_reply_q = NULL;
2066 
2067 	mpi3mr_free_op_reply_q_segments(mrioc, qidx);
2068 out_unlock:
2069 	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
2070 	mutex_unlock(&mrioc->init_cmds.mutex);
2071 out:
2072 
2073 	return retval;
2074 }
2075 
2076 /**
2077  * mpi3mr_alloc_op_reply_q_segments -Alloc segmented reply pool
2078  * @mrioc: Adapter instance reference
2079  * @qidx: request queue index
2080  *
2081  * Allocate segmented memory pools for operational reply
2082  * queue.
2083  *
2084  * Return: 0 on success, non-zero on failure.
2085  */
2086 static int mpi3mr_alloc_op_reply_q_segments(struct mpi3mr_ioc *mrioc, u16 qidx)
2087 {
2088 	struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx;
2089 	int i, size;
2090 	u64 *q_segment_list_entry = NULL;
2091 	struct segments *segments;
2092 
2093 	if (mrioc->enable_segqueue) {
2094 		op_reply_q->segment_qd =
2095 		    MPI3MR_OP_REP_Q_SEG_SIZE / mrioc->op_reply_desc_sz;
2096 
2097 		size = MPI3MR_OP_REP_Q_SEG_SIZE;
2098 
2099 		op_reply_q->q_segment_list = dma_alloc_coherent(&mrioc->pdev->dev,
2100 		    MPI3MR_MAX_SEG_LIST_SIZE, &op_reply_q->q_segment_list_dma,
2101 		    GFP_KERNEL);
2102 		if (!op_reply_q->q_segment_list)
2103 			return -ENOMEM;
2104 		q_segment_list_entry = (u64 *)op_reply_q->q_segment_list;
2105 	} else {
2106 		op_reply_q->segment_qd = op_reply_q->num_replies;
2107 		size = op_reply_q->num_replies * mrioc->op_reply_desc_sz;
2108 	}
2109 
2110 	op_reply_q->num_segments = DIV_ROUND_UP(op_reply_q->num_replies,
2111 	    op_reply_q->segment_qd);
2112 
2113 	op_reply_q->q_segments = kcalloc(op_reply_q->num_segments,
2114 	    sizeof(struct segments), GFP_KERNEL);
2115 	if (!op_reply_q->q_segments)
2116 		return -ENOMEM;
2117 
2118 	segments = op_reply_q->q_segments;
2119 	for (i = 0; i < op_reply_q->num_segments; i++) {
2120 		segments[i].segment =
2121 		    dma_alloc_coherent(&mrioc->pdev->dev,
2122 		    size, &segments[i].segment_dma, GFP_KERNEL);
2123 		if (!segments[i].segment)
2124 			return -ENOMEM;
2125 		if (mrioc->enable_segqueue)
2126 			q_segment_list_entry[i] =
2127 			    (unsigned long)segments[i].segment_dma;
2128 	}
2129 
2130 	return 0;
2131 }
2132 
2133 /**
2134  * mpi3mr_alloc_op_req_q_segments - Alloc segmented req pool.
2135  * @mrioc: Adapter instance reference
2136  * @qidx: request queue index
2137  *
2138  * Allocate segmented memory pools for operational request
2139  * queue.
2140  *
2141  * Return: 0 on success, non-zero on failure.
2142  */
2143 static int mpi3mr_alloc_op_req_q_segments(struct mpi3mr_ioc *mrioc, u16 qidx)
2144 {
2145 	struct op_req_qinfo *op_req_q = mrioc->req_qinfo + qidx;
2146 	int i, size;
2147 	u64 *q_segment_list_entry = NULL;
2148 	struct segments *segments;
2149 
2150 	if (mrioc->enable_segqueue) {
2151 		op_req_q->segment_qd =
2152 		    MPI3MR_OP_REQ_Q_SEG_SIZE / mrioc->facts.op_req_sz;
2153 
2154 		size = MPI3MR_OP_REQ_Q_SEG_SIZE;
2155 
2156 		op_req_q->q_segment_list = dma_alloc_coherent(&mrioc->pdev->dev,
2157 		    MPI3MR_MAX_SEG_LIST_SIZE, &op_req_q->q_segment_list_dma,
2158 		    GFP_KERNEL);
2159 		if (!op_req_q->q_segment_list)
2160 			return -ENOMEM;
2161 		q_segment_list_entry = (u64 *)op_req_q->q_segment_list;
2162 
2163 	} else {
2164 		op_req_q->segment_qd = op_req_q->num_requests;
2165 		size = op_req_q->num_requests * mrioc->facts.op_req_sz;
2166 	}
2167 
2168 	op_req_q->num_segments = DIV_ROUND_UP(op_req_q->num_requests,
2169 	    op_req_q->segment_qd);
2170 
2171 	op_req_q->q_segments = kcalloc(op_req_q->num_segments,
2172 	    sizeof(struct segments), GFP_KERNEL);
2173 	if (!op_req_q->q_segments)
2174 		return -ENOMEM;
2175 
2176 	segments = op_req_q->q_segments;
2177 	for (i = 0; i < op_req_q->num_segments; i++) {
2178 		segments[i].segment =
2179 		    dma_alloc_coherent(&mrioc->pdev->dev,
2180 		    size, &segments[i].segment_dma, GFP_KERNEL);
2181 		if (!segments[i].segment)
2182 			return -ENOMEM;
2183 		if (mrioc->enable_segqueue)
2184 			q_segment_list_entry[i] =
2185 			    (unsigned long)segments[i].segment_dma;
2186 	}
2187 
2188 	return 0;
2189 }
2190 
2191 /**
2192  * mpi3mr_create_op_reply_q - create operational reply queue
2193  * @mrioc: Adapter instance reference
2194  * @qidx: operational reply queue index
2195  *
2196  * Create operatinal reply queue by issuing MPI request
2197  * through admin queue.
2198  *
2199  * Return:  0 on success, non-zero on failure.
2200  */
2201 static int mpi3mr_create_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx)
2202 {
2203 	struct mpi3_create_reply_queue_request create_req;
2204 	struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx;
2205 	int retval = 0;
2206 	u16 reply_qid = 0, midx;
2207 
2208 	reply_qid = op_reply_q->qid;
2209 
2210 	midx = REPLY_QUEUE_IDX_TO_MSIX_IDX(qidx, mrioc->op_reply_q_offset);
2211 
2212 	if (reply_qid) {
2213 		retval = -1;
2214 		ioc_err(mrioc, "CreateRepQ: called for duplicate qid %d\n",
2215 		    reply_qid);
2216 
2217 		return retval;
2218 	}
2219 
2220 	reply_qid = qidx + 1;
2221 
2222 	if (mrioc->pdev->device == MPI3_MFGPAGE_DEVID_SAS4116) {
2223 		if (mrioc->pdev->revision)
2224 			op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD;
2225 		else
2226 			op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD4K;
2227 	} else
2228 		op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD2K;
2229 
2230 	op_reply_q->ci = 0;
2231 	op_reply_q->ephase = 1;
2232 	atomic_set(&op_reply_q->pend_ios, 0);
2233 	atomic_set(&op_reply_q->in_use, 0);
2234 	op_reply_q->enable_irq_poll = false;
2235 	op_reply_q->qfull_watermark =
2236 		op_reply_q->num_replies - (MPI3MR_THRESHOLD_REPLY_COUNT * 2);
2237 
2238 	if (!op_reply_q->q_segments) {
2239 		retval = mpi3mr_alloc_op_reply_q_segments(mrioc, qidx);
2240 		if (retval) {
2241 			mpi3mr_free_op_reply_q_segments(mrioc, qidx);
2242 			goto out;
2243 		}
2244 	}
2245 
2246 	memset(&create_req, 0, sizeof(create_req));
2247 	mutex_lock(&mrioc->init_cmds.mutex);
2248 	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
2249 		retval = -1;
2250 		ioc_err(mrioc, "CreateRepQ: Init command is in use\n");
2251 		goto out_unlock;
2252 	}
2253 	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
2254 	mrioc->init_cmds.is_waiting = 1;
2255 	mrioc->init_cmds.callback = NULL;
2256 	create_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
2257 	create_req.function = MPI3_FUNCTION_CREATE_REPLY_QUEUE;
2258 	create_req.queue_id = cpu_to_le16(reply_qid);
2259 
2260 	if (midx < (mrioc->intr_info_count - mrioc->requested_poll_qcount))
2261 		op_reply_q->qtype = MPI3MR_DEFAULT_QUEUE;
2262 	else
2263 		op_reply_q->qtype = MPI3MR_POLL_QUEUE;
2264 
2265 	if (op_reply_q->qtype == MPI3MR_DEFAULT_QUEUE) {
2266 		create_req.flags =
2267 			MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_ENABLE;
2268 		create_req.msix_index =
2269 			cpu_to_le16(mrioc->intr_info[midx].msix_index);
2270 	} else {
2271 		create_req.msix_index = cpu_to_le16(mrioc->intr_info_count - 1);
2272 		ioc_info(mrioc, "create reply queue(polled): for qid(%d), midx(%d)\n",
2273 			reply_qid, midx);
2274 		if (!mrioc->active_poll_qcount)
2275 			disable_irq_nosync(pci_irq_vector(mrioc->pdev,
2276 			    mrioc->intr_info_count - 1));
2277 	}
2278 
2279 	if (mrioc->enable_segqueue) {
2280 		create_req.flags |=
2281 		    MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_SEGMENTED;
2282 		create_req.base_address = cpu_to_le64(
2283 		    op_reply_q->q_segment_list_dma);
2284 	} else
2285 		create_req.base_address = cpu_to_le64(
2286 		    op_reply_q->q_segments[0].segment_dma);
2287 
2288 	create_req.size = cpu_to_le16(op_reply_q->num_replies);
2289 
2290 	init_completion(&mrioc->init_cmds.done);
2291 	retval = mpi3mr_admin_request_post(mrioc, &create_req,
2292 	    sizeof(create_req), 1);
2293 	if (retval) {
2294 		ioc_err(mrioc, "CreateRepQ: Admin Post failed\n");
2295 		goto out_unlock;
2296 	}
2297 	wait_for_completion_timeout(&mrioc->init_cmds.done,
2298 	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
2299 	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
2300 		ioc_err(mrioc, "create reply queue timed out\n");
2301 		mpi3mr_check_rh_fault_ioc(mrioc,
2302 		    MPI3MR_RESET_FROM_CREATEREPQ_TIMEOUT);
2303 		retval = -1;
2304 		goto out_unlock;
2305 	}
2306 	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
2307 	    != MPI3_IOCSTATUS_SUCCESS) {
2308 		ioc_err(mrioc,
2309 		    "CreateRepQ: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
2310 		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
2311 		    mrioc->init_cmds.ioc_loginfo);
2312 		retval = -1;
2313 		goto out_unlock;
2314 	}
2315 	op_reply_q->qid = reply_qid;
2316 	if (midx < mrioc->intr_info_count)
2317 		mrioc->intr_info[midx].op_reply_q = op_reply_q;
2318 
2319 	(op_reply_q->qtype == MPI3MR_DEFAULT_QUEUE) ? mrioc->default_qcount++ :
2320 	    mrioc->active_poll_qcount++;
2321 
2322 out_unlock:
2323 	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
2324 	mutex_unlock(&mrioc->init_cmds.mutex);
2325 out:
2326 
2327 	return retval;
2328 }
2329 
2330 /**
2331  * mpi3mr_create_op_req_q - create operational request queue
2332  * @mrioc: Adapter instance reference
2333  * @idx: operational request queue index
2334  * @reply_qid: Reply queue ID
2335  *
2336  * Create operatinal request queue by issuing MPI request
2337  * through admin queue.
2338  *
2339  * Return:  0 on success, non-zero on failure.
2340  */
2341 static int mpi3mr_create_op_req_q(struct mpi3mr_ioc *mrioc, u16 idx,
2342 	u16 reply_qid)
2343 {
2344 	struct mpi3_create_request_queue_request create_req;
2345 	struct op_req_qinfo *op_req_q = mrioc->req_qinfo + idx;
2346 	int retval = 0;
2347 	u16 req_qid = 0;
2348 
2349 	req_qid = op_req_q->qid;
2350 
2351 	if (req_qid) {
2352 		retval = -1;
2353 		ioc_err(mrioc, "CreateReqQ: called for duplicate qid %d\n",
2354 		    req_qid);
2355 
2356 		return retval;
2357 	}
2358 	req_qid = idx + 1;
2359 
2360 	op_req_q->num_requests = MPI3MR_OP_REQ_Q_QD;
2361 	op_req_q->ci = 0;
2362 	op_req_q->pi = 0;
2363 	op_req_q->reply_qid = reply_qid;
2364 	spin_lock_init(&op_req_q->q_lock);
2365 
2366 	if (!op_req_q->q_segments) {
2367 		retval = mpi3mr_alloc_op_req_q_segments(mrioc, idx);
2368 		if (retval) {
2369 			mpi3mr_free_op_req_q_segments(mrioc, idx);
2370 			goto out;
2371 		}
2372 	}
2373 
2374 	memset(&create_req, 0, sizeof(create_req));
2375 	mutex_lock(&mrioc->init_cmds.mutex);
2376 	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
2377 		retval = -1;
2378 		ioc_err(mrioc, "CreateReqQ: Init command is in use\n");
2379 		goto out_unlock;
2380 	}
2381 	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
2382 	mrioc->init_cmds.is_waiting = 1;
2383 	mrioc->init_cmds.callback = NULL;
2384 	create_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
2385 	create_req.function = MPI3_FUNCTION_CREATE_REQUEST_QUEUE;
2386 	create_req.queue_id = cpu_to_le16(req_qid);
2387 	if (mrioc->enable_segqueue) {
2388 		create_req.flags =
2389 		    MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_SEGMENTED;
2390 		create_req.base_address = cpu_to_le64(
2391 		    op_req_q->q_segment_list_dma);
2392 	} else
2393 		create_req.base_address = cpu_to_le64(
2394 		    op_req_q->q_segments[0].segment_dma);
2395 	create_req.reply_queue_id = cpu_to_le16(reply_qid);
2396 	create_req.size = cpu_to_le16(op_req_q->num_requests);
2397 
2398 	init_completion(&mrioc->init_cmds.done);
2399 	retval = mpi3mr_admin_request_post(mrioc, &create_req,
2400 	    sizeof(create_req), 1);
2401 	if (retval) {
2402 		ioc_err(mrioc, "CreateReqQ: Admin Post failed\n");
2403 		goto out_unlock;
2404 	}
2405 	wait_for_completion_timeout(&mrioc->init_cmds.done,
2406 	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
2407 	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
2408 		ioc_err(mrioc, "create request queue timed out\n");
2409 		mpi3mr_check_rh_fault_ioc(mrioc,
2410 		    MPI3MR_RESET_FROM_CREATEREQQ_TIMEOUT);
2411 		retval = -1;
2412 		goto out_unlock;
2413 	}
2414 	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
2415 	    != MPI3_IOCSTATUS_SUCCESS) {
2416 		ioc_err(mrioc,
2417 		    "CreateReqQ: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
2418 		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
2419 		    mrioc->init_cmds.ioc_loginfo);
2420 		retval = -1;
2421 		goto out_unlock;
2422 	}
2423 	op_req_q->qid = req_qid;
2424 
2425 out_unlock:
2426 	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
2427 	mutex_unlock(&mrioc->init_cmds.mutex);
2428 out:
2429 
2430 	return retval;
2431 }
2432 
2433 /**
2434  * mpi3mr_create_op_queues - create operational queue pairs
2435  * @mrioc: Adapter instance reference
2436  *
2437  * Allocate memory for operational queue meta data and call
2438  * create request and reply queue functions.
2439  *
2440  * Return: 0 on success, non-zero on failures.
2441  */
2442 static int mpi3mr_create_op_queues(struct mpi3mr_ioc *mrioc)
2443 {
2444 	int retval = 0;
2445 	u16 num_queues = 0, i = 0, msix_count_op_q = 1;
2446 	u32 ioc_status;
2447 	enum mpi3mr_iocstate ioc_state;
2448 
2449 	num_queues = min_t(int, mrioc->facts.max_op_reply_q,
2450 	    mrioc->facts.max_op_req_q);
2451 
2452 	msix_count_op_q =
2453 	    mrioc->intr_info_count - mrioc->op_reply_q_offset;
2454 	if (!mrioc->num_queues)
2455 		mrioc->num_queues = min_t(int, num_queues, msix_count_op_q);
2456 	/*
2457 	 * During reset set the num_queues to the number of queues
2458 	 * that was set before the reset.
2459 	 */
2460 	num_queues = mrioc->num_op_reply_q ?
2461 	    mrioc->num_op_reply_q : mrioc->num_queues;
2462 	ioc_info(mrioc, "trying to create %d operational queue pairs\n",
2463 	    num_queues);
2464 
2465 	if (!mrioc->req_qinfo) {
2466 		mrioc->req_qinfo = kcalloc(num_queues,
2467 		    sizeof(struct op_req_qinfo), GFP_KERNEL);
2468 		if (!mrioc->req_qinfo) {
2469 			retval = -1;
2470 			goto out_failed;
2471 		}
2472 
2473 		mrioc->op_reply_qinfo = kzalloc(sizeof(struct op_reply_qinfo) *
2474 		    num_queues, GFP_KERNEL);
2475 		if (!mrioc->op_reply_qinfo) {
2476 			retval = -1;
2477 			goto out_failed;
2478 		}
2479 	}
2480 
2481 	if (mrioc->enable_segqueue)
2482 		ioc_info(mrioc,
2483 		    "allocating operational queues through segmented queues\n");
2484 
2485 	for (i = 0; i < num_queues; i++) {
2486 		if (mpi3mr_create_op_reply_q(mrioc, i)) {
2487 			ioc_err(mrioc, "Cannot create OP RepQ %d\n", i);
2488 			break;
2489 		}
2490 		if (mpi3mr_create_op_req_q(mrioc, i,
2491 		    mrioc->op_reply_qinfo[i].qid)) {
2492 			ioc_err(mrioc, "Cannot create OP ReqQ %d\n", i);
2493 			mpi3mr_delete_op_reply_q(mrioc, i);
2494 			break;
2495 		}
2496 	}
2497 
2498 	if (i == 0) {
2499 		/* Not even one queue is created successfully*/
2500 		retval = -1;
2501 		goto out_failed;
2502 	}
2503 	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
2504 	ioc_state = mpi3mr_get_iocstate(mrioc);
2505 	if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) ||
2506 	    ioc_state != MRIOC_STATE_READY) {
2507 		mpi3mr_print_fault_info(mrioc);
2508 		retval = -1;
2509 		goto out_failed;
2510 	}
2511 	mrioc->num_op_reply_q = mrioc->num_op_req_q = i;
2512 	ioc_info(mrioc,
2513 	    "successfully created %d operational queue pairs(default/polled) queue = (%d/%d)\n",
2514 	    mrioc->num_op_reply_q, mrioc->default_qcount,
2515 	    mrioc->active_poll_qcount);
2516 
2517 	return retval;
2518 out_failed:
2519 	kfree(mrioc->req_qinfo);
2520 	mrioc->req_qinfo = NULL;
2521 
2522 	kfree(mrioc->op_reply_qinfo);
2523 	mrioc->op_reply_qinfo = NULL;
2524 
2525 	return retval;
2526 }
2527 
2528 /**
2529  * mpi3mr_op_request_post - Post request to operational queue
2530  * @mrioc: Adapter reference
2531  * @op_req_q: Operational request queue info
2532  * @req: MPI3 request
2533  *
2534  * Post the MPI3 request into operational request queue and
2535  * inform the controller, if the queue is full return
2536  * appropriate error.
2537  *
2538  * Return: 0 on success, non-zero on failure.
2539  */
2540 int mpi3mr_op_request_post(struct mpi3mr_ioc *mrioc,
2541 	struct op_req_qinfo *op_req_q, u8 *req)
2542 {
2543 	u16 pi = 0, max_entries, reply_qidx = 0, midx;
2544 	int retval = 0;
2545 	unsigned long flags;
2546 	u8 *req_entry;
2547 	void *segment_base_addr;
2548 	u16 req_sz = mrioc->facts.op_req_sz;
2549 	struct segments *segments = op_req_q->q_segments;
2550 	struct op_reply_qinfo *op_reply_q = NULL;
2551 
2552 	reply_qidx = op_req_q->reply_qid - 1;
2553 	op_reply_q = mrioc->op_reply_qinfo + reply_qidx;
2554 
2555 	if (mrioc->unrecoverable)
2556 		return -EFAULT;
2557 
2558 	spin_lock_irqsave(&op_req_q->q_lock, flags);
2559 	pi = op_req_q->pi;
2560 	max_entries = op_req_q->num_requests;
2561 
2562 	if (mpi3mr_check_req_qfull(op_req_q)) {
2563 		midx = REPLY_QUEUE_IDX_TO_MSIX_IDX(
2564 		    reply_qidx, mrioc->op_reply_q_offset);
2565 		mpi3mr_process_op_reply_q(mrioc, mrioc->intr_info[midx].op_reply_q);
2566 
2567 		if (mpi3mr_check_req_qfull(op_req_q)) {
2568 			retval = -EAGAIN;
2569 			goto out;
2570 		}
2571 	}
2572 
2573 	if (mrioc->reset_in_progress) {
2574 		ioc_err(mrioc, "OpReqQ submit reset in progress\n");
2575 		retval = -EAGAIN;
2576 		goto out;
2577 	}
2578 	if (mrioc->pci_err_recovery) {
2579 		ioc_err(mrioc, "operational request queue submission failed due to pci error recovery in progress\n");
2580 		retval = -EAGAIN;
2581 		goto out;
2582 	}
2583 
2584 	/* Reply queue is nearing to get full, push back IOs to SML */
2585 	if ((mrioc->prevent_reply_qfull == true) &&
2586 		(atomic_read(&op_reply_q->pend_ios) >
2587 	     (op_reply_q->qfull_watermark))) {
2588 		atomic_inc(&mrioc->reply_qfull_count);
2589 		retval = -EAGAIN;
2590 		goto out;
2591 	}
2592 
2593 	segment_base_addr = segments[pi / op_req_q->segment_qd].segment;
2594 	req_entry = (u8 *)segment_base_addr +
2595 	    ((pi % op_req_q->segment_qd) * req_sz);
2596 
2597 	memset(req_entry, 0, req_sz);
2598 	memcpy(req_entry, req, MPI3MR_ADMIN_REQ_FRAME_SZ);
2599 
2600 	if (++pi == max_entries)
2601 		pi = 0;
2602 	op_req_q->pi = pi;
2603 
2604 #ifndef CONFIG_PREEMPT_RT
2605 	if (atomic_inc_return(&mrioc->op_reply_qinfo[reply_qidx].pend_ios)
2606 	    > MPI3MR_IRQ_POLL_TRIGGER_IOCOUNT)
2607 		mrioc->op_reply_qinfo[reply_qidx].enable_irq_poll = true;
2608 #else
2609 	atomic_inc_return(&mrioc->op_reply_qinfo[reply_qidx].pend_ios);
2610 #endif
2611 
2612 	writel(op_req_q->pi,
2613 	    &mrioc->sysif_regs->oper_queue_indexes[reply_qidx].producer_index);
2614 
2615 out:
2616 	spin_unlock_irqrestore(&op_req_q->q_lock, flags);
2617 	return retval;
2618 }
2619 
2620 /**
2621  * mpi3mr_check_rh_fault_ioc - check reset history and fault
2622  * controller
2623  * @mrioc: Adapter instance reference
2624  * @reason_code: reason code for the fault.
2625  *
2626  * This routine will save snapdump and fault the controller with
2627  * the given reason code if it is not already in the fault or
2628  * not asynchronosuly reset. This will be used to handle
2629  * initilaization time faults/resets/timeout as in those cases
2630  * immediate soft reset invocation is not required.
2631  *
2632  * Return:  None.
2633  */
2634 void mpi3mr_check_rh_fault_ioc(struct mpi3mr_ioc *mrioc, u32 reason_code)
2635 {
2636 	u32 ioc_status, host_diagnostic, timeout;
2637 	union mpi3mr_trigger_data trigger_data;
2638 
2639 	if (mrioc->unrecoverable) {
2640 		ioc_err(mrioc, "controller is unrecoverable\n");
2641 		return;
2642 	}
2643 
2644 	if (!pci_device_is_present(mrioc->pdev)) {
2645 		mrioc->unrecoverable = 1;
2646 		ioc_err(mrioc, "controller is not present\n");
2647 		return;
2648 	}
2649 	memset(&trigger_data, 0, sizeof(trigger_data));
2650 	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
2651 
2652 	if (ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) {
2653 		mpi3mr_set_trigger_data_in_all_hdb(mrioc,
2654 		    MPI3MR_HDB_TRIGGER_TYPE_FW_RELEASED, NULL, 0);
2655 		return;
2656 	} else if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) {
2657 		trigger_data.fault = (readl(&mrioc->sysif_regs->fault) &
2658 		      MPI3_SYSIF_FAULT_CODE_MASK);
2659 
2660 		mpi3mr_set_trigger_data_in_all_hdb(mrioc,
2661 		    MPI3MR_HDB_TRIGGER_TYPE_FAULT, &trigger_data, 0);
2662 		mpi3mr_print_fault_info(mrioc);
2663 		mpi3mr_save_fault_info(mrioc);
2664 		mrioc->fault_during_init = 1;
2665 		mrioc->fwfault_counter++;
2666 		return;
2667 	}
2668 
2669 	mpi3mr_set_diagsave(mrioc);
2670 	mpi3mr_issue_reset(mrioc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
2671 	    reason_code);
2672 	trigger_data.fault = (readl(&mrioc->sysif_regs->fault) &
2673 		      MPI3_SYSIF_FAULT_CODE_MASK);
2674 	mpi3mr_set_trigger_data_in_all_hdb(mrioc, MPI3MR_HDB_TRIGGER_TYPE_FAULT,
2675 	    &trigger_data, 0);
2676 	timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10;
2677 	do {
2678 		host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic);
2679 		if (!(host_diagnostic & MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS))
2680 			break;
2681 		msleep(100);
2682 	} while (--timeout);
2683 
2684 	mpi3mr_save_fault_info(mrioc);
2685 	mrioc->fault_during_init = 1;
2686 	mrioc->fwfault_counter++;
2687 }
2688 
2689 /**
2690  * mpi3mr_sync_timestamp - Issue time stamp sync request
2691  * @mrioc: Adapter reference
2692  *
2693  * Issue IO unit control MPI request to synchornize firmware
2694  * timestamp with host time.
2695  *
2696  * Return: 0 on success, non-zero on failure.
2697  */
2698 static int mpi3mr_sync_timestamp(struct mpi3mr_ioc *mrioc)
2699 {
2700 	ktime_t current_time;
2701 	struct mpi3_iounit_control_request iou_ctrl;
2702 	int retval = 0;
2703 
2704 	memset(&iou_ctrl, 0, sizeof(iou_ctrl));
2705 	mutex_lock(&mrioc->init_cmds.mutex);
2706 	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
2707 		retval = -1;
2708 		ioc_err(mrioc, "Issue IOUCTL time_stamp: command is in use\n");
2709 		mutex_unlock(&mrioc->init_cmds.mutex);
2710 		goto out;
2711 	}
2712 	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
2713 	mrioc->init_cmds.is_waiting = 1;
2714 	mrioc->init_cmds.callback = NULL;
2715 	iou_ctrl.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
2716 	iou_ctrl.function = MPI3_FUNCTION_IO_UNIT_CONTROL;
2717 	iou_ctrl.operation = MPI3_CTRL_OP_UPDATE_TIMESTAMP;
2718 	current_time = ktime_get_real();
2719 	iou_ctrl.param64[0] = cpu_to_le64(ktime_to_ms(current_time));
2720 
2721 	init_completion(&mrioc->init_cmds.done);
2722 	retval = mpi3mr_admin_request_post(mrioc, &iou_ctrl,
2723 	    sizeof(iou_ctrl), 0);
2724 	if (retval) {
2725 		ioc_err(mrioc, "Issue IOUCTL time_stamp: Admin Post failed\n");
2726 		goto out_unlock;
2727 	}
2728 
2729 	wait_for_completion_timeout(&mrioc->init_cmds.done,
2730 	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
2731 	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
2732 		ioc_err(mrioc, "Issue IOUCTL time_stamp: command timed out\n");
2733 		mrioc->init_cmds.is_waiting = 0;
2734 		if (!(mrioc->init_cmds.state & MPI3MR_CMD_RESET))
2735 			mpi3mr_check_rh_fault_ioc(mrioc,
2736 			    MPI3MR_RESET_FROM_TSU_TIMEOUT);
2737 		retval = -1;
2738 		goto out_unlock;
2739 	}
2740 	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
2741 	    != MPI3_IOCSTATUS_SUCCESS) {
2742 		ioc_err(mrioc,
2743 		    "Issue IOUCTL time_stamp: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
2744 		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
2745 		    mrioc->init_cmds.ioc_loginfo);
2746 		retval = -1;
2747 		goto out_unlock;
2748 	}
2749 
2750 out_unlock:
2751 	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
2752 	mutex_unlock(&mrioc->init_cmds.mutex);
2753 
2754 out:
2755 	return retval;
2756 }
2757 
2758 /**
2759  * mpi3mr_print_pkg_ver - display controller fw package version
2760  * @mrioc: Adapter reference
2761  *
2762  * Retrieve firmware package version from the component image
2763  * header of the controller flash and display it.
2764  *
2765  * Return: 0 on success and non-zero on failure.
2766  */
2767 static int mpi3mr_print_pkg_ver(struct mpi3mr_ioc *mrioc)
2768 {
2769 	struct mpi3_ci_upload_request ci_upload;
2770 	int retval = -1;
2771 	void *data = NULL;
2772 	dma_addr_t data_dma;
2773 	struct mpi3_ci_manifest_mpi *manifest;
2774 	u32 data_len = sizeof(struct mpi3_ci_manifest_mpi);
2775 	u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
2776 
2777 	data = dma_alloc_coherent(&mrioc->pdev->dev, data_len, &data_dma,
2778 	    GFP_KERNEL);
2779 	if (!data)
2780 		return -ENOMEM;
2781 
2782 	memset(&ci_upload, 0, sizeof(ci_upload));
2783 	mutex_lock(&mrioc->init_cmds.mutex);
2784 	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
2785 		ioc_err(mrioc, "sending get package version failed due to command in use\n");
2786 		mutex_unlock(&mrioc->init_cmds.mutex);
2787 		goto out;
2788 	}
2789 	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
2790 	mrioc->init_cmds.is_waiting = 1;
2791 	mrioc->init_cmds.callback = NULL;
2792 	ci_upload.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
2793 	ci_upload.function = MPI3_FUNCTION_CI_UPLOAD;
2794 	ci_upload.msg_flags = MPI3_CI_UPLOAD_MSGFLAGS_LOCATION_PRIMARY;
2795 	ci_upload.signature1 = cpu_to_le32(MPI3_IMAGE_HEADER_SIGNATURE1_MANIFEST);
2796 	ci_upload.image_offset = cpu_to_le32(MPI3_IMAGE_HEADER_SIZE);
2797 	ci_upload.segment_size = cpu_to_le32(data_len);
2798 
2799 	mpi3mr_add_sg_single(&ci_upload.sgl, sgl_flags, data_len,
2800 	    data_dma);
2801 	init_completion(&mrioc->init_cmds.done);
2802 	retval = mpi3mr_admin_request_post(mrioc, &ci_upload,
2803 	    sizeof(ci_upload), 1);
2804 	if (retval) {
2805 		ioc_err(mrioc, "posting get package version failed\n");
2806 		goto out_unlock;
2807 	}
2808 	wait_for_completion_timeout(&mrioc->init_cmds.done,
2809 	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
2810 	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
2811 		ioc_err(mrioc, "get package version timed out\n");
2812 		mpi3mr_check_rh_fault_ioc(mrioc,
2813 		    MPI3MR_RESET_FROM_GETPKGVER_TIMEOUT);
2814 		retval = -1;
2815 		goto out_unlock;
2816 	}
2817 	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
2818 	    == MPI3_IOCSTATUS_SUCCESS) {
2819 		manifest = (struct mpi3_ci_manifest_mpi *) data;
2820 		if (manifest->manifest_type == MPI3_CI_MANIFEST_TYPE_MPI) {
2821 			ioc_info(mrioc,
2822 			    "firmware package version(%d.%d.%d.%d.%05d-%05d)\n",
2823 			    manifest->package_version.gen_major,
2824 			    manifest->package_version.gen_minor,
2825 			    manifest->package_version.phase_major,
2826 			    manifest->package_version.phase_minor,
2827 			    manifest->package_version.customer_id,
2828 			    manifest->package_version.build_num);
2829 		}
2830 	}
2831 	retval = 0;
2832 out_unlock:
2833 	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
2834 	mutex_unlock(&mrioc->init_cmds.mutex);
2835 
2836 out:
2837 	if (data)
2838 		dma_free_coherent(&mrioc->pdev->dev, data_len, data,
2839 		    data_dma);
2840 	return retval;
2841 }
2842 
2843 /**
2844  * mpi3mr_watchdog_work - watchdog thread to monitor faults
2845  * @work: work struct
2846  *
2847  * Watch dog work periodically executed (1 second interval) to
2848  * monitor firmware fault and to issue periodic timer sync to
2849  * the firmware.
2850  *
2851  * Return: Nothing.
2852  */
2853 static void mpi3mr_watchdog_work(struct work_struct *work)
2854 {
2855 	struct mpi3mr_ioc *mrioc =
2856 	    container_of(work, struct mpi3mr_ioc, watchdog_work.work);
2857 	unsigned long flags;
2858 	enum mpi3mr_iocstate ioc_state;
2859 	u32 host_diagnostic, ioc_status;
2860 	union mpi3mr_trigger_data trigger_data;
2861 	u16 reset_reason = MPI3MR_RESET_FROM_FAULT_WATCH;
2862 
2863 	if (mrioc->fault_during_init) {
2864 		mpi3mr_fault_uevent_emit(mrioc);
2865 		mrioc->fault_during_init = 0;
2866 	}
2867 
2868 	if (mrioc->reset_in_progress || mrioc->pci_err_recovery)
2869 		return;
2870 
2871 	if (!mrioc->unrecoverable && !pci_device_is_present(mrioc->pdev)) {
2872 		ioc_err(mrioc, "watchdog could not detect the controller\n");
2873 		mrioc->unrecoverable = 1;
2874 	}
2875 
2876 	if (mrioc->unrecoverable) {
2877 		ioc_err(mrioc,
2878 		    "flush pending commands for unrecoverable controller\n");
2879 		mpi3mr_flush_cmds_for_unrecovered_controller(mrioc);
2880 		return;
2881 	}
2882 
2883 	if (atomic_read(&mrioc->admin_pend_isr)) {
2884 		ioc_err(mrioc, "Unprocessed admin ISR instance found\n"
2885 				"flush admin replies\n");
2886 		mpi3mr_process_admin_reply_q(mrioc);
2887 	}
2888 
2889 	if (!(mrioc->facts.ioc_capabilities &
2890 		MPI3_IOCFACTS_CAPABILITY_NON_SUPERVISOR_IOC) &&
2891 		(mrioc->ts_update_counter++ >= mrioc->ts_update_interval)) {
2892 
2893 		mrioc->ts_update_counter = 0;
2894 		mpi3mr_sync_timestamp(mrioc);
2895 	}
2896 
2897 	if ((mrioc->prepare_for_reset) &&
2898 	    ((mrioc->prepare_for_reset_timeout_counter++) >=
2899 	     MPI3MR_PREPARE_FOR_RESET_TIMEOUT)) {
2900 		mpi3mr_soft_reset_handler(mrioc,
2901 		    MPI3MR_RESET_FROM_CIACTVRST_TIMER, 1);
2902 		return;
2903 	}
2904 
2905 	memset(&trigger_data, 0, sizeof(trigger_data));
2906 	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
2907 	if (ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) {
2908 		mpi3mr_set_trigger_data_in_all_hdb(mrioc,
2909 		    MPI3MR_HDB_TRIGGER_TYPE_FW_RELEASED, NULL, 0);
2910 		mpi3mr_soft_reset_handler(mrioc, MPI3MR_RESET_FROM_FIRMWARE, 0);
2911 		return;
2912 	}
2913 
2914 	/*Check for fault state every one second and issue Soft reset*/
2915 	ioc_state = mpi3mr_get_iocstate(mrioc);
2916 	if (ioc_state != MRIOC_STATE_FAULT)
2917 		goto schedule_work;
2918 
2919 	trigger_data.fault = readl(&mrioc->sysif_regs->fault) & MPI3_SYSIF_FAULT_CODE_MASK;
2920 	mpi3mr_set_trigger_data_in_all_hdb(mrioc,
2921 	    MPI3MR_HDB_TRIGGER_TYPE_FAULT, &trigger_data, 0);
2922 	host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic);
2923 	if (host_diagnostic & MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS) {
2924 		if (!mrioc->diagsave_timeout) {
2925 			mpi3mr_print_fault_info(mrioc);
2926 			ioc_warn(mrioc, "diag save in progress\n");
2927 		}
2928 		if ((mrioc->diagsave_timeout++) <= MPI3_SYSIF_DIAG_SAVE_TIMEOUT)
2929 			goto schedule_work;
2930 	}
2931 
2932 	mpi3mr_print_fault_info(mrioc);
2933 	mrioc->diagsave_timeout = 0;
2934 
2935 	if (!mpi3mr_is_fault_recoverable(mrioc)) {
2936 		mrioc->unrecoverable = 1;
2937 		goto schedule_work;
2938 	}
2939 
2940 	mpi3mr_save_fault_info(mrioc);
2941 	mpi3mr_fault_uevent_emit(mrioc);
2942 	mrioc->fwfault_counter++;
2943 
2944 	switch (trigger_data.fault) {
2945 	case MPI3_SYSIF_FAULT_CODE_COMPLETE_RESET_NEEDED:
2946 	case MPI3_SYSIF_FAULT_CODE_POWER_CYCLE_REQUIRED:
2947 		ioc_warn(mrioc,
2948 		    "controller requires system power cycle, marking controller as unrecoverable\n");
2949 		mrioc->unrecoverable = 1;
2950 		goto schedule_work;
2951 	case MPI3_SYSIF_FAULT_CODE_SOFT_RESET_IN_PROGRESS:
2952 		goto schedule_work;
2953 	case MPI3_SYSIF_FAULT_CODE_CI_ACTIVATION_RESET:
2954 		reset_reason = MPI3MR_RESET_FROM_CIACTIV_FAULT;
2955 		break;
2956 	default:
2957 		break;
2958 	}
2959 	mpi3mr_soft_reset_handler(mrioc, reset_reason, 0);
2960 	return;
2961 
2962 schedule_work:
2963 	spin_lock_irqsave(&mrioc->watchdog_lock, flags);
2964 	if (mrioc->watchdog_work_q)
2965 		queue_delayed_work(mrioc->watchdog_work_q,
2966 		    &mrioc->watchdog_work,
2967 		    msecs_to_jiffies(MPI3MR_WATCHDOG_INTERVAL));
2968 	spin_unlock_irqrestore(&mrioc->watchdog_lock, flags);
2969 	return;
2970 }
2971 
2972 /**
2973  * mpi3mr_start_watchdog - Start watchdog
2974  * @mrioc: Adapter instance reference
2975  *
2976  * Create and start the watchdog thread to monitor controller
2977  * faults.
2978  *
2979  * Return: Nothing.
2980  */
2981 void mpi3mr_start_watchdog(struct mpi3mr_ioc *mrioc)
2982 {
2983 	if (mrioc->watchdog_work_q)
2984 		return;
2985 
2986 	INIT_DELAYED_WORK(&mrioc->watchdog_work, mpi3mr_watchdog_work);
2987 	mrioc->watchdog_work_q = alloc_ordered_workqueue(
2988 		"watchdog_%s%d", WQ_MEM_RECLAIM, mrioc->name, mrioc->id);
2989 	if (!mrioc->watchdog_work_q) {
2990 		ioc_err(mrioc, "%s: failed (line=%d)\n", __func__, __LINE__);
2991 		return;
2992 	}
2993 
2994 	if (mrioc->watchdog_work_q)
2995 		queue_delayed_work(mrioc->watchdog_work_q,
2996 		    &mrioc->watchdog_work,
2997 		    msecs_to_jiffies(MPI3MR_WATCHDOG_INTERVAL));
2998 }
2999 
3000 /**
3001  * mpi3mr_stop_watchdog - Stop watchdog
3002  * @mrioc: Adapter instance reference
3003  *
3004  * Stop the watchdog thread created to monitor controller
3005  * faults.
3006  *
3007  * Return: Nothing.
3008  */
3009 void mpi3mr_stop_watchdog(struct mpi3mr_ioc *mrioc)
3010 {
3011 	unsigned long flags;
3012 	struct workqueue_struct *wq;
3013 
3014 	spin_lock_irqsave(&mrioc->watchdog_lock, flags);
3015 	wq = mrioc->watchdog_work_q;
3016 	mrioc->watchdog_work_q = NULL;
3017 	spin_unlock_irqrestore(&mrioc->watchdog_lock, flags);
3018 	if (wq) {
3019 		if (!cancel_delayed_work_sync(&mrioc->watchdog_work))
3020 			flush_workqueue(wq);
3021 		destroy_workqueue(wq);
3022 	}
3023 }
3024 
3025 /**
3026  * mpi3mr_setup_admin_qpair - Setup admin queue pair
3027  * @mrioc: Adapter instance reference
3028  *
3029  * Allocate memory for admin queue pair if required and register
3030  * the admin queue with the controller.
3031  *
3032  * Return: 0 on success, non-zero on failures.
3033  */
3034 static int mpi3mr_setup_admin_qpair(struct mpi3mr_ioc *mrioc)
3035 {
3036 	int retval = 0;
3037 	u32 num_admin_entries = 0;
3038 
3039 	mrioc->admin_req_q_sz = MPI3MR_ADMIN_REQ_Q_SIZE;
3040 	mrioc->num_admin_req = mrioc->admin_req_q_sz /
3041 	    MPI3MR_ADMIN_REQ_FRAME_SZ;
3042 	mrioc->admin_req_ci = mrioc->admin_req_pi = 0;
3043 
3044 	mrioc->admin_reply_q_sz = MPI3MR_ADMIN_REPLY_Q_SIZE;
3045 	mrioc->num_admin_replies = mrioc->admin_reply_q_sz /
3046 	    MPI3MR_ADMIN_REPLY_FRAME_SZ;
3047 	mrioc->admin_reply_ci = 0;
3048 	mrioc->admin_reply_ephase = 1;
3049 	atomic_set(&mrioc->admin_reply_q_in_use, 0);
3050 	atomic_set(&mrioc->admin_pend_isr, 0);
3051 
3052 	if (!mrioc->admin_req_base) {
3053 		mrioc->admin_req_base = dma_alloc_coherent(&mrioc->pdev->dev,
3054 		    mrioc->admin_req_q_sz, &mrioc->admin_req_dma, GFP_KERNEL);
3055 
3056 		if (!mrioc->admin_req_base) {
3057 			retval = -1;
3058 			goto out_failed;
3059 		}
3060 
3061 		mrioc->admin_reply_base = dma_alloc_coherent(&mrioc->pdev->dev,
3062 		    mrioc->admin_reply_q_sz, &mrioc->admin_reply_dma,
3063 		    GFP_KERNEL);
3064 
3065 		if (!mrioc->admin_reply_base) {
3066 			retval = -1;
3067 			goto out_failed;
3068 		}
3069 	}
3070 
3071 	num_admin_entries = (mrioc->num_admin_replies << 16) |
3072 	    (mrioc->num_admin_req);
3073 	writel(num_admin_entries, &mrioc->sysif_regs->admin_queue_num_entries);
3074 	mpi3mr_writeq(mrioc->admin_req_dma,
3075 		&mrioc->sysif_regs->admin_request_queue_address,
3076 		&mrioc->adm_req_q_bar_writeq_lock);
3077 	mpi3mr_writeq(mrioc->admin_reply_dma,
3078 		&mrioc->sysif_regs->admin_reply_queue_address,
3079 		&mrioc->adm_reply_q_bar_writeq_lock);
3080 	writel(mrioc->admin_req_pi, &mrioc->sysif_regs->admin_request_queue_pi);
3081 	writel(mrioc->admin_reply_ci, &mrioc->sysif_regs->admin_reply_queue_ci);
3082 	return retval;
3083 
3084 out_failed:
3085 
3086 	if (mrioc->admin_reply_base) {
3087 		dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_reply_q_sz,
3088 		    mrioc->admin_reply_base, mrioc->admin_reply_dma);
3089 		mrioc->admin_reply_base = NULL;
3090 	}
3091 	if (mrioc->admin_req_base) {
3092 		dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_req_q_sz,
3093 		    mrioc->admin_req_base, mrioc->admin_req_dma);
3094 		mrioc->admin_req_base = NULL;
3095 	}
3096 	return retval;
3097 }
3098 
3099 /**
3100  * mpi3mr_issue_iocfacts - Send IOC Facts
3101  * @mrioc: Adapter instance reference
3102  * @facts_data: Cached IOC facts data
3103  *
3104  * Issue IOC Facts MPI request through admin queue and wait for
3105  * the completion of it or time out.
3106  *
3107  * Return: 0 on success, non-zero on failures.
3108  */
3109 static int mpi3mr_issue_iocfacts(struct mpi3mr_ioc *mrioc,
3110 	struct mpi3_ioc_facts_data *facts_data)
3111 {
3112 	struct mpi3_ioc_facts_request iocfacts_req;
3113 	void *data = NULL;
3114 	dma_addr_t data_dma;
3115 	u32 data_len = sizeof(*facts_data);
3116 	int retval = 0;
3117 	u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
3118 
3119 	data = dma_alloc_coherent(&mrioc->pdev->dev, data_len, &data_dma,
3120 	    GFP_KERNEL);
3121 
3122 	if (!data) {
3123 		retval = -1;
3124 		goto out;
3125 	}
3126 
3127 	memset(&iocfacts_req, 0, sizeof(iocfacts_req));
3128 	mutex_lock(&mrioc->init_cmds.mutex);
3129 	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
3130 		retval = -1;
3131 		ioc_err(mrioc, "Issue IOCFacts: Init command is in use\n");
3132 		mutex_unlock(&mrioc->init_cmds.mutex);
3133 		goto out;
3134 	}
3135 	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
3136 	mrioc->init_cmds.is_waiting = 1;
3137 	mrioc->init_cmds.callback = NULL;
3138 	iocfacts_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
3139 	iocfacts_req.function = MPI3_FUNCTION_IOC_FACTS;
3140 
3141 	mpi3mr_add_sg_single(&iocfacts_req.sgl, sgl_flags, data_len,
3142 	    data_dma);
3143 
3144 	init_completion(&mrioc->init_cmds.done);
3145 	retval = mpi3mr_admin_request_post(mrioc, &iocfacts_req,
3146 	    sizeof(iocfacts_req), 1);
3147 	if (retval) {
3148 		ioc_err(mrioc, "Issue IOCFacts: Admin Post failed\n");
3149 		goto out_unlock;
3150 	}
3151 	wait_for_completion_timeout(&mrioc->init_cmds.done,
3152 	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
3153 	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
3154 		ioc_err(mrioc, "ioc_facts timed out\n");
3155 		mpi3mr_check_rh_fault_ioc(mrioc,
3156 		    MPI3MR_RESET_FROM_IOCFACTS_TIMEOUT);
3157 		retval = -1;
3158 		goto out_unlock;
3159 	}
3160 	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
3161 	    != MPI3_IOCSTATUS_SUCCESS) {
3162 		ioc_err(mrioc,
3163 		    "Issue IOCFacts: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
3164 		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
3165 		    mrioc->init_cmds.ioc_loginfo);
3166 		retval = -1;
3167 		goto out_unlock;
3168 	}
3169 	memcpy(facts_data, (u8 *)data, data_len);
3170 	mpi3mr_process_factsdata(mrioc, facts_data);
3171 out_unlock:
3172 	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
3173 	mutex_unlock(&mrioc->init_cmds.mutex);
3174 
3175 out:
3176 	if (data)
3177 		dma_free_coherent(&mrioc->pdev->dev, data_len, data, data_dma);
3178 
3179 	return retval;
3180 }
3181 
3182 /**
3183  * mpi3mr_check_reset_dma_mask - Process IOC facts data
3184  * @mrioc: Adapter instance reference
3185  *
3186  * Check whether the new DMA mask requested through IOCFacts by
3187  * firmware needs to be set, if so set it .
3188  *
3189  * Return: 0 on success, non-zero on failure.
3190  */
3191 static inline int mpi3mr_check_reset_dma_mask(struct mpi3mr_ioc *mrioc)
3192 {
3193 	struct pci_dev *pdev = mrioc->pdev;
3194 	int r;
3195 	u64 facts_dma_mask = DMA_BIT_MASK(mrioc->facts.dma_mask);
3196 
3197 	if (!mrioc->facts.dma_mask || (mrioc->dma_mask <= facts_dma_mask))
3198 		return 0;
3199 
3200 	ioc_info(mrioc, "Changing DMA mask from 0x%016llx to 0x%016llx\n",
3201 	    mrioc->dma_mask, facts_dma_mask);
3202 
3203 	r = dma_set_mask_and_coherent(&pdev->dev, facts_dma_mask);
3204 	if (r) {
3205 		ioc_err(mrioc, "Setting DMA mask to 0x%016llx failed: %d\n",
3206 		    facts_dma_mask, r);
3207 		return r;
3208 	}
3209 	mrioc->dma_mask = facts_dma_mask;
3210 	return r;
3211 }
3212 
3213 /**
3214  * mpi3mr_process_factsdata - Process IOC facts data
3215  * @mrioc: Adapter instance reference
3216  * @facts_data: Cached IOC facts data
3217  *
3218  * Convert IOC facts data into cpu endianness and cache it in
3219  * the driver .
3220  *
3221  * Return: Nothing.
3222  */
3223 static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc,
3224 	struct mpi3_ioc_facts_data *facts_data)
3225 {
3226 	u32 ioc_config, req_sz, facts_flags;
3227 
3228 	if ((le16_to_cpu(facts_data->ioc_facts_data_length)) !=
3229 	    (sizeof(*facts_data) / 4)) {
3230 		ioc_warn(mrioc,
3231 		    "IOCFactsdata length mismatch driver_sz(%zu) firmware_sz(%d)\n",
3232 		    sizeof(*facts_data),
3233 		    le16_to_cpu(facts_data->ioc_facts_data_length) * 4);
3234 	}
3235 
3236 	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
3237 	req_sz = 1 << ((ioc_config & MPI3_SYSIF_IOC_CONFIG_OPER_REQ_ENT_SZ) >>
3238 	    MPI3_SYSIF_IOC_CONFIG_OPER_REQ_ENT_SZ_SHIFT);
3239 	if (le16_to_cpu(facts_data->ioc_request_frame_size) != (req_sz / 4)) {
3240 		ioc_err(mrioc,
3241 		    "IOCFacts data reqFrameSize mismatch hw_size(%d) firmware_sz(%d)\n",
3242 		    req_sz / 4, le16_to_cpu(facts_data->ioc_request_frame_size));
3243 	}
3244 
3245 	memset(&mrioc->facts, 0, sizeof(mrioc->facts));
3246 
3247 	facts_flags = le32_to_cpu(facts_data->flags);
3248 	mrioc->facts.op_req_sz = req_sz;
3249 	mrioc->op_reply_desc_sz = 1 << ((ioc_config &
3250 	    MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ) >>
3251 	    MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ_SHIFT);
3252 
3253 	mrioc->facts.ioc_num = facts_data->ioc_number;
3254 	mrioc->facts.who_init = facts_data->who_init;
3255 	mrioc->facts.max_msix_vectors = le16_to_cpu(facts_data->max_msix_vectors);
3256 	mrioc->facts.personality = (facts_flags &
3257 	    MPI3_IOCFACTS_FLAGS_PERSONALITY_MASK);
3258 	mrioc->facts.dma_mask = (facts_flags &
3259 	    MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_MASK) >>
3260 	    MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_SHIFT;
3261 	mrioc->facts.dma_mask = (facts_flags &
3262 	    MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_MASK) >>
3263 	    MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_SHIFT;
3264 	mrioc->facts.max_req_limit = (facts_flags &
3265 			MPI3_IOCFACTS_FLAGS_MAX_REQ_PER_REPLY_QUEUE_LIMIT);
3266 	mrioc->facts.protocol_flags = facts_data->protocol_flags;
3267 	mrioc->facts.mpi_version = le32_to_cpu(facts_data->mpi_version.word);
3268 	mrioc->facts.max_reqs = le16_to_cpu(facts_data->max_outstanding_requests);
3269 	mrioc->facts.product_id = le16_to_cpu(facts_data->product_id);
3270 	mrioc->facts.reply_sz = le16_to_cpu(facts_data->reply_frame_size) * 4;
3271 	mrioc->facts.exceptions = le16_to_cpu(facts_data->ioc_exceptions);
3272 	mrioc->facts.max_perids = le16_to_cpu(facts_data->max_persistent_id);
3273 	mrioc->facts.max_vds = le16_to_cpu(facts_data->max_vds);
3274 	mrioc->facts.max_hpds = le16_to_cpu(facts_data->max_host_pds);
3275 	mrioc->facts.max_advhpds = le16_to_cpu(facts_data->max_adv_host_pds);
3276 	mrioc->facts.max_raid_pds = le16_to_cpu(facts_data->max_raid_pds);
3277 	mrioc->facts.max_nvme = le16_to_cpu(facts_data->max_nvme);
3278 	mrioc->facts.max_pcie_switches =
3279 	    le16_to_cpu(facts_data->max_pcie_switches);
3280 	mrioc->facts.max_sasexpanders =
3281 	    le16_to_cpu(facts_data->max_sas_expanders);
3282 	mrioc->facts.max_data_length = le16_to_cpu(facts_data->max_data_length);
3283 	mrioc->facts.max_sasinitiators =
3284 	    le16_to_cpu(facts_data->max_sas_initiators);
3285 	mrioc->facts.max_enclosures = le16_to_cpu(facts_data->max_enclosures);
3286 	mrioc->facts.min_devhandle = le16_to_cpu(facts_data->min_dev_handle);
3287 	mrioc->facts.max_devhandle = le16_to_cpu(facts_data->max_dev_handle);
3288 	mrioc->facts.max_op_req_q =
3289 	    le16_to_cpu(facts_data->max_operational_request_queues);
3290 	mrioc->facts.max_op_reply_q =
3291 	    le16_to_cpu(facts_data->max_operational_reply_queues);
3292 	mrioc->facts.ioc_capabilities =
3293 	    le32_to_cpu(facts_data->ioc_capabilities);
3294 	mrioc->facts.fw_ver.build_num =
3295 	    le16_to_cpu(facts_data->fw_version.build_num);
3296 	mrioc->facts.fw_ver.cust_id =
3297 	    le16_to_cpu(facts_data->fw_version.customer_id);
3298 	mrioc->facts.fw_ver.ph_minor = facts_data->fw_version.phase_minor;
3299 	mrioc->facts.fw_ver.ph_major = facts_data->fw_version.phase_major;
3300 	mrioc->facts.fw_ver.gen_minor = facts_data->fw_version.gen_minor;
3301 	mrioc->facts.fw_ver.gen_major = facts_data->fw_version.gen_major;
3302 	mrioc->msix_count = min_t(int, mrioc->msix_count,
3303 	    mrioc->facts.max_msix_vectors);
3304 	mrioc->facts.sge_mod_mask = facts_data->sge_modifier_mask;
3305 	mrioc->facts.sge_mod_value = facts_data->sge_modifier_value;
3306 	mrioc->facts.sge_mod_shift = facts_data->sge_modifier_shift;
3307 	mrioc->facts.shutdown_timeout =
3308 	    le16_to_cpu(facts_data->shutdown_timeout);
3309 	mrioc->facts.diag_trace_sz =
3310 	    le32_to_cpu(facts_data->diag_trace_size);
3311 	mrioc->facts.diag_fw_sz =
3312 	    le32_to_cpu(facts_data->diag_fw_size);
3313 	mrioc->facts.diag_drvr_sz = le32_to_cpu(facts_data->diag_driver_size);
3314 	mrioc->facts.max_dev_per_tg =
3315 	    facts_data->max_devices_per_throttle_group;
3316 	mrioc->facts.io_throttle_data_length =
3317 	    le16_to_cpu(facts_data->io_throttle_data_length);
3318 	mrioc->facts.max_io_throttle_group =
3319 	    le16_to_cpu(facts_data->max_io_throttle_group);
3320 	mrioc->facts.io_throttle_low = le16_to_cpu(facts_data->io_throttle_low);
3321 	mrioc->facts.io_throttle_high =
3322 	    le16_to_cpu(facts_data->io_throttle_high);
3323 
3324 	if (mrioc->facts.max_data_length ==
3325 	    MPI3_IOCFACTS_MAX_DATA_LENGTH_NOT_REPORTED)
3326 		mrioc->facts.max_data_length = MPI3MR_DEFAULT_MAX_IO_SIZE;
3327 	else
3328 		mrioc->facts.max_data_length *= MPI3MR_PAGE_SIZE_4K;
3329 	/* Store in 512b block count */
3330 	if (mrioc->facts.io_throttle_data_length)
3331 		mrioc->io_throttle_data_length =
3332 		    (mrioc->facts.io_throttle_data_length * 2 * 4);
3333 	else
3334 		/* set the length to 1MB + 1K to disable throttle */
3335 		mrioc->io_throttle_data_length = (mrioc->facts.max_data_length / 512) + 2;
3336 
3337 	mrioc->io_throttle_high = (mrioc->facts.io_throttle_high * 2 * 1024);
3338 	mrioc->io_throttle_low = (mrioc->facts.io_throttle_low * 2 * 1024);
3339 
3340 	ioc_info(mrioc, "ioc_num(%d), maxopQ(%d), maxopRepQ(%d), maxdh(%d),",
3341 	    mrioc->facts.ioc_num, mrioc->facts.max_op_req_q,
3342 	    mrioc->facts.max_op_reply_q, mrioc->facts.max_devhandle);
3343 	ioc_info(mrioc,
3344 	    "maxreqs(%d), mindh(%d) maxvectors(%d) maxperids(%d)\n",
3345 	    mrioc->facts.max_reqs, mrioc->facts.min_devhandle,
3346 	    mrioc->facts.max_msix_vectors, mrioc->facts.max_perids);
3347 	ioc_info(mrioc, "SGEModMask 0x%x SGEModVal 0x%x SGEModShift 0x%x ",
3348 	    mrioc->facts.sge_mod_mask, mrioc->facts.sge_mod_value,
3349 	    mrioc->facts.sge_mod_shift);
3350 	ioc_info(mrioc, "DMA mask %d InitialPE status 0x%x max_data_len (%d)\n",
3351 	    mrioc->facts.dma_mask, (facts_flags &
3352 	    MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_MASK), mrioc->facts.max_data_length);
3353 	ioc_info(mrioc,
3354 	    "max_dev_per_throttle_group(%d), max_throttle_groups(%d)\n",
3355 	    mrioc->facts.max_dev_per_tg, mrioc->facts.max_io_throttle_group);
3356 	ioc_info(mrioc,
3357 	   "io_throttle_data_len(%dKiB), io_throttle_high(%dMiB), io_throttle_low(%dMiB)\n",
3358 	   mrioc->facts.io_throttle_data_length * 4,
3359 	   mrioc->facts.io_throttle_high, mrioc->facts.io_throttle_low);
3360 }
3361 
3362 /**
3363  * mpi3mr_alloc_reply_sense_bufs - Send IOC Init
3364  * @mrioc: Adapter instance reference
3365  *
3366  * Allocate and initialize the reply free buffers, sense
3367  * buffers, reply free queue and sense buffer queue.
3368  *
3369  * Return: 0 on success, non-zero on failures.
3370  */
3371 static int mpi3mr_alloc_reply_sense_bufs(struct mpi3mr_ioc *mrioc)
3372 {
3373 	int retval = 0;
3374 	u32 sz, i;
3375 
3376 	if (mrioc->init_cmds.reply)
3377 		return retval;
3378 
3379 	mrioc->init_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
3380 	if (!mrioc->init_cmds.reply)
3381 		goto out_failed;
3382 
3383 	mrioc->bsg_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
3384 	if (!mrioc->bsg_cmds.reply)
3385 		goto out_failed;
3386 
3387 	mrioc->transport_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
3388 	if (!mrioc->transport_cmds.reply)
3389 		goto out_failed;
3390 
3391 	for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
3392 		mrioc->dev_rmhs_cmds[i].reply = kzalloc(mrioc->reply_sz,
3393 		    GFP_KERNEL);
3394 		if (!mrioc->dev_rmhs_cmds[i].reply)
3395 			goto out_failed;
3396 	}
3397 
3398 	for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) {
3399 		mrioc->evtack_cmds[i].reply = kzalloc(mrioc->reply_sz,
3400 		    GFP_KERNEL);
3401 		if (!mrioc->evtack_cmds[i].reply)
3402 			goto out_failed;
3403 	}
3404 
3405 	mrioc->host_tm_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
3406 	if (!mrioc->host_tm_cmds.reply)
3407 		goto out_failed;
3408 
3409 	mrioc->pel_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
3410 	if (!mrioc->pel_cmds.reply)
3411 		goto out_failed;
3412 
3413 	mrioc->pel_abort_cmd.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
3414 	if (!mrioc->pel_abort_cmd.reply)
3415 		goto out_failed;
3416 
3417 	mrioc->dev_handle_bitmap_bits = mrioc->facts.max_devhandle;
3418 	mrioc->removepend_bitmap = bitmap_zalloc(mrioc->dev_handle_bitmap_bits,
3419 						 GFP_KERNEL);
3420 	if (!mrioc->removepend_bitmap)
3421 		goto out_failed;
3422 
3423 	mrioc->devrem_bitmap = bitmap_zalloc(MPI3MR_NUM_DEVRMCMD, GFP_KERNEL);
3424 	if (!mrioc->devrem_bitmap)
3425 		goto out_failed;
3426 
3427 	mrioc->evtack_cmds_bitmap = bitmap_zalloc(MPI3MR_NUM_EVTACKCMD,
3428 						  GFP_KERNEL);
3429 	if (!mrioc->evtack_cmds_bitmap)
3430 		goto out_failed;
3431 
3432 	mrioc->num_reply_bufs = mrioc->facts.max_reqs + MPI3MR_NUM_EVT_REPLIES;
3433 	mrioc->reply_free_qsz = mrioc->num_reply_bufs + 1;
3434 	mrioc->num_sense_bufs = mrioc->facts.max_reqs / MPI3MR_SENSEBUF_FACTOR;
3435 	mrioc->sense_buf_q_sz = mrioc->num_sense_bufs + 1;
3436 
3437 	/* reply buffer pool, 16 byte align */
3438 	sz = mrioc->num_reply_bufs * mrioc->reply_sz;
3439 	mrioc->reply_buf_pool = dma_pool_create("reply_buf pool",
3440 	    &mrioc->pdev->dev, sz, 16, 0);
3441 	if (!mrioc->reply_buf_pool) {
3442 		ioc_err(mrioc, "reply buf pool: dma_pool_create failed\n");
3443 		goto out_failed;
3444 	}
3445 
3446 	mrioc->reply_buf = dma_pool_zalloc(mrioc->reply_buf_pool, GFP_KERNEL,
3447 	    &mrioc->reply_buf_dma);
3448 	if (!mrioc->reply_buf)
3449 		goto out_failed;
3450 
3451 	mrioc->reply_buf_dma_max_address = mrioc->reply_buf_dma + sz;
3452 
3453 	/* reply free queue, 8 byte align */
3454 	sz = mrioc->reply_free_qsz * 8;
3455 	mrioc->reply_free_q_pool = dma_pool_create("reply_free_q pool",
3456 	    &mrioc->pdev->dev, sz, 8, 0);
3457 	if (!mrioc->reply_free_q_pool) {
3458 		ioc_err(mrioc, "reply_free_q pool: dma_pool_create failed\n");
3459 		goto out_failed;
3460 	}
3461 	mrioc->reply_free_q = dma_pool_zalloc(mrioc->reply_free_q_pool,
3462 	    GFP_KERNEL, &mrioc->reply_free_q_dma);
3463 	if (!mrioc->reply_free_q)
3464 		goto out_failed;
3465 
3466 	/* sense buffer pool,  4 byte align */
3467 	sz = mrioc->num_sense_bufs * MPI3MR_SENSE_BUF_SZ;
3468 	mrioc->sense_buf_pool = dma_pool_create("sense_buf pool",
3469 	    &mrioc->pdev->dev, sz, 4, 0);
3470 	if (!mrioc->sense_buf_pool) {
3471 		ioc_err(mrioc, "sense_buf pool: dma_pool_create failed\n");
3472 		goto out_failed;
3473 	}
3474 	mrioc->sense_buf = dma_pool_zalloc(mrioc->sense_buf_pool, GFP_KERNEL,
3475 	    &mrioc->sense_buf_dma);
3476 	if (!mrioc->sense_buf)
3477 		goto out_failed;
3478 
3479 	/* sense buffer queue, 8 byte align */
3480 	sz = mrioc->sense_buf_q_sz * 8;
3481 	mrioc->sense_buf_q_pool = dma_pool_create("sense_buf_q pool",
3482 	    &mrioc->pdev->dev, sz, 8, 0);
3483 	if (!mrioc->sense_buf_q_pool) {
3484 		ioc_err(mrioc, "sense_buf_q pool: dma_pool_create failed\n");
3485 		goto out_failed;
3486 	}
3487 	mrioc->sense_buf_q = dma_pool_zalloc(mrioc->sense_buf_q_pool,
3488 	    GFP_KERNEL, &mrioc->sense_buf_q_dma);
3489 	if (!mrioc->sense_buf_q)
3490 		goto out_failed;
3491 
3492 	return retval;
3493 
3494 out_failed:
3495 	retval = -1;
3496 	return retval;
3497 }
3498 
3499 /**
3500  * mpimr_initialize_reply_sbuf_queues - initialize reply sense
3501  * buffers
3502  * @mrioc: Adapter instance reference
3503  *
3504  * Helper function to initialize reply and sense buffers along
3505  * with some debug prints.
3506  *
3507  * Return:  None.
3508  */
3509 static void mpimr_initialize_reply_sbuf_queues(struct mpi3mr_ioc *mrioc)
3510 {
3511 	u32 sz, i;
3512 	dma_addr_t phy_addr;
3513 
3514 	sz = mrioc->num_reply_bufs * mrioc->reply_sz;
3515 	ioc_info(mrioc,
3516 	    "reply buf pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), reply_dma(0x%llx)\n",
3517 	    mrioc->reply_buf, mrioc->num_reply_bufs, mrioc->reply_sz,
3518 	    (sz / 1024), (unsigned long long)mrioc->reply_buf_dma);
3519 	sz = mrioc->reply_free_qsz * 8;
3520 	ioc_info(mrioc,
3521 	    "reply_free_q pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), reply_dma(0x%llx)\n",
3522 	    mrioc->reply_free_q, mrioc->reply_free_qsz, 8, (sz / 1024),
3523 	    (unsigned long long)mrioc->reply_free_q_dma);
3524 	sz = mrioc->num_sense_bufs * MPI3MR_SENSE_BUF_SZ;
3525 	ioc_info(mrioc,
3526 	    "sense_buf pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), sense_dma(0x%llx)\n",
3527 	    mrioc->sense_buf, mrioc->num_sense_bufs, MPI3MR_SENSE_BUF_SZ,
3528 	    (sz / 1024), (unsigned long long)mrioc->sense_buf_dma);
3529 	sz = mrioc->sense_buf_q_sz * 8;
3530 	ioc_info(mrioc,
3531 	    "sense_buf_q pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), sense_dma(0x%llx)\n",
3532 	    mrioc->sense_buf_q, mrioc->sense_buf_q_sz, 8, (sz / 1024),
3533 	    (unsigned long long)mrioc->sense_buf_q_dma);
3534 
3535 	/* initialize Reply buffer Queue */
3536 	for (i = 0, phy_addr = mrioc->reply_buf_dma;
3537 	    i < mrioc->num_reply_bufs; i++, phy_addr += mrioc->reply_sz)
3538 		mrioc->reply_free_q[i] = cpu_to_le64(phy_addr);
3539 	mrioc->reply_free_q[i] = cpu_to_le64(0);
3540 
3541 	/* initialize Sense Buffer Queue */
3542 	for (i = 0, phy_addr = mrioc->sense_buf_dma;
3543 	    i < mrioc->num_sense_bufs; i++, phy_addr += MPI3MR_SENSE_BUF_SZ)
3544 		mrioc->sense_buf_q[i] = cpu_to_le64(phy_addr);
3545 	mrioc->sense_buf_q[i] = cpu_to_le64(0);
3546 }
3547 
3548 /**
3549  * mpi3mr_issue_iocinit - Send IOC Init
3550  * @mrioc: Adapter instance reference
3551  *
3552  * Issue IOC Init MPI request through admin queue and wait for
3553  * the completion of it or time out.
3554  *
3555  * Return: 0 on success, non-zero on failures.
3556  */
3557 static int mpi3mr_issue_iocinit(struct mpi3mr_ioc *mrioc)
3558 {
3559 	struct mpi3_ioc_init_request iocinit_req;
3560 	struct mpi3_driver_info_layout *drv_info;
3561 	dma_addr_t data_dma;
3562 	u32 data_len = sizeof(*drv_info);
3563 	int retval = 0;
3564 	ktime_t current_time;
3565 
3566 	drv_info = dma_alloc_coherent(&mrioc->pdev->dev, data_len, &data_dma,
3567 	    GFP_KERNEL);
3568 	if (!drv_info) {
3569 		retval = -1;
3570 		goto out;
3571 	}
3572 	mpimr_initialize_reply_sbuf_queues(mrioc);
3573 
3574 	drv_info->information_length = cpu_to_le32(data_len);
3575 	strscpy(drv_info->driver_signature, "Broadcom", sizeof(drv_info->driver_signature));
3576 	strscpy(drv_info->os_name, utsname()->sysname, sizeof(drv_info->os_name));
3577 	strscpy(drv_info->os_version, utsname()->release, sizeof(drv_info->os_version));
3578 	strscpy(drv_info->driver_name, MPI3MR_DRIVER_NAME, sizeof(drv_info->driver_name));
3579 	strscpy(drv_info->driver_version, MPI3MR_DRIVER_VERSION, sizeof(drv_info->driver_version));
3580 	strscpy(drv_info->driver_release_date, MPI3MR_DRIVER_RELDATE,
3581 	    sizeof(drv_info->driver_release_date));
3582 	drv_info->driver_capabilities = 0;
3583 	memcpy((u8 *)&mrioc->driver_info, (u8 *)drv_info,
3584 	    sizeof(mrioc->driver_info));
3585 
3586 	memset(&iocinit_req, 0, sizeof(iocinit_req));
3587 	mutex_lock(&mrioc->init_cmds.mutex);
3588 	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
3589 		retval = -1;
3590 		ioc_err(mrioc, "Issue IOCInit: Init command is in use\n");
3591 		mutex_unlock(&mrioc->init_cmds.mutex);
3592 		goto out;
3593 	}
3594 	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
3595 	mrioc->init_cmds.is_waiting = 1;
3596 	mrioc->init_cmds.callback = NULL;
3597 	iocinit_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
3598 	iocinit_req.function = MPI3_FUNCTION_IOC_INIT;
3599 	iocinit_req.mpi_version.mpi3_version.dev = MPI3_VERSION_DEV;
3600 	iocinit_req.mpi_version.mpi3_version.unit = MPI3_VERSION_UNIT;
3601 	iocinit_req.mpi_version.mpi3_version.major = MPI3_VERSION_MAJOR;
3602 	iocinit_req.mpi_version.mpi3_version.minor = MPI3_VERSION_MINOR;
3603 	iocinit_req.who_init = MPI3_WHOINIT_HOST_DRIVER;
3604 	iocinit_req.reply_free_queue_depth = cpu_to_le16(mrioc->reply_free_qsz);
3605 	iocinit_req.reply_free_queue_address =
3606 	    cpu_to_le64(mrioc->reply_free_q_dma);
3607 	iocinit_req.sense_buffer_length = cpu_to_le16(MPI3MR_SENSE_BUF_SZ);
3608 	iocinit_req.sense_buffer_free_queue_depth =
3609 	    cpu_to_le16(mrioc->sense_buf_q_sz);
3610 	iocinit_req.sense_buffer_free_queue_address =
3611 	    cpu_to_le64(mrioc->sense_buf_q_dma);
3612 	iocinit_req.driver_information_address = cpu_to_le64(data_dma);
3613 
3614 	current_time = ktime_get_real();
3615 	iocinit_req.time_stamp = cpu_to_le64(ktime_to_ms(current_time));
3616 
3617 	iocinit_req.msg_flags |=
3618 	    MPI3_IOCINIT_MSGFLAGS_SCSIIOSTATUSREPLY_SUPPORTED;
3619 	iocinit_req.msg_flags |=
3620 		MPI3_IOCINIT_MSGFLAGS_WRITESAMEDIVERT_SUPPORTED;
3621 
3622 	init_completion(&mrioc->init_cmds.done);
3623 	retval = mpi3mr_admin_request_post(mrioc, &iocinit_req,
3624 	    sizeof(iocinit_req), 1);
3625 	if (retval) {
3626 		ioc_err(mrioc, "Issue IOCInit: Admin Post failed\n");
3627 		goto out_unlock;
3628 	}
3629 	wait_for_completion_timeout(&mrioc->init_cmds.done,
3630 	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
3631 	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
3632 		mpi3mr_check_rh_fault_ioc(mrioc,
3633 		    MPI3MR_RESET_FROM_IOCINIT_TIMEOUT);
3634 		ioc_err(mrioc, "ioc_init timed out\n");
3635 		retval = -1;
3636 		goto out_unlock;
3637 	}
3638 	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
3639 	    != MPI3_IOCSTATUS_SUCCESS) {
3640 		ioc_err(mrioc,
3641 		    "Issue IOCInit: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
3642 		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
3643 		    mrioc->init_cmds.ioc_loginfo);
3644 		retval = -1;
3645 		goto out_unlock;
3646 	}
3647 
3648 	mrioc->reply_free_queue_host_index = mrioc->num_reply_bufs;
3649 	writel(mrioc->reply_free_queue_host_index,
3650 	    &mrioc->sysif_regs->reply_free_host_index);
3651 
3652 	mrioc->sbq_host_index = mrioc->num_sense_bufs;
3653 	writel(mrioc->sbq_host_index,
3654 	    &mrioc->sysif_regs->sense_buffer_free_host_index);
3655 out_unlock:
3656 	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
3657 	mutex_unlock(&mrioc->init_cmds.mutex);
3658 
3659 out:
3660 	if (drv_info)
3661 		dma_free_coherent(&mrioc->pdev->dev, data_len, drv_info,
3662 		    data_dma);
3663 
3664 	return retval;
3665 }
3666 
3667 /**
3668  * mpi3mr_unmask_events - Unmask events in event mask bitmap
3669  * @mrioc: Adapter instance reference
3670  * @event: MPI event ID
3671  *
3672  * Un mask the specific event by resetting the event_mask
3673  * bitmap.
3674  *
3675  * Return: 0 on success, non-zero on failures.
3676  */
3677 static void mpi3mr_unmask_events(struct mpi3mr_ioc *mrioc, u16 event)
3678 {
3679 	u32 desired_event;
3680 	u8 word;
3681 
3682 	if (event >= 128)
3683 		return;
3684 
3685 	desired_event = (1 << (event % 32));
3686 	word = event / 32;
3687 
3688 	mrioc->event_masks[word] &= ~desired_event;
3689 }
3690 
3691 /**
3692  * mpi3mr_issue_event_notification - Send event notification
3693  * @mrioc: Adapter instance reference
3694  *
3695  * Issue event notification MPI request through admin queue and
3696  * wait for the completion of it or time out.
3697  *
3698  * Return: 0 on success, non-zero on failures.
3699  */
3700 static int mpi3mr_issue_event_notification(struct mpi3mr_ioc *mrioc)
3701 {
3702 	struct mpi3_event_notification_request evtnotify_req;
3703 	int retval = 0;
3704 	u8 i;
3705 
3706 	memset(&evtnotify_req, 0, sizeof(evtnotify_req));
3707 	mutex_lock(&mrioc->init_cmds.mutex);
3708 	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
3709 		retval = -1;
3710 		ioc_err(mrioc, "Issue EvtNotify: Init command is in use\n");
3711 		mutex_unlock(&mrioc->init_cmds.mutex);
3712 		goto out;
3713 	}
3714 	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
3715 	mrioc->init_cmds.is_waiting = 1;
3716 	mrioc->init_cmds.callback = NULL;
3717 	evtnotify_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
3718 	evtnotify_req.function = MPI3_FUNCTION_EVENT_NOTIFICATION;
3719 	for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
3720 		evtnotify_req.event_masks[i] =
3721 		    cpu_to_le32(mrioc->event_masks[i]);
3722 	init_completion(&mrioc->init_cmds.done);
3723 	retval = mpi3mr_admin_request_post(mrioc, &evtnotify_req,
3724 	    sizeof(evtnotify_req), 1);
3725 	if (retval) {
3726 		ioc_err(mrioc, "Issue EvtNotify: Admin Post failed\n");
3727 		goto out_unlock;
3728 	}
3729 	wait_for_completion_timeout(&mrioc->init_cmds.done,
3730 	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
3731 	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
3732 		ioc_err(mrioc, "event notification timed out\n");
3733 		mpi3mr_check_rh_fault_ioc(mrioc,
3734 		    MPI3MR_RESET_FROM_EVTNOTIFY_TIMEOUT);
3735 		retval = -1;
3736 		goto out_unlock;
3737 	}
3738 	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
3739 	    != MPI3_IOCSTATUS_SUCCESS) {
3740 		ioc_err(mrioc,
3741 		    "Issue EvtNotify: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
3742 		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
3743 		    mrioc->init_cmds.ioc_loginfo);
3744 		retval = -1;
3745 		goto out_unlock;
3746 	}
3747 
3748 out_unlock:
3749 	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
3750 	mutex_unlock(&mrioc->init_cmds.mutex);
3751 out:
3752 	return retval;
3753 }
3754 
3755 /**
3756  * mpi3mr_process_event_ack - Process event acknowledgment
3757  * @mrioc: Adapter instance reference
3758  * @event: MPI3 event ID
3759  * @event_ctx: event context
3760  *
3761  * Send event acknowledgment through admin queue and wait for
3762  * it to complete.
3763  *
3764  * Return: 0 on success, non-zero on failures.
3765  */
3766 int mpi3mr_process_event_ack(struct mpi3mr_ioc *mrioc, u8 event,
3767 	u32 event_ctx)
3768 {
3769 	struct mpi3_event_ack_request evtack_req;
3770 	int retval = 0;
3771 
3772 	memset(&evtack_req, 0, sizeof(evtack_req));
3773 	mutex_lock(&mrioc->init_cmds.mutex);
3774 	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
3775 		retval = -1;
3776 		ioc_err(mrioc, "Send EvtAck: Init command is in use\n");
3777 		mutex_unlock(&mrioc->init_cmds.mutex);
3778 		goto out;
3779 	}
3780 	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
3781 	mrioc->init_cmds.is_waiting = 1;
3782 	mrioc->init_cmds.callback = NULL;
3783 	evtack_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
3784 	evtack_req.function = MPI3_FUNCTION_EVENT_ACK;
3785 	evtack_req.event = event;
3786 	evtack_req.event_context = cpu_to_le32(event_ctx);
3787 
3788 	init_completion(&mrioc->init_cmds.done);
3789 	retval = mpi3mr_admin_request_post(mrioc, &evtack_req,
3790 	    sizeof(evtack_req), 1);
3791 	if (retval) {
3792 		ioc_err(mrioc, "Send EvtAck: Admin Post failed\n");
3793 		goto out_unlock;
3794 	}
3795 	wait_for_completion_timeout(&mrioc->init_cmds.done,
3796 	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
3797 	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
3798 		ioc_err(mrioc, "Issue EvtNotify: command timed out\n");
3799 		if (!(mrioc->init_cmds.state & MPI3MR_CMD_RESET))
3800 			mpi3mr_check_rh_fault_ioc(mrioc,
3801 			    MPI3MR_RESET_FROM_EVTACK_TIMEOUT);
3802 		retval = -1;
3803 		goto out_unlock;
3804 	}
3805 	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
3806 	    != MPI3_IOCSTATUS_SUCCESS) {
3807 		ioc_err(mrioc,
3808 		    "Send EvtAck: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
3809 		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
3810 		    mrioc->init_cmds.ioc_loginfo);
3811 		retval = -1;
3812 		goto out_unlock;
3813 	}
3814 
3815 out_unlock:
3816 	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
3817 	mutex_unlock(&mrioc->init_cmds.mutex);
3818 out:
3819 	return retval;
3820 }
3821 
3822 /**
3823  * mpi3mr_alloc_chain_bufs - Allocate chain buffers
3824  * @mrioc: Adapter instance reference
3825  *
3826  * Allocate chain buffers and set a bitmap to indicate free
3827  * chain buffers. Chain buffers are used to pass the SGE
3828  * information along with MPI3 SCSI IO requests for host I/O.
3829  *
3830  * Return: 0 on success, non-zero on failure
3831  */
3832 static int mpi3mr_alloc_chain_bufs(struct mpi3mr_ioc *mrioc)
3833 {
3834 	int retval = 0;
3835 	u32 sz, i;
3836 	u16 num_chains;
3837 
3838 	if (mrioc->chain_sgl_list)
3839 		return retval;
3840 
3841 	num_chains = mrioc->max_host_ios / MPI3MR_CHAINBUF_FACTOR;
3842 
3843 	if (prot_mask & (SHOST_DIX_TYPE0_PROTECTION
3844 	    | SHOST_DIX_TYPE1_PROTECTION
3845 	    | SHOST_DIX_TYPE2_PROTECTION
3846 	    | SHOST_DIX_TYPE3_PROTECTION))
3847 		num_chains += (num_chains / MPI3MR_CHAINBUFDIX_FACTOR);
3848 
3849 	mrioc->chain_buf_count = num_chains;
3850 	sz = sizeof(struct chain_element) * num_chains;
3851 	mrioc->chain_sgl_list = kzalloc(sz, GFP_KERNEL);
3852 	if (!mrioc->chain_sgl_list)
3853 		goto out_failed;
3854 
3855 	if (mrioc->max_sgl_entries > (mrioc->facts.max_data_length /
3856 		MPI3MR_PAGE_SIZE_4K))
3857 		mrioc->max_sgl_entries = mrioc->facts.max_data_length /
3858 			MPI3MR_PAGE_SIZE_4K;
3859 	sz = mrioc->max_sgl_entries * sizeof(struct mpi3_sge_common);
3860 	ioc_info(mrioc, "number of sgl entries=%d chain buffer size=%dKB\n",
3861 			mrioc->max_sgl_entries, sz/1024);
3862 
3863 	mrioc->chain_buf_pool = dma_pool_create("chain_buf pool",
3864 	    &mrioc->pdev->dev, sz, 16, 0);
3865 	if (!mrioc->chain_buf_pool) {
3866 		ioc_err(mrioc, "chain buf pool: dma_pool_create failed\n");
3867 		goto out_failed;
3868 	}
3869 
3870 	for (i = 0; i < num_chains; i++) {
3871 		mrioc->chain_sgl_list[i].addr =
3872 		    dma_pool_zalloc(mrioc->chain_buf_pool, GFP_KERNEL,
3873 		    &mrioc->chain_sgl_list[i].dma_addr);
3874 
3875 		if (!mrioc->chain_sgl_list[i].addr)
3876 			goto out_failed;
3877 	}
3878 	mrioc->chain_bitmap = bitmap_zalloc(num_chains, GFP_KERNEL);
3879 	if (!mrioc->chain_bitmap)
3880 		goto out_failed;
3881 	return retval;
3882 out_failed:
3883 	retval = -1;
3884 	return retval;
3885 }
3886 
3887 /**
3888  * mpi3mr_port_enable_complete - Mark port enable complete
3889  * @mrioc: Adapter instance reference
3890  * @drv_cmd: Internal command tracker
3891  *
3892  * Call back for asynchronous port enable request sets the
3893  * driver command to indicate port enable request is complete.
3894  *
3895  * Return: Nothing
3896  */
3897 static void mpi3mr_port_enable_complete(struct mpi3mr_ioc *mrioc,
3898 	struct mpi3mr_drv_cmd *drv_cmd)
3899 {
3900 	drv_cmd->callback = NULL;
3901 	mrioc->scan_started = 0;
3902 	if (drv_cmd->state & MPI3MR_CMD_RESET)
3903 		mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR;
3904 	else
3905 		mrioc->scan_failed = drv_cmd->ioc_status;
3906 	drv_cmd->state = MPI3MR_CMD_NOTUSED;
3907 }
3908 
/**
 * mpi3mr_issue_port_enable - Issue Port Enable
 * @mrioc: Adapter instance reference
 * @async: Flag to wait for completion or not
 *
 * Issue Port Enable MPI request through admin queue and if the
 * async flag is not set wait for the completion of the port
 * enable or time out.
 *
 * Return: 0 on success, non-zero on failures.
 */
int mpi3mr_issue_port_enable(struct mpi3mr_ioc *mrioc, u8 async)
{
	struct mpi3_port_enable_request pe_req;
	int retval = 0;
	u32 pe_timeout = MPI3MR_PORTENABLE_TIMEOUT;

	memset(&pe_req, 0, sizeof(pe_req));
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "Issue PortEnable: Init command is in use\n");
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	/*
	 * Async: completion is handled by mpi3mr_port_enable_complete();
	 * sync: this thread blocks on init_cmds.done below.
	 */
	if (async) {
		mrioc->init_cmds.is_waiting = 0;
		mrioc->init_cmds.callback = mpi3mr_port_enable_complete;
	} else {
		mrioc->init_cmds.is_waiting = 1;
		mrioc->init_cmds.callback = NULL;
		init_completion(&mrioc->init_cmds.done);
	}
	pe_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	pe_req.function = MPI3_FUNCTION_PORT_ENABLE;

	retval = mpi3mr_admin_request_post(mrioc, &pe_req, sizeof(pe_req), 1);
	if (retval) {
		ioc_err(mrioc, "Issue PortEnable: Admin Post failed\n");
		goto out_unlock;
	}
	if (async) {
		/*
		 * Deliberately leave init_cmds.state PENDING here: the
		 * completion callback resets it to NOTUSED when the
		 * firmware responds.
		 */
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}

	wait_for_completion_timeout(&mrioc->init_cmds.done, (pe_timeout * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		ioc_err(mrioc, "port enable timed out\n");
		retval = -1;
		mpi3mr_check_rh_fault_ioc(mrioc, MPI3MR_RESET_FROM_PE_TIMEOUT);
		goto out_unlock;
	}
	/* Sync path: record the scan result via the same completion helper */
	mpi3mr_port_enable_complete(mrioc, &mrioc->init_cmds);

out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);
out:
	return retval;
}
3971 
/* Maps MPI3 IOCFacts protocol flag bits to human-readable names */
static const struct {
	u8 protocol;
	char *name;
} mpi3mr_protocols[] = {
	{ MPI3_IOCFACTS_PROTOCOL_SCSI_INITIATOR, "Initiator" },
	{ MPI3_IOCFACTS_PROTOCOL_SCSI_TARGET, "Target" },
	{ MPI3_IOCFACTS_PROTOCOL_NVME, "NVMe attachment" },
};

/* Maps MPI3 IOCFacts capability bits to human-readable names */
static const struct {
	u32 capability;
	char *name;
} mpi3mr_capabilities[] = {
	{ MPI3_IOCFACTS_CAPABILITY_RAID_SUPPORTED, "RAID" },
	{ MPI3_IOCFACTS_CAPABILITY_MULTIPATH_SUPPORTED, "MultiPath" },
};
3990 
3991 /**
3992  * mpi3mr_repost_diag_bufs - repost host diag buffers
3993  * @mrioc: Adapter instance reference
3994  *
3995  * repost firmware and trace diag buffers based on global
3996  * trigger flag from driver page 2
3997  *
3998  * Return: 0 on success, non-zero on failures.
3999  */
4000 static int mpi3mr_repost_diag_bufs(struct mpi3mr_ioc *mrioc)
4001 {
4002 	u64 global_trigger;
4003 	union mpi3mr_trigger_data prev_trigger_data;
4004 	struct diag_buffer_desc *trace_hdb = NULL;
4005 	struct diag_buffer_desc *fw_hdb = NULL;
4006 	int retval = 0;
4007 	bool trace_repost_needed = false;
4008 	bool fw_repost_needed = false;
4009 	u8 prev_trigger_type;
4010 
4011 	retval = mpi3mr_refresh_trigger(mrioc, MPI3_CONFIG_ACTION_READ_CURRENT);
4012 	if (retval)
4013 		return -1;
4014 
4015 	trace_hdb = mpi3mr_diag_buffer_for_type(mrioc,
4016 	    MPI3_DIAG_BUFFER_TYPE_TRACE);
4017 
4018 	if (trace_hdb &&
4019 	    trace_hdb->status != MPI3MR_HDB_BUFSTATUS_NOT_ALLOCATED &&
4020 	    trace_hdb->trigger_type != MPI3MR_HDB_TRIGGER_TYPE_GLOBAL &&
4021 	    trace_hdb->trigger_type != MPI3MR_HDB_TRIGGER_TYPE_ELEMENT)
4022 		trace_repost_needed = true;
4023 
4024 	fw_hdb = mpi3mr_diag_buffer_for_type(mrioc, MPI3_DIAG_BUFFER_TYPE_FW);
4025 
4026 	if (fw_hdb && fw_hdb->status != MPI3MR_HDB_BUFSTATUS_NOT_ALLOCATED &&
4027 	    fw_hdb->trigger_type != MPI3MR_HDB_TRIGGER_TYPE_GLOBAL &&
4028 	    fw_hdb->trigger_type != MPI3MR_HDB_TRIGGER_TYPE_ELEMENT)
4029 		fw_repost_needed = true;
4030 
4031 	if (trace_repost_needed || fw_repost_needed) {
4032 		global_trigger = le64_to_cpu(mrioc->driver_pg2->global_trigger);
4033 		if (global_trigger &
4034 		      MPI3_DRIVER2_GLOBALTRIGGER_POST_DIAG_TRACE_DISABLED)
4035 			trace_repost_needed = false;
4036 		if (global_trigger &
4037 		     MPI3_DRIVER2_GLOBALTRIGGER_POST_DIAG_FW_DISABLED)
4038 			fw_repost_needed = false;
4039 	}
4040 
4041 	if (trace_repost_needed) {
4042 		prev_trigger_type = trace_hdb->trigger_type;
4043 		memcpy(&prev_trigger_data, &trace_hdb->trigger_data,
4044 		    sizeof(trace_hdb->trigger_data));
4045 		retval = mpi3mr_issue_diag_buf_post(mrioc, trace_hdb);
4046 		if (!retval) {
4047 			dprint_init(mrioc, "trace diag buffer reposted");
4048 			mpi3mr_set_trigger_data_in_hdb(trace_hdb,
4049 				    MPI3MR_HDB_TRIGGER_TYPE_UNKNOWN, NULL, 1);
4050 		} else {
4051 			trace_hdb->trigger_type = prev_trigger_type;
4052 			memcpy(&trace_hdb->trigger_data, &prev_trigger_data,
4053 			    sizeof(prev_trigger_data));
4054 			ioc_err(mrioc, "trace diag buffer repost failed");
4055 			return -1;
4056 		}
4057 	}
4058 
4059 	if (fw_repost_needed) {
4060 		prev_trigger_type = fw_hdb->trigger_type;
4061 		memcpy(&prev_trigger_data, &fw_hdb->trigger_data,
4062 		    sizeof(fw_hdb->trigger_data));
4063 		retval = mpi3mr_issue_diag_buf_post(mrioc, fw_hdb);
4064 		if (!retval) {
4065 			dprint_init(mrioc, "firmware diag buffer reposted");
4066 			mpi3mr_set_trigger_data_in_hdb(fw_hdb,
4067 				    MPI3MR_HDB_TRIGGER_TYPE_UNKNOWN, NULL, 1);
4068 		} else {
4069 			fw_hdb->trigger_type = prev_trigger_type;
4070 			memcpy(&fw_hdb->trigger_data, &prev_trigger_data,
4071 			    sizeof(prev_trigger_data));
4072 			ioc_err(mrioc, "firmware diag buffer repost failed");
4073 			return -1;
4074 		}
4075 	}
4076 	return retval;
4077 }
4078 
4079 /**
4080  * mpi3mr_read_tsu_interval - Update time stamp interval
4081  * @mrioc: Adapter instance reference
4082  *
4083  * Update time stamp interval if its defined in driver page 1,
4084  * otherwise use default value.
4085  *
4086  * Return: Nothing
4087  */
4088 static void
4089 mpi3mr_read_tsu_interval(struct mpi3mr_ioc *mrioc)
4090 {
4091 	struct mpi3_driver_page1 driver_pg1;
4092 	u16 pg_sz = sizeof(driver_pg1);
4093 	int retval = 0;
4094 
4095 	mrioc->ts_update_interval = MPI3MR_TSUPDATE_INTERVAL;
4096 
4097 	retval = mpi3mr_cfg_get_driver_pg1(mrioc, &driver_pg1, pg_sz);
4098 	if (!retval && driver_pg1.time_stamp_update)
4099 		mrioc->ts_update_interval = (driver_pg1.time_stamp_update * 60);
4100 }
4101 
4102 /**
4103  * mpi3mr_print_ioc_info - Display controller information
4104  * @mrioc: Adapter instance reference
4105  *
4106  * Display controller personality, capability, supported
4107  * protocols etc.
4108  *
4109  * Return: Nothing
4110  */
4111 static void
4112 mpi3mr_print_ioc_info(struct mpi3mr_ioc *mrioc)
4113 {
4114 	int i = 0, bytes_written = 0;
4115 	const char *personality;
4116 	char protocol[50] = {0};
4117 	char capabilities[100] = {0};
4118 	struct mpi3mr_compimg_ver *fwver = &mrioc->facts.fw_ver;
4119 
4120 	switch (mrioc->facts.personality) {
4121 	case MPI3_IOCFACTS_FLAGS_PERSONALITY_EHBA:
4122 		personality = "Enhanced HBA";
4123 		break;
4124 	case MPI3_IOCFACTS_FLAGS_PERSONALITY_RAID_DDR:
4125 		personality = "RAID";
4126 		break;
4127 	default:
4128 		personality = "Unknown";
4129 		break;
4130 	}
4131 
4132 	ioc_info(mrioc, "Running in %s Personality", personality);
4133 
4134 	ioc_info(mrioc, "FW version(%d.%d.%d.%d.%d.%d)\n",
4135 	    fwver->gen_major, fwver->gen_minor, fwver->ph_major,
4136 	    fwver->ph_minor, fwver->cust_id, fwver->build_num);
4137 
4138 	for (i = 0; i < ARRAY_SIZE(mpi3mr_protocols); i++) {
4139 		if (mrioc->facts.protocol_flags &
4140 		    mpi3mr_protocols[i].protocol) {
4141 			bytes_written += scnprintf(protocol + bytes_written,
4142 				    sizeof(protocol) - bytes_written, "%s%s",
4143 				    bytes_written ? "," : "",
4144 				    mpi3mr_protocols[i].name);
4145 		}
4146 	}
4147 
4148 	bytes_written = 0;
4149 	for (i = 0; i < ARRAY_SIZE(mpi3mr_capabilities); i++) {
4150 		if (mrioc->facts.protocol_flags &
4151 		    mpi3mr_capabilities[i].capability) {
4152 			bytes_written += scnprintf(capabilities + bytes_written,
4153 				    sizeof(capabilities) - bytes_written, "%s%s",
4154 				    bytes_written ? "," : "",
4155 				    mpi3mr_capabilities[i].name);
4156 		}
4157 	}
4158 
4159 	ioc_info(mrioc, "Protocol=(%s), Capabilities=(%s)\n",
4160 		 protocol, capabilities);
4161 }
4162 
/**
 * mpi3mr_cleanup_resources - Free PCI resources
 * @mrioc: Adapter instance reference
 *
 * Tear down interrupts, unmap the controller register space,
 * release the PCI BAR regions and disable the PCI device.
 *
 * Return: Nothing
 */
void mpi3mr_cleanup_resources(struct mpi3mr_ioc *mrioc)
{
	struct pci_dev *pdev = mrioc->pdev;

	mpi3mr_cleanup_isr(mrioc);

	if (mrioc->sysif_regs) {
		iounmap((void __iomem *)mrioc->sysif_regs);
		mrioc->sysif_regs = NULL;
	}

	/* Only release regions / disable when the device is still enabled */
	if (pci_is_enabled(pdev)) {
		if (mrioc->bars)
			pci_release_selected_regions(pdev, mrioc->bars);
		pci_disable_device(pdev);
	}
}
4188 
/**
 * mpi3mr_setup_resources - Enable PCI resources
 * @mrioc: Adapter instance reference
 *
 * Enable PCI device memory, MSI-x registers and set DMA mask.
 *
 * Return: 0 on success and non-zero on failure.
 */
int mpi3mr_setup_resources(struct mpi3mr_ioc *mrioc)
{
	struct pci_dev *pdev = mrioc->pdev;
	u32 memap_sz = 0;
	int i, retval = 0, capb = 0;
	u16 message_control;
	/* Prefer any previously-negotiated mask; else 64-bit when dma_addr_t allows */
	u64 dma_mask = mrioc->dma_mask ? mrioc->dma_mask :
	    ((sizeof(dma_addr_t) > 4) ? DMA_BIT_MASK(64) : DMA_BIT_MASK(32));

	if (pci_enable_device_mem(pdev)) {
		ioc_err(mrioc, "pci_enable_device_mem: failed\n");
		retval = -ENODEV;
		goto out_failed;
	}

	/* Controller must support MSI-X; bail out otherwise */
	capb = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
	if (!capb) {
		ioc_err(mrioc, "Unable to find MSI-X Capabilities\n");
		retval = -ENODEV;
		goto out_failed;
	}
	mrioc->bars = pci_select_bars(pdev, IORESOURCE_MEM);

	if (pci_request_selected_regions(pdev, mrioc->bars,
	    mrioc->driver_name)) {
		ioc_err(mrioc, "pci_request_selected_regions: failed\n");
		retval = -ENODEV;
		goto out_failed;
	}

	/* Map the first memory BAR as the system interface register space */
	for (i = 0; (i < DEVICE_COUNT_RESOURCE); i++) {
		if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
			mrioc->sysif_regs_phys = pci_resource_start(pdev, i);
			memap_sz = pci_resource_len(pdev, i);
			mrioc->sysif_regs =
			    ioremap(mrioc->sysif_regs_phys, memap_sz);
			break;
		}
	}

	pci_set_master(pdev);

	/* Try the preferred DMA mask, falling back to 32-bit if rejected */
	retval = dma_set_mask_and_coherent(&pdev->dev, dma_mask);
	if (retval) {
		if (dma_mask != DMA_BIT_MASK(32)) {
			ioc_warn(mrioc, "Setting 64 bit DMA mask failed\n");
			dma_mask = DMA_BIT_MASK(32);
			retval = dma_set_mask_and_coherent(&pdev->dev,
			    dma_mask);
		}
		if (retval) {
			mrioc->dma_mask = 0;
			ioc_err(mrioc, "Setting 32 bit DMA mask also failed\n");
			goto out_failed;
		}
	}
	mrioc->dma_mask = dma_mask;

	if (!mrioc->sysif_regs) {
		ioc_err(mrioc,
		    "Unable to map adapter memory or resource not found\n");
		retval = -EINVAL;
		goto out_failed;
	}

	/*
	 * capb + 2 is the MSI-X Message Control register; its low 11 bits
	 * (mask 0x3FF covers the table-size field) encode table size - 1
	 * per the PCI spec, hence the + 1.
	 */
	pci_read_config_word(pdev, capb + 2, &message_control);
	mrioc->msix_count = (message_control & 0x3FF) + 1;

	pci_save_state(pdev);

	pci_set_drvdata(pdev, mrioc->shost);

	mpi3mr_ioc_disable_intr(mrioc);

	ioc_info(mrioc, "iomem(0x%016llx), mapped(0x%p), size(%d)\n",
	    (unsigned long long)mrioc->sysif_regs_phys,
	    mrioc->sysif_regs, memap_sz);
	ioc_info(mrioc, "Number of MSI-X vectors found in capabilities: (%d)\n",
	    mrioc->msix_count);

	/*
	 * Cap poll queues at msix_count - 2; presumably two vectors are
	 * reserved for non-poll use — TODO confirm against queue setup.
	 */
	if (!reset_devices && poll_queues > 0)
		mrioc->requested_poll_qcount = min_t(int, poll_queues,
				mrioc->msix_count - 2);
	return retval;

out_failed:
	mpi3mr_cleanup_resources(mrioc);
	return retval;
}
4286 
4287 /**
4288  * mpi3mr_enable_events - Enable required events
4289  * @mrioc: Adapter instance reference
4290  *
4291  * This routine unmasks the events required by the driver by
4292  * sennding appropriate event mask bitmapt through an event
4293  * notification request.
4294  *
4295  * Return: 0 on success and non-zero on failure.
4296  */
4297 static int mpi3mr_enable_events(struct mpi3mr_ioc *mrioc)
4298 {
4299 	int retval = 0;
4300 	u32  i;
4301 
4302 	for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
4303 		mrioc->event_masks[i] = -1;
4304 
4305 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_ADDED);
4306 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_INFO_CHANGED);
4307 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_STATUS_CHANGE);
4308 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE);
4309 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENCL_DEVICE_ADDED);
4310 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
4311 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_DISCOVERY);
4312 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR);
4313 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_BROADCAST_PRIMITIVE);
4314 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST);
4315 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_PCIE_ENUMERATION);
4316 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_PREPARE_FOR_RESET);
4317 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_CABLE_MGMT);
4318 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENERGY_PACK_CHANGE);
4319 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_DIAGNOSTIC_BUFFER_STATUS_CHANGE);
4320 
4321 	retval = mpi3mr_issue_event_notification(mrioc);
4322 	if (retval)
4323 		ioc_err(mrioc, "failed to issue event notification %d\n",
4324 		    retval);
4325 	return retval;
4326 }
4327 
/**
 * mpi3mr_init_ioc - Initialize the controller
 * @mrioc: Adapter instance reference
 *
 * This is the controller initialization routine, executed either
 * after soft reset or from the pci probe callback.
 * Setup the required resources, memory map the controller
 * registers, create admin and operational reply queue pairs,
 * allocate required memory for reply pool, sense buffer pool,
 * issue IOC init request to the firmware, unmask the events and
 * issue port enable to discover SAS/SATA/NVMe devices and RAID
 * volumes.
 *
 * Failures from recoverable steps are retried up to twice via
 * retry_init; unrecoverable failures fault the controller and
 * mark it unrecoverable.
 *
 * Return: 0 on success and non-zero on failure.
 */
int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc)
{
	int retval = 0;
	u8 retry = 0;
	struct mpi3_ioc_facts_data facts_data;
	u32 sz;

retry_init:
	/* Bring the controller to the ready state before anything else */
	retval = mpi3mr_bring_ioc_ready(mrioc);
	if (retval) {
		ioc_err(mrioc, "Failed to bring ioc ready: error %d\n",
		    retval);
		goto out_failed_noretry;
	}

	/* Single-vector ISR first; full MSI-X set comes after IOC init */
	retval = mpi3mr_setup_isr(mrioc, 1);
	if (retval) {
		ioc_err(mrioc, "Failed to setup ISR error %d\n",
		    retval);
		goto out_failed_noretry;
	}

	retval = mpi3mr_issue_iocfacts(mrioc, &facts_data);
	if (retval) {
		ioc_err(mrioc, "Failed to Issue IOC Facts %d\n",
		    retval);
		goto out_failed;
	}

	/* Derive host limits from the reported IOC facts */
	mrioc->max_host_ios = mrioc->facts.max_reqs - MPI3MR_INTERNAL_CMDS_RESVD;
	mrioc->shost->max_sectors = mrioc->facts.max_data_length / 512;
	mrioc->num_io_throttle_group = mrioc->facts.max_io_throttle_group;
	atomic_set(&mrioc->pend_large_data_sz, 0);

	/* Kdump kernels run with a reduced outstanding-IO budget */
	if (reset_devices)
		mrioc->max_host_ios = min_t(int, mrioc->max_host_ios,
		    MPI3MR_HOST_IOS_KDUMP);

	/* SAS transport layer is used only when multipath is not supported */
	if (!(mrioc->facts.ioc_capabilities &
	    MPI3_IOCFACTS_CAPABILITY_MULTIPATH_SUPPORTED)) {
		mrioc->sas_transport_enabled = 1;
		mrioc->scsi_device_channel = 1;
		mrioc->shost->max_channel = 1;
		mrioc->shost->transportt = mpi3mr_transport_template;
	}

	if (mrioc->facts.max_req_limit)
		mrioc->prevent_reply_qfull = true;

	if (mrioc->facts.ioc_capabilities &
		MPI3_IOCFACTS_CAPABILITY_SEG_DIAG_TRACE_SUPPORTED)
		mrioc->seg_tb_support = true;

	mrioc->reply_sz = mrioc->facts.reply_sz;

	retval = mpi3mr_check_reset_dma_mask(mrioc);
	if (retval) {
		ioc_err(mrioc, "Resetting dma mask failed %d\n",
		    retval);
		goto out_failed_noretry;
	}

	mpi3mr_read_tsu_interval(mrioc);
	mpi3mr_print_ioc_info(mrioc);

	dprint_init(mrioc, "allocating host diag buffers\n");
	mpi3mr_alloc_diag_bufs(mrioc);

	dprint_init(mrioc, "allocating ioctl dma buffers\n");
	mpi3mr_alloc_ioctl_dma_memory(mrioc);

	/* Diag buffer post failure is non-fatal; only warn */
	dprint_init(mrioc, "posting host diag buffers\n");
	retval = mpi3mr_post_diag_bufs(mrioc);

	if (retval)
		ioc_warn(mrioc, "failed to post host diag buffers\n");

	/* Allocate once; these survive across the retry loop */
	if (!mrioc->init_cmds.reply) {
		retval = mpi3mr_alloc_reply_sense_bufs(mrioc);
		if (retval) {
			ioc_err(mrioc,
			    "%s :Failed to allocated reply sense buffers %d\n",
			    __func__, retval);
			goto out_failed_noretry;
		}
	}

	if (!mrioc->chain_sgl_list) {
		retval = mpi3mr_alloc_chain_bufs(mrioc);
		if (retval) {
			ioc_err(mrioc, "Failed to allocated chain buffers %d\n",
			    retval);
			goto out_failed_noretry;
		}
	}

	retval = mpi3mr_issue_iocinit(mrioc);
	if (retval) {
		ioc_err(mrioc, "Failed to Issue IOC Init %d\n",
		    retval);
		goto out_failed;
	}

	retval = mpi3mr_print_pkg_ver(mrioc);
	if (retval) {
		ioc_err(mrioc, "failed to get package version\n");
		goto out_failed;
	}

	/* Switch from the single bootstrap vector to the full MSI-X set */
	retval = mpi3mr_setup_isr(mrioc, 0);
	if (retval) {
		ioc_err(mrioc, "Failed to re-setup ISR, error %d\n",
		    retval);
		goto out_failed_noretry;
	}

	retval = mpi3mr_create_op_queues(mrioc);
	if (retval) {
		ioc_err(mrioc, "Failed to create OpQueues error %d\n",
		    retval);
		goto out_failed;
	}

	/* Persistent event log sequence number buffer (DMA-visible) */
	if (!mrioc->pel_seqnum_virt) {
		dprint_init(mrioc, "allocating memory for pel_seqnum_virt\n");
		mrioc->pel_seqnum_sz = sizeof(struct mpi3_pel_seq);
		mrioc->pel_seqnum_virt = dma_alloc_coherent(&mrioc->pdev->dev,
		    mrioc->pel_seqnum_sz, &mrioc->pel_seqnum_dma,
		    GFP_KERNEL);
		if (!mrioc->pel_seqnum_virt) {
			retval = -ENOMEM;
			goto out_failed_noretry;
		}
	}

	if (!mrioc->throttle_groups && mrioc->num_io_throttle_group) {
		dprint_init(mrioc, "allocating memory for throttle groups\n");
		sz = sizeof(struct mpi3mr_throttle_group_info);
		mrioc->throttle_groups = kcalloc(mrioc->num_io_throttle_group, sz, GFP_KERNEL);
		if (!mrioc->throttle_groups) {
			retval = -1;
			goto out_failed_noretry;
		}
	}

	retval = mpi3mr_enable_events(mrioc);
	if (retval) {
		ioc_err(mrioc, "failed to enable events %d\n",
		    retval);
		goto out_failed;
	}

	retval = mpi3mr_refresh_trigger(mrioc, MPI3_CONFIG_ACTION_READ_CURRENT);
	if (retval) {
		ioc_err(mrioc, "failed to refresh triggers\n");
		goto out_failed;
	}

	ioc_info(mrioc, "controller initialization completed successfully\n");
	return retval;
out_failed:
	/* Recoverable failure: wipe driver buffers and retry from scratch */
	if (retry < 2) {
		retry++;
		ioc_warn(mrioc, "retrying controller initialization, retry_count:%d\n",
		    retry);
		mpi3mr_memset_buffers(mrioc);
		goto retry_init;
	}
	retval = -1;
out_failed_noretry:
	/* Terminal failure: fault the controller and give up on it */
	ioc_err(mrioc, "controller initialization failed\n");
	mpi3mr_issue_reset(mrioc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
	    MPI3MR_RESET_FROM_CTLR_CLEANUP);
	mrioc->unrecoverable = 1;
	return retval;
}
4519 
4520 /**
4521  * mpi3mr_reinit_ioc - Re-Initialize the controller
4522  * @mrioc: Adapter instance reference
4523  * @is_resume: Called from resume or reset path
4524  *
4525  * This the controller re-initialization routine, executed from
4526  * the soft reset handler or resume callback. Creates
4527  * operational reply queue pairs, allocate required memory for
4528  * reply pool, sense buffer pool, issue IOC init request to the
4529  * firmware, unmask the events and issue port enable to discover
4530  * SAS/SATA/NVMe devices and RAID volumes.
4531  *
4532  * Return: 0 on success and non-zero on failure.
4533  */
4534 int mpi3mr_reinit_ioc(struct mpi3mr_ioc *mrioc, u8 is_resume)
4535 {
4536 	int retval = 0;
4537 	u8 retry = 0;
4538 	struct mpi3_ioc_facts_data facts_data;
4539 	u32 pe_timeout, ioc_status;
4540 
4541 retry_init:
4542 	pe_timeout =
4543 	    (MPI3MR_PORTENABLE_TIMEOUT / MPI3MR_PORTENABLE_POLL_INTERVAL);
4544 
4545 	dprint_reset(mrioc, "bringing up the controller to ready state\n");
4546 	retval = mpi3mr_bring_ioc_ready(mrioc);
4547 	if (retval) {
4548 		ioc_err(mrioc, "failed to bring to ready state\n");
4549 		goto out_failed_noretry;
4550 	}
4551 
4552 	mrioc->io_admin_reset_sync = 0;
4553 	if (is_resume || mrioc->block_on_pci_err) {
4554 		dprint_reset(mrioc, "setting up single ISR\n");
4555 		retval = mpi3mr_setup_isr(mrioc, 1);
4556 		if (retval) {
4557 			ioc_err(mrioc, "failed to setup ISR\n");
4558 			goto out_failed_noretry;
4559 		}
4560 	} else
4561 		mpi3mr_ioc_enable_intr(mrioc);
4562 
4563 	dprint_reset(mrioc, "getting ioc_facts\n");
4564 	retval = mpi3mr_issue_iocfacts(mrioc, &facts_data);
4565 	if (retval) {
4566 		ioc_err(mrioc, "failed to get ioc_facts\n");
4567 		goto out_failed;
4568 	}
4569 
4570 	dprint_reset(mrioc, "validating ioc_facts\n");
4571 	retval = mpi3mr_revalidate_factsdata(mrioc);
4572 	if (retval) {
4573 		ioc_err(mrioc, "failed to revalidate ioc_facts data\n");
4574 		goto out_failed_noretry;
4575 	}
4576 
4577 	mpi3mr_read_tsu_interval(mrioc);
4578 	mpi3mr_print_ioc_info(mrioc);
4579 
4580 	if (is_resume) {
4581 		dprint_reset(mrioc, "posting host diag buffers\n");
4582 		retval = mpi3mr_post_diag_bufs(mrioc);
4583 		if (retval)
4584 			ioc_warn(mrioc, "failed to post host diag buffers\n");
4585 	} else {
4586 		retval = mpi3mr_repost_diag_bufs(mrioc);
4587 		if (retval)
4588 			ioc_warn(mrioc, "failed to re post host diag buffers\n");
4589 	}
4590 
4591 	dprint_reset(mrioc, "sending ioc_init\n");
4592 	retval = mpi3mr_issue_iocinit(mrioc);
4593 	if (retval) {
4594 		ioc_err(mrioc, "failed to send ioc_init\n");
4595 		goto out_failed;
4596 	}
4597 
4598 	dprint_reset(mrioc, "getting package version\n");
4599 	retval = mpi3mr_print_pkg_ver(mrioc);
4600 	if (retval) {
4601 		ioc_err(mrioc, "failed to get package version\n");
4602 		goto out_failed;
4603 	}
4604 
4605 	if (is_resume || mrioc->block_on_pci_err) {
4606 		dprint_reset(mrioc, "setting up multiple ISR\n");
4607 		retval = mpi3mr_setup_isr(mrioc, 0);
4608 		if (retval) {
4609 			ioc_err(mrioc, "failed to re-setup ISR\n");
4610 			goto out_failed_noretry;
4611 		}
4612 	}
4613 
4614 	dprint_reset(mrioc, "creating operational queue pairs\n");
4615 	retval = mpi3mr_create_op_queues(mrioc);
4616 	if (retval) {
4617 		ioc_err(mrioc, "failed to create operational queue pairs\n");
4618 		goto out_failed;
4619 	}
4620 
4621 	if (!mrioc->pel_seqnum_virt) {
4622 		dprint_reset(mrioc, "allocating memory for pel_seqnum_virt\n");
4623 		mrioc->pel_seqnum_sz = sizeof(struct mpi3_pel_seq);
4624 		mrioc->pel_seqnum_virt = dma_alloc_coherent(&mrioc->pdev->dev,
4625 		    mrioc->pel_seqnum_sz, &mrioc->pel_seqnum_dma,
4626 		    GFP_KERNEL);
4627 		if (!mrioc->pel_seqnum_virt) {
4628 			retval = -ENOMEM;
4629 			goto out_failed_noretry;
4630 		}
4631 	}
4632 
4633 	if (mrioc->shost->nr_hw_queues > mrioc->num_op_reply_q) {
4634 		ioc_err(mrioc,
4635 		    "cannot create minimum number of operational queues expected:%d created:%d\n",
4636 		    mrioc->shost->nr_hw_queues, mrioc->num_op_reply_q);
4637 		retval = -1;
4638 		goto out_failed_noretry;
4639 	}
4640 
4641 	dprint_reset(mrioc, "enabling events\n");
4642 	retval = mpi3mr_enable_events(mrioc);
4643 	if (retval) {
4644 		ioc_err(mrioc, "failed to enable events\n");
4645 		goto out_failed;
4646 	}
4647 
4648 	mrioc->device_refresh_on = 1;
4649 	mpi3mr_add_event_wait_for_device_refresh(mrioc);
4650 
4651 	ioc_info(mrioc, "sending port enable\n");
4652 	retval = mpi3mr_issue_port_enable(mrioc, 1);
4653 	if (retval) {
4654 		ioc_err(mrioc, "failed to issue port enable\n");
4655 		goto out_failed;
4656 	}
4657 	do {
4658 		ssleep(MPI3MR_PORTENABLE_POLL_INTERVAL);
4659 		if (mrioc->init_cmds.state == MPI3MR_CMD_NOTUSED)
4660 			break;
4661 		if (!pci_device_is_present(mrioc->pdev))
4662 			mrioc->unrecoverable = 1;
4663 		if (mrioc->unrecoverable) {
4664 			retval = -1;
4665 			goto out_failed_noretry;
4666 		}
4667 		ioc_status = readl(&mrioc->sysif_regs->ioc_status);
4668 		if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) ||
4669 		    (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) {
4670 			mpi3mr_print_fault_info(mrioc);
4671 			mrioc->init_cmds.is_waiting = 0;
4672 			mrioc->init_cmds.callback = NULL;
4673 			mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
4674 			goto out_failed;
4675 		}
4676 	} while (--pe_timeout);
4677 
4678 	if (!pe_timeout) {
4679 		ioc_err(mrioc, "port enable timed out\n");
4680 		mpi3mr_check_rh_fault_ioc(mrioc,
4681 		    MPI3MR_RESET_FROM_PE_TIMEOUT);
4682 		mrioc->init_cmds.is_waiting = 0;
4683 		mrioc->init_cmds.callback = NULL;
4684 		mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
4685 		goto out_failed;
4686 	} else if (mrioc->scan_failed) {
4687 		ioc_err(mrioc,
4688 		    "port enable failed with status=0x%04x\n",
4689 		    mrioc->scan_failed);
4690 	} else
4691 		ioc_info(mrioc, "port enable completed successfully\n");
4692 
4693 	ioc_info(mrioc, "controller %s completed successfully\n",
4694 	    (is_resume)?"resume":"re-initialization");
4695 	return retval;
4696 out_failed:
4697 	if (retry < 2) {
4698 		retry++;
4699 		ioc_warn(mrioc, "retrying controller %s, retry_count:%d\n",
4700 		    (is_resume)?"resume":"re-initialization", retry);
4701 		mpi3mr_memset_buffers(mrioc);
4702 		goto retry_init;
4703 	}
4704 	retval = -1;
4705 out_failed_noretry:
4706 	ioc_err(mrioc, "controller %s is failed\n",
4707 	    (is_resume)?"resume":"re-initialization");
4708 	mpi3mr_issue_reset(mrioc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
4709 	    MPI3MR_RESET_FROM_CTLR_CLEANUP);
4710 	mrioc->unrecoverable = 1;
4711 	return retval;
4712 }
4713 
4714 /**
4715  * mpi3mr_memset_op_reply_q_buffers - memset the operational reply queue's
4716  *					segments
4717  * @mrioc: Adapter instance reference
4718  * @qidx: Operational reply queue index
4719  *
4720  * Return: Nothing.
4721  */
4722 static void mpi3mr_memset_op_reply_q_buffers(struct mpi3mr_ioc *mrioc, u16 qidx)
4723 {
4724 	struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx;
4725 	struct segments *segments;
4726 	int i, size;
4727 
4728 	if (!op_reply_q->q_segments)
4729 		return;
4730 
4731 	size = op_reply_q->segment_qd * mrioc->op_reply_desc_sz;
4732 	segments = op_reply_q->q_segments;
4733 	for (i = 0; i < op_reply_q->num_segments; i++)
4734 		memset(segments[i].segment, 0, size);
4735 }
4736 
4737 /**
4738  * mpi3mr_memset_op_req_q_buffers - memset the operational request queue's
4739  *					segments
4740  * @mrioc: Adapter instance reference
4741  * @qidx: Operational request queue index
4742  *
4743  * Return: Nothing.
4744  */
4745 static void mpi3mr_memset_op_req_q_buffers(struct mpi3mr_ioc *mrioc, u16 qidx)
4746 {
4747 	struct op_req_qinfo *op_req_q = mrioc->req_qinfo + qidx;
4748 	struct segments *segments;
4749 	int i, size;
4750 
4751 	if (!op_req_q->q_segments)
4752 		return;
4753 
4754 	size = op_req_q->segment_qd * mrioc->facts.op_req_sz;
4755 	segments = op_req_q->q_segments;
4756 	for (i = 0; i < op_req_q->num_segments; i++)
4757 		memset(segments[i].segment, 0, size);
4758 }
4759 
4760 /**
4761  * mpi3mr_memset_buffers - memset memory for a controller
4762  * @mrioc: Adapter instance reference
4763  *
4764  * clear all the memory allocated for a controller, typically
4765  * called post reset to reuse the memory allocated during the
4766  * controller init.
4767  *
4768  * Return: Nothing.
4769  */
4770 void mpi3mr_memset_buffers(struct mpi3mr_ioc *mrioc)
4771 {
4772 	u16 i;
4773 	struct mpi3mr_throttle_group_info *tg;
4774 
4775 	mrioc->change_count = 0;
4776 	mrioc->active_poll_qcount = 0;
4777 	mrioc->default_qcount = 0;
4778 	if (mrioc->admin_req_base)
4779 		memset(mrioc->admin_req_base, 0, mrioc->admin_req_q_sz);
4780 	if (mrioc->admin_reply_base)
4781 		memset(mrioc->admin_reply_base, 0, mrioc->admin_reply_q_sz);
4782 	atomic_set(&mrioc->admin_reply_q_in_use, 0);
4783 	atomic_set(&mrioc->admin_pend_isr, 0);
4784 
4785 	if (mrioc->init_cmds.reply) {
4786 		memset(mrioc->init_cmds.reply, 0, sizeof(*mrioc->init_cmds.reply));
4787 		memset(mrioc->bsg_cmds.reply, 0,
4788 		    sizeof(*mrioc->bsg_cmds.reply));
4789 		memset(mrioc->host_tm_cmds.reply, 0,
4790 		    sizeof(*mrioc->host_tm_cmds.reply));
4791 		memset(mrioc->pel_cmds.reply, 0,
4792 		    sizeof(*mrioc->pel_cmds.reply));
4793 		memset(mrioc->pel_abort_cmd.reply, 0,
4794 		    sizeof(*mrioc->pel_abort_cmd.reply));
4795 		memset(mrioc->transport_cmds.reply, 0,
4796 		    sizeof(*mrioc->transport_cmds.reply));
4797 		for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++)
4798 			memset(mrioc->dev_rmhs_cmds[i].reply, 0,
4799 			    sizeof(*mrioc->dev_rmhs_cmds[i].reply));
4800 		for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++)
4801 			memset(mrioc->evtack_cmds[i].reply, 0,
4802 			    sizeof(*mrioc->evtack_cmds[i].reply));
4803 		bitmap_clear(mrioc->removepend_bitmap, 0,
4804 			     mrioc->dev_handle_bitmap_bits);
4805 		bitmap_clear(mrioc->devrem_bitmap, 0, MPI3MR_NUM_DEVRMCMD);
4806 		bitmap_clear(mrioc->evtack_cmds_bitmap, 0,
4807 			     MPI3MR_NUM_EVTACKCMD);
4808 	}
4809 
4810 	for (i = 0; i < mrioc->num_queues; i++) {
4811 		mrioc->op_reply_qinfo[i].qid = 0;
4812 		mrioc->op_reply_qinfo[i].ci = 0;
4813 		mrioc->op_reply_qinfo[i].num_replies = 0;
4814 		mrioc->op_reply_qinfo[i].ephase = 0;
4815 		atomic_set(&mrioc->op_reply_qinfo[i].pend_ios, 0);
4816 		atomic_set(&mrioc->op_reply_qinfo[i].in_use, 0);
4817 		mpi3mr_memset_op_reply_q_buffers(mrioc, i);
4818 
4819 		mrioc->req_qinfo[i].ci = 0;
4820 		mrioc->req_qinfo[i].pi = 0;
4821 		mrioc->req_qinfo[i].num_requests = 0;
4822 		mrioc->req_qinfo[i].qid = 0;
4823 		mrioc->req_qinfo[i].reply_qid = 0;
4824 		spin_lock_init(&mrioc->req_qinfo[i].q_lock);
4825 		mpi3mr_memset_op_req_q_buffers(mrioc, i);
4826 	}
4827 
4828 	atomic_set(&mrioc->pend_large_data_sz, 0);
4829 	if (mrioc->throttle_groups) {
4830 		tg = mrioc->throttle_groups;
4831 		for (i = 0; i < mrioc->num_io_throttle_group; i++, tg++) {
4832 			tg->id = 0;
4833 			tg->fw_qd = 0;
4834 			tg->modified_qd = 0;
4835 			tg->io_divert = 0;
4836 			tg->need_qd_reduction = 0;
4837 			tg->high = 0;
4838 			tg->low = 0;
4839 			tg->qd_reduction = 0;
4840 			atomic_set(&tg->pend_large_data_sz, 0);
4841 		}
4842 	}
4843 }
4844 
4845 /**
4846  * mpi3mr_free_mem - Free memory allocated for a controller
4847  * @mrioc: Adapter instance reference
4848  *
4849  * Free all the memory allocated for a controller.
4850  *
4851  * Return: Nothing.
4852  */
4853 void mpi3mr_free_mem(struct mpi3mr_ioc *mrioc)
4854 {
4855 	u16 i, j;
4856 	struct mpi3mr_intr_info *intr_info;
4857 	struct diag_buffer_desc *diag_buffer;
4858 
4859 	mpi3mr_free_enclosure_list(mrioc);
4860 	mpi3mr_free_ioctl_dma_memory(mrioc);
4861 
4862 	if (mrioc->sense_buf_pool) {
4863 		if (mrioc->sense_buf)
4864 			dma_pool_free(mrioc->sense_buf_pool, mrioc->sense_buf,
4865 			    mrioc->sense_buf_dma);
4866 		dma_pool_destroy(mrioc->sense_buf_pool);
4867 		mrioc->sense_buf = NULL;
4868 		mrioc->sense_buf_pool = NULL;
4869 	}
4870 	if (mrioc->sense_buf_q_pool) {
4871 		if (mrioc->sense_buf_q)
4872 			dma_pool_free(mrioc->sense_buf_q_pool,
4873 			    mrioc->sense_buf_q, mrioc->sense_buf_q_dma);
4874 		dma_pool_destroy(mrioc->sense_buf_q_pool);
4875 		mrioc->sense_buf_q = NULL;
4876 		mrioc->sense_buf_q_pool = NULL;
4877 	}
4878 
4879 	if (mrioc->reply_buf_pool) {
4880 		if (mrioc->reply_buf)
4881 			dma_pool_free(mrioc->reply_buf_pool, mrioc->reply_buf,
4882 			    mrioc->reply_buf_dma);
4883 		dma_pool_destroy(mrioc->reply_buf_pool);
4884 		mrioc->reply_buf = NULL;
4885 		mrioc->reply_buf_pool = NULL;
4886 	}
4887 	if (mrioc->reply_free_q_pool) {
4888 		if (mrioc->reply_free_q)
4889 			dma_pool_free(mrioc->reply_free_q_pool,
4890 			    mrioc->reply_free_q, mrioc->reply_free_q_dma);
4891 		dma_pool_destroy(mrioc->reply_free_q_pool);
4892 		mrioc->reply_free_q = NULL;
4893 		mrioc->reply_free_q_pool = NULL;
4894 	}
4895 
4896 	for (i = 0; i < mrioc->num_op_req_q; i++)
4897 		mpi3mr_free_op_req_q_segments(mrioc, i);
4898 
4899 	for (i = 0; i < mrioc->num_op_reply_q; i++)
4900 		mpi3mr_free_op_reply_q_segments(mrioc, i);
4901 
4902 	for (i = 0; i < mrioc->intr_info_count; i++) {
4903 		intr_info = mrioc->intr_info + i;
4904 		intr_info->op_reply_q = NULL;
4905 	}
4906 
4907 	kfree(mrioc->req_qinfo);
4908 	mrioc->req_qinfo = NULL;
4909 	mrioc->num_op_req_q = 0;
4910 
4911 	kfree(mrioc->op_reply_qinfo);
4912 	mrioc->op_reply_qinfo = NULL;
4913 	mrioc->num_op_reply_q = 0;
4914 
4915 	kfree(mrioc->init_cmds.reply);
4916 	mrioc->init_cmds.reply = NULL;
4917 
4918 	kfree(mrioc->bsg_cmds.reply);
4919 	mrioc->bsg_cmds.reply = NULL;
4920 
4921 	kfree(mrioc->host_tm_cmds.reply);
4922 	mrioc->host_tm_cmds.reply = NULL;
4923 
4924 	kfree(mrioc->pel_cmds.reply);
4925 	mrioc->pel_cmds.reply = NULL;
4926 
4927 	kfree(mrioc->pel_abort_cmd.reply);
4928 	mrioc->pel_abort_cmd.reply = NULL;
4929 
4930 	for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) {
4931 		kfree(mrioc->evtack_cmds[i].reply);
4932 		mrioc->evtack_cmds[i].reply = NULL;
4933 	}
4934 
4935 	bitmap_free(mrioc->removepend_bitmap);
4936 	mrioc->removepend_bitmap = NULL;
4937 
4938 	bitmap_free(mrioc->devrem_bitmap);
4939 	mrioc->devrem_bitmap = NULL;
4940 
4941 	bitmap_free(mrioc->evtack_cmds_bitmap);
4942 	mrioc->evtack_cmds_bitmap = NULL;
4943 
4944 	bitmap_free(mrioc->chain_bitmap);
4945 	mrioc->chain_bitmap = NULL;
4946 
4947 	kfree(mrioc->transport_cmds.reply);
4948 	mrioc->transport_cmds.reply = NULL;
4949 
4950 	for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
4951 		kfree(mrioc->dev_rmhs_cmds[i].reply);
4952 		mrioc->dev_rmhs_cmds[i].reply = NULL;
4953 	}
4954 
4955 	if (mrioc->chain_buf_pool) {
4956 		for (i = 0; i < mrioc->chain_buf_count; i++) {
4957 			if (mrioc->chain_sgl_list[i].addr) {
4958 				dma_pool_free(mrioc->chain_buf_pool,
4959 				    mrioc->chain_sgl_list[i].addr,
4960 				    mrioc->chain_sgl_list[i].dma_addr);
4961 				mrioc->chain_sgl_list[i].addr = NULL;
4962 			}
4963 		}
4964 		dma_pool_destroy(mrioc->chain_buf_pool);
4965 		mrioc->chain_buf_pool = NULL;
4966 	}
4967 
4968 	kfree(mrioc->chain_sgl_list);
4969 	mrioc->chain_sgl_list = NULL;
4970 
4971 	if (mrioc->admin_reply_base) {
4972 		dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_reply_q_sz,
4973 		    mrioc->admin_reply_base, mrioc->admin_reply_dma);
4974 		mrioc->admin_reply_base = NULL;
4975 	}
4976 	if (mrioc->admin_req_base) {
4977 		dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_req_q_sz,
4978 		    mrioc->admin_req_base, mrioc->admin_req_dma);
4979 		mrioc->admin_req_base = NULL;
4980 	}
4981 
4982 	if (mrioc->pel_seqnum_virt) {
4983 		dma_free_coherent(&mrioc->pdev->dev, mrioc->pel_seqnum_sz,
4984 		    mrioc->pel_seqnum_virt, mrioc->pel_seqnum_dma);
4985 		mrioc->pel_seqnum_virt = NULL;
4986 	}
4987 
4988 	for (i = 0; i < MPI3MR_MAX_NUM_HDB; i++) {
4989 		diag_buffer = &mrioc->diag_buffers[i];
4990 		if ((i == 0) && mrioc->seg_tb_support) {
4991 			if (mrioc->trace_buf_pool) {
4992 				for (j = 0; j < mrioc->num_tb_segs; j++) {
4993 					if (mrioc->trace_buf[j].segment) {
4994 						dma_pool_free(mrioc->trace_buf_pool,
4995 						    mrioc->trace_buf[j].segment,
4996 						    mrioc->trace_buf[j].segment_dma);
4997 						mrioc->trace_buf[j].segment = NULL;
4998 					}
4999 
5000 					mrioc->trace_buf[j].segment = NULL;
5001 				}
5002 				dma_pool_destroy(mrioc->trace_buf_pool);
5003 				mrioc->trace_buf_pool = NULL;
5004 			}
5005 
5006 			kfree(mrioc->trace_buf);
5007 			mrioc->trace_buf = NULL;
5008 			diag_buffer->size = sizeof(u64) * mrioc->num_tb_segs;
5009 		}
5010 		if (diag_buffer->addr) {
5011 			dma_free_coherent(&mrioc->pdev->dev,
5012 			    diag_buffer->size, diag_buffer->addr,
5013 			    diag_buffer->dma_addr);
5014 			diag_buffer->addr = NULL;
5015 			diag_buffer->size = 0;
5016 			diag_buffer->type = 0;
5017 			diag_buffer->status = 0;
5018 		}
5019 	}
5020 
5021 	kfree(mrioc->throttle_groups);
5022 	mrioc->throttle_groups = NULL;
5023 
5024 	kfree(mrioc->logdata_buf);
5025 	mrioc->logdata_buf = NULL;
5026 
5027 }
5028 
5029 /**
5030  * mpi3mr_issue_ioc_shutdown - shutdown controller
5031  * @mrioc: Adapter instance reference
5032  *
5033  * Send shutodwn notification to the controller and wait for the
5034  * shutdown_timeout for it to be completed.
5035  *
5036  * Return: Nothing.
5037  */
5038 static void mpi3mr_issue_ioc_shutdown(struct mpi3mr_ioc *mrioc)
5039 {
5040 	u32 ioc_config, ioc_status;
5041 	u8 retval = 1;
5042 	u32 timeout = MPI3MR_DEFAULT_SHUTDOWN_TIME * 10;
5043 
5044 	ioc_info(mrioc, "Issuing shutdown Notification\n");
5045 	if (mrioc->unrecoverable) {
5046 		ioc_warn(mrioc,
5047 		    "IOC is unrecoverable shutdown is not issued\n");
5048 		return;
5049 	}
5050 	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
5051 	if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
5052 	    == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_IN_PROGRESS) {
5053 		ioc_info(mrioc, "shutdown already in progress\n");
5054 		return;
5055 	}
5056 
5057 	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
5058 	ioc_config |= MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_NORMAL;
5059 	ioc_config |= MPI3_SYSIF_IOC_CONFIG_DEVICE_SHUTDOWN_SEND_REQ;
5060 
5061 	writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);
5062 
5063 	if (mrioc->facts.shutdown_timeout)
5064 		timeout = mrioc->facts.shutdown_timeout * 10;
5065 
5066 	do {
5067 		ioc_status = readl(&mrioc->sysif_regs->ioc_status);
5068 		if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
5069 		    == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_COMPLETE) {
5070 			retval = 0;
5071 			break;
5072 		}
5073 		msleep(100);
5074 	} while (--timeout);
5075 
5076 	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
5077 	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
5078 
5079 	if (retval) {
5080 		if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
5081 		    == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_IN_PROGRESS)
5082 			ioc_warn(mrioc,
5083 			    "shutdown still in progress after timeout\n");
5084 	}
5085 
5086 	ioc_info(mrioc,
5087 	    "Base IOC Sts/Config after %s shutdown is (0x%08x)/(0x%08x)\n",
5088 	    (!retval) ? "successful" : "failed", ioc_status,
5089 	    ioc_config);
5090 }
5091 
5092 /**
5093  * mpi3mr_cleanup_ioc - Cleanup controller
5094  * @mrioc: Adapter instance reference
5095  *
5096  * controller cleanup handler, Message unit reset or soft reset
5097  * and shutdown notification is issued to the controller.
5098  *
5099  * Return: Nothing.
5100  */
5101 void mpi3mr_cleanup_ioc(struct mpi3mr_ioc *mrioc)
5102 {
5103 	enum mpi3mr_iocstate ioc_state;
5104 
5105 	dprint_exit(mrioc, "cleaning up the controller\n");
5106 	mpi3mr_ioc_disable_intr(mrioc);
5107 
5108 	ioc_state = mpi3mr_get_iocstate(mrioc);
5109 
5110 	if (!mrioc->unrecoverable && !mrioc->reset_in_progress &&
5111 	    !mrioc->pci_err_recovery &&
5112 	    (ioc_state == MRIOC_STATE_READY)) {
5113 		if (mpi3mr_issue_and_process_mur(mrioc,
5114 		    MPI3MR_RESET_FROM_CTLR_CLEANUP))
5115 			mpi3mr_issue_reset(mrioc,
5116 			    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET,
5117 			    MPI3MR_RESET_FROM_MUR_FAILURE);
5118 		mpi3mr_issue_ioc_shutdown(mrioc);
5119 	}
5120 	dprint_exit(mrioc, "controller cleanup completed\n");
5121 }
5122 
5123 /**
5124  * mpi3mr_drv_cmd_comp_reset - Flush a internal driver command
5125  * @mrioc: Adapter instance reference
5126  * @cmdptr: Internal command tracker
5127  *
5128  * Complete an internal driver commands with state indicating it
5129  * is completed due to reset.
5130  *
5131  * Return: Nothing.
5132  */
5133 static inline void mpi3mr_drv_cmd_comp_reset(struct mpi3mr_ioc *mrioc,
5134 	struct mpi3mr_drv_cmd *cmdptr)
5135 {
5136 	if (cmdptr->state & MPI3MR_CMD_PENDING) {
5137 		cmdptr->state |= MPI3MR_CMD_RESET;
5138 		cmdptr->state &= ~MPI3MR_CMD_PENDING;
5139 		if (cmdptr->is_waiting) {
5140 			complete(&cmdptr->done);
5141 			cmdptr->is_waiting = 0;
5142 		} else if (cmdptr->callback)
5143 			cmdptr->callback(mrioc, cmdptr);
5144 	}
5145 }
5146 
5147 /**
5148  * mpi3mr_flush_drv_cmds - Flush internaldriver commands
5149  * @mrioc: Adapter instance reference
5150  *
5151  * Flush all internal driver commands post reset
5152  *
5153  * Return: Nothing.
5154  */
5155 void mpi3mr_flush_drv_cmds(struct mpi3mr_ioc *mrioc)
5156 {
5157 	struct mpi3mr_drv_cmd *cmdptr;
5158 	u8 i;
5159 
5160 	cmdptr = &mrioc->init_cmds;
5161 	mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
5162 
5163 	cmdptr = &mrioc->cfg_cmds;
5164 	mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
5165 
5166 	cmdptr = &mrioc->bsg_cmds;
5167 	mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
5168 	cmdptr = &mrioc->host_tm_cmds;
5169 	mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
5170 
5171 	for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
5172 		cmdptr = &mrioc->dev_rmhs_cmds[i];
5173 		mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
5174 	}
5175 
5176 	for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) {
5177 		cmdptr = &mrioc->evtack_cmds[i];
5178 		mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
5179 	}
5180 
5181 	cmdptr = &mrioc->pel_cmds;
5182 	mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
5183 
5184 	cmdptr = &mrioc->pel_abort_cmd;
5185 	mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
5186 
5187 	cmdptr = &mrioc->transport_cmds;
5188 	mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
5189 }
5190 
5191 /**
5192  * mpi3mr_pel_wait_post - Issue PEL Wait
5193  * @mrioc: Adapter instance reference
5194  * @drv_cmd: Internal command tracker
5195  *
5196  * Issue PEL Wait MPI request through admin queue and return.
5197  *
5198  * Return: Nothing.
5199  */
5200 static void mpi3mr_pel_wait_post(struct mpi3mr_ioc *mrioc,
5201 	struct mpi3mr_drv_cmd *drv_cmd)
5202 {
5203 	struct mpi3_pel_req_action_wait pel_wait;
5204 
5205 	mrioc->pel_abort_requested = false;
5206 
5207 	memset(&pel_wait, 0, sizeof(pel_wait));
5208 	drv_cmd->state = MPI3MR_CMD_PENDING;
5209 	drv_cmd->is_waiting = 0;
5210 	drv_cmd->callback = mpi3mr_pel_wait_complete;
5211 	drv_cmd->ioc_status = 0;
5212 	drv_cmd->ioc_loginfo = 0;
5213 	pel_wait.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_PEL_WAIT);
5214 	pel_wait.function = MPI3_FUNCTION_PERSISTENT_EVENT_LOG;
5215 	pel_wait.action = MPI3_PEL_ACTION_WAIT;
5216 	pel_wait.starting_sequence_number = cpu_to_le32(mrioc->pel_newest_seqnum);
5217 	pel_wait.locale = cpu_to_le16(mrioc->pel_locale);
5218 	pel_wait.class = cpu_to_le16(mrioc->pel_class);
5219 	pel_wait.wait_time = MPI3_PEL_WAITTIME_INFINITE_WAIT;
5220 	dprint_bsg_info(mrioc, "sending pel_wait seqnum(%d), class(%d), locale(0x%08x)\n",
5221 	    mrioc->pel_newest_seqnum, mrioc->pel_class, mrioc->pel_locale);
5222 
5223 	if (mpi3mr_admin_request_post(mrioc, &pel_wait, sizeof(pel_wait), 0)) {
5224 		dprint_bsg_err(mrioc,
5225 			    "Issuing PELWait: Admin post failed\n");
5226 		drv_cmd->state = MPI3MR_CMD_NOTUSED;
5227 		drv_cmd->callback = NULL;
5228 		drv_cmd->retry_count = 0;
5229 		mrioc->pel_enabled = false;
5230 	}
5231 }
5232 
5233 /**
5234  * mpi3mr_pel_get_seqnum_post - Issue PEL Get Sequence number
5235  * @mrioc: Adapter instance reference
5236  * @drv_cmd: Internal command tracker
5237  *
5238  * Issue PEL get sequence number MPI request through admin queue
5239  * and return.
5240  *
5241  * Return: 0 on success, non-zero on failure.
5242  */
5243 int mpi3mr_pel_get_seqnum_post(struct mpi3mr_ioc *mrioc,
5244 	struct mpi3mr_drv_cmd *drv_cmd)
5245 {
5246 	struct mpi3_pel_req_action_get_sequence_numbers pel_getseq_req;
5247 	u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
5248 	int retval = 0;
5249 
5250 	memset(&pel_getseq_req, 0, sizeof(pel_getseq_req));
5251 	mrioc->pel_cmds.state = MPI3MR_CMD_PENDING;
5252 	mrioc->pel_cmds.is_waiting = 0;
5253 	mrioc->pel_cmds.ioc_status = 0;
5254 	mrioc->pel_cmds.ioc_loginfo = 0;
5255 	mrioc->pel_cmds.callback = mpi3mr_pel_get_seqnum_complete;
5256 	pel_getseq_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_PEL_WAIT);
5257 	pel_getseq_req.function = MPI3_FUNCTION_PERSISTENT_EVENT_LOG;
5258 	pel_getseq_req.action = MPI3_PEL_ACTION_GET_SEQNUM;
5259 	mpi3mr_add_sg_single(&pel_getseq_req.sgl, sgl_flags,
5260 	    mrioc->pel_seqnum_sz, mrioc->pel_seqnum_dma);
5261 
5262 	retval = mpi3mr_admin_request_post(mrioc, &pel_getseq_req,
5263 			sizeof(pel_getseq_req), 0);
5264 	if (retval) {
5265 		if (drv_cmd) {
5266 			drv_cmd->state = MPI3MR_CMD_NOTUSED;
5267 			drv_cmd->callback = NULL;
5268 			drv_cmd->retry_count = 0;
5269 		}
5270 		mrioc->pel_enabled = false;
5271 	}
5272 
5273 	return retval;
5274 }
5275 
5276 /**
5277  * mpi3mr_pel_wait_complete - PELWait Completion callback
5278  * @mrioc: Adapter instance reference
5279  * @drv_cmd: Internal command tracker
5280  *
5281  * This is a callback handler for the PELWait request and
5282  * firmware completes a PELWait request when it is aborted or a
5283  * new PEL entry is available. This sends AEN to the application
5284  * and if the PELwait completion is not due to PELAbort then
5285  * this will send a request for new PEL Sequence number
5286  *
5287  * Return: Nothing.
5288  */
5289 static void mpi3mr_pel_wait_complete(struct mpi3mr_ioc *mrioc,
5290 	struct mpi3mr_drv_cmd *drv_cmd)
5291 {
5292 	struct mpi3_pel_reply *pel_reply = NULL;
5293 	u16 ioc_status, pe_log_status;
5294 	bool do_retry = false;
5295 
5296 	if (drv_cmd->state & MPI3MR_CMD_RESET)
5297 		goto cleanup_drv_cmd;
5298 
5299 	ioc_status = drv_cmd->ioc_status & MPI3_IOCSTATUS_STATUS_MASK;
5300 	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
5301 		ioc_err(mrioc, "%s: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
5302 			__func__, ioc_status, drv_cmd->ioc_loginfo);
5303 		dprint_bsg_err(mrioc,
5304 		    "pel_wait: failed with ioc_status(0x%04x), log_info(0x%08x)\n",
5305 		    ioc_status, drv_cmd->ioc_loginfo);
5306 		do_retry = true;
5307 	}
5308 
5309 	if (drv_cmd->state & MPI3MR_CMD_REPLY_VALID)
5310 		pel_reply = (struct mpi3_pel_reply *)drv_cmd->reply;
5311 
5312 	if (!pel_reply) {
5313 		dprint_bsg_err(mrioc,
5314 		    "pel_wait: failed due to no reply\n");
5315 		goto out_failed;
5316 	}
5317 
5318 	pe_log_status = le16_to_cpu(pel_reply->pe_log_status);
5319 	if ((pe_log_status != MPI3_PEL_STATUS_SUCCESS) &&
5320 	    (pe_log_status != MPI3_PEL_STATUS_ABORTED)) {
5321 		ioc_err(mrioc, "%s: Failed pe_log_status(0x%04x)\n",
5322 			__func__, pe_log_status);
5323 		dprint_bsg_err(mrioc,
5324 		    "pel_wait: failed due to pel_log_status(0x%04x)\n",
5325 		    pe_log_status);
5326 		do_retry = true;
5327 	}
5328 
5329 	if (do_retry) {
5330 		if (drv_cmd->retry_count < MPI3MR_PEL_RETRY_COUNT) {
5331 			drv_cmd->retry_count++;
5332 			dprint_bsg_err(mrioc, "pel_wait: retrying(%d)\n",
5333 			    drv_cmd->retry_count);
5334 			mpi3mr_pel_wait_post(mrioc, drv_cmd);
5335 			return;
5336 		}
5337 		dprint_bsg_err(mrioc,
5338 		    "pel_wait: failed after all retries(%d)\n",
5339 		    drv_cmd->retry_count);
5340 		goto out_failed;
5341 	}
5342 	atomic64_inc(&event_counter);
5343 	if (!mrioc->pel_abort_requested) {
5344 		mrioc->pel_cmds.retry_count = 0;
5345 		mpi3mr_pel_get_seqnum_post(mrioc, &mrioc->pel_cmds);
5346 	}
5347 
5348 	return;
5349 out_failed:
5350 	mrioc->pel_enabled = false;
5351 cleanup_drv_cmd:
5352 	drv_cmd->state = MPI3MR_CMD_NOTUSED;
5353 	drv_cmd->callback = NULL;
5354 	drv_cmd->retry_count = 0;
5355 }
5356 
5357 /**
5358  * mpi3mr_pel_get_seqnum_complete - PELGetSeqNum Completion callback
5359  * @mrioc: Adapter instance reference
5360  * @drv_cmd: Internal command tracker
5361  *
5362  * This is a callback handler for the PEL get sequence number
5363  * request and a new PEL wait request will be issued to the
5364  * firmware from this
5365  *
5366  * Return: Nothing.
5367  */
5368 void mpi3mr_pel_get_seqnum_complete(struct mpi3mr_ioc *mrioc,
5369 	struct mpi3mr_drv_cmd *drv_cmd)
5370 {
5371 	struct mpi3_pel_reply *pel_reply = NULL;
5372 	struct mpi3_pel_seq *pel_seqnum_virt;
5373 	u16 ioc_status;
5374 	bool do_retry = false;
5375 
5376 	pel_seqnum_virt = (struct mpi3_pel_seq *)mrioc->pel_seqnum_virt;
5377 
5378 	if (drv_cmd->state & MPI3MR_CMD_RESET)
5379 		goto cleanup_drv_cmd;
5380 
5381 	ioc_status = drv_cmd->ioc_status & MPI3_IOCSTATUS_STATUS_MASK;
5382 	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
5383 		dprint_bsg_err(mrioc,
5384 		    "pel_get_seqnum: failed with ioc_status(0x%04x), log_info(0x%08x)\n",
5385 		    ioc_status, drv_cmd->ioc_loginfo);
5386 		do_retry = true;
5387 	}
5388 
5389 	if (drv_cmd->state & MPI3MR_CMD_REPLY_VALID)
5390 		pel_reply = (struct mpi3_pel_reply *)drv_cmd->reply;
5391 	if (!pel_reply) {
5392 		dprint_bsg_err(mrioc,
5393 		    "pel_get_seqnum: failed due to no reply\n");
5394 		goto out_failed;
5395 	}
5396 
5397 	if (le16_to_cpu(pel_reply->pe_log_status) != MPI3_PEL_STATUS_SUCCESS) {
5398 		dprint_bsg_err(mrioc,
5399 		    "pel_get_seqnum: failed due to pel_log_status(0x%04x)\n",
5400 		    le16_to_cpu(pel_reply->pe_log_status));
5401 		do_retry = true;
5402 	}
5403 
5404 	if (do_retry) {
5405 		if (drv_cmd->retry_count < MPI3MR_PEL_RETRY_COUNT) {
5406 			drv_cmd->retry_count++;
5407 			dprint_bsg_err(mrioc,
5408 			    "pel_get_seqnum: retrying(%d)\n",
5409 			    drv_cmd->retry_count);
5410 			mpi3mr_pel_get_seqnum_post(mrioc, drv_cmd);
5411 			return;
5412 		}
5413 
5414 		dprint_bsg_err(mrioc,
5415 		    "pel_get_seqnum: failed after all retries(%d)\n",
5416 		    drv_cmd->retry_count);
5417 		goto out_failed;
5418 	}
5419 	mrioc->pel_newest_seqnum = le32_to_cpu(pel_seqnum_virt->newest) + 1;
5420 	drv_cmd->retry_count = 0;
5421 	mpi3mr_pel_wait_post(mrioc, drv_cmd);
5422 
5423 	return;
5424 out_failed:
5425 	mrioc->pel_enabled = false;
5426 cleanup_drv_cmd:
5427 	drv_cmd->state = MPI3MR_CMD_NOTUSED;
5428 	drv_cmd->callback = NULL;
5429 	drv_cmd->retry_count = 0;
5430 }
5431 
5432 /**
5433  * mpi3mr_check_op_admin_proc -
5434  * @mrioc: Adapter instance reference
5435  *
5436  * Check if any of the operation reply queues
5437  * or the admin reply queue are currently in use.
5438  * If any queue is in use, this function waits for
5439  * a maximum of 10 seconds for them to become available.
5440  *
5441  * Return: 0 on success, non-zero on failure.
5442  */
5443 static int mpi3mr_check_op_admin_proc(struct mpi3mr_ioc *mrioc)
5444 {
5445 
5446 	u16 timeout = 10 * 10;
5447 	u16 elapsed_time = 0;
5448 	bool op_admin_in_use = false;
5449 
5450 	do {
5451 		op_admin_in_use = false;
5452 
5453 		/* Check admin_reply queue first to exit early */
5454 		if (atomic_read(&mrioc->admin_reply_q_in_use) == 1)
5455 			op_admin_in_use = true;
5456 		else {
5457 			/* Check op_reply queues */
5458 			int i;
5459 
5460 			for (i = 0; i < mrioc->num_queues; i++) {
5461 				if (atomic_read(&mrioc->op_reply_qinfo[i].in_use) == 1) {
5462 					op_admin_in_use = true;
5463 					break;
5464 				}
5465 			}
5466 		}
5467 
5468 		if (!op_admin_in_use)
5469 			break;
5470 
5471 		msleep(100);
5472 
5473 	} while (++elapsed_time < timeout);
5474 
5475 	if (op_admin_in_use)
5476 		return 1;
5477 
5478 	return 0;
5479 }
5480 
5481 /**
5482  * mpi3mr_soft_reset_handler - Reset the controller
5483  * @mrioc: Adapter instance reference
5484  * @reset_reason: Reset reason code
5485  * @snapdump: Flag to generate snapdump in firmware or not
5486  *
5487  * This is an handler for recovering controller by issuing soft
5488  * reset are diag fault reset.  This is a blocking function and
5489  * when one reset is executed if any other resets they will be
5490  * blocked. All BSG requests will be blocked during the reset. If
5491  * controller reset is successful then the controller will be
5492  * reinitalized, otherwise the controller will be marked as not
5493  * recoverable
5494  *
5495  * In snapdump bit is set, the controller is issued with diag
5496  * fault reset so that the firmware can create a snap dump and
5497  * post that the firmware will result in F000 fault and the
5498  * driver will issue soft reset to recover from that.
5499  *
5500  * Return: 0 on success, non-zero on failure.
5501  */
5502 int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc,
5503 	u16 reset_reason, u8 snapdump)
5504 {
5505 	int retval = 0, i;
5506 	unsigned long flags;
5507 	enum mpi3mr_iocstate ioc_state;
5508 	u32 host_diagnostic, timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10;
5509 	union mpi3mr_trigger_data trigger_data;
5510 
5511 	/* Block the reset handler until diag save in progress*/
5512 	dprint_reset(mrioc,
5513 	    "soft_reset_handler: check and block on diagsave_timeout(%d)\n",
5514 	    mrioc->diagsave_timeout);
5515 	while (mrioc->diagsave_timeout)
5516 		ssleep(1);
5517 	/*
5518 	 * Block new resets until the currently executing one is finished and
5519 	 * return the status of the existing reset for all blocked resets
5520 	 */
5521 	dprint_reset(mrioc, "soft_reset_handler: acquiring reset_mutex\n");
5522 	if (!mutex_trylock(&mrioc->reset_mutex)) {
5523 		ioc_info(mrioc,
5524 		    "controller reset triggered by %s is blocked due to another reset in progress\n",
5525 		    mpi3mr_reset_rc_name(reset_reason));
5526 		do {
5527 			ssleep(1);
5528 		} while (mrioc->reset_in_progress == 1);
5529 		ioc_info(mrioc,
5530 		    "returning previous reset result(%d) for the reset triggered by %s\n",
5531 		    mrioc->prev_reset_result,
5532 		    mpi3mr_reset_rc_name(reset_reason));
5533 		return mrioc->prev_reset_result;
5534 	}
5535 	ioc_info(mrioc, "controller reset is triggered by %s\n",
5536 	    mpi3mr_reset_rc_name(reset_reason));
5537 
5538 	mrioc->device_refresh_on = 0;
5539 	scsi_block_requests(mrioc->shost);
5540 	mrioc->reset_in_progress = 1;
5541 	mrioc->stop_bsgs = 1;
5542 	mrioc->prev_reset_result = -1;
5543 	memset(&trigger_data, 0, sizeof(trigger_data));
5544 
5545 	if ((!snapdump) && (reset_reason != MPI3MR_RESET_FROM_FAULT_WATCH) &&
5546 	    (reset_reason != MPI3MR_RESET_FROM_FIRMWARE) &&
5547 	    (reset_reason != MPI3MR_RESET_FROM_CIACTIV_FAULT)) {
5548 		mpi3mr_set_trigger_data_in_all_hdb(mrioc,
5549 		    MPI3MR_HDB_TRIGGER_TYPE_SOFT_RESET, NULL, 0);
5550 		dprint_reset(mrioc,
5551 		    "soft_reset_handler: releasing host diagnostic buffers\n");
5552 		mpi3mr_release_diag_bufs(mrioc, 0);
5553 		for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
5554 			mrioc->event_masks[i] = -1;
5555 
5556 		dprint_reset(mrioc, "soft_reset_handler: masking events\n");
5557 		mpi3mr_issue_event_notification(mrioc);
5558 	}
5559 
5560 	mpi3mr_wait_for_host_io(mrioc, MPI3MR_RESET_HOST_IOWAIT_TIMEOUT);
5561 
5562 	mpi3mr_ioc_disable_intr(mrioc);
5563 	mrioc->io_admin_reset_sync = 1;
5564 
5565 	if (snapdump) {
5566 		retval = mpi3mr_issue_reset(mrioc,
5567 		    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, reset_reason);
5568 		if (!retval) {
5569 			trigger_data.fault = (readl(&mrioc->sysif_regs->fault) &
5570 				      MPI3_SYSIF_FAULT_CODE_MASK);
5571 			do {
5572 				host_diagnostic =
5573 				    readl(&mrioc->sysif_regs->host_diagnostic);
5574 				if (!(host_diagnostic &
5575 				    MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS))
5576 					break;
5577 				msleep(100);
5578 			} while (--timeout);
5579 
5580 			mpi3mr_save_fault_info(mrioc);
5581 			mpi3mr_fault_uevent_emit(mrioc);
5582 			mrioc->fwfault_counter++;
5583 			mpi3mr_set_trigger_data_in_all_hdb(mrioc,
5584 			    MPI3MR_HDB_TRIGGER_TYPE_FAULT, &trigger_data, 0);
5585 		}
5586 	}
5587 
5588 	retval = mpi3mr_issue_reset(mrioc,
5589 	    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET, reset_reason);
5590 	if (retval) {
5591 		ioc_err(mrioc, "Failed to issue soft reset to the ioc\n");
5592 		goto out;
5593 	}
5594 
5595 	retval = mpi3mr_check_op_admin_proc(mrioc);
5596 	if (retval) {
5597 		ioc_err(mrioc, "Soft reset failed due to an Admin or I/O queue polling\n"
5598 				"thread still processing replies even after a 10 second\n"
5599 				"timeout. Marking the controller as unrecoverable!\n");
5600 
5601 		goto out;
5602 	}
5603 
5604 	if (mrioc->num_io_throttle_group !=
5605 	    mrioc->facts.max_io_throttle_group) {
5606 		ioc_err(mrioc,
5607 		    "max io throttle group doesn't match old(%d), new(%d)\n",
5608 		    mrioc->num_io_throttle_group,
5609 		    mrioc->facts.max_io_throttle_group);
5610 		retval = -EPERM;
5611 		goto out;
5612 	}
5613 
5614 	mpi3mr_flush_delayed_cmd_lists(mrioc);
5615 	mpi3mr_flush_drv_cmds(mrioc);
5616 	bitmap_clear(mrioc->devrem_bitmap, 0, MPI3MR_NUM_DEVRMCMD);
5617 	bitmap_clear(mrioc->removepend_bitmap, 0,
5618 		     mrioc->dev_handle_bitmap_bits);
5619 	bitmap_clear(mrioc->evtack_cmds_bitmap, 0, MPI3MR_NUM_EVTACKCMD);
5620 	mpi3mr_flush_host_io(mrioc);
5621 	mpi3mr_cleanup_fwevt_list(mrioc);
5622 	mpi3mr_invalidate_devhandles(mrioc);
5623 	mpi3mr_free_enclosure_list(mrioc);
5624 
5625 	if (mrioc->prepare_for_reset) {
5626 		mrioc->prepare_for_reset = 0;
5627 		mrioc->prepare_for_reset_timeout_counter = 0;
5628 	}
5629 	mpi3mr_memset_buffers(mrioc);
5630 	mpi3mr_release_diag_bufs(mrioc, 1);
5631 	mrioc->fw_release_trigger_active = false;
5632 	mrioc->trace_release_trigger_active = false;
5633 	mrioc->snapdump_trigger_active = false;
5634 	mpi3mr_set_trigger_data_in_all_hdb(mrioc,
5635 	    MPI3MR_HDB_TRIGGER_TYPE_SOFT_RESET, NULL, 0);
5636 
5637 	dprint_reset(mrioc,
5638 	    "soft_reset_handler: reinitializing the controller\n");
5639 	retval = mpi3mr_reinit_ioc(mrioc, 0);
5640 	if (retval) {
5641 		pr_err(IOCNAME "reinit after soft reset failed: reason %d\n",
5642 		    mrioc->name, reset_reason);
5643 		goto out;
5644 	}
5645 	ssleep(MPI3MR_RESET_TOPOLOGY_SETTLE_TIME);
5646 
5647 out:
5648 	if (!retval) {
5649 		mrioc->diagsave_timeout = 0;
5650 		mrioc->reset_in_progress = 0;
5651 		scsi_unblock_requests(mrioc->shost);
5652 		mrioc->pel_abort_requested = 0;
5653 		if (mrioc->pel_enabled) {
5654 			mrioc->pel_cmds.retry_count = 0;
5655 			mpi3mr_pel_wait_post(mrioc, &mrioc->pel_cmds);
5656 		}
5657 
5658 		mrioc->device_refresh_on = 0;
5659 
5660 		mrioc->ts_update_counter = 0;
5661 		spin_lock_irqsave(&mrioc->watchdog_lock, flags);
5662 		if (mrioc->watchdog_work_q)
5663 			queue_delayed_work(mrioc->watchdog_work_q,
5664 			    &mrioc->watchdog_work,
5665 			    msecs_to_jiffies(MPI3MR_WATCHDOG_INTERVAL));
5666 		spin_unlock_irqrestore(&mrioc->watchdog_lock, flags);
5667 		mrioc->stop_bsgs = 0;
5668 		if (mrioc->pel_enabled)
5669 			atomic64_inc(&event_counter);
5670 	} else {
5671 		dprint_reset(mrioc,
5672 			"soft_reset_handler failed, marking controller as unrecoverable\n");
5673 		ioc_state = mpi3mr_get_iocstate(mrioc);
5674 
5675 		if (ioc_state != MRIOC_STATE_FAULT)
5676 			mpi3mr_issue_reset(mrioc,
5677 				MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, reset_reason);
5678 		mrioc->device_refresh_on = 0;
5679 		mrioc->unrecoverable = 1;
5680 		mrioc->reset_in_progress = 0;
5681 		scsi_unblock_requests(mrioc->shost);
5682 		mrioc->stop_bsgs = 0;
5683 		retval = -1;
5684 		mpi3mr_flush_cmds_for_unrecovered_controller(mrioc);
5685 	}
5686 	mrioc->prev_reset_result = retval;
5687 	mutex_unlock(&mrioc->reset_mutex);
5688 	ioc_info(mrioc, "controller reset is %s\n",
5689 	    ((retval == 0) ? "successful" : "failed"));
5690 	return retval;
5691 }
5692 
5693 /**
5694  * mpi3mr_post_cfg_req - Issue config requests and wait
5695  * @mrioc: Adapter instance reference
5696  * @cfg_req: Configuration request
5697  * @timeout: Timeout in seconds
5698  * @ioc_status: Pointer to return ioc status
5699  *
5700  * A generic function for posting MPI3 configuration request to
5701  * the firmware. This blocks for the completion of request for
5702  * timeout seconds and if the request times out this function
5703  * faults the controller with proper reason code.
5704  *
5705  * On successful completion of the request this function returns
5706  * appropriate ioc status from the firmware back to the caller.
5707  *
5708  * Return: 0 on success, non-zero on failure.
5709  */
static int mpi3mr_post_cfg_req(struct mpi3mr_ioc *mrioc,
	struct mpi3_config_request *cfg_req, int timeout, u16 *ioc_status)
{
	int retval = 0;

	/*
	 * There is a single internal config command slot (cfg_cmds);
	 * the mutex serializes its users and the PENDING state flag
	 * rejects overlapping submissions.
	 */
	mutex_lock(&mrioc->cfg_cmds.mutex);
	if (mrioc->cfg_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "sending config request failed due to command in use\n");
		mutex_unlock(&mrioc->cfg_cmds.mutex);
		goto out;
	}
	/* Claim the slot and clear stale status from any previous use. */
	mrioc->cfg_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->cfg_cmds.is_waiting = 1;
	mrioc->cfg_cmds.callback = NULL;
	mrioc->cfg_cmds.ioc_status = 0;
	mrioc->cfg_cmds.ioc_loginfo = 0;

	/* Tag the request so the reply path routes back to cfg_cmds. */
	cfg_req->host_tag = cpu_to_le16(MPI3MR_HOSTTAG_CFG_CMDS);
	cfg_req->function = MPI3_FUNCTION_CONFIG;

	/* Completion must be (re)initialized before the request is posted. */
	init_completion(&mrioc->cfg_cmds.done);
	dprint_cfg_info(mrioc, "posting config request\n");
	if (mrioc->logging_level & MPI3_DEBUG_CFG_INFO)
		dprint_dump(cfg_req, sizeof(struct mpi3_config_request),
		    "mpi3_cfg_req");
	retval = mpi3mr_admin_request_post(mrioc, cfg_req, sizeof(*cfg_req), 1);
	if (retval) {
		ioc_err(mrioc, "posting config request failed\n");
		goto out_unlock;
	}
	/* Block for up to 'timeout' seconds for the firmware reply. */
	wait_for_completion_timeout(&mrioc->cfg_cmds.done, (timeout * HZ));
	if (!(mrioc->cfg_cmds.state & MPI3MR_CMD_COMPLETE)) {
		/*
		 * Timed out: fault the controller with a reason code so the
		 * reset handler can collect diagnostic state.
		 */
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_CFG_REQ_TIMEOUT);
		ioc_err(mrioc, "config request timed out\n");
		retval = -1;
		goto out_unlock;
	}
	/* Hand the firmware's ioc_status back to the caller. */
	*ioc_status = mrioc->cfg_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK;
	if ((*ioc_status) != MPI3_IOCSTATUS_SUCCESS)
		dprint_cfg_err(mrioc,
		    "cfg_page request returned with ioc_status(0x%04x), log_info(0x%08x)\n",
		    *ioc_status, mrioc->cfg_cmds.ioc_loginfo);

out_unlock:
	/* Release the command slot before dropping the mutex. */
	mrioc->cfg_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->cfg_cmds.mutex);

out:
	return retval;
}
5762 
5763 /**
5764  * mpi3mr_process_cfg_req - config page request processor
5765  * @mrioc: Adapter instance reference
5766  * @cfg_req: Configuration request
5767  * @cfg_hdr: Configuration page header
5768  * @timeout: Timeout in seconds
5769  * @ioc_status: Pointer to return ioc status
5770  * @cfg_buf: Memory pointer to copy config page or header
5771  * @cfg_buf_sz: Size of the memory to get config page or header
5772  *
5773  * This is handler for config page read, write and config page
5774  * header read operations.
5775  *
5776  * This function expects the cfg_req to be populated with page
5777  * type, page number, action for the header read and with page
5778  * address for all other operations.
5779  *
 * The cfg_hdr can be passed as null when reading the required
 * header details; for read/write page actions the cfg_hdr must
 * point to a valid configuration page header.
5783  *
5784  * This allocates dmaable memory based on the size of the config
5785  * buffer and set the SGE of the cfg_req.
5786  *
5787  * For write actions, the config page data has to be passed in
5788  * the cfg_buf and size of the data has to be mentioned in the
5789  * cfg_buf_sz.
5790  *
5791  * For read/header actions, on successful completion of the
5792  * request with successful ioc_status the data will be copied
5793  * into the cfg_buf limited to a minimum of actual page size and
5794  * cfg_buf_sz
5795  *
5796  *
5797  * Return: 0 on success, non-zero on failure.
5798  */
5799 static int mpi3mr_process_cfg_req(struct mpi3mr_ioc *mrioc,
5800 	struct mpi3_config_request *cfg_req,
5801 	struct mpi3_config_page_header *cfg_hdr, int timeout, u16 *ioc_status,
5802 	void *cfg_buf, u32 cfg_buf_sz)
5803 {
5804 	struct dma_memory_desc mem_desc;
5805 	int retval = -1;
5806 	u8 invalid_action = 0;
5807 	u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
5808 
5809 	memset(&mem_desc, 0, sizeof(struct dma_memory_desc));
5810 
5811 	if (cfg_req->action == MPI3_CONFIG_ACTION_PAGE_HEADER)
5812 		mem_desc.size = sizeof(struct mpi3_config_page_header);
5813 	else {
5814 		if (!cfg_hdr) {
5815 			ioc_err(mrioc, "null config header passed for config action(%d), page_type(0x%02x), page_num(%d)\n",
5816 			    cfg_req->action, cfg_req->page_type,
5817 			    cfg_req->page_number);
5818 			goto out;
5819 		}
5820 		switch (cfg_hdr->page_attribute & MPI3_CONFIG_PAGEATTR_MASK) {
5821 		case MPI3_CONFIG_PAGEATTR_READ_ONLY:
5822 			if (cfg_req->action
5823 			    != MPI3_CONFIG_ACTION_READ_CURRENT)
5824 				invalid_action = 1;
5825 			break;
5826 		case MPI3_CONFIG_PAGEATTR_CHANGEABLE:
5827 			if ((cfg_req->action ==
5828 			     MPI3_CONFIG_ACTION_READ_PERSISTENT) ||
5829 			    (cfg_req->action ==
5830 			     MPI3_CONFIG_ACTION_WRITE_PERSISTENT))
5831 				invalid_action = 1;
5832 			break;
5833 		case MPI3_CONFIG_PAGEATTR_PERSISTENT:
5834 		default:
5835 			break;
5836 		}
5837 		if (invalid_action) {
5838 			ioc_err(mrioc,
5839 			    "config action(%d) is not allowed for page_type(0x%02x), page_num(%d) with page_attribute(0x%02x)\n",
5840 			    cfg_req->action, cfg_req->page_type,
5841 			    cfg_req->page_number, cfg_hdr->page_attribute);
5842 			goto out;
5843 		}
5844 		mem_desc.size = le16_to_cpu(cfg_hdr->page_length) * 4;
5845 		cfg_req->page_length = cfg_hdr->page_length;
5846 		cfg_req->page_version = cfg_hdr->page_version;
5847 	}
5848 
5849 	mem_desc.addr = dma_alloc_coherent(&mrioc->pdev->dev,
5850 		mem_desc.size, &mem_desc.dma_addr, GFP_KERNEL);
5851 
5852 	if (!mem_desc.addr)
5853 		return retval;
5854 
5855 	mpi3mr_add_sg_single(&cfg_req->sgl, sgl_flags, mem_desc.size,
5856 	    mem_desc.dma_addr);
5857 
5858 	if ((cfg_req->action == MPI3_CONFIG_ACTION_WRITE_PERSISTENT) ||
5859 	    (cfg_req->action == MPI3_CONFIG_ACTION_WRITE_CURRENT)) {
5860 		memcpy(mem_desc.addr, cfg_buf, min_t(u16, mem_desc.size,
5861 		    cfg_buf_sz));
5862 		dprint_cfg_info(mrioc, "config buffer to be written\n");
5863 		if (mrioc->logging_level & MPI3_DEBUG_CFG_INFO)
5864 			dprint_dump(mem_desc.addr, mem_desc.size, "cfg_buf");
5865 	}
5866 
5867 	if (mpi3mr_post_cfg_req(mrioc, cfg_req, timeout, ioc_status))
5868 		goto out;
5869 
5870 	retval = 0;
5871 	if ((*ioc_status == MPI3_IOCSTATUS_SUCCESS) &&
5872 	    (cfg_req->action != MPI3_CONFIG_ACTION_WRITE_PERSISTENT) &&
5873 	    (cfg_req->action != MPI3_CONFIG_ACTION_WRITE_CURRENT)) {
5874 		memcpy(cfg_buf, mem_desc.addr, min_t(u16, mem_desc.size,
5875 		    cfg_buf_sz));
5876 		dprint_cfg_info(mrioc, "config buffer read\n");
5877 		if (mrioc->logging_level & MPI3_DEBUG_CFG_INFO)
5878 			dprint_dump(mem_desc.addr, mem_desc.size, "cfg_buf");
5879 	}
5880 
5881 out:
5882 	if (mem_desc.addr) {
5883 		dma_free_coherent(&mrioc->pdev->dev, mem_desc.size,
5884 			mem_desc.addr, mem_desc.dma_addr);
5885 		mem_desc.addr = NULL;
5886 	}
5887 
5888 	return retval;
5889 }
5890 
5891 /**
5892  * mpi3mr_cfg_get_dev_pg0 - Read current device page0
5893  * @mrioc: Adapter instance reference
5894  * @ioc_status: Pointer to return ioc status
5895  * @dev_pg0: Pointer to return device page 0
5896  * @pg_sz: Size of the memory allocated to the page pointer
5897  * @form: The form to be used for addressing the page
5898  * @form_spec: Form specific information like device handle
5899  *
5900  * This is handler for config page read for a specific device
5901  * page0. The ioc_status has the controller returned ioc_status.
5902  * This routine doesn't check ioc_status to decide whether the
5903  * page read is success or not and it is the callers
5904  * responsibility.
5905  *
5906  * Return: 0 on success, non-zero on failure.
5907  */
5908 int mpi3mr_cfg_get_dev_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
5909 	struct mpi3_device_page0 *dev_pg0, u16 pg_sz, u32 form, u32 form_spec)
5910 {
5911 	struct mpi3_config_page_header cfg_hdr;
5912 	struct mpi3_config_request cfg_req;
5913 	u32 page_address;
5914 
5915 	memset(dev_pg0, 0, pg_sz);
5916 	memset(&cfg_hdr, 0, sizeof(cfg_hdr));
5917 	memset(&cfg_req, 0, sizeof(cfg_req));
5918 
5919 	cfg_req.function = MPI3_FUNCTION_CONFIG;
5920 	cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
5921 	cfg_req.page_type = MPI3_CONFIG_PAGETYPE_DEVICE;
5922 	cfg_req.page_number = 0;
5923 	cfg_req.page_address = 0;
5924 
5925 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
5926 	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
5927 		ioc_err(mrioc, "device page0 header read failed\n");
5928 		goto out_failed;
5929 	}
5930 	if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) {
5931 		ioc_err(mrioc, "device page0 header read failed with ioc_status(0x%04x)\n",
5932 		    *ioc_status);
5933 		goto out_failed;
5934 	}
5935 	cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
5936 	page_address = ((form & MPI3_DEVICE_PGAD_FORM_MASK) |
5937 	    (form_spec & MPI3_DEVICE_PGAD_HANDLE_MASK));
5938 	cfg_req.page_address = cpu_to_le32(page_address);
5939 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
5940 	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, dev_pg0, pg_sz)) {
5941 		ioc_err(mrioc, "device page0 read failed\n");
5942 		goto out_failed;
5943 	}
5944 	return 0;
5945 out_failed:
5946 	return -1;
5947 }
5948 
5949 
5950 /**
5951  * mpi3mr_cfg_get_sas_phy_pg0 - Read current SAS Phy page0
5952  * @mrioc: Adapter instance reference
5953  * @ioc_status: Pointer to return ioc status
5954  * @phy_pg0: Pointer to return SAS Phy page 0
5955  * @pg_sz: Size of the memory allocated to the page pointer
5956  * @form: The form to be used for addressing the page
5957  * @form_spec: Form specific information like phy number
5958  *
5959  * This is handler for config page read for a specific SAS Phy
5960  * page0. The ioc_status has the controller returned ioc_status.
5961  * This routine doesn't check ioc_status to decide whether the
5962  * page read is success or not and it is the callers
5963  * responsibility.
5964  *
5965  * Return: 0 on success, non-zero on failure.
5966  */
5967 int mpi3mr_cfg_get_sas_phy_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
5968 	struct mpi3_sas_phy_page0 *phy_pg0, u16 pg_sz, u32 form,
5969 	u32 form_spec)
5970 {
5971 	struct mpi3_config_page_header cfg_hdr;
5972 	struct mpi3_config_request cfg_req;
5973 	u32 page_address;
5974 
5975 	memset(phy_pg0, 0, pg_sz);
5976 	memset(&cfg_hdr, 0, sizeof(cfg_hdr));
5977 	memset(&cfg_req, 0, sizeof(cfg_req));
5978 
5979 	cfg_req.function = MPI3_FUNCTION_CONFIG;
5980 	cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
5981 	cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_PHY;
5982 	cfg_req.page_number = 0;
5983 	cfg_req.page_address = 0;
5984 
5985 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
5986 	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
5987 		ioc_err(mrioc, "sas phy page0 header read failed\n");
5988 		goto out_failed;
5989 	}
5990 	if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) {
5991 		ioc_err(mrioc, "sas phy page0 header read failed with ioc_status(0x%04x)\n",
5992 		    *ioc_status);
5993 		goto out_failed;
5994 	}
5995 	cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
5996 	page_address = ((form & MPI3_SAS_PHY_PGAD_FORM_MASK) |
5997 	    (form_spec & MPI3_SAS_PHY_PGAD_PHY_NUMBER_MASK));
5998 	cfg_req.page_address = cpu_to_le32(page_address);
5999 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
6000 	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, phy_pg0, pg_sz)) {
6001 		ioc_err(mrioc, "sas phy page0 read failed\n");
6002 		goto out_failed;
6003 	}
6004 	return 0;
6005 out_failed:
6006 	return -1;
6007 }
6008 
6009 /**
6010  * mpi3mr_cfg_get_sas_phy_pg1 - Read current SAS Phy page1
6011  * @mrioc: Adapter instance reference
6012  * @ioc_status: Pointer to return ioc status
6013  * @phy_pg1: Pointer to return SAS Phy page 1
6014  * @pg_sz: Size of the memory allocated to the page pointer
6015  * @form: The form to be used for addressing the page
6016  * @form_spec: Form specific information like phy number
6017  *
6018  * This is handler for config page read for a specific SAS Phy
6019  * page1. The ioc_status has the controller returned ioc_status.
6020  * This routine doesn't check ioc_status to decide whether the
6021  * page read is success or not and it is the callers
6022  * responsibility.
6023  *
6024  * Return: 0 on success, non-zero on failure.
6025  */
6026 int mpi3mr_cfg_get_sas_phy_pg1(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
6027 	struct mpi3_sas_phy_page1 *phy_pg1, u16 pg_sz, u32 form,
6028 	u32 form_spec)
6029 {
6030 	struct mpi3_config_page_header cfg_hdr;
6031 	struct mpi3_config_request cfg_req;
6032 	u32 page_address;
6033 
6034 	memset(phy_pg1, 0, pg_sz);
6035 	memset(&cfg_hdr, 0, sizeof(cfg_hdr));
6036 	memset(&cfg_req, 0, sizeof(cfg_req));
6037 
6038 	cfg_req.function = MPI3_FUNCTION_CONFIG;
6039 	cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
6040 	cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_PHY;
6041 	cfg_req.page_number = 1;
6042 	cfg_req.page_address = 0;
6043 
6044 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
6045 	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
6046 		ioc_err(mrioc, "sas phy page1 header read failed\n");
6047 		goto out_failed;
6048 	}
6049 	if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) {
6050 		ioc_err(mrioc, "sas phy page1 header read failed with ioc_status(0x%04x)\n",
6051 		    *ioc_status);
6052 		goto out_failed;
6053 	}
6054 	cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
6055 	page_address = ((form & MPI3_SAS_PHY_PGAD_FORM_MASK) |
6056 	    (form_spec & MPI3_SAS_PHY_PGAD_PHY_NUMBER_MASK));
6057 	cfg_req.page_address = cpu_to_le32(page_address);
6058 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
6059 	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, phy_pg1, pg_sz)) {
6060 		ioc_err(mrioc, "sas phy page1 read failed\n");
6061 		goto out_failed;
6062 	}
6063 	return 0;
6064 out_failed:
6065 	return -1;
6066 }
6067 
6068 
6069 /**
6070  * mpi3mr_cfg_get_sas_exp_pg0 - Read current SAS Expander page0
6071  * @mrioc: Adapter instance reference
6072  * @ioc_status: Pointer to return ioc status
6073  * @exp_pg0: Pointer to return SAS Expander page 0
6074  * @pg_sz: Size of the memory allocated to the page pointer
6075  * @form: The form to be used for addressing the page
6076  * @form_spec: Form specific information like device handle
6077  *
6078  * This is handler for config page read for a specific SAS
6079  * Expander page0. The ioc_status has the controller returned
6080  * ioc_status. This routine doesn't check ioc_status to decide
6081  * whether the page read is success or not and it is the callers
6082  * responsibility.
6083  *
6084  * Return: 0 on success, non-zero on failure.
6085  */
6086 int mpi3mr_cfg_get_sas_exp_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
6087 	struct mpi3_sas_expander_page0 *exp_pg0, u16 pg_sz, u32 form,
6088 	u32 form_spec)
6089 {
6090 	struct mpi3_config_page_header cfg_hdr;
6091 	struct mpi3_config_request cfg_req;
6092 	u32 page_address;
6093 
6094 	memset(exp_pg0, 0, pg_sz);
6095 	memset(&cfg_hdr, 0, sizeof(cfg_hdr));
6096 	memset(&cfg_req, 0, sizeof(cfg_req));
6097 
6098 	cfg_req.function = MPI3_FUNCTION_CONFIG;
6099 	cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
6100 	cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_EXPANDER;
6101 	cfg_req.page_number = 0;
6102 	cfg_req.page_address = 0;
6103 
6104 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
6105 	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
6106 		ioc_err(mrioc, "expander page0 header read failed\n");
6107 		goto out_failed;
6108 	}
6109 	if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) {
6110 		ioc_err(mrioc, "expander page0 header read failed with ioc_status(0x%04x)\n",
6111 		    *ioc_status);
6112 		goto out_failed;
6113 	}
6114 	cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
6115 	page_address = ((form & MPI3_SAS_EXPAND_PGAD_FORM_MASK) |
6116 	    (form_spec & (MPI3_SAS_EXPAND_PGAD_PHYNUM_MASK |
6117 	    MPI3_SAS_EXPAND_PGAD_HANDLE_MASK)));
6118 	cfg_req.page_address = cpu_to_le32(page_address);
6119 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
6120 	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, exp_pg0, pg_sz)) {
6121 		ioc_err(mrioc, "expander page0 read failed\n");
6122 		goto out_failed;
6123 	}
6124 	return 0;
6125 out_failed:
6126 	return -1;
6127 }
6128 
6129 /**
6130  * mpi3mr_cfg_get_sas_exp_pg1 - Read current SAS Expander page1
6131  * @mrioc: Adapter instance reference
6132  * @ioc_status: Pointer to return ioc status
6133  * @exp_pg1: Pointer to return SAS Expander page 1
6134  * @pg_sz: Size of the memory allocated to the page pointer
6135  * @form: The form to be used for addressing the page
6136  * @form_spec: Form specific information like phy number
6137  *
6138  * This is handler for config page read for a specific SAS
6139  * Expander page1. The ioc_status has the controller returned
6140  * ioc_status. This routine doesn't check ioc_status to decide
6141  * whether the page read is success or not and it is the callers
6142  * responsibility.
6143  *
6144  * Return: 0 on success, non-zero on failure.
6145  */
6146 int mpi3mr_cfg_get_sas_exp_pg1(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
6147 	struct mpi3_sas_expander_page1 *exp_pg1, u16 pg_sz, u32 form,
6148 	u32 form_spec)
6149 {
6150 	struct mpi3_config_page_header cfg_hdr;
6151 	struct mpi3_config_request cfg_req;
6152 	u32 page_address;
6153 
6154 	memset(exp_pg1, 0, pg_sz);
6155 	memset(&cfg_hdr, 0, sizeof(cfg_hdr));
6156 	memset(&cfg_req, 0, sizeof(cfg_req));
6157 
6158 	cfg_req.function = MPI3_FUNCTION_CONFIG;
6159 	cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
6160 	cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_EXPANDER;
6161 	cfg_req.page_number = 1;
6162 	cfg_req.page_address = 0;
6163 
6164 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
6165 	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
6166 		ioc_err(mrioc, "expander page1 header read failed\n");
6167 		goto out_failed;
6168 	}
6169 	if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) {
6170 		ioc_err(mrioc, "expander page1 header read failed with ioc_status(0x%04x)\n",
6171 		    *ioc_status);
6172 		goto out_failed;
6173 	}
6174 	cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
6175 	page_address = ((form & MPI3_SAS_EXPAND_PGAD_FORM_MASK) |
6176 	    (form_spec & (MPI3_SAS_EXPAND_PGAD_PHYNUM_MASK |
6177 	    MPI3_SAS_EXPAND_PGAD_HANDLE_MASK)));
6178 	cfg_req.page_address = cpu_to_le32(page_address);
6179 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
6180 	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, exp_pg1, pg_sz)) {
6181 		ioc_err(mrioc, "expander page1 read failed\n");
6182 		goto out_failed;
6183 	}
6184 	return 0;
6185 out_failed:
6186 	return -1;
6187 }
6188 
6189 /**
6190  * mpi3mr_cfg_get_enclosure_pg0 - Read current Enclosure page0
6191  * @mrioc: Adapter instance reference
6192  * @ioc_status: Pointer to return ioc status
6193  * @encl_pg0: Pointer to return Enclosure page 0
6194  * @pg_sz: Size of the memory allocated to the page pointer
6195  * @form: The form to be used for addressing the page
6196  * @form_spec: Form specific information like device handle
6197  *
6198  * This is handler for config page read for a specific Enclosure
6199  * page0. The ioc_status has the controller returned ioc_status.
6200  * This routine doesn't check ioc_status to decide whether the
6201  * page read is success or not and it is the callers
6202  * responsibility.
6203  *
6204  * Return: 0 on success, non-zero on failure.
6205  */
6206 int mpi3mr_cfg_get_enclosure_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
6207 	struct mpi3_enclosure_page0 *encl_pg0, u16 pg_sz, u32 form,
6208 	u32 form_spec)
6209 {
6210 	struct mpi3_config_page_header cfg_hdr;
6211 	struct mpi3_config_request cfg_req;
6212 	u32 page_address;
6213 
6214 	memset(encl_pg0, 0, pg_sz);
6215 	memset(&cfg_hdr, 0, sizeof(cfg_hdr));
6216 	memset(&cfg_req, 0, sizeof(cfg_req));
6217 
6218 	cfg_req.function = MPI3_FUNCTION_CONFIG;
6219 	cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
6220 	cfg_req.page_type = MPI3_CONFIG_PAGETYPE_ENCLOSURE;
6221 	cfg_req.page_number = 0;
6222 	cfg_req.page_address = 0;
6223 
6224 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
6225 	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
6226 		ioc_err(mrioc, "enclosure page0 header read failed\n");
6227 		goto out_failed;
6228 	}
6229 	if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) {
6230 		ioc_err(mrioc, "enclosure page0 header read failed with ioc_status(0x%04x)\n",
6231 		    *ioc_status);
6232 		goto out_failed;
6233 	}
6234 	cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
6235 	page_address = ((form & MPI3_ENCLOS_PGAD_FORM_MASK) |
6236 	    (form_spec & MPI3_ENCLOS_PGAD_HANDLE_MASK));
6237 	cfg_req.page_address = cpu_to_le32(page_address);
6238 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
6239 	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, encl_pg0, pg_sz)) {
6240 		ioc_err(mrioc, "enclosure page0 read failed\n");
6241 		goto out_failed;
6242 	}
6243 	return 0;
6244 out_failed:
6245 	return -1;
6246 }
6247 
6248 
6249 /**
6250  * mpi3mr_cfg_get_sas_io_unit_pg0 - Read current SASIOUnit page0
6251  * @mrioc: Adapter instance reference
6252  * @sas_io_unit_pg0: Pointer to return SAS IO Unit page 0
6253  * @pg_sz: Size of the memory allocated to the page pointer
6254  *
6255  * This is handler for config page read for the SAS IO Unit
6256  * page0. This routine checks ioc_status to decide whether the
6257  * page read is success or not.
6258  *
6259  * Return: 0 on success, non-zero on failure.
6260  */
6261 int mpi3mr_cfg_get_sas_io_unit_pg0(struct mpi3mr_ioc *mrioc,
6262 	struct mpi3_sas_io_unit_page0 *sas_io_unit_pg0, u16 pg_sz)
6263 {
6264 	struct mpi3_config_page_header cfg_hdr;
6265 	struct mpi3_config_request cfg_req;
6266 	u16 ioc_status = 0;
6267 
6268 	memset(sas_io_unit_pg0, 0, pg_sz);
6269 	memset(&cfg_hdr, 0, sizeof(cfg_hdr));
6270 	memset(&cfg_req, 0, sizeof(cfg_req));
6271 
6272 	cfg_req.function = MPI3_FUNCTION_CONFIG;
6273 	cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
6274 	cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_IO_UNIT;
6275 	cfg_req.page_number = 0;
6276 	cfg_req.page_address = 0;
6277 
6278 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
6279 	    MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
6280 		ioc_err(mrioc, "sas io unit page0 header read failed\n");
6281 		goto out_failed;
6282 	}
6283 	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
6284 		ioc_err(mrioc, "sas io unit page0 header read failed with ioc_status(0x%04x)\n",
6285 		    ioc_status);
6286 		goto out_failed;
6287 	}
6288 	cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
6289 
6290 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
6291 	    MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, sas_io_unit_pg0, pg_sz)) {
6292 		ioc_err(mrioc, "sas io unit page0 read failed\n");
6293 		goto out_failed;
6294 	}
6295 	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
6296 		ioc_err(mrioc, "sas io unit page0 read failed with ioc_status(0x%04x)\n",
6297 		    ioc_status);
6298 		goto out_failed;
6299 	}
6300 	return 0;
6301 out_failed:
6302 	return -1;
6303 }
6304 
6305 /**
6306  * mpi3mr_cfg_get_sas_io_unit_pg1 - Read current SASIOUnit page1
6307  * @mrioc: Adapter instance reference
6308  * @sas_io_unit_pg1: Pointer to return SAS IO Unit page 1
6309  * @pg_sz: Size of the memory allocated to the page pointer
6310  *
6311  * This is handler for config page read for the SAS IO Unit
6312  * page1. This routine checks ioc_status to decide whether the
6313  * page read is success or not.
6314  *
6315  * Return: 0 on success, non-zero on failure.
6316  */
6317 int mpi3mr_cfg_get_sas_io_unit_pg1(struct mpi3mr_ioc *mrioc,
6318 	struct mpi3_sas_io_unit_page1 *sas_io_unit_pg1, u16 pg_sz)
6319 {
6320 	struct mpi3_config_page_header cfg_hdr;
6321 	struct mpi3_config_request cfg_req;
6322 	u16 ioc_status = 0;
6323 
6324 	memset(sas_io_unit_pg1, 0, pg_sz);
6325 	memset(&cfg_hdr, 0, sizeof(cfg_hdr));
6326 	memset(&cfg_req, 0, sizeof(cfg_req));
6327 
6328 	cfg_req.function = MPI3_FUNCTION_CONFIG;
6329 	cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
6330 	cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_IO_UNIT;
6331 	cfg_req.page_number = 1;
6332 	cfg_req.page_address = 0;
6333 
6334 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
6335 	    MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
6336 		ioc_err(mrioc, "sas io unit page1 header read failed\n");
6337 		goto out_failed;
6338 	}
6339 	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
6340 		ioc_err(mrioc, "sas io unit page1 header read failed with ioc_status(0x%04x)\n",
6341 		    ioc_status);
6342 		goto out_failed;
6343 	}
6344 	cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
6345 
6346 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
6347 	    MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, sas_io_unit_pg1, pg_sz)) {
6348 		ioc_err(mrioc, "sas io unit page1 read failed\n");
6349 		goto out_failed;
6350 	}
6351 	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
6352 		ioc_err(mrioc, "sas io unit page1 read failed with ioc_status(0x%04x)\n",
6353 		    ioc_status);
6354 		goto out_failed;
6355 	}
6356 	return 0;
6357 out_failed:
6358 	return -1;
6359 }
6360 
6361 /**
6362  * mpi3mr_cfg_set_sas_io_unit_pg1 - Write SASIOUnit page1
6363  * @mrioc: Adapter instance reference
6364  * @sas_io_unit_pg1: Pointer to the SAS IO Unit page 1 to write
6365  * @pg_sz: Size of the memory allocated to the page pointer
6366  *
6367  * This is handler for config page write for the SAS IO Unit
6368  * page1. This routine checks ioc_status to decide whether the
6369  * page read is success or not. This will modify both current
6370  * and persistent page.
6371  *
6372  * Return: 0 on success, non-zero on failure.
6373  */
6374 int mpi3mr_cfg_set_sas_io_unit_pg1(struct mpi3mr_ioc *mrioc,
6375 	struct mpi3_sas_io_unit_page1 *sas_io_unit_pg1, u16 pg_sz)
6376 {
6377 	struct mpi3_config_page_header cfg_hdr;
6378 	struct mpi3_config_request cfg_req;
6379 	u16 ioc_status = 0;
6380 
6381 	memset(&cfg_hdr, 0, sizeof(cfg_hdr));
6382 	memset(&cfg_req, 0, sizeof(cfg_req));
6383 
6384 	cfg_req.function = MPI3_FUNCTION_CONFIG;
6385 	cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
6386 	cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_IO_UNIT;
6387 	cfg_req.page_number = 1;
6388 	cfg_req.page_address = 0;
6389 
6390 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
6391 	    MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
6392 		ioc_err(mrioc, "sas io unit page1 header read failed\n");
6393 		goto out_failed;
6394 	}
6395 	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
6396 		ioc_err(mrioc, "sas io unit page1 header read failed with ioc_status(0x%04x)\n",
6397 		    ioc_status);
6398 		goto out_failed;
6399 	}
6400 	cfg_req.action = MPI3_CONFIG_ACTION_WRITE_CURRENT;
6401 
6402 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
6403 	    MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, sas_io_unit_pg1, pg_sz)) {
6404 		ioc_err(mrioc, "sas io unit page1 write current failed\n");
6405 		goto out_failed;
6406 	}
6407 	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
6408 		ioc_err(mrioc, "sas io unit page1 write current failed with ioc_status(0x%04x)\n",
6409 		    ioc_status);
6410 		goto out_failed;
6411 	}
6412 
6413 	cfg_req.action = MPI3_CONFIG_ACTION_WRITE_PERSISTENT;
6414 
6415 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
6416 	    MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, sas_io_unit_pg1, pg_sz)) {
6417 		ioc_err(mrioc, "sas io unit page1 write persistent failed\n");
6418 		goto out_failed;
6419 	}
6420 	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
6421 		ioc_err(mrioc, "sas io unit page1 write persistent failed with ioc_status(0x%04x)\n",
6422 		    ioc_status);
6423 		goto out_failed;
6424 	}
6425 	return 0;
6426 out_failed:
6427 	return -1;
6428 }
6429 
6430 /**
6431  * mpi3mr_cfg_get_driver_pg1 - Read current Driver page1
6432  * @mrioc: Adapter instance reference
6433  * @driver_pg1: Pointer to return Driver page 1
6434  * @pg_sz: Size of the memory allocated to the page pointer
6435  *
6436  * This is handler for config page read for the Driver page1.
6437  * This routine checks ioc_status to decide whether the page
6438  * read is success or not.
6439  *
6440  * Return: 0 on success, non-zero on failure.
6441  */
6442 int mpi3mr_cfg_get_driver_pg1(struct mpi3mr_ioc *mrioc,
6443 	struct mpi3_driver_page1 *driver_pg1, u16 pg_sz)
6444 {
6445 	struct mpi3_config_page_header cfg_hdr;
6446 	struct mpi3_config_request cfg_req;
6447 	u16 ioc_status = 0;
6448 
6449 	memset(driver_pg1, 0, pg_sz);
6450 	memset(&cfg_hdr, 0, sizeof(cfg_hdr));
6451 	memset(&cfg_req, 0, sizeof(cfg_req));
6452 
6453 	cfg_req.function = MPI3_FUNCTION_CONFIG;
6454 	cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
6455 	cfg_req.page_type = MPI3_CONFIG_PAGETYPE_DRIVER;
6456 	cfg_req.page_number = 1;
6457 	cfg_req.page_address = 0;
6458 
6459 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
6460 	    MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
6461 		ioc_err(mrioc, "driver page1 header read failed\n");
6462 		goto out_failed;
6463 	}
6464 	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
6465 		ioc_err(mrioc, "driver page1 header read failed with ioc_status(0x%04x)\n",
6466 		    ioc_status);
6467 		goto out_failed;
6468 	}
6469 	cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
6470 
6471 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
6472 	    MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, driver_pg1, pg_sz)) {
6473 		ioc_err(mrioc, "driver page1 read failed\n");
6474 		goto out_failed;
6475 	}
6476 	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
6477 		ioc_err(mrioc, "driver page1 read failed with ioc_status(0x%04x)\n",
6478 		    ioc_status);
6479 		goto out_failed;
6480 	}
6481 	return 0;
6482 out_failed:
6483 	return -1;
6484 }
6485 
6486 /**
6487  * mpi3mr_cfg_get_driver_pg2 - Read current driver page2
6488  * @mrioc: Adapter instance reference
6489  * @driver_pg2: Pointer to return driver page 2
6490  * @pg_sz: Size of the memory allocated to the page pointer
6491  * @page_action: Page action
6492  *
6493  * This is handler for config page read for the driver page2.
6494  * This routine checks ioc_status to decide whether the page
6495  * read is success or not.
6496  *
6497  * Return: 0 on success, non-zero on failure.
6498  */
6499 int mpi3mr_cfg_get_driver_pg2(struct mpi3mr_ioc *mrioc,
6500 	struct mpi3_driver_page2 *driver_pg2, u16 pg_sz, u8 page_action)
6501 {
6502 	struct mpi3_config_page_header cfg_hdr;
6503 	struct mpi3_config_request cfg_req;
6504 	u16 ioc_status = 0;
6505 
6506 	memset(driver_pg2, 0, pg_sz);
6507 	memset(&cfg_hdr, 0, sizeof(cfg_hdr));
6508 	memset(&cfg_req, 0, sizeof(cfg_req));
6509 
6510 	cfg_req.function = MPI3_FUNCTION_CONFIG;
6511 	cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
6512 	cfg_req.page_type = MPI3_CONFIG_PAGETYPE_DRIVER;
6513 	cfg_req.page_number = 2;
6514 	cfg_req.page_address = 0;
6515 	cfg_req.page_version = MPI3_DRIVER2_PAGEVERSION;
6516 
6517 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
6518 	    MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
6519 		ioc_err(mrioc, "driver page2 header read failed\n");
6520 		goto out_failed;
6521 	}
6522 	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
6523 		ioc_err(mrioc, "driver page2 header read failed with\n"
6524 			       "ioc_status(0x%04x)\n",
6525 		    ioc_status);
6526 		goto out_failed;
6527 	}
6528 	cfg_req.action = page_action;
6529 
6530 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
6531 	    MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, driver_pg2, pg_sz)) {
6532 		ioc_err(mrioc, "driver page2 read failed\n");
6533 		goto out_failed;
6534 	}
6535 	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
6536 		ioc_err(mrioc, "driver page2 read failed with\n"
6537 			       "ioc_status(0x%04x)\n",
6538 		    ioc_status);
6539 		goto out_failed;
6540 	}
6541 	return 0;
6542 out_failed:
6543 	return -1;
6544 }
6545 
6546