xref: /linux/drivers/scsi/mpi3mr/mpi3mr_fw.c (revision 9557b4376d02088a33e5f4116bcc324d35a3b64c)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Driver for Broadcom MPI3 Storage Controllers
4  *
5  * Copyright (C) 2017-2023 Broadcom Inc.
6  *  (mailto: mpi3mr-linuxdrv.pdl@broadcom.com)
7  *
8  */
9 
10 #include "mpi3mr.h"
11 #include <linux/io-64-nonatomic-lo-hi.h>
12 
/* Forward declarations for handlers defined later in this file. */
static int
mpi3mr_issue_reset(struct mpi3mr_ioc *mrioc, u16 reset_type, u16 reset_reason);
static int mpi3mr_setup_admin_qpair(struct mpi3mr_ioc *mrioc);
static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc,
	struct mpi3_ioc_facts_data *facts_data);
static void mpi3mr_pel_wait_complete(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_drv_cmd *drv_cmd);

/* Module parameter: number of reply queues dedicated to io_uring poll mode. */
static int poll_queues;
module_param(poll_queues, int, 0444);
MODULE_PARM_DESC(poll_queues, "Number of queues for io_uring poll mode. (Range 1 - 126)");
24 
25 #if defined(writeq) && defined(CONFIG_64BIT)
26 static inline void mpi3mr_writeq(__u64 b, volatile void __iomem *addr)
27 {
28 	writeq(b, addr);
29 }
30 #else
31 static inline void mpi3mr_writeq(__u64 b, volatile void __iomem *addr)
32 {
33 	__u64 data_out = b;
34 
35 	writel((u32)(data_out), addr);
36 	writel((u32)(data_out >> 32), (addr + 4));
37 }
38 #endif
39 
40 static inline bool
41 mpi3mr_check_req_qfull(struct op_req_qinfo *op_req_q)
42 {
43 	u16 pi, ci, max_entries;
44 	bool is_qfull = false;
45 
46 	pi = op_req_q->pi;
47 	ci = READ_ONCE(op_req_q->ci);
48 	max_entries = op_req_q->num_requests;
49 
50 	if ((ci == (pi + 1)) || ((!ci) && (pi == (max_entries - 1))))
51 		is_qfull = true;
52 
53 	return is_qfull;
54 }
55 
56 static void mpi3mr_sync_irqs(struct mpi3mr_ioc *mrioc)
57 {
58 	u16 i, max_vectors;
59 
60 	max_vectors = mrioc->intr_info_count;
61 
62 	for (i = 0; i < max_vectors; i++)
63 		synchronize_irq(pci_irq_vector(mrioc->pdev, i));
64 }
65 
/* Stop servicing replies from the ISRs, then wait out handlers in flight. */
void mpi3mr_ioc_disable_intr(struct mpi3mr_ioc *mrioc)
{
	mrioc->intr_enabled = 0;
	mpi3mr_sync_irqs(mrioc);
}
71 
/* Allow the ISRs to start processing replies again. */
void mpi3mr_ioc_enable_intr(struct mpi3mr_ioc *mrioc)
{
	mrioc->intr_enabled = 1;
}
76 
77 static void mpi3mr_cleanup_isr(struct mpi3mr_ioc *mrioc)
78 {
79 	u16 i;
80 
81 	mpi3mr_ioc_disable_intr(mrioc);
82 
83 	if (!mrioc->intr_info)
84 		return;
85 
86 	for (i = 0; i < mrioc->intr_info_count; i++)
87 		free_irq(pci_irq_vector(mrioc->pdev, i),
88 		    (mrioc->intr_info + i));
89 
90 	kfree(mrioc->intr_info);
91 	mrioc->intr_info = NULL;
92 	mrioc->intr_info_count = 0;
93 	mrioc->is_intr_info_set = false;
94 	pci_free_irq_vectors(mrioc->pdev);
95 }
96 
97 void mpi3mr_add_sg_single(void *paddr, u8 flags, u32 length,
98 	dma_addr_t dma_addr)
99 {
100 	struct mpi3_sge_common *sgel = paddr;
101 
102 	sgel->flags = flags;
103 	sgel->length = cpu_to_le32(length);
104 	sgel->address = cpu_to_le64(dma_addr);
105 }
106 
107 void mpi3mr_build_zero_len_sge(void *paddr)
108 {
109 	u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
110 
111 	mpi3mr_add_sg_single(paddr, sgl_flags, 0, -1);
112 }
113 
114 void *mpi3mr_get_reply_virt_addr(struct mpi3mr_ioc *mrioc,
115 	dma_addr_t phys_addr)
116 {
117 	if (!phys_addr)
118 		return NULL;
119 
120 	if ((phys_addr < mrioc->reply_buf_dma) ||
121 	    (phys_addr > mrioc->reply_buf_dma_max_address))
122 		return NULL;
123 
124 	return mrioc->reply_buf + (phys_addr - mrioc->reply_buf_dma);
125 }
126 
127 void *mpi3mr_get_sensebuf_virt_addr(struct mpi3mr_ioc *mrioc,
128 	dma_addr_t phys_addr)
129 {
130 	if (!phys_addr)
131 		return NULL;
132 
133 	return mrioc->sense_buf + (phys_addr - mrioc->sense_buf_dma);
134 }
135 
136 static void mpi3mr_repost_reply_buf(struct mpi3mr_ioc *mrioc,
137 	u64 reply_dma)
138 {
139 	u32 old_idx = 0;
140 	unsigned long flags;
141 
142 	spin_lock_irqsave(&mrioc->reply_free_queue_lock, flags);
143 	old_idx  =  mrioc->reply_free_queue_host_index;
144 	mrioc->reply_free_queue_host_index = (
145 	    (mrioc->reply_free_queue_host_index ==
146 	    (mrioc->reply_free_qsz - 1)) ? 0 :
147 	    (mrioc->reply_free_queue_host_index + 1));
148 	mrioc->reply_free_q[old_idx] = cpu_to_le64(reply_dma);
149 	writel(mrioc->reply_free_queue_host_index,
150 	    &mrioc->sysif_regs->reply_free_host_index);
151 	spin_unlock_irqrestore(&mrioc->reply_free_queue_lock, flags);
152 }
153 
154 void mpi3mr_repost_sense_buf(struct mpi3mr_ioc *mrioc,
155 	u64 sense_buf_dma)
156 {
157 	u32 old_idx = 0;
158 	unsigned long flags;
159 
160 	spin_lock_irqsave(&mrioc->sbq_lock, flags);
161 	old_idx  =  mrioc->sbq_host_index;
162 	mrioc->sbq_host_index = ((mrioc->sbq_host_index ==
163 	    (mrioc->sense_buf_q_sz - 1)) ? 0 :
164 	    (mrioc->sbq_host_index + 1));
165 	mrioc->sense_buf_q[old_idx] = cpu_to_le64(sense_buf_dma);
166 	writel(mrioc->sbq_host_index,
167 	    &mrioc->sysif_regs->sense_buffer_free_host_index);
168 	spin_unlock_irqrestore(&mrioc->sbq_lock, flags);
169 }
170 
/**
 * mpi3mr_print_event_data - log a firmware event notification
 * @mrioc: Adapter instance reference
 * @event_reply: MPI3 event notification reply frame
 *
 * Log a short human-readable description of the received event.
 * Events carrying payload of interest (device add/info/status
 * change, SAS discovery, PCIe enumeration) are logged with details
 * and return early; other known events are logged by name only,
 * and events with no mapping are silently ignored.
 *
 * Return: Nothing.
 */
static void mpi3mr_print_event_data(struct mpi3mr_ioc *mrioc,
	struct mpi3_event_notification_reply *event_reply)
{
	char *desc = NULL;
	u16 event;

	event = event_reply->event;

	switch (event) {
	case MPI3_EVENT_LOG_DATA:
		desc = "Log Data";
		break;
	case MPI3_EVENT_CHANGE:
		desc = "Event Change";
		break;
	case MPI3_EVENT_GPIO_INTERRUPT:
		desc = "GPIO Interrupt";
		break;
	case MPI3_EVENT_CABLE_MGMT:
		desc = "Cable Management";
		break;
	case MPI3_EVENT_ENERGY_PACK_CHANGE:
		desc = "Energy Pack Change";
		break;
	case MPI3_EVENT_DEVICE_ADDED:
	{
		struct mpi3_device_page0 *event_data =
		    (struct mpi3_device_page0 *)event_reply->event_data;
		ioc_info(mrioc, "Device Added: dev=0x%04x Form=0x%x\n",
		    event_data->dev_handle, event_data->device_form);
		return;
	}
	case MPI3_EVENT_DEVICE_INFO_CHANGED:
	{
		struct mpi3_device_page0 *event_data =
		    (struct mpi3_device_page0 *)event_reply->event_data;
		ioc_info(mrioc, "Device Info Changed: dev=0x%04x Form=0x%x\n",
		    event_data->dev_handle, event_data->device_form);
		return;
	}
	case MPI3_EVENT_DEVICE_STATUS_CHANGE:
	{
		struct mpi3_event_data_device_status_change *event_data =
		    (struct mpi3_event_data_device_status_change *)event_reply->event_data;
		ioc_info(mrioc, "Device status Change: dev=0x%04x RC=0x%x\n",
		    event_data->dev_handle, event_data->reason_code);
		return;
	}
	case MPI3_EVENT_SAS_DISCOVERY:
	{
		struct mpi3_event_data_sas_discovery *event_data =
		    (struct mpi3_event_data_sas_discovery *)event_reply->event_data;
		ioc_info(mrioc, "SAS Discovery: (%s) status (0x%08x)\n",
		    (event_data->reason_code == MPI3_EVENT_SAS_DISC_RC_STARTED) ?
		    "start" : "stop",
		    le32_to_cpu(event_data->discovery_status));
		return;
	}
	case MPI3_EVENT_SAS_BROADCAST_PRIMITIVE:
		desc = "SAS Broadcast Primitive";
		break;
	case MPI3_EVENT_SAS_NOTIFY_PRIMITIVE:
		desc = "SAS Notify Primitive";
		break;
	case MPI3_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE:
		desc = "SAS Init Device Status Change";
		break;
	case MPI3_EVENT_SAS_INIT_TABLE_OVERFLOW:
		desc = "SAS Init Table Overflow";
		break;
	case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
		desc = "SAS Topology Change List";
		break;
	case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE:
		desc = "Enclosure Device Status Change";
		break;
	case MPI3_EVENT_ENCL_DEVICE_ADDED:
		desc = "Enclosure Added";
		break;
	case MPI3_EVENT_HARD_RESET_RECEIVED:
		desc = "Hard Reset Received";
		break;
	case MPI3_EVENT_SAS_PHY_COUNTER:
		desc = "SAS PHY Counter";
		break;
	case MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
		desc = "SAS Device Discovery Error";
		break;
	case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
		desc = "PCIE Topology Change List";
		break;
	case MPI3_EVENT_PCIE_ENUMERATION:
	{
		struct mpi3_event_data_pcie_enumeration *event_data =
		    (struct mpi3_event_data_pcie_enumeration *)event_reply->event_data;
		ioc_info(mrioc, "PCIE Enumeration: (%s)",
		    (event_data->reason_code ==
		    MPI3_EVENT_PCIE_ENUM_RC_STARTED) ? "start" : "stop");
		if (event_data->enumeration_status)
			ioc_info(mrioc, "enumeration_status(0x%08x)\n",
			    le32_to_cpu(event_data->enumeration_status));
		return;
	}
	case MPI3_EVENT_PREPARE_FOR_RESET:
		desc = "Prepare For Reset";
		break;
	case MPI3_EVENT_DIAGNOSTIC_BUFFER_STATUS_CHANGE:
		desc = "Diagnostic Buffer Status Change";
		break;
	}

	/* Events with no mapping above are not logged. */
	if (!desc)
		return;

	ioc_info(mrioc, "%s\n", desc);
}
287 
288 static void mpi3mr_handle_events(struct mpi3mr_ioc *mrioc,
289 	struct mpi3_default_reply *def_reply)
290 {
291 	struct mpi3_event_notification_reply *event_reply =
292 	    (struct mpi3_event_notification_reply *)def_reply;
293 
294 	mrioc->change_count = le16_to_cpu(event_reply->ioc_change_count);
295 	mpi3mr_print_event_data(mrioc, event_reply);
296 	mpi3mr_os_handle_events(mrioc, event_reply);
297 }
298 
299 static struct mpi3mr_drv_cmd *
300 mpi3mr_get_drv_cmd(struct mpi3mr_ioc *mrioc, u16 host_tag,
301 	struct mpi3_default_reply *def_reply)
302 {
303 	u16 idx;
304 
305 	switch (host_tag) {
306 	case MPI3MR_HOSTTAG_INITCMDS:
307 		return &mrioc->init_cmds;
308 	case MPI3MR_HOSTTAG_CFG_CMDS:
309 		return &mrioc->cfg_cmds;
310 	case MPI3MR_HOSTTAG_BSG_CMDS:
311 		return &mrioc->bsg_cmds;
312 	case MPI3MR_HOSTTAG_BLK_TMS:
313 		return &mrioc->host_tm_cmds;
314 	case MPI3MR_HOSTTAG_PEL_ABORT:
315 		return &mrioc->pel_abort_cmd;
316 	case MPI3MR_HOSTTAG_PEL_WAIT:
317 		return &mrioc->pel_cmds;
318 	case MPI3MR_HOSTTAG_TRANSPORT_CMDS:
319 		return &mrioc->transport_cmds;
320 	case MPI3MR_HOSTTAG_INVALID:
321 		if (def_reply && def_reply->function ==
322 		    MPI3_FUNCTION_EVENT_NOTIFICATION)
323 			mpi3mr_handle_events(mrioc, def_reply);
324 		return NULL;
325 	default:
326 		break;
327 	}
328 	if (host_tag >= MPI3MR_HOSTTAG_DEVRMCMD_MIN &&
329 	    host_tag <= MPI3MR_HOSTTAG_DEVRMCMD_MAX) {
330 		idx = host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
331 		return &mrioc->dev_rmhs_cmds[idx];
332 	}
333 
334 	if (host_tag >= MPI3MR_HOSTTAG_EVTACKCMD_MIN &&
335 	    host_tag <= MPI3MR_HOSTTAG_EVTACKCMD_MAX) {
336 		idx = host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN;
337 		return &mrioc->evtack_cmds[idx];
338 	}
339 
340 	return NULL;
341 }
342 
/**
 * mpi3mr_process_admin_reply_desc - process one admin reply descriptor
 * @mrioc: Adapter instance reference
 * @reply_desc: Admin reply descriptor to decode
 * @reply_dma: Output; set to the reply frame's DMA address for an
 *             address-reply descriptor, otherwise left as 0 so the
 *             caller knows there is no frame to repost
 *
 * Decodes the status, address-reply and success descriptor forms to
 * recover the host tag, IOC status/log info and (for SCSI IO replies)
 * sense data. The internal driver command tracker matching the host
 * tag, if any and still pending, is marked complete and its waiter
 * completed or callback invoked. A consumed sense buffer is reposted
 * to the firmware before returning.
 *
 * Return: Nothing.
 */
static void mpi3mr_process_admin_reply_desc(struct mpi3mr_ioc *mrioc,
	struct mpi3_default_reply_descriptor *reply_desc, u64 *reply_dma)
{
	u16 reply_desc_type, host_tag = 0;
	u16 ioc_status = MPI3_IOCSTATUS_SUCCESS;
	u32 ioc_loginfo = 0, sense_count = 0;
	struct mpi3_status_reply_descriptor *status_desc;
	struct mpi3_address_reply_descriptor *addr_desc;
	struct mpi3_success_reply_descriptor *success_desc;
	struct mpi3_default_reply *def_reply = NULL;
	struct mpi3mr_drv_cmd *cmdptr = NULL;
	struct mpi3_scsi_io_reply *scsi_reply;
	struct scsi_sense_hdr sshdr;
	u8 *sense_buf = NULL;

	*reply_dma = 0;
	reply_desc_type = le16_to_cpu(reply_desc->reply_flags) &
	    MPI3_REPLY_DESCRIPT_FLAGS_TYPE_MASK;
	switch (reply_desc_type) {
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_STATUS:
		status_desc = (struct mpi3_status_reply_descriptor *)reply_desc;
		host_tag = le16_to_cpu(status_desc->host_tag);
		ioc_status = le16_to_cpu(status_desc->ioc_status);
		/* Log info is only valid when the LOGINFOAVAIL bit is set. */
		if (ioc_status &
		    MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
			ioc_loginfo = le32_to_cpu(status_desc->ioc_log_info);
		ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
		mpi3mr_reply_trigger(mrioc, ioc_status, ioc_loginfo);
		break;
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_ADDRESS_REPLY:
		addr_desc = (struct mpi3_address_reply_descriptor *)reply_desc;
		*reply_dma = le64_to_cpu(addr_desc->reply_frame_address);
		def_reply = mpi3mr_get_reply_virt_addr(mrioc, *reply_dma);
		if (!def_reply)
			goto out;
		host_tag = le16_to_cpu(def_reply->host_tag);
		ioc_status = le16_to_cpu(def_reply->ioc_status);
		if (ioc_status &
		    MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
			ioc_loginfo = le32_to_cpu(def_reply->ioc_log_info);
		ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
		/* SCSI IO replies may also carry sense data to capture. */
		if (def_reply->function == MPI3_FUNCTION_SCSI_IO) {
			scsi_reply = (struct mpi3_scsi_io_reply *)def_reply;
			sense_buf = mpi3mr_get_sensebuf_virt_addr(mrioc,
			    le64_to_cpu(scsi_reply->sense_data_buffer_address));
			sense_count = le32_to_cpu(scsi_reply->sense_count);
			if (sense_buf) {
				scsi_normalize_sense(sense_buf, sense_count,
				    &sshdr);
				mpi3mr_scsisense_trigger(mrioc, sshdr.sense_key,
				    sshdr.asc, sshdr.ascq);
			}
		}
		mpi3mr_reply_trigger(mrioc, ioc_status, ioc_loginfo);
		break;
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SUCCESS:
		success_desc = (struct mpi3_success_reply_descriptor *)reply_desc;
		host_tag = le16_to_cpu(success_desc->host_tag);
		break;
	default:
		break;
	}

	cmdptr = mpi3mr_get_drv_cmd(mrioc, host_tag, def_reply);
	if (cmdptr) {
		/* Complete the tracker only if the command is still pending. */
		if (cmdptr->state & MPI3MR_CMD_PENDING) {
			cmdptr->state |= MPI3MR_CMD_COMPLETE;
			cmdptr->ioc_loginfo = ioc_loginfo;
			cmdptr->ioc_status = ioc_status;
			cmdptr->state &= ~MPI3MR_CMD_PENDING;
			if (def_reply) {
				cmdptr->state |= MPI3MR_CMD_REPLY_VALID;
				memcpy((u8 *)cmdptr->reply, (u8 *)def_reply,
				    mrioc->reply_sz);
			}
			if (sense_buf && cmdptr->sensebuf) {
				cmdptr->is_sense = 1;
				memcpy(cmdptr->sensebuf, sense_buf,
				       MPI3MR_SENSE_BUF_SZ);
			}
			if (cmdptr->is_waiting) {
				complete(&cmdptr->done);
				cmdptr->is_waiting = 0;
			} else if (cmdptr->callback)
				cmdptr->callback(mrioc, cmdptr);
		}
	}
out:
	/* sense_buf non-NULL implies scsi_reply was assigned above. */
	if (sense_buf)
		mpi3mr_repost_sense_buf(mrioc,
		    le64_to_cpu(scsi_reply->sense_data_buffer_address));
}
435 
/**
 * mpi3mr_process_admin_reply_q - Admin reply queue handler
 * @mrioc: Adapter instance reference
 *
 * Drains the admin reply queue: walks reply descriptors while the
 * phase bit matches the expected phase, processes each descriptor,
 * reposts any address-reply frame and finally publishes the new
 * consumer index to the controller.
 *
 * Return: Number of admin reply descriptors processed; 0 when the
 * queue is empty or already being drained by another context.
 */
int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc)
{
	u32 exp_phase = mrioc->admin_reply_ephase;
	u32 admin_reply_ci = mrioc->admin_reply_ci;
	u32 num_admin_replies = 0;
	u64 reply_dma = 0;
	struct mpi3_default_reply_descriptor *reply_desc;

	/* Allow only one context to drain the queue at a time. */
	if (!atomic_add_unless(&mrioc->admin_reply_q_in_use, 1, 1))
		return 0;

	reply_desc = (struct mpi3_default_reply_descriptor *)mrioc->admin_reply_base +
	    admin_reply_ci;

	/* A phase-bit mismatch means no new descriptor has been posted. */
	if ((le16_to_cpu(reply_desc->reply_flags) &
	    MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase) {
		atomic_dec(&mrioc->admin_reply_q_in_use);
		return 0;
	}

	do {
		if (mrioc->unrecoverable)
			break;

		mrioc->admin_req_ci = le16_to_cpu(reply_desc->request_queue_ci);
		mpi3mr_process_admin_reply_desc(mrioc, reply_desc, &reply_dma);
		if (reply_dma)
			mpi3mr_repost_reply_buf(mrioc, reply_dma);
		num_admin_replies++;
		/* The expected phase flips on every queue wrap-around. */
		if (++admin_reply_ci == mrioc->num_admin_replies) {
			admin_reply_ci = 0;
			exp_phase ^= 1;
		}
		reply_desc =
		    (struct mpi3_default_reply_descriptor *)mrioc->admin_reply_base +
		    admin_reply_ci;
		if ((le16_to_cpu(reply_desc->reply_flags) &
		    MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
			break;
	} while (1);

	writel(admin_reply_ci, &mrioc->sysif_regs->admin_reply_queue_ci);
	mrioc->admin_reply_ci = admin_reply_ci;
	mrioc->admin_reply_ephase = exp_phase;
	atomic_dec(&mrioc->admin_reply_q_in_use);

	return num_admin_replies;
}
484 
485 /**
486  * mpi3mr_get_reply_desc - get reply descriptor frame corresponding to
487  *	queue's consumer index from operational reply descriptor queue.
488  * @op_reply_q: op_reply_qinfo object
489  * @reply_ci: operational reply descriptor's queue consumer index
490  *
491  * Returns: reply descriptor frame address
492  */
493 static inline struct mpi3_default_reply_descriptor *
494 mpi3mr_get_reply_desc(struct op_reply_qinfo *op_reply_q, u32 reply_ci)
495 {
496 	void *segment_base_addr;
497 	struct segments *segments = op_reply_q->q_segments;
498 	struct mpi3_default_reply_descriptor *reply_desc = NULL;
499 
500 	segment_base_addr =
501 	    segments[reply_ci / op_reply_q->segment_qd].segment;
502 	reply_desc = (struct mpi3_default_reply_descriptor *)segment_base_addr +
503 	    (reply_ci % op_reply_q->segment_qd);
504 	return reply_desc;
505 }
506 
/**
 * mpi3mr_process_op_reply_q - Operational reply queue handler
 * @mrioc: Adapter instance reference
 * @op_reply_q: Operational reply queue info
 *
 * Checks the specific operational reply queue and drains the
 * reply queue entries until the queue is empty and process the
 * individual reply descriptors.
 *
 * Return: 0 if queue is already processed,or number of reply
 *	    descriptors processed.
 */
int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc,
	struct op_reply_qinfo *op_reply_q)
{
	struct op_req_qinfo *op_req_q;
	u32 exp_phase;
	u32 reply_ci;
	u32 num_op_reply = 0;
	u64 reply_dma = 0;
	struct mpi3_default_reply_descriptor *reply_desc;
	u16 req_q_idx = 0, reply_qidx;

	/* Queue IDs are 1-based; register/array indexing is 0-based. */
	reply_qidx = op_reply_q->qid - 1;

	/* Allow only one context to drain this queue at a time. */
	if (!atomic_add_unless(&op_reply_q->in_use, 1, 1))
		return 0;

	exp_phase = op_reply_q->ephase;
	reply_ci = op_reply_q->ci;

	/* A phase-bit mismatch means no new descriptor has been posted. */
	reply_desc = mpi3mr_get_reply_desc(op_reply_q, reply_ci);
	if ((le16_to_cpu(reply_desc->reply_flags) &
	    MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase) {
		atomic_dec(&op_reply_q->in_use);
		return 0;
	}

	do {
		if (mrioc->unrecoverable)
			break;

		req_q_idx = le16_to_cpu(reply_desc->request_queue_id) - 1;
		op_req_q = &mrioc->req_qinfo[req_q_idx];

		/* Publish the request-queue CI reported by the firmware. */
		WRITE_ONCE(op_req_q->ci, le16_to_cpu(reply_desc->request_queue_ci));
		mpi3mr_process_op_reply_desc(mrioc, reply_desc, &reply_dma,
		    reply_qidx);
		atomic_dec(&op_reply_q->pend_ios);
		if (reply_dma)
			mpi3mr_repost_reply_buf(mrioc, reply_dma);
		num_op_reply++;

		/* The expected phase flips on every queue wrap-around. */
		if (++reply_ci == op_reply_q->num_replies) {
			reply_ci = 0;
			exp_phase ^= 1;
		}

		reply_desc = mpi3mr_get_reply_desc(op_reply_q, reply_ci);

		if ((le16_to_cpu(reply_desc->reply_flags) &
		    MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
			break;
#ifndef CONFIG_PREEMPT_RT
		/*
		 * Exit completion loop to avoid CPU lockup
		 * Ensure remaining completion happens from threaded ISR.
		 */
		if (num_op_reply > mrioc->max_host_ios) {
			op_reply_q->enable_irq_poll = true;
			break;
		}
#endif
	} while (1);

	writel(reply_ci,
	    &mrioc->sysif_regs->oper_queue_indexes[reply_qidx].consumer_index);
	op_reply_q->ci = reply_ci;
	op_reply_q->ephase = exp_phase;

	atomic_dec(&op_reply_q->in_use);
	return num_op_reply;
}
590 
591 /**
592  * mpi3mr_blk_mq_poll - Operational reply queue handler
593  * @shost: SCSI Host reference
594  * @queue_num: Request queue number (w.r.t OS it is hardware context number)
595  *
596  * Checks the specific operational reply queue and drains the
597  * reply queue entries until the queue is empty and process the
598  * individual reply descriptors.
599  *
600  * Return: 0 if queue is already processed,or number of reply
601  *	    descriptors processed.
602  */
603 int mpi3mr_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
604 {
605 	int num_entries = 0;
606 	struct mpi3mr_ioc *mrioc;
607 
608 	mrioc = (struct mpi3mr_ioc *)shost->hostdata;
609 
610 	if ((mrioc->reset_in_progress || mrioc->prepare_for_reset ||
611 	    mrioc->unrecoverable || mrioc->pci_err_recovery))
612 		return 0;
613 
614 	num_entries = mpi3mr_process_op_reply_q(mrioc,
615 			&mrioc->op_reply_qinfo[queue_num]);
616 
617 	return num_entries;
618 }
619 
620 static irqreturn_t mpi3mr_isr_primary(int irq, void *privdata)
621 {
622 	struct mpi3mr_intr_info *intr_info = privdata;
623 	struct mpi3mr_ioc *mrioc;
624 	u16 midx;
625 	u32 num_admin_replies = 0, num_op_reply = 0;
626 
627 	if (!intr_info)
628 		return IRQ_NONE;
629 
630 	mrioc = intr_info->mrioc;
631 
632 	if (!mrioc->intr_enabled)
633 		return IRQ_NONE;
634 
635 	midx = intr_info->msix_index;
636 
637 	if (!midx)
638 		num_admin_replies = mpi3mr_process_admin_reply_q(mrioc);
639 	if (intr_info->op_reply_q)
640 		num_op_reply = mpi3mr_process_op_reply_q(mrioc,
641 		    intr_info->op_reply_q);
642 
643 	if (num_admin_replies || num_op_reply)
644 		return IRQ_HANDLED;
645 	else
646 		return IRQ_NONE;
647 }
648 
649 #ifndef CONFIG_PREEMPT_RT
650 
651 static irqreturn_t mpi3mr_isr(int irq, void *privdata)
652 {
653 	struct mpi3mr_intr_info *intr_info = privdata;
654 	int ret;
655 
656 	if (!intr_info)
657 		return IRQ_NONE;
658 
659 	/* Call primary ISR routine */
660 	ret = mpi3mr_isr_primary(irq, privdata);
661 
662 	/*
663 	 * If more IOs are expected, schedule IRQ polling thread.
664 	 * Otherwise exit from ISR.
665 	 */
666 	if (!intr_info->op_reply_q)
667 		return ret;
668 
669 	if (!intr_info->op_reply_q->enable_irq_poll ||
670 	    !atomic_read(&intr_info->op_reply_q->pend_ios))
671 		return ret;
672 
673 	disable_irq_nosync(intr_info->os_irq);
674 
675 	return IRQ_WAKE_THREAD;
676 }
677 
/**
 * mpi3mr_isr_poll - Reply queue polling routine
 * @irq: IRQ
 * @privdata: Interrupt info
 *
 * poll for pending I/O completions in a loop until pending I/Os
 * present or controller queue depth I/Os are processed.
 *
 * Return: IRQ_NONE or IRQ_HANDLED
 */
static irqreturn_t mpi3mr_isr_poll(int irq, void *privdata)
{
	struct mpi3mr_intr_info *intr_info = privdata;
	struct mpi3mr_ioc *mrioc;
	u16 midx;
	u32 num_op_reply = 0;

	if (!intr_info || !intr_info->op_reply_q)
		return IRQ_NONE;

	mrioc = intr_info->mrioc;
	midx = intr_info->msix_index;

	/* Poll for pending IOs completions */
	do {
		if (!mrioc->intr_enabled || mrioc->unrecoverable)
			break;

		/* Only MSI-X vector 0 services the admin reply queue. */
		if (!midx)
			mpi3mr_process_admin_reply_q(mrioc);
		if (intr_info->op_reply_q)
			num_op_reply +=
			    mpi3mr_process_op_reply_q(mrioc,
				intr_info->op_reply_q);

		/* Short sleep between polls to avoid monopolizing the CPU. */
		usleep_range(MPI3MR_IRQ_POLL_SLEEP, 10 * MPI3MR_IRQ_POLL_SLEEP);

	} while (atomic_read(&intr_info->op_reply_q->pend_ios) &&
	    (num_op_reply < mrioc->max_host_ios));

	/* Polling done: disarm poll mode and unmask this vector again. */
	intr_info->op_reply_q->enable_irq_poll = false;
	enable_irq(intr_info->os_irq);

	return IRQ_HANDLED;
}
723 
724 #endif
725 
726 /**
727  * mpi3mr_request_irq - Request IRQ and register ISR
728  * @mrioc: Adapter instance reference
729  * @index: IRQ vector index
730  *
731  * Request threaded ISR with primary ISR and secondary
732  *
733  * Return: 0 on success and non zero on failures.
734  */
735 static inline int mpi3mr_request_irq(struct mpi3mr_ioc *mrioc, u16 index)
736 {
737 	struct pci_dev *pdev = mrioc->pdev;
738 	struct mpi3mr_intr_info *intr_info = mrioc->intr_info + index;
739 	int retval = 0;
740 
741 	intr_info->mrioc = mrioc;
742 	intr_info->msix_index = index;
743 	intr_info->op_reply_q = NULL;
744 
745 	snprintf(intr_info->name, MPI3MR_NAME_LENGTH, "%s%d-msix%d",
746 	    mrioc->driver_name, mrioc->id, index);
747 
748 #ifndef CONFIG_PREEMPT_RT
749 	retval = request_threaded_irq(pci_irq_vector(pdev, index), mpi3mr_isr,
750 	    mpi3mr_isr_poll, IRQF_SHARED, intr_info->name, intr_info);
751 #else
752 	retval = request_threaded_irq(pci_irq_vector(pdev, index), mpi3mr_isr_primary,
753 	    NULL, IRQF_SHARED, intr_info->name, intr_info);
754 #endif
755 	if (retval) {
756 		ioc_err(mrioc, "%s: Unable to allocate interrupt %d!\n",
757 		    intr_info->name, pci_irq_vector(pdev, index));
758 		return retval;
759 	}
760 
761 	intr_info->os_irq = pci_irq_vector(pdev, index);
762 	return retval;
763 }
764 
765 static void mpi3mr_calc_poll_queues(struct mpi3mr_ioc *mrioc, u16 max_vectors)
766 {
767 	if (!mrioc->requested_poll_qcount)
768 		return;
769 
770 	/* Reserved for Admin and Default Queue */
771 	if (max_vectors > 2 &&
772 		(mrioc->requested_poll_qcount < max_vectors - 2)) {
773 		ioc_info(mrioc,
774 		    "enabled polled queues (%d) msix (%d)\n",
775 		    mrioc->requested_poll_qcount, max_vectors);
776 	} else {
777 		ioc_info(mrioc,
778 		    "disabled polled queues (%d) msix (%d) because of no resources for default queue\n",
779 		    mrioc->requested_poll_qcount, max_vectors);
780 		mrioc->requested_poll_qcount = 0;
781 	}
782 }
783 
784 /**
785  * mpi3mr_setup_isr - Setup ISR for the controller
786  * @mrioc: Adapter instance reference
787  * @setup_one: Request one IRQ or more
788  *
789  * Allocate IRQ vectors and call mpi3mr_request_irq to setup ISR
790  *
791  * Return: 0 on success and non zero on failures.
792  */
793 static int mpi3mr_setup_isr(struct mpi3mr_ioc *mrioc, u8 setup_one)
794 {
795 	unsigned int irq_flags = PCI_IRQ_MSIX;
796 	int max_vectors, min_vec;
797 	int retval;
798 	int i;
799 	struct irq_affinity desc = { .pre_vectors =  1, .post_vectors = 1 };
800 
801 	if (mrioc->is_intr_info_set)
802 		return 0;
803 
804 	mpi3mr_cleanup_isr(mrioc);
805 
806 	if (setup_one || reset_devices) {
807 		max_vectors = 1;
808 		retval = pci_alloc_irq_vectors(mrioc->pdev,
809 		    1, max_vectors, irq_flags);
810 		if (retval < 0) {
811 			ioc_err(mrioc, "cannot allocate irq vectors, ret %d\n",
812 			    retval);
813 			goto out_failed;
814 		}
815 	} else {
816 		max_vectors =
817 		    min_t(int, mrioc->cpu_count + 1 +
818 			mrioc->requested_poll_qcount, mrioc->msix_count);
819 
820 		mpi3mr_calc_poll_queues(mrioc, max_vectors);
821 
822 		ioc_info(mrioc,
823 		    "MSI-X vectors supported: %d, no of cores: %d,",
824 		    mrioc->msix_count, mrioc->cpu_count);
825 		ioc_info(mrioc,
826 		    "MSI-x vectors requested: %d poll_queues %d\n",
827 		    max_vectors, mrioc->requested_poll_qcount);
828 
829 		desc.post_vectors = mrioc->requested_poll_qcount;
830 		min_vec = desc.pre_vectors + desc.post_vectors;
831 		irq_flags |= PCI_IRQ_AFFINITY | PCI_IRQ_ALL_TYPES;
832 
833 		retval = pci_alloc_irq_vectors_affinity(mrioc->pdev,
834 			min_vec, max_vectors, irq_flags, &desc);
835 
836 		if (retval < 0) {
837 			ioc_err(mrioc, "cannot allocate irq vectors, ret %d\n",
838 			    retval);
839 			goto out_failed;
840 		}
841 
842 
843 		/*
844 		 * If only one MSI-x is allocated, then MSI-x 0 will be shared
845 		 * between Admin queue and operational queue
846 		 */
847 		if (retval == min_vec)
848 			mrioc->op_reply_q_offset = 0;
849 		else if (retval != (max_vectors)) {
850 			ioc_info(mrioc,
851 			    "allocated vectors (%d) are less than configured (%d)\n",
852 			    retval, max_vectors);
853 		}
854 
855 		max_vectors = retval;
856 		mrioc->op_reply_q_offset = (max_vectors > 1) ? 1 : 0;
857 
858 		mpi3mr_calc_poll_queues(mrioc, max_vectors);
859 
860 	}
861 
862 	mrioc->intr_info = kzalloc(sizeof(struct mpi3mr_intr_info) * max_vectors,
863 	    GFP_KERNEL);
864 	if (!mrioc->intr_info) {
865 		retval = -ENOMEM;
866 		pci_free_irq_vectors(mrioc->pdev);
867 		goto out_failed;
868 	}
869 	for (i = 0; i < max_vectors; i++) {
870 		retval = mpi3mr_request_irq(mrioc, i);
871 		if (retval) {
872 			mrioc->intr_info_count = i;
873 			goto out_failed;
874 		}
875 	}
876 	if (reset_devices || !setup_one)
877 		mrioc->is_intr_info_set = true;
878 	mrioc->intr_info_count = max_vectors;
879 	mpi3mr_ioc_enable_intr(mrioc);
880 	return 0;
881 
882 out_failed:
883 	mpi3mr_cleanup_isr(mrioc);
884 
885 	return retval;
886 }
887 
/* IOC state to name mapper structure */
static const struct {
	enum mpi3mr_iocstate value;
	char *name;
} mrioc_states[] = {
	{ MRIOC_STATE_READY, "ready" },
	{ MRIOC_STATE_FAULT, "fault" },
	{ MRIOC_STATE_RESET, "reset" },
	{ MRIOC_STATE_BECOMING_READY, "becoming ready" },
	{ MRIOC_STATE_RESET_REQUESTED, "reset requested" },
	{ MRIOC_STATE_UNRECOVERABLE, "unrecoverable error" },
};
899 
900 static const char *mpi3mr_iocstate_name(enum mpi3mr_iocstate mrioc_state)
901 {
902 	int i;
903 	char *name = NULL;
904 
905 	for (i = 0; i < ARRAY_SIZE(mrioc_states); i++) {
906 		if (mrioc_states[i].value == mrioc_state) {
907 			name = mrioc_states[i].name;
908 			break;
909 		}
910 	}
911 	return name;
912 }
913 
914 /* Reset reason to name mapper structure*/
915 static const struct {
916 	enum mpi3mr_reset_reason value;
917 	char *name;
918 } mpi3mr_reset_reason_codes[] = {
919 	{ MPI3MR_RESET_FROM_BRINGUP, "timeout in bringup" },
920 	{ MPI3MR_RESET_FROM_FAULT_WATCH, "fault" },
921 	{ MPI3MR_RESET_FROM_APP, "application invocation" },
922 	{ MPI3MR_RESET_FROM_EH_HOS, "error handling" },
923 	{ MPI3MR_RESET_FROM_TM_TIMEOUT, "TM timeout" },
924 	{ MPI3MR_RESET_FROM_APP_TIMEOUT, "application command timeout" },
925 	{ MPI3MR_RESET_FROM_MUR_FAILURE, "MUR failure" },
926 	{ MPI3MR_RESET_FROM_CTLR_CLEANUP, "timeout in controller cleanup" },
927 	{ MPI3MR_RESET_FROM_CIACTIV_FAULT, "component image activation fault" },
928 	{ MPI3MR_RESET_FROM_PE_TIMEOUT, "port enable timeout" },
929 	{ MPI3MR_RESET_FROM_TSU_TIMEOUT, "time stamp update timeout" },
930 	{ MPI3MR_RESET_FROM_DELREQQ_TIMEOUT, "delete request queue timeout" },
931 	{ MPI3MR_RESET_FROM_DELREPQ_TIMEOUT, "delete reply queue timeout" },
932 	{
933 		MPI3MR_RESET_FROM_CREATEREPQ_TIMEOUT,
934 		"create request queue timeout"
935 	},
936 	{
937 		MPI3MR_RESET_FROM_CREATEREQQ_TIMEOUT,
938 		"create reply queue timeout"
939 	},
940 	{ MPI3MR_RESET_FROM_IOCFACTS_TIMEOUT, "IOC facts timeout" },
941 	{ MPI3MR_RESET_FROM_IOCINIT_TIMEOUT, "IOC init timeout" },
942 	{ MPI3MR_RESET_FROM_EVTNOTIFY_TIMEOUT, "event notify timeout" },
943 	{ MPI3MR_RESET_FROM_EVTACK_TIMEOUT, "event acknowledgment timeout" },
944 	{
945 		MPI3MR_RESET_FROM_CIACTVRST_TIMER,
946 		"component image activation timeout"
947 	},
948 	{
949 		MPI3MR_RESET_FROM_GETPKGVER_TIMEOUT,
950 		"get package version timeout"
951 	},
952 	{ MPI3MR_RESET_FROM_SYSFS, "sysfs invocation" },
953 	{ MPI3MR_RESET_FROM_SYSFS_TIMEOUT, "sysfs TM timeout" },
954 	{
955 		MPI3MR_RESET_FROM_DIAG_BUFFER_POST_TIMEOUT,
956 		"diagnostic buffer post timeout"
957 	},
958 	{
959 		MPI3MR_RESET_FROM_DIAG_BUFFER_RELEASE_TIMEOUT,
960 		"diagnostic buffer release timeout"
961 	},
962 	{ MPI3MR_RESET_FROM_FIRMWARE, "firmware asynchronous reset" },
963 	{ MPI3MR_RESET_FROM_CFG_REQ_TIMEOUT, "configuration request timeout"},
964 	{ MPI3MR_RESET_FROM_SAS_TRANSPORT_TIMEOUT, "timeout of a SAS transport layer request" },
965 };
966 
967 /**
968  * mpi3mr_reset_rc_name - get reset reason code name
969  * @reason_code: reset reason code value
970  *
971  * Map reset reason to an NULL terminated ASCII string
972  *
973  * Return: name corresponding to reset reason value or NULL.
974  */
975 static const char *mpi3mr_reset_rc_name(enum mpi3mr_reset_reason reason_code)
976 {
977 	int i;
978 	char *name = NULL;
979 
980 	for (i = 0; i < ARRAY_SIZE(mpi3mr_reset_reason_codes); i++) {
981 		if (mpi3mr_reset_reason_codes[i].value == reason_code) {
982 			name = mpi3mr_reset_reason_codes[i].name;
983 			break;
984 		}
985 	}
986 	return name;
987 }
988 
/* Reset type to name mapper structure */
static const struct {
	u16 reset_type;
	char *name;
} mpi3mr_reset_types[] = {
	{ MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET, "soft" },
	{ MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, "diag fault" },
};
997 
998 /**
999  * mpi3mr_reset_type_name - get reset type name
1000  * @reset_type: reset type value
1001  *
1002  * Map reset type to an NULL terminated ASCII string
1003  *
1004  * Return: name corresponding to reset type value or NULL.
1005  */
1006 static const char *mpi3mr_reset_type_name(u16 reset_type)
1007 {
1008 	int i;
1009 	char *name = NULL;
1010 
1011 	for (i = 0; i < ARRAY_SIZE(mpi3mr_reset_types); i++) {
1012 		if (mpi3mr_reset_types[i].reset_type == reset_type) {
1013 			name = mpi3mr_reset_types[i].name;
1014 			break;
1015 		}
1016 	}
1017 	return name;
1018 }
1019 
1020 /**
1021  * mpi3mr_print_fault_info - Display fault information
1022  * @mrioc: Adapter instance reference
1023  *
1024  * Display the controller fault information if there is a
1025  * controller fault.
1026  *
1027  * Return: Nothing.
1028  */
1029 void mpi3mr_print_fault_info(struct mpi3mr_ioc *mrioc)
1030 {
1031 	u32 ioc_status, code, code1, code2, code3;
1032 
1033 	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1034 
1035 	if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) {
1036 		code = readl(&mrioc->sysif_regs->fault);
1037 		code1 = readl(&mrioc->sysif_regs->fault_info[0]);
1038 		code2 = readl(&mrioc->sysif_regs->fault_info[1]);
1039 		code3 = readl(&mrioc->sysif_regs->fault_info[2]);
1040 
1041 		ioc_info(mrioc,
1042 		    "fault code(0x%08X): Additional code: (0x%08X:0x%08X:0x%08X)\n",
1043 		    code, code1, code2, code3);
1044 	}
1045 }
1046 
1047 /**
1048  * mpi3mr_get_iocstate - Get IOC State
1049  * @mrioc: Adapter instance reference
1050  *
1051  * Return a proper IOC state enum based on the IOC status and
1052  * IOC configuration and unrcoverable state of the controller.
1053  *
1054  * Return: Current IOC state.
1055  */
1056 enum mpi3mr_iocstate mpi3mr_get_iocstate(struct mpi3mr_ioc *mrioc)
1057 {
1058 	u32 ioc_status, ioc_config;
1059 	u8 ready, enabled;
1060 
1061 	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1062 	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
1063 
1064 	if (mrioc->unrecoverable)
1065 		return MRIOC_STATE_UNRECOVERABLE;
1066 	if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)
1067 		return MRIOC_STATE_FAULT;
1068 
1069 	ready = (ioc_status & MPI3_SYSIF_IOC_STATUS_READY);
1070 	enabled = (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC);
1071 
1072 	if (ready && enabled)
1073 		return MRIOC_STATE_READY;
1074 	if ((!ready) && (!enabled))
1075 		return MRIOC_STATE_RESET;
1076 	if ((!ready) && (enabled))
1077 		return MRIOC_STATE_BECOMING_READY;
1078 
1079 	return MRIOC_STATE_RESET_REQUESTED;
1080 }
1081 
1082 /**
1083  * mpi3mr_free_ioctl_dma_memory - free memory for ioctl dma
1084  * @mrioc: Adapter instance reference
1085  *
1086  * Free the DMA memory allocated for IOCTL handling purpose.
1087  *
1088  * Return: None
1089  */
1090 static void mpi3mr_free_ioctl_dma_memory(struct mpi3mr_ioc *mrioc)
1091 {
1092 	struct dma_memory_desc *mem_desc;
1093 	u16 i;
1094 
1095 	if (!mrioc->ioctl_dma_pool)
1096 		return;
1097 
1098 	for (i = 0; i < MPI3MR_NUM_IOCTL_SGE; i++) {
1099 		mem_desc = &mrioc->ioctl_sge[i];
1100 		if (mem_desc->addr) {
1101 			dma_pool_free(mrioc->ioctl_dma_pool,
1102 				      mem_desc->addr,
1103 				      mem_desc->dma_addr);
1104 			mem_desc->addr = NULL;
1105 		}
1106 	}
1107 	dma_pool_destroy(mrioc->ioctl_dma_pool);
1108 	mrioc->ioctl_dma_pool = NULL;
1109 	mem_desc = &mrioc->ioctl_chain_sge;
1110 
1111 	if (mem_desc->addr) {
1112 		dma_free_coherent(&mrioc->pdev->dev, mem_desc->size,
1113 				  mem_desc->addr, mem_desc->dma_addr);
1114 		mem_desc->addr = NULL;
1115 	}
1116 	mem_desc = &mrioc->ioctl_resp_sge;
1117 	if (mem_desc->addr) {
1118 		dma_free_coherent(&mrioc->pdev->dev, mem_desc->size,
1119 				  mem_desc->addr, mem_desc->dma_addr);
1120 		mem_desc->addr = NULL;
1121 	}
1122 
1123 	mrioc->ioctl_sges_allocated = false;
1124 }
1125 
1126 /**
1127  * mpi3mr_alloc_ioctl_dma_memory - Alloc memory for ioctl dma
1128  * @mrioc: Adapter instance reference
1129  *
1130  * This function allocates dmaable memory required to handle the
1131  * application issued MPI3 IOCTL requests.
1132  *
1133  * Return: None
1134  */
1135 static void mpi3mr_alloc_ioctl_dma_memory(struct mpi3mr_ioc *mrioc)
1136 
1137 {
1138 	struct dma_memory_desc *mem_desc;
1139 	u16 i;
1140 
1141 	mrioc->ioctl_dma_pool = dma_pool_create("ioctl dma pool",
1142 						&mrioc->pdev->dev,
1143 						MPI3MR_IOCTL_SGE_SIZE,
1144 						MPI3MR_PAGE_SIZE_4K, 0);
1145 
1146 	if (!mrioc->ioctl_dma_pool) {
1147 		ioc_err(mrioc, "ioctl_dma_pool: dma_pool_create failed\n");
1148 		goto out_failed;
1149 	}
1150 
1151 	for (i = 0; i < MPI3MR_NUM_IOCTL_SGE; i++) {
1152 		mem_desc = &mrioc->ioctl_sge[i];
1153 		mem_desc->size = MPI3MR_IOCTL_SGE_SIZE;
1154 		mem_desc->addr = dma_pool_zalloc(mrioc->ioctl_dma_pool,
1155 						 GFP_KERNEL,
1156 						 &mem_desc->dma_addr);
1157 		if (!mem_desc->addr)
1158 			goto out_failed;
1159 	}
1160 
1161 	mem_desc = &mrioc->ioctl_chain_sge;
1162 	mem_desc->size = MPI3MR_PAGE_SIZE_4K;
1163 	mem_desc->addr = dma_alloc_coherent(&mrioc->pdev->dev,
1164 					    mem_desc->size,
1165 					    &mem_desc->dma_addr,
1166 					    GFP_KERNEL);
1167 	if (!mem_desc->addr)
1168 		goto out_failed;
1169 
1170 	mem_desc = &mrioc->ioctl_resp_sge;
1171 	mem_desc->size = MPI3MR_PAGE_SIZE_4K;
1172 	mem_desc->addr = dma_alloc_coherent(&mrioc->pdev->dev,
1173 					    mem_desc->size,
1174 					    &mem_desc->dma_addr,
1175 					    GFP_KERNEL);
1176 	if (!mem_desc->addr)
1177 		goto out_failed;
1178 
1179 	mrioc->ioctl_sges_allocated = true;
1180 
1181 	return;
1182 out_failed:
1183 	ioc_warn(mrioc, "cannot allocate DMA memory for the mpt commands\n"
1184 		 "from the applications, application interface for MPT command is disabled\n");
1185 	mpi3mr_free_ioctl_dma_memory(mrioc);
1186 }
1187 
1188 /**
1189  * mpi3mr_clear_reset_history - clear reset history
1190  * @mrioc: Adapter instance reference
1191  *
1192  * Write the reset history bit in IOC status to clear the bit,
1193  * if it is already set.
1194  *
1195  * Return: Nothing.
1196  */
1197 static inline void mpi3mr_clear_reset_history(struct mpi3mr_ioc *mrioc)
1198 {
1199 	u32 ioc_status;
1200 
1201 	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1202 	if (ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)
1203 		writel(ioc_status, &mrioc->sysif_regs->ioc_status);
1204 }
1205 
1206 /**
1207  * mpi3mr_issue_and_process_mur - Message unit Reset handler
1208  * @mrioc: Adapter instance reference
1209  * @reset_reason: Reset reason code
1210  *
1211  * Issue Message unit Reset to the controller and wait for it to
1212  * be complete.
1213  *
1214  * Return: 0 on success, -1 on failure.
1215  */
1216 static int mpi3mr_issue_and_process_mur(struct mpi3mr_ioc *mrioc,
1217 	u32 reset_reason)
1218 {
1219 	u32 ioc_config, timeout, ioc_status, scratch_pad0;
1220 	int retval = -1;
1221 
1222 	ioc_info(mrioc, "Issuing Message unit Reset(MUR)\n");
1223 	if (mrioc->unrecoverable) {
1224 		ioc_info(mrioc, "IOC is unrecoverable MUR not issued\n");
1225 		return retval;
1226 	}
1227 	mpi3mr_clear_reset_history(mrioc);
1228 	scratch_pad0 = ((MPI3MR_RESET_REASON_OSTYPE_LINUX <<
1229 			 MPI3MR_RESET_REASON_OSTYPE_SHIFT) |
1230 			(mrioc->facts.ioc_num <<
1231 			 MPI3MR_RESET_REASON_IOCNUM_SHIFT) | reset_reason);
1232 	writel(scratch_pad0, &mrioc->sysif_regs->scratchpad[0]);
1233 	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
1234 	ioc_config &= ~MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC;
1235 	writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);
1236 
1237 	timeout = MPI3MR_MUR_TIMEOUT * 10;
1238 	do {
1239 		ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1240 		if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)) {
1241 			mpi3mr_clear_reset_history(mrioc);
1242 			break;
1243 		}
1244 		if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) {
1245 			mpi3mr_print_fault_info(mrioc);
1246 			break;
1247 		}
1248 		msleep(100);
1249 	} while (--timeout);
1250 
1251 	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
1252 	if (timeout && !((ioc_status & MPI3_SYSIF_IOC_STATUS_READY) ||
1253 	      (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) ||
1254 	      (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC)))
1255 		retval = 0;
1256 
1257 	ioc_info(mrioc, "Base IOC Sts/Config after %s MUR is (0x%x)/(0x%x)\n",
1258 	    (!retval) ? "successful" : "failed", ioc_status, ioc_config);
1259 	return retval;
1260 }
1261 
1262 /**
1263  * mpi3mr_revalidate_factsdata - validate IOCFacts parameters
1264  * during reset/resume
1265  * @mrioc: Adapter instance reference
1266  *
1267  * Return: zero if the new IOCFacts parameters value is compatible with
1268  * older values else return -EPERM
1269  */
1270 static int
1271 mpi3mr_revalidate_factsdata(struct mpi3mr_ioc *mrioc)
1272 {
1273 	unsigned long *removepend_bitmap;
1274 
1275 	if (mrioc->facts.reply_sz > mrioc->reply_sz) {
1276 		ioc_err(mrioc,
1277 		    "cannot increase reply size from %d to %d\n",
1278 		    mrioc->reply_sz, mrioc->facts.reply_sz);
1279 		return -EPERM;
1280 	}
1281 
1282 	if (mrioc->facts.max_op_reply_q < mrioc->num_op_reply_q) {
1283 		ioc_err(mrioc,
1284 		    "cannot reduce number of operational reply queues from %d to %d\n",
1285 		    mrioc->num_op_reply_q,
1286 		    mrioc->facts.max_op_reply_q);
1287 		return -EPERM;
1288 	}
1289 
1290 	if (mrioc->facts.max_op_req_q < mrioc->num_op_req_q) {
1291 		ioc_err(mrioc,
1292 		    "cannot reduce number of operational request queues from %d to %d\n",
1293 		    mrioc->num_op_req_q, mrioc->facts.max_op_req_q);
1294 		return -EPERM;
1295 	}
1296 
1297 	if (mrioc->shost->max_sectors != (mrioc->facts.max_data_length / 512))
1298 		ioc_err(mrioc, "Warning: The maximum data transfer length\n"
1299 			    "\tchanged after reset: previous(%d), new(%d),\n"
1300 			    "the driver cannot change this at run time\n",
1301 			    mrioc->shost->max_sectors * 512, mrioc->facts.max_data_length);
1302 
1303 	if ((mrioc->sas_transport_enabled) && (mrioc->facts.ioc_capabilities &
1304 	    MPI3_IOCFACTS_CAPABILITY_MULTIPATH_SUPPORTED))
1305 		ioc_err(mrioc,
1306 		    "critical error: multipath capability is enabled at the\n"
1307 		    "\tcontroller while sas transport support is enabled at the\n"
1308 		    "\tdriver, please reboot the system or reload the driver\n");
1309 
1310 	if (mrioc->facts.max_devhandle > mrioc->dev_handle_bitmap_bits) {
1311 		removepend_bitmap = bitmap_zalloc(mrioc->facts.max_devhandle,
1312 						  GFP_KERNEL);
1313 		if (!removepend_bitmap) {
1314 			ioc_err(mrioc,
1315 				"failed to increase removepend_bitmap bits from %d to %d\n",
1316 				mrioc->dev_handle_bitmap_bits,
1317 				mrioc->facts.max_devhandle);
1318 			return -EPERM;
1319 		}
1320 		bitmap_free(mrioc->removepend_bitmap);
1321 		mrioc->removepend_bitmap = removepend_bitmap;
1322 		ioc_info(mrioc,
1323 			 "increased bits of dev_handle_bitmap from %d to %d\n",
1324 			 mrioc->dev_handle_bitmap_bits,
1325 			 mrioc->facts.max_devhandle);
1326 		mrioc->dev_handle_bitmap_bits = mrioc->facts.max_devhandle;
1327 	}
1328 
1329 	return 0;
1330 }
1331 
1332 /**
1333  * mpi3mr_bring_ioc_ready - Bring controller to ready state
1334  * @mrioc: Adapter instance reference
1335  *
1336  * Set Enable IOC bit in IOC configuration register and wait for
1337  * the controller to become ready.
1338  *
1339  * Return: 0 on success, appropriate error on failure.
1340  */
1341 static int mpi3mr_bring_ioc_ready(struct mpi3mr_ioc *mrioc)
1342 {
1343 	u32 ioc_config, ioc_status, timeout, host_diagnostic;
1344 	int retval = 0;
1345 	enum mpi3mr_iocstate ioc_state;
1346 	u64 base_info;
1347 
1348 	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1349 	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
1350 	base_info = lo_hi_readq(&mrioc->sysif_regs->ioc_information);
1351 	ioc_info(mrioc, "ioc_status(0x%08x), ioc_config(0x%08x), ioc_info(0x%016llx) at the bringup\n",
1352 	    ioc_status, ioc_config, base_info);
1353 
1354 	/*The timeout value is in 2sec unit, changing it to seconds*/
1355 	mrioc->ready_timeout =
1356 	    ((base_info & MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_MASK) >>
1357 	    MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_SHIFT) * 2;
1358 
1359 	ioc_info(mrioc, "ready timeout: %d seconds\n", mrioc->ready_timeout);
1360 
1361 	ioc_state = mpi3mr_get_iocstate(mrioc);
1362 	ioc_info(mrioc, "controller is in %s state during detection\n",
1363 	    mpi3mr_iocstate_name(ioc_state));
1364 
1365 	if (ioc_state == MRIOC_STATE_BECOMING_READY ||
1366 	    ioc_state == MRIOC_STATE_RESET_REQUESTED) {
1367 		timeout = mrioc->ready_timeout * 10;
1368 		do {
1369 			msleep(100);
1370 		} while (--timeout);
1371 
1372 		if (!pci_device_is_present(mrioc->pdev)) {
1373 			mrioc->unrecoverable = 1;
1374 			ioc_err(mrioc,
1375 			    "controller is not present while waiting to reset\n");
1376 			retval = -1;
1377 			goto out_device_not_present;
1378 		}
1379 
1380 		ioc_state = mpi3mr_get_iocstate(mrioc);
1381 		ioc_info(mrioc,
1382 		    "controller is in %s state after waiting to reset\n",
1383 		    mpi3mr_iocstate_name(ioc_state));
1384 	}
1385 
1386 	if (ioc_state == MRIOC_STATE_READY) {
1387 		ioc_info(mrioc, "issuing message unit reset (MUR) to bring to reset state\n");
1388 		retval = mpi3mr_issue_and_process_mur(mrioc,
1389 		    MPI3MR_RESET_FROM_BRINGUP);
1390 		ioc_state = mpi3mr_get_iocstate(mrioc);
1391 		if (retval)
1392 			ioc_err(mrioc,
1393 			    "message unit reset failed with error %d current state %s\n",
1394 			    retval, mpi3mr_iocstate_name(ioc_state));
1395 	}
1396 	if (ioc_state != MRIOC_STATE_RESET) {
1397 		if (ioc_state == MRIOC_STATE_FAULT) {
1398 			timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10;
1399 			mpi3mr_print_fault_info(mrioc);
1400 			do {
1401 				host_diagnostic =
1402 					readl(&mrioc->sysif_regs->host_diagnostic);
1403 				if (!(host_diagnostic &
1404 				      MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS))
1405 					break;
1406 				if (!pci_device_is_present(mrioc->pdev)) {
1407 					mrioc->unrecoverable = 1;
1408 					ioc_err(mrioc, "controller is not present at the bringup\n");
1409 					goto out_device_not_present;
1410 				}
1411 				msleep(100);
1412 			} while (--timeout);
1413 		}
1414 		mpi3mr_print_fault_info(mrioc);
1415 		ioc_info(mrioc, "issuing soft reset to bring to reset state\n");
1416 		retval = mpi3mr_issue_reset(mrioc,
1417 		    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET,
1418 		    MPI3MR_RESET_FROM_BRINGUP);
1419 		if (retval) {
1420 			ioc_err(mrioc,
1421 			    "soft reset failed with error %d\n", retval);
1422 			goto out_failed;
1423 		}
1424 	}
1425 	ioc_state = mpi3mr_get_iocstate(mrioc);
1426 	if (ioc_state != MRIOC_STATE_RESET) {
1427 		ioc_err(mrioc,
1428 		    "cannot bring controller to reset state, current state: %s\n",
1429 		    mpi3mr_iocstate_name(ioc_state));
1430 		goto out_failed;
1431 	}
1432 	mpi3mr_clear_reset_history(mrioc);
1433 	retval = mpi3mr_setup_admin_qpair(mrioc);
1434 	if (retval) {
1435 		ioc_err(mrioc, "failed to setup admin queues: error %d\n",
1436 		    retval);
1437 		goto out_failed;
1438 	}
1439 
1440 	ioc_info(mrioc, "bringing controller to ready state\n");
1441 	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
1442 	ioc_config |= MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC;
1443 	writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);
1444 
1445 	timeout = mrioc->ready_timeout * 10;
1446 	do {
1447 		ioc_state = mpi3mr_get_iocstate(mrioc);
1448 		if (ioc_state == MRIOC_STATE_READY) {
1449 			ioc_info(mrioc,
1450 			    "successfully transitioned to %s state\n",
1451 			    mpi3mr_iocstate_name(ioc_state));
1452 			return 0;
1453 		}
1454 		if (!pci_device_is_present(mrioc->pdev)) {
1455 			mrioc->unrecoverable = 1;
1456 			ioc_err(mrioc,
1457 			    "controller is not present at the bringup\n");
1458 			retval = -1;
1459 			goto out_device_not_present;
1460 		}
1461 		msleep(100);
1462 	} while (--timeout);
1463 
1464 out_failed:
1465 	ioc_state = mpi3mr_get_iocstate(mrioc);
1466 	ioc_err(mrioc,
1467 	    "failed to bring to ready state,  current state: %s\n",
1468 	    mpi3mr_iocstate_name(ioc_state));
1469 out_device_not_present:
1470 	return retval;
1471 }
1472 
1473 /**
1474  * mpi3mr_soft_reset_success - Check softreset is success or not
1475  * @ioc_status: IOC status register value
1476  * @ioc_config: IOC config register value
1477  *
1478  * Check whether the soft reset is successful or not based on
1479  * IOC status and IOC config register values.
1480  *
1481  * Return: True when the soft reset is success, false otherwise.
1482  */
1483 static inline bool
1484 mpi3mr_soft_reset_success(u32 ioc_status, u32 ioc_config)
1485 {
1486 	if (!((ioc_status & MPI3_SYSIF_IOC_STATUS_READY) ||
1487 	    (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC)))
1488 		return true;
1489 	return false;
1490 }
1491 
1492 /**
1493  * mpi3mr_diagfault_success - Check diag fault is success or not
1494  * @mrioc: Adapter reference
1495  * @ioc_status: IOC status register value
1496  *
1497  * Check whether the controller hit diag reset fault code.
1498  *
1499  * Return: True when there is diag fault, false otherwise.
1500  */
1501 static inline bool mpi3mr_diagfault_success(struct mpi3mr_ioc *mrioc,
1502 	u32 ioc_status)
1503 {
1504 	u32 fault;
1505 
1506 	if (!(ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT))
1507 		return false;
1508 	fault = readl(&mrioc->sysif_regs->fault) & MPI3_SYSIF_FAULT_CODE_MASK;
1509 	if (fault == MPI3_SYSIF_FAULT_CODE_DIAG_FAULT_RESET) {
1510 		mpi3mr_print_fault_info(mrioc);
1511 		return true;
1512 	}
1513 	return false;
1514 }
1515 
1516 /**
1517  * mpi3mr_set_diagsave - Set diag save bit for snapdump
1518  * @mrioc: Adapter reference
1519  *
1520  * Set diag save bit in IOC configuration register to enable
1521  * snapdump.
1522  *
1523  * Return: Nothing.
1524  */
1525 static inline void mpi3mr_set_diagsave(struct mpi3mr_ioc *mrioc)
1526 {
1527 	u32 ioc_config;
1528 
1529 	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
1530 	ioc_config |= MPI3_SYSIF_IOC_CONFIG_DIAG_SAVE;
1531 	writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);
1532 }
1533 
1534 /**
1535  * mpi3mr_issue_reset - Issue reset to the controller
1536  * @mrioc: Adapter reference
1537  * @reset_type: Reset type
1538  * @reset_reason: Reset reason code
1539  *
1540  * Unlock the host diagnostic registers and write the specific
1541  * reset type to that, wait for reset acknowledgment from the
1542  * controller, if the reset is not successful retry for the
1543  * predefined number of times.
1544  *
1545  * Return: 0 on success, non-zero on failure.
1546  */
1547 static int mpi3mr_issue_reset(struct mpi3mr_ioc *mrioc, u16 reset_type,
1548 	u16 reset_reason)
1549 {
1550 	int retval = -1;
1551 	u8 unlock_retry_count = 0;
1552 	u32 host_diagnostic, ioc_status, ioc_config, scratch_pad0;
1553 	u32 timeout = MPI3MR_RESET_ACK_TIMEOUT * 10;
1554 
1555 	if ((reset_type != MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET) &&
1556 	    (reset_type != MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT))
1557 		return retval;
1558 	if (mrioc->unrecoverable)
1559 		return retval;
1560 	if (reset_reason == MPI3MR_RESET_FROM_FIRMWARE) {
1561 		retval = 0;
1562 		return retval;
1563 	}
1564 
1565 	ioc_info(mrioc, "%s reset due to %s(0x%x)\n",
1566 	    mpi3mr_reset_type_name(reset_type),
1567 	    mpi3mr_reset_rc_name(reset_reason), reset_reason);
1568 
1569 	mpi3mr_clear_reset_history(mrioc);
1570 	do {
1571 		ioc_info(mrioc,
1572 		    "Write magic sequence to unlock host diag register (retry=%d)\n",
1573 		    ++unlock_retry_count);
1574 		if (unlock_retry_count >= MPI3MR_HOSTDIAG_UNLOCK_RETRY_COUNT) {
1575 			ioc_err(mrioc,
1576 			    "%s reset failed due to unlock failure, host_diagnostic(0x%08x)\n",
1577 			    mpi3mr_reset_type_name(reset_type),
1578 			    host_diagnostic);
1579 			mrioc->unrecoverable = 1;
1580 			return retval;
1581 		}
1582 
1583 		writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_FLUSH,
1584 		    &mrioc->sysif_regs->write_sequence);
1585 		writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_1ST,
1586 		    &mrioc->sysif_regs->write_sequence);
1587 		writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND,
1588 		    &mrioc->sysif_regs->write_sequence);
1589 		writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_3RD,
1590 		    &mrioc->sysif_regs->write_sequence);
1591 		writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_4TH,
1592 		    &mrioc->sysif_regs->write_sequence);
1593 		writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_5TH,
1594 		    &mrioc->sysif_regs->write_sequence);
1595 		writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_6TH,
1596 		    &mrioc->sysif_regs->write_sequence);
1597 		usleep_range(1000, 1100);
1598 		host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic);
1599 		ioc_info(mrioc,
1600 		    "wrote magic sequence: retry_count(%d), host_diagnostic(0x%08x)\n",
1601 		    unlock_retry_count, host_diagnostic);
1602 	} while (!(host_diagnostic & MPI3_SYSIF_HOST_DIAG_DIAG_WRITE_ENABLE));
1603 
1604 	scratch_pad0 = ((MPI3MR_RESET_REASON_OSTYPE_LINUX <<
1605 	    MPI3MR_RESET_REASON_OSTYPE_SHIFT) | (mrioc->facts.ioc_num <<
1606 	    MPI3MR_RESET_REASON_IOCNUM_SHIFT) | reset_reason);
1607 	writel(reset_reason, &mrioc->sysif_regs->scratchpad[0]);
1608 	writel(host_diagnostic | reset_type,
1609 	    &mrioc->sysif_regs->host_diagnostic);
1610 	switch (reset_type) {
1611 	case MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET:
1612 		do {
1613 			ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1614 			ioc_config =
1615 			    readl(&mrioc->sysif_regs->ioc_configuration);
1616 			if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)
1617 			    && mpi3mr_soft_reset_success(ioc_status, ioc_config)
1618 			    ) {
1619 				mpi3mr_clear_reset_history(mrioc);
1620 				retval = 0;
1621 				break;
1622 			}
1623 			msleep(100);
1624 		} while (--timeout);
1625 		mpi3mr_print_fault_info(mrioc);
1626 		break;
1627 	case MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT:
1628 		do {
1629 			ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1630 			if (mpi3mr_diagfault_success(mrioc, ioc_status)) {
1631 				retval = 0;
1632 				break;
1633 			}
1634 			msleep(100);
1635 		} while (--timeout);
1636 		break;
1637 	default:
1638 		break;
1639 	}
1640 
1641 	writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND,
1642 	    &mrioc->sysif_regs->write_sequence);
1643 
1644 	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
1645 	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1646 	ioc_info(mrioc,
1647 	    "ioc_status/ioc_onfig after %s reset is (0x%x)/(0x%x)\n",
1648 	    (!retval)?"successful":"failed", ioc_status,
1649 	    ioc_config);
1650 	if (retval)
1651 		mrioc->unrecoverable = 1;
1652 	return retval;
1653 }
1654 
1655 /**
1656  * mpi3mr_admin_request_post - Post request to admin queue
1657  * @mrioc: Adapter reference
1658  * @admin_req: MPI3 request
1659  * @admin_req_sz: Request size
1660  * @ignore_reset: Ignore reset in process
1661  *
1662  * Post the MPI3 request into admin request queue and
1663  * inform the controller, if the queue is full return
1664  * appropriate error.
1665  *
1666  * Return: 0 on success, non-zero on failure.
1667  */
1668 int mpi3mr_admin_request_post(struct mpi3mr_ioc *mrioc, void *admin_req,
1669 	u16 admin_req_sz, u8 ignore_reset)
1670 {
1671 	u16 areq_pi = 0, areq_ci = 0, max_entries = 0;
1672 	int retval = 0;
1673 	unsigned long flags;
1674 	u8 *areq_entry;
1675 
1676 	if (mrioc->unrecoverable) {
1677 		ioc_err(mrioc, "%s : Unrecoverable controller\n", __func__);
1678 		return -EFAULT;
1679 	}
1680 
1681 	spin_lock_irqsave(&mrioc->admin_req_lock, flags);
1682 	areq_pi = mrioc->admin_req_pi;
1683 	areq_ci = mrioc->admin_req_ci;
1684 	max_entries = mrioc->num_admin_req;
1685 	if ((areq_ci == (areq_pi + 1)) || ((!areq_ci) &&
1686 	    (areq_pi == (max_entries - 1)))) {
1687 		ioc_err(mrioc, "AdminReqQ full condition detected\n");
1688 		retval = -EAGAIN;
1689 		goto out;
1690 	}
1691 	if (!ignore_reset && mrioc->reset_in_progress) {
1692 		ioc_err(mrioc, "AdminReqQ submit reset in progress\n");
1693 		retval = -EAGAIN;
1694 		goto out;
1695 	}
1696 	if (mrioc->pci_err_recovery) {
1697 		ioc_err(mrioc, "admin request queue submission failed due to pci error recovery in progress\n");
1698 		retval = -EAGAIN;
1699 		goto out;
1700 	}
1701 
1702 	areq_entry = (u8 *)mrioc->admin_req_base +
1703 	    (areq_pi * MPI3MR_ADMIN_REQ_FRAME_SZ);
1704 	memset(areq_entry, 0, MPI3MR_ADMIN_REQ_FRAME_SZ);
1705 	memcpy(areq_entry, (u8 *)admin_req, admin_req_sz);
1706 
1707 	if (++areq_pi == max_entries)
1708 		areq_pi = 0;
1709 	mrioc->admin_req_pi = areq_pi;
1710 
1711 	writel(mrioc->admin_req_pi, &mrioc->sysif_regs->admin_request_queue_pi);
1712 
1713 out:
1714 	spin_unlock_irqrestore(&mrioc->admin_req_lock, flags);
1715 
1716 	return retval;
1717 }
1718 
1719 /**
1720  * mpi3mr_free_op_req_q_segments - free request memory segments
1721  * @mrioc: Adapter instance reference
1722  * @q_idx: operational request queue index
1723  *
1724  * Free memory segments allocated for operational request queue
1725  *
1726  * Return: Nothing.
1727  */
1728 static void mpi3mr_free_op_req_q_segments(struct mpi3mr_ioc *mrioc, u16 q_idx)
1729 {
1730 	u16 j;
1731 	int size;
1732 	struct segments *segments;
1733 
1734 	segments = mrioc->req_qinfo[q_idx].q_segments;
1735 	if (!segments)
1736 		return;
1737 
1738 	if (mrioc->enable_segqueue) {
1739 		size = MPI3MR_OP_REQ_Q_SEG_SIZE;
1740 		if (mrioc->req_qinfo[q_idx].q_segment_list) {
1741 			dma_free_coherent(&mrioc->pdev->dev,
1742 			    MPI3MR_MAX_SEG_LIST_SIZE,
1743 			    mrioc->req_qinfo[q_idx].q_segment_list,
1744 			    mrioc->req_qinfo[q_idx].q_segment_list_dma);
1745 			mrioc->req_qinfo[q_idx].q_segment_list = NULL;
1746 		}
1747 	} else
1748 		size = mrioc->req_qinfo[q_idx].segment_qd *
1749 		    mrioc->facts.op_req_sz;
1750 
1751 	for (j = 0; j < mrioc->req_qinfo[q_idx].num_segments; j++) {
1752 		if (!segments[j].segment)
1753 			continue;
1754 		dma_free_coherent(&mrioc->pdev->dev,
1755 		    size, segments[j].segment, segments[j].segment_dma);
1756 		segments[j].segment = NULL;
1757 	}
1758 	kfree(mrioc->req_qinfo[q_idx].q_segments);
1759 	mrioc->req_qinfo[q_idx].q_segments = NULL;
1760 	mrioc->req_qinfo[q_idx].qid = 0;
1761 }
1762 
1763 /**
1764  * mpi3mr_free_op_reply_q_segments - free reply memory segments
1765  * @mrioc: Adapter instance reference
1766  * @q_idx: operational reply queue index
1767  *
1768  * Free memory segments allocated for operational reply queue
1769  *
1770  * Return: Nothing.
1771  */
1772 static void mpi3mr_free_op_reply_q_segments(struct mpi3mr_ioc *mrioc, u16 q_idx)
1773 {
1774 	u16 j;
1775 	int size;
1776 	struct segments *segments;
1777 
1778 	segments = mrioc->op_reply_qinfo[q_idx].q_segments;
1779 	if (!segments)
1780 		return;
1781 
1782 	if (mrioc->enable_segqueue) {
1783 		size = MPI3MR_OP_REP_Q_SEG_SIZE;
1784 		if (mrioc->op_reply_qinfo[q_idx].q_segment_list) {
1785 			dma_free_coherent(&mrioc->pdev->dev,
1786 			    MPI3MR_MAX_SEG_LIST_SIZE,
1787 			    mrioc->op_reply_qinfo[q_idx].q_segment_list,
1788 			    mrioc->op_reply_qinfo[q_idx].q_segment_list_dma);
1789 			mrioc->op_reply_qinfo[q_idx].q_segment_list = NULL;
1790 		}
1791 	} else
1792 		size = mrioc->op_reply_qinfo[q_idx].segment_qd *
1793 		    mrioc->op_reply_desc_sz;
1794 
1795 	for (j = 0; j < mrioc->op_reply_qinfo[q_idx].num_segments; j++) {
1796 		if (!segments[j].segment)
1797 			continue;
1798 		dma_free_coherent(&mrioc->pdev->dev,
1799 		    size, segments[j].segment, segments[j].segment_dma);
1800 		segments[j].segment = NULL;
1801 	}
1802 
1803 	kfree(mrioc->op_reply_qinfo[q_idx].q_segments);
1804 	mrioc->op_reply_qinfo[q_idx].q_segments = NULL;
1805 	mrioc->op_reply_qinfo[q_idx].qid = 0;
1806 }
1807 
1808 /**
1809  * mpi3mr_delete_op_reply_q - delete operational reply queue
1810  * @mrioc: Adapter instance reference
1811  * @qidx: operational reply queue index
1812  *
1813  * Delete operatinal reply queue by issuing MPI request
1814  * through admin queue.
1815  *
1816  * Return:  0 on success, non-zero on failure.
1817  */
1818 static int mpi3mr_delete_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx)
1819 {
1820 	struct mpi3_delete_reply_queue_request delq_req;
1821 	struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx;
1822 	int retval = 0;
1823 	u16 reply_qid = 0, midx;
1824 
1825 	reply_qid = op_reply_q->qid;
1826 
1827 	midx = REPLY_QUEUE_IDX_TO_MSIX_IDX(qidx, mrioc->op_reply_q_offset);
1828 
1829 	if (!reply_qid)	{
1830 		retval = -1;
1831 		ioc_err(mrioc, "Issue DelRepQ: called with invalid ReqQID\n");
1832 		goto out;
1833 	}
1834 
1835 	(op_reply_q->qtype == MPI3MR_DEFAULT_QUEUE) ? mrioc->default_qcount-- :
1836 	    mrioc->active_poll_qcount--;
1837 
1838 	memset(&delq_req, 0, sizeof(delq_req));
1839 	mutex_lock(&mrioc->init_cmds.mutex);
1840 	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
1841 		retval = -1;
1842 		ioc_err(mrioc, "Issue DelRepQ: Init command is in use\n");
1843 		mutex_unlock(&mrioc->init_cmds.mutex);
1844 		goto out;
1845 	}
1846 	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
1847 	mrioc->init_cmds.is_waiting = 1;
1848 	mrioc->init_cmds.callback = NULL;
1849 	delq_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
1850 	delq_req.function = MPI3_FUNCTION_DELETE_REPLY_QUEUE;
1851 	delq_req.queue_id = cpu_to_le16(reply_qid);
1852 
1853 	init_completion(&mrioc->init_cmds.done);
1854 	retval = mpi3mr_admin_request_post(mrioc, &delq_req, sizeof(delq_req),
1855 	    1);
1856 	if (retval) {
1857 		ioc_err(mrioc, "Issue DelRepQ: Admin Post failed\n");
1858 		goto out_unlock;
1859 	}
1860 	wait_for_completion_timeout(&mrioc->init_cmds.done,
1861 	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
1862 	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
1863 		ioc_err(mrioc, "delete reply queue timed out\n");
1864 		mpi3mr_check_rh_fault_ioc(mrioc,
1865 		    MPI3MR_RESET_FROM_DELREPQ_TIMEOUT);
1866 		retval = -1;
1867 		goto out_unlock;
1868 	}
1869 	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
1870 	    != MPI3_IOCSTATUS_SUCCESS) {
1871 		ioc_err(mrioc,
1872 		    "Issue DelRepQ: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
1873 		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
1874 		    mrioc->init_cmds.ioc_loginfo);
1875 		retval = -1;
1876 		goto out_unlock;
1877 	}
1878 	mrioc->intr_info[midx].op_reply_q = NULL;
1879 
1880 	mpi3mr_free_op_reply_q_segments(mrioc, qidx);
1881 out_unlock:
1882 	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
1883 	mutex_unlock(&mrioc->init_cmds.mutex);
1884 out:
1885 
1886 	return retval;
1887 }
1888 
/**
 * mpi3mr_alloc_op_reply_q_segments -Alloc segmented reply pool
 * @mrioc: Adapter instance reference
 * @qidx: request queue index
 *
 * Allocate segmented memory pools for operational reply
 * queue. On failure, partially allocated segments are left in
 * place for the caller to release through
 * mpi3mr_free_op_reply_q_segments().
 *
 * Return: 0 on success, non-zero on failure.
 */
static int mpi3mr_alloc_op_reply_q_segments(struct mpi3mr_ioc *mrioc, u16 qidx)
{
	struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx;
	int i, size;
	u64 *q_segment_list_entry = NULL;
	struct segments *segments;

	if (mrioc->enable_segqueue) {
		/* Each fixed-size segment holds as many reply descriptors
		 * as fit into MPI3MR_OP_REP_Q_SEG_SIZE.
		 */
		op_reply_q->segment_qd =
		    MPI3MR_OP_REP_Q_SEG_SIZE / mrioc->op_reply_desc_sz;

		size = MPI3MR_OP_REP_Q_SEG_SIZE;

		/* The segment list collects the DMA address of every
		 * segment; it is what gets handed to the firmware as the
		 * queue base when segmented queues are enabled.
		 */
		op_reply_q->q_segment_list = dma_alloc_coherent(&mrioc->pdev->dev,
		    MPI3MR_MAX_SEG_LIST_SIZE, &op_reply_q->q_segment_list_dma,
		    GFP_KERNEL);
		if (!op_reply_q->q_segment_list)
			return -ENOMEM;
		q_segment_list_entry = (u64 *)op_reply_q->q_segment_list;
	} else {
		/* Unsegmented: one contiguous buffer covers the whole queue */
		op_reply_q->segment_qd = op_reply_q->num_replies;
		size = op_reply_q->num_replies * mrioc->op_reply_desc_sz;
	}

	op_reply_q->num_segments = DIV_ROUND_UP(op_reply_q->num_replies,
	    op_reply_q->segment_qd);

	op_reply_q->q_segments = kcalloc(op_reply_q->num_segments,
	    sizeof(struct segments), GFP_KERNEL);
	if (!op_reply_q->q_segments)
		return -ENOMEM;

	segments = op_reply_q->q_segments;
	for (i = 0; i < op_reply_q->num_segments; i++) {
		segments[i].segment =
		    dma_alloc_coherent(&mrioc->pdev->dev,
		    size, &segments[i].segment_dma, GFP_KERNEL);
		if (!segments[i].segment)
			return -ENOMEM;
		if (mrioc->enable_segqueue)
			/* NOTE(review): casting dma_addr_t through
			 * unsigned long would truncate a 64-bit DMA address
			 * on 32-bit kernels - confirm supported platforms.
			 */
			q_segment_list_entry[i] =
			    (unsigned long)segments[i].segment_dma;
	}

	return 0;
}
1945 
1946 /**
1947  * mpi3mr_alloc_op_req_q_segments - Alloc segmented req pool.
1948  * @mrioc: Adapter instance reference
1949  * @qidx: request queue index
1950  *
1951  * Allocate segmented memory pools for operational request
1952  * queue.
1953  *
1954  * Return: 0 on success, non-zero on failure.
1955  */
1956 static int mpi3mr_alloc_op_req_q_segments(struct mpi3mr_ioc *mrioc, u16 qidx)
1957 {
1958 	struct op_req_qinfo *op_req_q = mrioc->req_qinfo + qidx;
1959 	int i, size;
1960 	u64 *q_segment_list_entry = NULL;
1961 	struct segments *segments;
1962 
1963 	if (mrioc->enable_segqueue) {
1964 		op_req_q->segment_qd =
1965 		    MPI3MR_OP_REQ_Q_SEG_SIZE / mrioc->facts.op_req_sz;
1966 
1967 		size = MPI3MR_OP_REQ_Q_SEG_SIZE;
1968 
1969 		op_req_q->q_segment_list = dma_alloc_coherent(&mrioc->pdev->dev,
1970 		    MPI3MR_MAX_SEG_LIST_SIZE, &op_req_q->q_segment_list_dma,
1971 		    GFP_KERNEL);
1972 		if (!op_req_q->q_segment_list)
1973 			return -ENOMEM;
1974 		q_segment_list_entry = (u64 *)op_req_q->q_segment_list;
1975 
1976 	} else {
1977 		op_req_q->segment_qd = op_req_q->num_requests;
1978 		size = op_req_q->num_requests * mrioc->facts.op_req_sz;
1979 	}
1980 
1981 	op_req_q->num_segments = DIV_ROUND_UP(op_req_q->num_requests,
1982 	    op_req_q->segment_qd);
1983 
1984 	op_req_q->q_segments = kcalloc(op_req_q->num_segments,
1985 	    sizeof(struct segments), GFP_KERNEL);
1986 	if (!op_req_q->q_segments)
1987 		return -ENOMEM;
1988 
1989 	segments = op_req_q->q_segments;
1990 	for (i = 0; i < op_req_q->num_segments; i++) {
1991 		segments[i].segment =
1992 		    dma_alloc_coherent(&mrioc->pdev->dev,
1993 		    size, &segments[i].segment_dma, GFP_KERNEL);
1994 		if (!segments[i].segment)
1995 			return -ENOMEM;
1996 		if (mrioc->enable_segqueue)
1997 			q_segment_list_entry[i] =
1998 			    (unsigned long)segments[i].segment_dma;
1999 	}
2000 
2001 	return 0;
2002 }
2003 
/**
 * mpi3mr_create_op_reply_q - create operational reply queue
 * @mrioc: Adapter instance reference
 * @qidx: operational reply queue index
 *
 * Create operatinal reply queue by issuing MPI request
 * through admin queue.
 *
 * Return:  0 on success, non-zero on failure.
 */
static int mpi3mr_create_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx)
{
	struct mpi3_create_reply_queue_request create_req;
	struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx;
	int retval = 0;
	u16 reply_qid = 0, midx;

	reply_qid = op_reply_q->qid;

	midx = REPLY_QUEUE_IDX_TO_MSIX_IDX(qidx, mrioc->op_reply_q_offset);

	/* A non-zero qid means this queue was already created */
	if (reply_qid) {
		retval = -1;
		ioc_err(mrioc, "CreateRepQ: called for duplicate qid %d\n",
		    reply_qid);

		return retval;
	}

	/* Queue IDs are 1-based on the wire; 0 is invalid */
	reply_qid = qidx + 1;
	op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD;
	/* Reduced queue depth for SAS4116 revision-0 parts */
	if ((mrioc->pdev->device == MPI3_MFGPAGE_DEVID_SAS4116) &&
		!mrioc->pdev->revision)
		op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD4K;
	op_reply_q->ci = 0;
	op_reply_q->ephase = 1;
	atomic_set(&op_reply_q->pend_ios, 0);
	atomic_set(&op_reply_q->in_use, 0);
	op_reply_q->enable_irq_poll = false;

	/* Allocate backing segments only once; reused across resets */
	if (!op_reply_q->q_segments) {
		retval = mpi3mr_alloc_op_reply_q_segments(mrioc, qidx);
		if (retval) {
			/* Release any partially allocated segments */
			mpi3mr_free_op_reply_q_segments(mrioc, qidx);
			goto out;
		}
	}

	memset(&create_req, 0, sizeof(create_req));
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "CreateRepQ: Init command is in use\n");
		goto out_unlock;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	create_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	create_req.function = MPI3_FUNCTION_CREATE_REPLY_QUEUE;
	create_req.queue_id = cpu_to_le16(reply_qid);

	/* MSI-x vectors past (intr_info_count - requested_poll_qcount)
	 * are reserved for io_uring poll queues.
	 */
	if (midx < (mrioc->intr_info_count - mrioc->requested_poll_qcount))
		op_reply_q->qtype = MPI3MR_DEFAULT_QUEUE;
	else
		op_reply_q->qtype = MPI3MR_POLL_QUEUE;

	if (op_reply_q->qtype == MPI3MR_DEFAULT_QUEUE) {
		create_req.flags =
			MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_ENABLE;
		create_req.msix_index =
			cpu_to_le16(mrioc->intr_info[midx].msix_index);
	} else {
		/* Poll queues are parked on the last MSI-x vector with no
		 * INT_ENABLE flag; the vector's IRQ is disabled the first
		 * time a poll queue is created.
		 */
		create_req.msix_index = cpu_to_le16(mrioc->intr_info_count - 1);
		ioc_info(mrioc, "create reply queue(polled): for qid(%d), midx(%d)\n",
			reply_qid, midx);
		if (!mrioc->active_poll_qcount)
			disable_irq_nosync(pci_irq_vector(mrioc->pdev,
			    mrioc->intr_info_count - 1));
	}

	if (mrioc->enable_segqueue) {
		/* NOTE(review): the REQUEST_QUEUE segmented flag is used
		 * for a reply queue here - presumably the bit position
		 * matches the reply-queue segmented flag; confirm against
		 * the MPI3 headers.
		 */
		create_req.flags |=
		    MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_SEGMENTED;
		create_req.base_address = cpu_to_le64(
		    op_reply_q->q_segment_list_dma);
	} else
		create_req.base_address = cpu_to_le64(
		    op_reply_q->q_segments[0].segment_dma);

	create_req.size = cpu_to_le16(op_reply_q->num_replies);

	init_completion(&mrioc->init_cmds.done);
	retval = mpi3mr_admin_request_post(mrioc, &create_req,
	    sizeof(create_req), 1);
	if (retval) {
		ioc_err(mrioc, "CreateRepQ: Admin Post failed\n");
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		ioc_err(mrioc, "create reply queue timed out\n");
		/* Fault the controller and collect snapdump on timeout */
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_CREATEREPQ_TIMEOUT);
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc,
		    "CreateRepQ: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}
	/* Mark the queue live and attach it to its interrupt context */
	op_reply_q->qid = reply_qid;
	if (midx < mrioc->intr_info_count)
		mrioc->intr_info[midx].op_reply_q = op_reply_q;

	(op_reply_q->qtype == MPI3MR_DEFAULT_QUEUE) ? mrioc->default_qcount++ :
	    mrioc->active_poll_qcount++;

out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);
out:

	return retval;
}
2135 
/**
 * mpi3mr_create_op_req_q - create operational request queue
 * @mrioc: Adapter instance reference
 * @idx: operational request queue index
 * @reply_qid: Reply queue ID
 *
 * Create operatinal request queue by issuing MPI request
 * through admin queue.
 *
 * Return:  0 on success, non-zero on failure.
 */
static int mpi3mr_create_op_req_q(struct mpi3mr_ioc *mrioc, u16 idx,
	u16 reply_qid)
{
	struct mpi3_create_request_queue_request create_req;
	struct op_req_qinfo *op_req_q = mrioc->req_qinfo + idx;
	int retval = 0;
	u16 req_qid = 0;

	req_qid = op_req_q->qid;

	/* A non-zero qid means this queue was already created */
	if (req_qid) {
		retval = -1;
		ioc_err(mrioc, "CreateReqQ: called for duplicate qid %d\n",
		    req_qid);

		return retval;
	}
	/* Queue IDs are 1-based on the wire; 0 is invalid */
	req_qid = idx + 1;

	op_req_q->num_requests = MPI3MR_OP_REQ_Q_QD;
	op_req_q->ci = 0;
	op_req_q->pi = 0;
	/* Completions for this request queue are routed to @reply_qid */
	op_req_q->reply_qid = reply_qid;
	spin_lock_init(&op_req_q->q_lock);

	/* Allocate backing segments only once; reused across resets */
	if (!op_req_q->q_segments) {
		retval = mpi3mr_alloc_op_req_q_segments(mrioc, idx);
		if (retval) {
			/* Release any partially allocated segments */
			mpi3mr_free_op_req_q_segments(mrioc, idx);
			goto out;
		}
	}

	memset(&create_req, 0, sizeof(create_req));
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "CreateReqQ: Init command is in use\n");
		goto out_unlock;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	create_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	create_req.function = MPI3_FUNCTION_CREATE_REQUEST_QUEUE;
	create_req.queue_id = cpu_to_le16(req_qid);
	if (mrioc->enable_segqueue) {
		/* Segmented queue: base address points at the segment list */
		create_req.flags =
		    MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_SEGMENTED;
		create_req.base_address = cpu_to_le64(
		    op_req_q->q_segment_list_dma);
	} else
		create_req.base_address = cpu_to_le64(
		    op_req_q->q_segments[0].segment_dma);
	create_req.reply_queue_id = cpu_to_le16(reply_qid);
	create_req.size = cpu_to_le16(op_req_q->num_requests);

	init_completion(&mrioc->init_cmds.done);
	retval = mpi3mr_admin_request_post(mrioc, &create_req,
	    sizeof(create_req), 1);
	if (retval) {
		ioc_err(mrioc, "CreateReqQ: Admin Post failed\n");
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		ioc_err(mrioc, "create request queue timed out\n");
		/* Fault the controller and collect snapdump on timeout */
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_CREATEREQQ_TIMEOUT);
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc,
		    "CreateReqQ: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}
	/* Mark the queue live */
	op_req_q->qid = req_qid;

out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);
out:

	return retval;
}
2238 
2239 /**
2240  * mpi3mr_create_op_queues - create operational queue pairs
2241  * @mrioc: Adapter instance reference
2242  *
2243  * Allocate memory for operational queue meta data and call
2244  * create request and reply queue functions.
2245  *
2246  * Return: 0 on success, non-zero on failures.
2247  */
2248 static int mpi3mr_create_op_queues(struct mpi3mr_ioc *mrioc)
2249 {
2250 	int retval = 0;
2251 	u16 num_queues = 0, i = 0, msix_count_op_q = 1;
2252 
2253 	num_queues = min_t(int, mrioc->facts.max_op_reply_q,
2254 	    mrioc->facts.max_op_req_q);
2255 
2256 	msix_count_op_q =
2257 	    mrioc->intr_info_count - mrioc->op_reply_q_offset;
2258 	if (!mrioc->num_queues)
2259 		mrioc->num_queues = min_t(int, num_queues, msix_count_op_q);
2260 	/*
2261 	 * During reset set the num_queues to the number of queues
2262 	 * that was set before the reset.
2263 	 */
2264 	num_queues = mrioc->num_op_reply_q ?
2265 	    mrioc->num_op_reply_q : mrioc->num_queues;
2266 	ioc_info(mrioc, "trying to create %d operational queue pairs\n",
2267 	    num_queues);
2268 
2269 	if (!mrioc->req_qinfo) {
2270 		mrioc->req_qinfo = kcalloc(num_queues,
2271 		    sizeof(struct op_req_qinfo), GFP_KERNEL);
2272 		if (!mrioc->req_qinfo) {
2273 			retval = -1;
2274 			goto out_failed;
2275 		}
2276 
2277 		mrioc->op_reply_qinfo = kzalloc(sizeof(struct op_reply_qinfo) *
2278 		    num_queues, GFP_KERNEL);
2279 		if (!mrioc->op_reply_qinfo) {
2280 			retval = -1;
2281 			goto out_failed;
2282 		}
2283 	}
2284 
2285 	if (mrioc->enable_segqueue)
2286 		ioc_info(mrioc,
2287 		    "allocating operational queues through segmented queues\n");
2288 
2289 	for (i = 0; i < num_queues; i++) {
2290 		if (mpi3mr_create_op_reply_q(mrioc, i)) {
2291 			ioc_err(mrioc, "Cannot create OP RepQ %d\n", i);
2292 			break;
2293 		}
2294 		if (mpi3mr_create_op_req_q(mrioc, i,
2295 		    mrioc->op_reply_qinfo[i].qid)) {
2296 			ioc_err(mrioc, "Cannot create OP ReqQ %d\n", i);
2297 			mpi3mr_delete_op_reply_q(mrioc, i);
2298 			break;
2299 		}
2300 	}
2301 
2302 	if (i == 0) {
2303 		/* Not even one queue is created successfully*/
2304 		retval = -1;
2305 		goto out_failed;
2306 	}
2307 	mrioc->num_op_reply_q = mrioc->num_op_req_q = i;
2308 	ioc_info(mrioc,
2309 	    "successfully created %d operational queue pairs(default/polled) queue = (%d/%d)\n",
2310 	    mrioc->num_op_reply_q, mrioc->default_qcount,
2311 	    mrioc->active_poll_qcount);
2312 
2313 	return retval;
2314 out_failed:
2315 	kfree(mrioc->req_qinfo);
2316 	mrioc->req_qinfo = NULL;
2317 
2318 	kfree(mrioc->op_reply_qinfo);
2319 	mrioc->op_reply_qinfo = NULL;
2320 
2321 	return retval;
2322 }
2323 
/**
 * mpi3mr_op_request_post - Post request to operational queue
 * @mrioc: Adapter reference
 * @op_req_q: Operational request queue info
 * @req: MPI3 request
 *
 * Post the MPI3 request into operational request queue and
 * inform the controller, if the queue is full return
 * appropriate error.
 *
 * Return: 0 on success, non-zero on failure.
 */
int mpi3mr_op_request_post(struct mpi3mr_ioc *mrioc,
	struct op_req_qinfo *op_req_q, u8 *req)
{
	u16 pi = 0, max_entries, reply_qidx = 0, midx;
	int retval = 0;
	unsigned long flags;
	u8 *req_entry;
	void *segment_base_addr;
	u16 req_sz = mrioc->facts.op_req_sz;
	struct segments *segments = op_req_q->q_segments;

	/* reply_qid is 1-based; convert to 0-based array index */
	reply_qidx = op_req_q->reply_qid - 1;

	if (mrioc->unrecoverable)
		return -EFAULT;

	spin_lock_irqsave(&op_req_q->q_lock, flags);
	pi = op_req_q->pi;
	max_entries = op_req_q->num_requests;

	if (mpi3mr_check_req_qfull(op_req_q)) {
		/* Queue looks full: drain the paired reply queue to let
		 * the consumer index advance, then re-check once.
		 */
		midx = REPLY_QUEUE_IDX_TO_MSIX_IDX(
		    reply_qidx, mrioc->op_reply_q_offset);
		mpi3mr_process_op_reply_q(mrioc, mrioc->intr_info[midx].op_reply_q);

		if (mpi3mr_check_req_qfull(op_req_q)) {
			retval = -EAGAIN;
			goto out;
		}
	}

	if (mrioc->reset_in_progress) {
		ioc_err(mrioc, "OpReqQ submit reset in progress\n");
		retval = -EAGAIN;
		goto out;
	}
	if (mrioc->pci_err_recovery) {
		ioc_err(mrioc, "operational request queue submission failed due to pci error recovery in progress\n");
		retval = -EAGAIN;
		goto out;
	}

	/* Locate the slot for the current producer index in its segment */
	segment_base_addr = segments[pi / op_req_q->segment_qd].segment;
	req_entry = (u8 *)segment_base_addr +
	    ((pi % op_req_q->segment_qd) * req_sz);

	memset(req_entry, 0, req_sz);
	memcpy(req_entry, req, MPI3MR_ADMIN_REQ_FRAME_SZ);

	/* Advance the producer index with wrap-around */
	if (++pi == max_entries)
		pi = 0;
	op_req_q->pi = pi;

#ifndef CONFIG_PREEMPT_RT
	/* Enable irq-poll on the paired reply queue once enough I/Os are
	 * outstanding (irq-poll switching is not done on PREEMPT_RT).
	 */
	if (atomic_inc_return(&mrioc->op_reply_qinfo[reply_qidx].pend_ios)
	    > MPI3MR_IRQ_POLL_TRIGGER_IOCOUNT)
		mrioc->op_reply_qinfo[reply_qidx].enable_irq_poll = true;
#else
	atomic_inc_return(&mrioc->op_reply_qinfo[reply_qidx].pend_ios);
#endif

	/* Publish the new producer index to the controller */
	writel(op_req_q->pi,
	    &mrioc->sysif_regs->oper_queue_indexes[reply_qidx].producer_index);

out:
	spin_unlock_irqrestore(&op_req_q->q_lock, flags);
	return retval;
}
2404 
/**
 * mpi3mr_check_rh_fault_ioc - check reset history and fault
 * controller
 * @mrioc: Adapter instance reference
 * @reason_code: reason code for the fault.
 *
 * This routine will save snapdump and fault the controller with
 * the given reason code if it is not already in the fault or
 * not asynchronosuly reset. This will be used to handle
 * initilaization time faults/resets/timeout as in those cases
 * immediate soft reset invocation is not required.
 *
 * Return:  None.
 */
void mpi3mr_check_rh_fault_ioc(struct mpi3mr_ioc *mrioc, u32 reason_code)
{
	u32 ioc_status, host_diagnostic, timeout;
	union mpi3mr_trigger_data trigger_data;

	if (mrioc->unrecoverable) {
		ioc_err(mrioc, "controller is unrecoverable\n");
		return;
	}

	if (!pci_device_is_present(mrioc->pdev)) {
		mrioc->unrecoverable = 1;
		ioc_err(mrioc, "controller is not present\n");
		return;
	}
	memset(&trigger_data, 0, sizeof(trigger_data));
	ioc_status = readl(&mrioc->sysif_regs->ioc_status);

	/* Already reset asynchronously or already faulted: only update the
	 * diag buffer trigger data, do not issue a new fault.
	 */
	if (ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) {
		mpi3mr_set_trigger_data_in_all_hdb(mrioc,
		    MPI3MR_HDB_TRIGGER_TYPE_FW_RELEASED, NULL, 0);
		return;
	} else if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) {
		trigger_data.fault = (readl(&mrioc->sysif_regs->fault) &
		      MPI3_SYSIF_FAULT_CODE_MASK);

		mpi3mr_set_trigger_data_in_all_hdb(mrioc,
		    MPI3MR_HDB_TRIGGER_TYPE_FAULT, &trigger_data, 0);
		mpi3mr_print_fault_info(mrioc);
		return;
	}

	/* Request a diag-fault reset so the snapdump is preserved */
	mpi3mr_set_diagsave(mrioc);
	mpi3mr_issue_reset(mrioc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
	    reason_code);
	trigger_data.fault = (readl(&mrioc->sysif_regs->fault) &
		      MPI3_SYSIF_FAULT_CODE_MASK);
	mpi3mr_set_trigger_data_in_all_hdb(mrioc, MPI3MR_HDB_TRIGGER_TYPE_FAULT,
	    &trigger_data, 0);
	/* Poll at 100ms granularity until the diag save finishes or the
	 * timeout expires.
	 */
	timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10;
	do {
		host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic);
		if (!(host_diagnostic & MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS))
			break;
		msleep(100);
	} while (--timeout);
}
2466 
/**
 * mpi3mr_sync_timestamp - Issue time stamp sync request
 * @mrioc: Adapter reference
 *
 * Issue IO unit control MPI request to synchornize firmware
 * timestamp with host time.
 *
 * Return: 0 on success, non-zero on failure.
 */
static int mpi3mr_sync_timestamp(struct mpi3mr_ioc *mrioc)
{
	ktime_t current_time;
	struct mpi3_iounit_control_request iou_ctrl;
	int retval = 0;

	memset(&iou_ctrl, 0, sizeof(iou_ctrl));
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "Issue IOUCTL time_stamp: command is in use\n");
		/* Unlock here: the out label does not release the mutex */
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	iou_ctrl.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	iou_ctrl.function = MPI3_FUNCTION_IO_UNIT_CONTROL;
	iou_ctrl.operation = MPI3_CTRL_OP_UPDATE_TIMESTAMP;
	/* Host wall-clock time is sent in milliseconds */
	current_time = ktime_get_real();
	iou_ctrl.param64[0] = cpu_to_le64(ktime_to_ms(current_time));

	init_completion(&mrioc->init_cmds.done);
	retval = mpi3mr_admin_request_post(mrioc, &iou_ctrl,
	    sizeof(iou_ctrl), 0);
	if (retval) {
		ioc_err(mrioc, "Issue IOUCTL time_stamp: Admin Post failed\n");
		goto out_unlock;
	}

	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		ioc_err(mrioc, "Issue IOUCTL time_stamp: command timed out\n");
		mrioc->init_cmds.is_waiting = 0;
		/* Do not fault the IOC if a reset already cleaned up this
		 * command.
		 */
		if (!(mrioc->init_cmds.state & MPI3MR_CMD_RESET))
			mpi3mr_check_rh_fault_ioc(mrioc,
			    MPI3MR_RESET_FROM_TSU_TIMEOUT);
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc,
		    "Issue IOUCTL time_stamp: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}

out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);

out:
	return retval;
}
2535 
/**
 * mpi3mr_print_pkg_ver - display controller fw package version
 * @mrioc: Adapter reference
 *
 * Retrieve firmware package version from the component image
 * header of the controller flash and display it.
 *
 * Return: 0 on success and non-zero on failure.
 */
static int mpi3mr_print_pkg_ver(struct mpi3mr_ioc *mrioc)
{
	struct mpi3_ci_upload_request ci_upload;
	int retval = -1;
	void *data = NULL;
	dma_addr_t data_dma;
	struct mpi3_ci_manifest_mpi *manifest;
	u32 data_len = sizeof(struct mpi3_ci_manifest_mpi);
	u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;

	/* DMA buffer to receive the uploaded component image manifest */
	data = dma_alloc_coherent(&mrioc->pdev->dev, data_len, &data_dma,
	    GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	memset(&ci_upload, 0, sizeof(ci_upload));
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		ioc_err(mrioc, "sending get package version failed due to command in use\n");
		/* Unlock here: the out label does not release the mutex */
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	ci_upload.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	ci_upload.function = MPI3_FUNCTION_CI_UPLOAD;
	ci_upload.msg_flags = MPI3_CI_UPLOAD_MSGFLAGS_LOCATION_PRIMARY;
	ci_upload.signature1 = cpu_to_le32(MPI3_IMAGE_HEADER_SIGNATURE1_MANIFEST);
	/* The manifest follows the image header in flash */
	ci_upload.image_offset = cpu_to_le32(MPI3_IMAGE_HEADER_SIZE);
	ci_upload.segment_size = cpu_to_le32(data_len);

	mpi3mr_add_sg_single(&ci_upload.sgl, sgl_flags, data_len,
	    data_dma);
	init_completion(&mrioc->init_cmds.done);
	retval = mpi3mr_admin_request_post(mrioc, &ci_upload,
	    sizeof(ci_upload), 1);
	if (retval) {
		ioc_err(mrioc, "posting get package version failed\n");
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		ioc_err(mrioc, "get package version timed out\n");
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_GETPKGVER_TIMEOUT);
		retval = -1;
		goto out_unlock;
	}
	/* Best effort: the version is logged only when the upload
	 * succeeded and the buffer really holds an MPI manifest; a failed
	 * IOC status is not treated as an error for this routine.
	 */
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    == MPI3_IOCSTATUS_SUCCESS) {
		manifest = (struct mpi3_ci_manifest_mpi *) data;
		if (manifest->manifest_type == MPI3_CI_MANIFEST_TYPE_MPI) {
			ioc_info(mrioc,
			    "firmware package version(%d.%d.%d.%d.%05d-%05d)\n",
			    manifest->package_version.gen_major,
			    manifest->package_version.gen_minor,
			    manifest->package_version.phase_major,
			    manifest->package_version.phase_minor,
			    manifest->package_version.customer_id,
			    manifest->package_version.build_num);
		}
	}
	retval = 0;
out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);

out:
	if (data)
		dma_free_coherent(&mrioc->pdev->dev, data_len, data,
		    data_dma);
	return retval;
}
2620 
/**
 * mpi3mr_watchdog_work - watchdog thread to monitor faults
 * @work: work struct
 *
 * Watch dog work periodically executed (1 second interval) to
 * monitor firmware fault and to issue periodic timer sync to
 * the firmware.
 *
 * Return: Nothing.
 */
static void mpi3mr_watchdog_work(struct work_struct *work)
{
	struct mpi3mr_ioc *mrioc =
	    container_of(work, struct mpi3mr_ioc, watchdog_work.work);
	unsigned long flags;
	enum mpi3mr_iocstate ioc_state;
	u32 host_diagnostic, ioc_status;
	union mpi3mr_trigger_data trigger_data;
	u16 reset_reason = MPI3MR_RESET_FROM_FAULT_WATCH;

	/* No monitoring while a reset or PCI error recovery is running */
	if (mrioc->reset_in_progress || mrioc->pci_err_recovery)
		return;

	if (!mrioc->unrecoverable && !pci_device_is_present(mrioc->pdev)) {
		ioc_err(mrioc, "watchdog could not detect the controller\n");
		mrioc->unrecoverable = 1;
	}

	if (mrioc->unrecoverable) {
		ioc_err(mrioc,
		    "flush pending commands for unrecoverable controller\n");
		mpi3mr_flush_cmds_for_unrecovered_controller(mrioc);
		return;
	}

	/* Periodic firmware timestamp synchronization */
	if (mrioc->ts_update_counter++ >= MPI3MR_TSUPDATE_INTERVAL) {
		mrioc->ts_update_counter = 0;
		mpi3mr_sync_timestamp(mrioc);
	}

	/* Firmware asked the host to prepare for reset but never followed
	 * through: force a soft reset after the grace period.
	 */
	if ((mrioc->prepare_for_reset) &&
	    ((mrioc->prepare_for_reset_timeout_counter++) >=
	     MPI3MR_PREPARE_FOR_RESET_TIMEOUT)) {
		mpi3mr_soft_reset_handler(mrioc,
		    MPI3MR_RESET_FROM_CIACTVRST_TIMER, 1);
		return;
	}

	memset(&trigger_data, 0, sizeof(trigger_data));
	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
	/* Firmware reset the controller on its own: resync via soft reset */
	if (ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) {
		mpi3mr_set_trigger_data_in_all_hdb(mrioc,
		    MPI3MR_HDB_TRIGGER_TYPE_FW_RELEASED, NULL, 0);
		mpi3mr_soft_reset_handler(mrioc, MPI3MR_RESET_FROM_FIRMWARE, 0);
		return;
	}

	/*Check for fault state every one second and issue Soft reset*/
	ioc_state = mpi3mr_get_iocstate(mrioc);
	if (ioc_state != MRIOC_STATE_FAULT)
		goto schedule_work;

	trigger_data.fault = readl(&mrioc->sysif_regs->fault) & MPI3_SYSIF_FAULT_CODE_MASK;
	mpi3mr_set_trigger_data_in_all_hdb(mrioc,
	    MPI3MR_HDB_TRIGGER_TYPE_FAULT, &trigger_data, 0);
	host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic);
	/* Give an in-progress diag save time to complete before resetting */
	if (host_diagnostic & MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS) {
		if (!mrioc->diagsave_timeout) {
			mpi3mr_print_fault_info(mrioc);
			ioc_warn(mrioc, "diag save in progress\n");
		}
		if ((mrioc->diagsave_timeout++) <= MPI3_SYSIF_DIAG_SAVE_TIMEOUT)
			goto schedule_work;
	}

	mpi3mr_print_fault_info(mrioc);
	mrioc->diagsave_timeout = 0;

	switch (trigger_data.fault) {
	case MPI3_SYSIF_FAULT_CODE_COMPLETE_RESET_NEEDED:
	case MPI3_SYSIF_FAULT_CODE_POWER_CYCLE_REQUIRED:
		ioc_warn(mrioc,
		    "controller requires system power cycle, marking controller as unrecoverable\n");
		mrioc->unrecoverable = 1;
		goto schedule_work;
	case MPI3_SYSIF_FAULT_CODE_SOFT_RESET_IN_PROGRESS:
		/* A soft reset is already underway; just keep watching */
		goto schedule_work;
	case MPI3_SYSIF_FAULT_CODE_CI_ACTIVATION_RESET:
		reset_reason = MPI3MR_RESET_FROM_CIACTIV_FAULT;
		break;
	default:
		break;
	}
	mpi3mr_soft_reset_handler(mrioc, reset_reason, 0);
	return;

schedule_work:
	/* Re-arm the watchdog unless it was torn down concurrently;
	 * watchdog_lock serializes against mpi3mr_stop_watchdog().
	 */
	spin_lock_irqsave(&mrioc->watchdog_lock, flags);
	if (mrioc->watchdog_work_q)
		queue_delayed_work(mrioc->watchdog_work_q,
		    &mrioc->watchdog_work,
		    msecs_to_jiffies(MPI3MR_WATCHDOG_INTERVAL));
	spin_unlock_irqrestore(&mrioc->watchdog_lock, flags);
	return;
}
2726 
2727 /**
2728  * mpi3mr_start_watchdog - Start watchdog
2729  * @mrioc: Adapter instance reference
2730  *
2731  * Create and start the watchdog thread to monitor controller
2732  * faults.
2733  *
2734  * Return: Nothing.
2735  */
2736 void mpi3mr_start_watchdog(struct mpi3mr_ioc *mrioc)
2737 {
2738 	if (mrioc->watchdog_work_q)
2739 		return;
2740 
2741 	INIT_DELAYED_WORK(&mrioc->watchdog_work, mpi3mr_watchdog_work);
2742 	snprintf(mrioc->watchdog_work_q_name,
2743 	    sizeof(mrioc->watchdog_work_q_name), "watchdog_%s%d", mrioc->name,
2744 	    mrioc->id);
2745 	mrioc->watchdog_work_q =
2746 	    create_singlethread_workqueue(mrioc->watchdog_work_q_name);
2747 	if (!mrioc->watchdog_work_q) {
2748 		ioc_err(mrioc, "%s: failed (line=%d)\n", __func__, __LINE__);
2749 		return;
2750 	}
2751 
2752 	if (mrioc->watchdog_work_q)
2753 		queue_delayed_work(mrioc->watchdog_work_q,
2754 		    &mrioc->watchdog_work,
2755 		    msecs_to_jiffies(MPI3MR_WATCHDOG_INTERVAL));
2756 }
2757 
2758 /**
2759  * mpi3mr_stop_watchdog - Stop watchdog
2760  * @mrioc: Adapter instance reference
2761  *
2762  * Stop the watchdog thread created to monitor controller
2763  * faults.
2764  *
2765  * Return: Nothing.
2766  */
2767 void mpi3mr_stop_watchdog(struct mpi3mr_ioc *mrioc)
2768 {
2769 	unsigned long flags;
2770 	struct workqueue_struct *wq;
2771 
2772 	spin_lock_irqsave(&mrioc->watchdog_lock, flags);
2773 	wq = mrioc->watchdog_work_q;
2774 	mrioc->watchdog_work_q = NULL;
2775 	spin_unlock_irqrestore(&mrioc->watchdog_lock, flags);
2776 	if (wq) {
2777 		if (!cancel_delayed_work_sync(&mrioc->watchdog_work))
2778 			flush_workqueue(wq);
2779 		destroy_workqueue(wq);
2780 	}
2781 }
2782 
/**
 * mpi3mr_setup_admin_qpair - Setup admin queue pair
 * @mrioc: Adapter instance reference
 *
 * Allocate memory for admin queue pair if required and register
 * the admin queue with the controller.
 *
 * Return: 0 on success, non-zero on failures.
 */
static int mpi3mr_setup_admin_qpair(struct mpi3mr_ioc *mrioc)
{
	int retval = 0;
	u32 num_admin_entries = 0;

	mrioc->admin_req_q_sz = MPI3MR_ADMIN_REQ_Q_SIZE;
	mrioc->num_admin_req = mrioc->admin_req_q_sz /
	    MPI3MR_ADMIN_REQ_FRAME_SZ;
	mrioc->admin_req_ci = mrioc->admin_req_pi = 0;

	mrioc->admin_reply_q_sz = MPI3MR_ADMIN_REPLY_Q_SIZE;
	mrioc->num_admin_replies = mrioc->admin_reply_q_sz /
	    MPI3MR_ADMIN_REPLY_FRAME_SZ;
	mrioc->admin_reply_ci = 0;
	/* Expected phase bit starts at 1 on a freshly initialized queue */
	mrioc->admin_reply_ephase = 1;
	atomic_set(&mrioc->admin_reply_q_in_use, 0);

	/* Allocate the DMA buffers only once; reused across resets */
	if (!mrioc->admin_req_base) {
		mrioc->admin_req_base = dma_alloc_coherent(&mrioc->pdev->dev,
		    mrioc->admin_req_q_sz, &mrioc->admin_req_dma, GFP_KERNEL);

		if (!mrioc->admin_req_base) {
			retval = -1;
			goto out_failed;
		}

		mrioc->admin_reply_base = dma_alloc_coherent(&mrioc->pdev->dev,
		    mrioc->admin_reply_q_sz, &mrioc->admin_reply_dma,
		    GFP_KERNEL);

		if (!mrioc->admin_reply_base) {
			retval = -1;
			goto out_failed;
		}
	}

	/* Program both queue depths in one 32-bit register: replies in the
	 * high 16 bits, requests in the low 16 bits.
	 */
	num_admin_entries = (mrioc->num_admin_replies << 16) |
	    (mrioc->num_admin_req);
	writel(num_admin_entries, &mrioc->sysif_regs->admin_queue_num_entries);
	mpi3mr_writeq(mrioc->admin_req_dma,
	    &mrioc->sysif_regs->admin_request_queue_address);
	mpi3mr_writeq(mrioc->admin_reply_dma,
	    &mrioc->sysif_regs->admin_reply_queue_address);
	writel(mrioc->admin_req_pi, &mrioc->sysif_regs->admin_request_queue_pi);
	writel(mrioc->admin_reply_ci, &mrioc->sysif_regs->admin_reply_queue_ci);
	return retval;

out_failed:

	if (mrioc->admin_reply_base) {
		dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_reply_q_sz,
		    mrioc->admin_reply_base, mrioc->admin_reply_dma);
		mrioc->admin_reply_base = NULL;
	}
	if (mrioc->admin_req_base) {
		dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_req_q_sz,
		    mrioc->admin_req_base, mrioc->admin_req_dma);
		mrioc->admin_req_base = NULL;
	}
	return retval;
}
2853 
2854 /**
2855  * mpi3mr_issue_iocfacts - Send IOC Facts
2856  * @mrioc: Adapter instance reference
2857  * @facts_data: Cached IOC facts data
2858  *
2859  * Issue IOC Facts MPI request through admin queue and wait for
2860  * the completion of it or time out.
2861  *
2862  * Return: 0 on success, non-zero on failures.
2863  */
static int mpi3mr_issue_iocfacts(struct mpi3mr_ioc *mrioc,
	struct mpi3_ioc_facts_data *facts_data)
{
	struct mpi3_ioc_facts_request iocfacts_req;
	void *data = NULL;
	dma_addr_t data_dma;
	u32 data_len = sizeof(*facts_data);
	int retval = 0;
	u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;

	/* DMA-coherent buffer the controller writes the facts data into */
	data = dma_alloc_coherent(&mrioc->pdev->dev, data_len, &data_dma,
	    GFP_KERNEL);

	if (!data) {
		retval = -1;
		goto out;
	}

	memset(&iocfacts_req, 0, sizeof(iocfacts_req));
	/* init_cmds is a single-use command tracker; bail out if it is busy */
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "Issue IOCFacts: Init command is in use\n");
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	iocfacts_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	iocfacts_req.function = MPI3_FUNCTION_IOC_FACTS;

	/* Single simple SGE describing the response buffer */
	mpi3mr_add_sg_single(&iocfacts_req.sgl, sgl_flags, data_len,
	    data_dma);

	init_completion(&mrioc->init_cmds.done);
	retval = mpi3mr_admin_request_post(mrioc, &iocfacts_req,
	    sizeof(iocfacts_req), 1);
	if (retval) {
		ioc_err(mrioc, "Issue IOCFacts: Admin Post failed\n");
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		/* Timed out: kick off reset-handling/fault diagnostics */
		ioc_err(mrioc, "ioc_facts timed out\n");
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_IOCFACTS_TIMEOUT);
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc,
		    "Issue IOCFacts: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}
	/* Copy the raw facts to the caller and cache them in mrioc->facts */
	memcpy(facts_data, (u8 *)data, data_len);
	mpi3mr_process_factsdata(mrioc, facts_data);
out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);

out:
	if (data)
		dma_free_coherent(&mrioc->pdev->dev, data_len, data, data_dma);

	return retval;
}
2936 
2937 /**
 * mpi3mr_check_reset_dma_mask - Check and set new DMA mask
2939  * @mrioc: Adapter instance reference
2940  *
2941  * Check whether the new DMA mask requested through IOCFacts by
 * firmware needs to be set, and if so, set it.
2943  *
2944  * Return: 0 on success, non-zero on failure.
2945  */
2946 static inline int mpi3mr_check_reset_dma_mask(struct mpi3mr_ioc *mrioc)
2947 {
2948 	struct pci_dev *pdev = mrioc->pdev;
2949 	int r;
2950 	u64 facts_dma_mask = DMA_BIT_MASK(mrioc->facts.dma_mask);
2951 
2952 	if (!mrioc->facts.dma_mask || (mrioc->dma_mask <= facts_dma_mask))
2953 		return 0;
2954 
2955 	ioc_info(mrioc, "Changing DMA mask from 0x%016llx to 0x%016llx\n",
2956 	    mrioc->dma_mask, facts_dma_mask);
2957 
2958 	r = dma_set_mask_and_coherent(&pdev->dev, facts_dma_mask);
2959 	if (r) {
2960 		ioc_err(mrioc, "Setting DMA mask to 0x%016llx failed: %d\n",
2961 		    facts_dma_mask, r);
2962 		return r;
2963 	}
2964 	mrioc->dma_mask = facts_dma_mask;
2965 	return r;
2966 }
2967 
2968 /**
2969  * mpi3mr_process_factsdata - Process IOC facts data
2970  * @mrioc: Adapter instance reference
2971  * @facts_data: Cached IOC facts data
2972  *
2973  * Convert IOC facts data into cpu endianness and cache it in
 * the driver.
2975  *
2976  * Return: Nothing.
2977  */
static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc,
	struct mpi3_ioc_facts_data *facts_data)
{
	u32 ioc_config, req_sz, facts_flags;

	/* Warn (but continue) if firmware's facts length differs from ours */
	if ((le16_to_cpu(facts_data->ioc_facts_data_length)) !=
	    (sizeof(*facts_data) / 4)) {
		ioc_warn(mrioc,
		    "IOCFactsdata length mismatch driver_sz(%zu) firmware_sz(%d)\n",
		    sizeof(*facts_data),
		    le16_to_cpu(facts_data->ioc_facts_data_length) * 4);
	}

	/*
	 * Cross-check the request frame size advertised in the system
	 * interface register against the value firmware reported in facts.
	 */
	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
	req_sz = 1 << ((ioc_config & MPI3_SYSIF_IOC_CONFIG_OPER_REQ_ENT_SZ) >>
	    MPI3_SYSIF_IOC_CONFIG_OPER_REQ_ENT_SZ_SHIFT);
	if (le16_to_cpu(facts_data->ioc_request_frame_size) != (req_sz / 4)) {
		ioc_err(mrioc,
		    "IOCFacts data reqFrameSize mismatch hw_size(%d) firmware_sz(%d)\n",
		    req_sz / 4, le16_to_cpu(facts_data->ioc_request_frame_size));
	}

	/* Rebuild the cached facts from scratch, converting to CPU order */
	memset(&mrioc->facts, 0, sizeof(mrioc->facts));

	facts_flags = le32_to_cpu(facts_data->flags);
	mrioc->facts.op_req_sz = req_sz;
	mrioc->op_reply_desc_sz = 1 << ((ioc_config &
	    MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ) >>
	    MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ_SHIFT);

	mrioc->facts.ioc_num = facts_data->ioc_number;
	mrioc->facts.who_init = facts_data->who_init;
	mrioc->facts.max_msix_vectors = le16_to_cpu(facts_data->max_msix_vectors);
	mrioc->facts.personality = (facts_flags &
	    MPI3_IOCFACTS_FLAGS_PERSONALITY_MASK);
	mrioc->facts.dma_mask = (facts_flags &
	    MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_MASK) >>
	    MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_SHIFT;
	mrioc->facts.protocol_flags = facts_data->protocol_flags;
	mrioc->facts.mpi_version = le32_to_cpu(facts_data->mpi_version.word);
	mrioc->facts.max_reqs = le16_to_cpu(facts_data->max_outstanding_requests);
	mrioc->facts.product_id = le16_to_cpu(facts_data->product_id);
	mrioc->facts.reply_sz = le16_to_cpu(facts_data->reply_frame_size) * 4;
	mrioc->facts.exceptions = le16_to_cpu(facts_data->ioc_exceptions);
	mrioc->facts.max_perids = le16_to_cpu(facts_data->max_persistent_id);
	mrioc->facts.max_vds = le16_to_cpu(facts_data->max_vds);
	mrioc->facts.max_hpds = le16_to_cpu(facts_data->max_host_pds);
	mrioc->facts.max_advhpds = le16_to_cpu(facts_data->max_adv_host_pds);
	mrioc->facts.max_raid_pds = le16_to_cpu(facts_data->max_raid_pds);
	mrioc->facts.max_nvme = le16_to_cpu(facts_data->max_nvme);
	mrioc->facts.max_pcie_switches =
	    le16_to_cpu(facts_data->max_pcie_switches);
	mrioc->facts.max_sasexpanders =
	    le16_to_cpu(facts_data->max_sas_expanders);
	mrioc->facts.max_data_length = le16_to_cpu(facts_data->max_data_length);
	mrioc->facts.max_sasinitiators =
	    le16_to_cpu(facts_data->max_sas_initiators);
	mrioc->facts.max_enclosures = le16_to_cpu(facts_data->max_enclosures);
	mrioc->facts.min_devhandle = le16_to_cpu(facts_data->min_dev_handle);
	mrioc->facts.max_devhandle = le16_to_cpu(facts_data->max_dev_handle);
	mrioc->facts.max_op_req_q =
	    le16_to_cpu(facts_data->max_operational_request_queues);
	mrioc->facts.max_op_reply_q =
	    le16_to_cpu(facts_data->max_operational_reply_queues);
	mrioc->facts.ioc_capabilities =
	    le32_to_cpu(facts_data->ioc_capabilities);
	mrioc->facts.fw_ver.build_num =
	    le16_to_cpu(facts_data->fw_version.build_num);
	mrioc->facts.fw_ver.cust_id =
	    le16_to_cpu(facts_data->fw_version.customer_id);
	mrioc->facts.fw_ver.ph_minor = facts_data->fw_version.phase_minor;
	mrioc->facts.fw_ver.ph_major = facts_data->fw_version.phase_major;
	mrioc->facts.fw_ver.gen_minor = facts_data->fw_version.gen_minor;
	mrioc->facts.fw_ver.gen_major = facts_data->fw_version.gen_major;
	/* Never use more MSI-X vectors than firmware supports */
	mrioc->msix_count = min_t(int, mrioc->msix_count,
	    mrioc->facts.max_msix_vectors);
	mrioc->facts.sge_mod_mask = facts_data->sge_modifier_mask;
	mrioc->facts.sge_mod_value = facts_data->sge_modifier_value;
	mrioc->facts.sge_mod_shift = facts_data->sge_modifier_shift;
	mrioc->facts.shutdown_timeout =
	    le16_to_cpu(facts_data->shutdown_timeout);
	mrioc->facts.diag_trace_sz =
	    le32_to_cpu(facts_data->diag_trace_size);
	mrioc->facts.diag_fw_sz =
	    le32_to_cpu(facts_data->diag_fw_size);
	mrioc->facts.diag_drvr_sz = le32_to_cpu(facts_data->diag_driver_size);
	mrioc->facts.max_dev_per_tg =
	    facts_data->max_devices_per_throttle_group;
	mrioc->facts.io_throttle_data_length =
	    le16_to_cpu(facts_data->io_throttle_data_length);
	mrioc->facts.max_io_throttle_group =
	    le16_to_cpu(facts_data->max_io_throttle_group);
	mrioc->facts.io_throttle_low = le16_to_cpu(facts_data->io_throttle_low);
	mrioc->facts.io_throttle_high =
	    le16_to_cpu(facts_data->io_throttle_high);

	/* Fall back to the driver default when firmware does not report it */
	if (mrioc->facts.max_data_length ==
	    MPI3_IOCFACTS_MAX_DATA_LENGTH_NOT_REPORTED)
		mrioc->facts.max_data_length = MPI3MR_DEFAULT_MAX_IO_SIZE;
	else
		mrioc->facts.max_data_length *= MPI3MR_PAGE_SIZE_4K;
	/* Store in 512b block count */
	if (mrioc->facts.io_throttle_data_length)
		mrioc->io_throttle_data_length =
		    (mrioc->facts.io_throttle_data_length * 2 * 4);
	else
		/* set the length to 1MB + 1K to disable throttle */
		mrioc->io_throttle_data_length = (mrioc->facts.max_data_length / 512) + 2;

	/* Throttle high/low watermarks converted to 512b block counts too */
	mrioc->io_throttle_high = (mrioc->facts.io_throttle_high * 2 * 1024);
	mrioc->io_throttle_low = (mrioc->facts.io_throttle_low * 2 * 1024);

	ioc_info(mrioc, "ioc_num(%d), maxopQ(%d), maxopRepQ(%d), maxdh(%d),",
	    mrioc->facts.ioc_num, mrioc->facts.max_op_req_q,
	    mrioc->facts.max_op_reply_q, mrioc->facts.max_devhandle);
	ioc_info(mrioc,
	    "maxreqs(%d), mindh(%d) maxvectors(%d) maxperids(%d)\n",
	    mrioc->facts.max_reqs, mrioc->facts.min_devhandle,
	    mrioc->facts.max_msix_vectors, mrioc->facts.max_perids);
	ioc_info(mrioc, "SGEModMask 0x%x SGEModVal 0x%x SGEModShift 0x%x ",
	    mrioc->facts.sge_mod_mask, mrioc->facts.sge_mod_value,
	    mrioc->facts.sge_mod_shift);
	ioc_info(mrioc, "DMA mask %d InitialPE status 0x%x max_data_len (%d)\n",
	    mrioc->facts.dma_mask, (facts_flags &
	    MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_MASK), mrioc->facts.max_data_length);
	ioc_info(mrioc,
	    "max_dev_per_throttle_group(%d), max_throttle_groups(%d)\n",
	    mrioc->facts.max_dev_per_tg, mrioc->facts.max_io_throttle_group);
	ioc_info(mrioc,
	   "io_throttle_data_len(%dKiB), io_throttle_high(%dMiB), io_throttle_low(%dMiB)\n",
	   mrioc->facts.io_throttle_data_length * 4,
	   mrioc->facts.io_throttle_high, mrioc->facts.io_throttle_low);
}
3111 
3112 /**
 * mpi3mr_alloc_reply_sense_bufs - Allocate reply and sense buffers
3114  * @mrioc: Adapter instance reference
3115  *
3116  * Allocate and initialize the reply free buffers, sense
3117  * buffers, reply free queue and sense buffer queue.
3118  *
3119  * Return: 0 on success, non-zero on failures.
3120  */
static int mpi3mr_alloc_reply_sense_bufs(struct mpi3mr_ioc *mrioc)
{
	int retval = 0;
	u32 sz, i;

	/* Already allocated (e.g. on re-initialization); nothing to do */
	if (mrioc->init_cmds.reply)
		return retval;

	/* Per-tracker reply buffers for all internal driver commands */
	mrioc->init_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
	if (!mrioc->init_cmds.reply)
		goto out_failed;

	mrioc->bsg_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
	if (!mrioc->bsg_cmds.reply)
		goto out_failed;

	mrioc->transport_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
	if (!mrioc->transport_cmds.reply)
		goto out_failed;

	for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
		mrioc->dev_rmhs_cmds[i].reply = kzalloc(mrioc->reply_sz,
		    GFP_KERNEL);
		if (!mrioc->dev_rmhs_cmds[i].reply)
			goto out_failed;
	}

	for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) {
		mrioc->evtack_cmds[i].reply = kzalloc(mrioc->reply_sz,
		    GFP_KERNEL);
		if (!mrioc->evtack_cmds[i].reply)
			goto out_failed;
	}

	mrioc->host_tm_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
	if (!mrioc->host_tm_cmds.reply)
		goto out_failed;

	mrioc->pel_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
	if (!mrioc->pel_cmds.reply)
		goto out_failed;

	mrioc->pel_abort_cmd.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
	if (!mrioc->pel_abort_cmd.reply)
		goto out_failed;

	/* Tracking bitmaps sized from IOC facts / command-slot counts */
	mrioc->dev_handle_bitmap_bits = mrioc->facts.max_devhandle;
	mrioc->removepend_bitmap = bitmap_zalloc(mrioc->dev_handle_bitmap_bits,
						 GFP_KERNEL);
	if (!mrioc->removepend_bitmap)
		goto out_failed;

	mrioc->devrem_bitmap = bitmap_zalloc(MPI3MR_NUM_DEVRMCMD, GFP_KERNEL);
	if (!mrioc->devrem_bitmap)
		goto out_failed;

	mrioc->evtack_cmds_bitmap = bitmap_zalloc(MPI3MR_NUM_EVTACKCMD,
						  GFP_KERNEL);
	if (!mrioc->evtack_cmds_bitmap)
		goto out_failed;

	/* Queue depths: +1 because a free queue keeps one slot empty */
	mrioc->num_reply_bufs = mrioc->facts.max_reqs + MPI3MR_NUM_EVT_REPLIES;
	mrioc->reply_free_qsz = mrioc->num_reply_bufs + 1;
	mrioc->num_sense_bufs = mrioc->facts.max_reqs / MPI3MR_SENSEBUF_FACTOR;
	mrioc->sense_buf_q_sz = mrioc->num_sense_bufs + 1;

	/* reply buffer pool, 16 byte align */
	sz = mrioc->num_reply_bufs * mrioc->reply_sz;
	mrioc->reply_buf_pool = dma_pool_create("reply_buf pool",
	    &mrioc->pdev->dev, sz, 16, 0);
	if (!mrioc->reply_buf_pool) {
		ioc_err(mrioc, "reply buf pool: dma_pool_create failed\n");
		goto out_failed;
	}

	mrioc->reply_buf = dma_pool_zalloc(mrioc->reply_buf_pool, GFP_KERNEL,
	    &mrioc->reply_buf_dma);
	if (!mrioc->reply_buf)
		goto out_failed;

	mrioc->reply_buf_dma_max_address = mrioc->reply_buf_dma + sz;

	/* reply free queue, 8 byte align */
	sz = mrioc->reply_free_qsz * 8;
	mrioc->reply_free_q_pool = dma_pool_create("reply_free_q pool",
	    &mrioc->pdev->dev, sz, 8, 0);
	if (!mrioc->reply_free_q_pool) {
		ioc_err(mrioc, "reply_free_q pool: dma_pool_create failed\n");
		goto out_failed;
	}
	mrioc->reply_free_q = dma_pool_zalloc(mrioc->reply_free_q_pool,
	    GFP_KERNEL, &mrioc->reply_free_q_dma);
	if (!mrioc->reply_free_q)
		goto out_failed;

	/* sense buffer pool,  4 byte align */
	sz = mrioc->num_sense_bufs * MPI3MR_SENSE_BUF_SZ;
	mrioc->sense_buf_pool = dma_pool_create("sense_buf pool",
	    &mrioc->pdev->dev, sz, 4, 0);
	if (!mrioc->sense_buf_pool) {
		ioc_err(mrioc, "sense_buf pool: dma_pool_create failed\n");
		goto out_failed;
	}
	mrioc->sense_buf = dma_pool_zalloc(mrioc->sense_buf_pool, GFP_KERNEL,
	    &mrioc->sense_buf_dma);
	if (!mrioc->sense_buf)
		goto out_failed;

	/* sense buffer queue, 8 byte align */
	sz = mrioc->sense_buf_q_sz * 8;
	mrioc->sense_buf_q_pool = dma_pool_create("sense_buf_q pool",
	    &mrioc->pdev->dev, sz, 8, 0);
	if (!mrioc->sense_buf_q_pool) {
		ioc_err(mrioc, "sense_buf_q pool: dma_pool_create failed\n");
		goto out_failed;
	}
	mrioc->sense_buf_q = dma_pool_zalloc(mrioc->sense_buf_q_pool,
	    GFP_KERNEL, &mrioc->sense_buf_q_dma);
	if (!mrioc->sense_buf_q)
		goto out_failed;

	return retval;

out_failed:
	/*
	 * NOTE(review): partial allocations are not freed here; presumably
	 * released by the caller's common teardown path — verify.
	 */
	retval = -1;
	return retval;
}
3248 
3249 /**
3250  * mpimr_initialize_reply_sbuf_queues - initialize reply sense
3251  * buffers
3252  * @mrioc: Adapter instance reference
3253  *
3254  * Helper function to initialize reply and sense buffers along
3255  * with some debug prints.
3256  *
3257  * Return:  None.
3258  */
3259 static void mpimr_initialize_reply_sbuf_queues(struct mpi3mr_ioc *mrioc)
3260 {
3261 	u32 sz, i;
3262 	dma_addr_t phy_addr;
3263 
3264 	sz = mrioc->num_reply_bufs * mrioc->reply_sz;
3265 	ioc_info(mrioc,
3266 	    "reply buf pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), reply_dma(0x%llx)\n",
3267 	    mrioc->reply_buf, mrioc->num_reply_bufs, mrioc->reply_sz,
3268 	    (sz / 1024), (unsigned long long)mrioc->reply_buf_dma);
3269 	sz = mrioc->reply_free_qsz * 8;
3270 	ioc_info(mrioc,
3271 	    "reply_free_q pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), reply_dma(0x%llx)\n",
3272 	    mrioc->reply_free_q, mrioc->reply_free_qsz, 8, (sz / 1024),
3273 	    (unsigned long long)mrioc->reply_free_q_dma);
3274 	sz = mrioc->num_sense_bufs * MPI3MR_SENSE_BUF_SZ;
3275 	ioc_info(mrioc,
3276 	    "sense_buf pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), sense_dma(0x%llx)\n",
3277 	    mrioc->sense_buf, mrioc->num_sense_bufs, MPI3MR_SENSE_BUF_SZ,
3278 	    (sz / 1024), (unsigned long long)mrioc->sense_buf_dma);
3279 	sz = mrioc->sense_buf_q_sz * 8;
3280 	ioc_info(mrioc,
3281 	    "sense_buf_q pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), sense_dma(0x%llx)\n",
3282 	    mrioc->sense_buf_q, mrioc->sense_buf_q_sz, 8, (sz / 1024),
3283 	    (unsigned long long)mrioc->sense_buf_q_dma);
3284 
3285 	/* initialize Reply buffer Queue */
3286 	for (i = 0, phy_addr = mrioc->reply_buf_dma;
3287 	    i < mrioc->num_reply_bufs; i++, phy_addr += mrioc->reply_sz)
3288 		mrioc->reply_free_q[i] = cpu_to_le64(phy_addr);
3289 	mrioc->reply_free_q[i] = cpu_to_le64(0);
3290 
3291 	/* initialize Sense Buffer Queue */
3292 	for (i = 0, phy_addr = mrioc->sense_buf_dma;
3293 	    i < mrioc->num_sense_bufs; i++, phy_addr += MPI3MR_SENSE_BUF_SZ)
3294 		mrioc->sense_buf_q[i] = cpu_to_le64(phy_addr);
3295 	mrioc->sense_buf_q[i] = cpu_to_le64(0);
3296 }
3297 
3298 /**
3299  * mpi3mr_issue_iocinit - Send IOC Init
3300  * @mrioc: Adapter instance reference
3301  *
3302  * Issue IOC Init MPI request through admin queue and wait for
3303  * the completion of it or time out.
3304  *
3305  * Return: 0 on success, non-zero on failures.
3306  */
static int mpi3mr_issue_iocinit(struct mpi3mr_ioc *mrioc)
{
	struct mpi3_ioc_init_request iocinit_req;
	struct mpi3_driver_info_layout *drv_info;
	dma_addr_t data_dma;
	u32 data_len = sizeof(*drv_info);
	int retval = 0;
	ktime_t current_time;

	/* DMA-able driver-information page referenced by the IOC Init request */
	drv_info = dma_alloc_coherent(&mrioc->pdev->dev, data_len, &data_dma,
	    GFP_KERNEL);
	if (!drv_info) {
		retval = -1;
		goto out;
	}
	/* Fill the reply free and sense buffer queues before enabling them */
	mpimr_initialize_reply_sbuf_queues(mrioc);

	drv_info->information_length = cpu_to_le32(data_len);
	strscpy(drv_info->driver_signature, "Broadcom", sizeof(drv_info->driver_signature));
	strscpy(drv_info->os_name, utsname()->sysname, sizeof(drv_info->os_name));
	strscpy(drv_info->os_version, utsname()->release, sizeof(drv_info->os_version));
	strscpy(drv_info->driver_name, MPI3MR_DRIVER_NAME, sizeof(drv_info->driver_name));
	strscpy(drv_info->driver_version, MPI3MR_DRIVER_VERSION, sizeof(drv_info->driver_version));
	strscpy(drv_info->driver_release_date, MPI3MR_DRIVER_RELDATE,
	    sizeof(drv_info->driver_release_date));
	drv_info->driver_capabilities = 0;
	/* Keep a host-side copy of what was reported to the controller */
	memcpy((u8 *)&mrioc->driver_info, (u8 *)drv_info,
	    sizeof(mrioc->driver_info));

	memset(&iocinit_req, 0, sizeof(iocinit_req));
	/* init_cmds is a single-use command tracker; bail out if it is busy */
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "Issue IOCInit: Init command is in use\n");
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	iocinit_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	iocinit_req.function = MPI3_FUNCTION_IOC_INIT;
	iocinit_req.mpi_version.mpi3_version.dev = MPI3_VERSION_DEV;
	iocinit_req.mpi_version.mpi3_version.unit = MPI3_VERSION_UNIT;
	iocinit_req.mpi_version.mpi3_version.major = MPI3_VERSION_MAJOR;
	iocinit_req.mpi_version.mpi3_version.minor = MPI3_VERSION_MINOR;
	iocinit_req.who_init = MPI3_WHOINIT_HOST_DRIVER;
	iocinit_req.reply_free_queue_depth = cpu_to_le16(mrioc->reply_free_qsz);
	iocinit_req.reply_free_queue_address =
	    cpu_to_le64(mrioc->reply_free_q_dma);
	iocinit_req.sense_buffer_length = cpu_to_le16(MPI3MR_SENSE_BUF_SZ);
	iocinit_req.sense_buffer_free_queue_depth =
	    cpu_to_le16(mrioc->sense_buf_q_sz);
	iocinit_req.sense_buffer_free_queue_address =
	    cpu_to_le64(mrioc->sense_buf_q_dma);
	iocinit_req.driver_information_address = cpu_to_le64(data_dma);

	/* Timestamp (ms since epoch) handed to the controller */
	current_time = ktime_get_real();
	iocinit_req.time_stamp = cpu_to_le64(ktime_to_ms(current_time));

	iocinit_req.msg_flags |=
	    MPI3_IOCINIT_MSGFLAGS_SCSIIOSTATUSREPLY_SUPPORTED;
	iocinit_req.msg_flags |=
		MPI3_IOCINIT_MSGFLAGS_WRITESAMEDIVERT_SUPPORTED;

	init_completion(&mrioc->init_cmds.done);
	retval = mpi3mr_admin_request_post(mrioc, &iocinit_req,
	    sizeof(iocinit_req), 1);
	if (retval) {
		ioc_err(mrioc, "Issue IOCInit: Admin Post failed\n");
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		/* Timed out: kick off reset-handling/fault diagnostics */
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_IOCINIT_TIMEOUT);
		ioc_err(mrioc, "ioc_init timed out\n");
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc,
		    "Issue IOCInit: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}

	/* Hand all reply and sense buffers to the controller */
	mrioc->reply_free_queue_host_index = mrioc->num_reply_bufs;
	writel(mrioc->reply_free_queue_host_index,
	    &mrioc->sysif_regs->reply_free_host_index);

	mrioc->sbq_host_index = mrioc->num_sense_bufs;
	writel(mrioc->sbq_host_index,
	    &mrioc->sysif_regs->sense_buffer_free_host_index);
out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);

out:
	if (drv_info)
		dma_free_coherent(&mrioc->pdev->dev, data_len, drv_info,
		    data_dma);

	return retval;
}
3416 
3417 /**
3418  * mpi3mr_unmask_events - Unmask events in event mask bitmap
3419  * @mrioc: Adapter instance reference
3420  * @event: MPI event ID
3421  *
3422  * Un mask the specific event by resetting the event_mask
3423  * bitmap.
3424  *
 * Return: Nothing.
3426  */
3427 static void mpi3mr_unmask_events(struct mpi3mr_ioc *mrioc, u16 event)
3428 {
3429 	u32 desired_event;
3430 	u8 word;
3431 
3432 	if (event >= 128)
3433 		return;
3434 
3435 	desired_event = (1 << (event % 32));
3436 	word = event / 32;
3437 
3438 	mrioc->event_masks[word] &= ~desired_event;
3439 }
3440 
3441 /**
3442  * mpi3mr_issue_event_notification - Send event notification
3443  * @mrioc: Adapter instance reference
3444  *
3445  * Issue event notification MPI request through admin queue and
3446  * wait for the completion of it or time out.
3447  *
3448  * Return: 0 on success, non-zero on failures.
3449  */
static int mpi3mr_issue_event_notification(struct mpi3mr_ioc *mrioc)
{
	struct mpi3_event_notification_request evtnotify_req;
	int retval = 0;
	u8 i;

	memset(&evtnotify_req, 0, sizeof(evtnotify_req));
	/* init_cmds is a single-use command tracker; bail out if it is busy */
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "Issue EvtNotify: Init command is in use\n");
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	evtnotify_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	evtnotify_req.function = MPI3_FUNCTION_EVENT_NOTIFICATION;
	/* Send the driver's current event mask words to the controller */
	for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
		evtnotify_req.event_masks[i] =
		    cpu_to_le32(mrioc->event_masks[i]);
	init_completion(&mrioc->init_cmds.done);
	retval = mpi3mr_admin_request_post(mrioc, &evtnotify_req,
	    sizeof(evtnotify_req), 1);
	if (retval) {
		ioc_err(mrioc, "Issue EvtNotify: Admin Post failed\n");
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		/* Timed out: kick off reset-handling/fault diagnostics */
		ioc_err(mrioc, "event notification timed out\n");
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_EVTNOTIFY_TIMEOUT);
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc,
		    "Issue EvtNotify: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}

out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);
out:
	return retval;
}
3504 
3505 /**
3506  * mpi3mr_process_event_ack - Process event acknowledgment
3507  * @mrioc: Adapter instance reference
3508  * @event: MPI3 event ID
3509  * @event_ctx: event context
3510  *
3511  * Send event acknowledgment through admin queue and wait for
3512  * it to complete.
3513  *
3514  * Return: 0 on success, non-zero on failures.
3515  */
3516 int mpi3mr_process_event_ack(struct mpi3mr_ioc *mrioc, u8 event,
3517 	u32 event_ctx)
3518 {
3519 	struct mpi3_event_ack_request evtack_req;
3520 	int retval = 0;
3521 
3522 	memset(&evtack_req, 0, sizeof(evtack_req));
3523 	mutex_lock(&mrioc->init_cmds.mutex);
3524 	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
3525 		retval = -1;
3526 		ioc_err(mrioc, "Send EvtAck: Init command is in use\n");
3527 		mutex_unlock(&mrioc->init_cmds.mutex);
3528 		goto out;
3529 	}
3530 	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
3531 	mrioc->init_cmds.is_waiting = 1;
3532 	mrioc->init_cmds.callback = NULL;
3533 	evtack_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
3534 	evtack_req.function = MPI3_FUNCTION_EVENT_ACK;
3535 	evtack_req.event = event;
3536 	evtack_req.event_context = cpu_to_le32(event_ctx);
3537 
3538 	init_completion(&mrioc->init_cmds.done);
3539 	retval = mpi3mr_admin_request_post(mrioc, &evtack_req,
3540 	    sizeof(evtack_req), 1);
3541 	if (retval) {
3542 		ioc_err(mrioc, "Send EvtAck: Admin Post failed\n");
3543 		goto out_unlock;
3544 	}
3545 	wait_for_completion_timeout(&mrioc->init_cmds.done,
3546 	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
3547 	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
3548 		ioc_err(mrioc, "Issue EvtNotify: command timed out\n");
3549 		if (!(mrioc->init_cmds.state & MPI3MR_CMD_RESET))
3550 			mpi3mr_check_rh_fault_ioc(mrioc,
3551 			    MPI3MR_RESET_FROM_EVTACK_TIMEOUT);
3552 		retval = -1;
3553 		goto out_unlock;
3554 	}
3555 	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
3556 	    != MPI3_IOCSTATUS_SUCCESS) {
3557 		ioc_err(mrioc,
3558 		    "Send EvtAck: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
3559 		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
3560 		    mrioc->init_cmds.ioc_loginfo);
3561 		retval = -1;
3562 		goto out_unlock;
3563 	}
3564 
3565 out_unlock:
3566 	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
3567 	mutex_unlock(&mrioc->init_cmds.mutex);
3568 out:
3569 	return retval;
3570 }
3571 
3572 /**
3573  * mpi3mr_alloc_chain_bufs - Allocate chain buffers
3574  * @mrioc: Adapter instance reference
3575  *
3576  * Allocate chain buffers and set a bitmap to indicate free
3577  * chain buffers. Chain buffers are used to pass the SGE
3578  * information along with MPI3 SCSI IO requests for host I/O.
3579  *
3580  * Return: 0 on success, non-zero on failure
3581  */
static int mpi3mr_alloc_chain_bufs(struct mpi3mr_ioc *mrioc)
{
	int retval = 0;
	u32 sz, i;
	u16 num_chains;

	/* Already allocated (e.g. on re-initialization); nothing to do */
	if (mrioc->chain_sgl_list)
		return retval;

	num_chains = mrioc->max_host_ios / MPI3MR_CHAINBUF_FACTOR;

	/* DIX protection needs extra chain buffers for protection SGEs */
	if (prot_mask & (SHOST_DIX_TYPE0_PROTECTION
	    | SHOST_DIX_TYPE1_PROTECTION
	    | SHOST_DIX_TYPE2_PROTECTION
	    | SHOST_DIX_TYPE3_PROTECTION))
		num_chains += (num_chains / MPI3MR_CHAINBUFDIX_FACTOR);

	mrioc->chain_buf_count = num_chains;
	sz = sizeof(struct chain_element) * num_chains;
	mrioc->chain_sgl_list = kzalloc(sz, GFP_KERNEL);
	if (!mrioc->chain_sgl_list)
		goto out_failed;

	/* Cap SGL entries so one chain buffer covers max_data_length worth */
	if (mrioc->max_sgl_entries > (mrioc->facts.max_data_length /
		MPI3MR_PAGE_SIZE_4K))
		mrioc->max_sgl_entries = mrioc->facts.max_data_length /
			MPI3MR_PAGE_SIZE_4K;
	sz = mrioc->max_sgl_entries * sizeof(struct mpi3_sge_common);
	ioc_info(mrioc, "number of sgl entries=%d chain buffer size=%dKB\n",
			mrioc->max_sgl_entries, sz/1024);

	mrioc->chain_buf_pool = dma_pool_create("chain_buf pool",
	    &mrioc->pdev->dev, sz, 16, 0);
	if (!mrioc->chain_buf_pool) {
		ioc_err(mrioc, "chain buf pool: dma_pool_create failed\n");
		goto out_failed;
	}

	for (i = 0; i < num_chains; i++) {
		mrioc->chain_sgl_list[i].addr =
		    dma_pool_zalloc(mrioc->chain_buf_pool, GFP_KERNEL,
		    &mrioc->chain_sgl_list[i].dma_addr);

		if (!mrioc->chain_sgl_list[i].addr)
			goto out_failed;
	}
	/* Bitmap tracking which chain buffers are currently in use */
	mrioc->chain_bitmap = bitmap_zalloc(num_chains, GFP_KERNEL);
	if (!mrioc->chain_bitmap)
		goto out_failed;
	return retval;
out_failed:
	/*
	 * NOTE(review): partial allocations are not freed here; presumably
	 * released by the caller's common teardown path — verify.
	 */
	retval = -1;
	return retval;
}
3636 
3637 /**
3638  * mpi3mr_port_enable_complete - Mark port enable complete
3639  * @mrioc: Adapter instance reference
3640  * @drv_cmd: Internal command tracker
3641  *
3642  * Call back for asynchronous port enable request sets the
3643  * driver command to indicate port enable request is complete.
3644  *
3645  * Return: Nothing
3646  */
3647 static void mpi3mr_port_enable_complete(struct mpi3mr_ioc *mrioc,
3648 	struct mpi3mr_drv_cmd *drv_cmd)
3649 {
3650 	drv_cmd->callback = NULL;
3651 	mrioc->scan_started = 0;
3652 	if (drv_cmd->state & MPI3MR_CMD_RESET)
3653 		mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR;
3654 	else
3655 		mrioc->scan_failed = drv_cmd->ioc_status;
3656 	drv_cmd->state = MPI3MR_CMD_NOTUSED;
3657 }
3658 
3659 /**
3660  * mpi3mr_issue_port_enable - Issue Port Enable
3661  * @mrioc: Adapter instance reference
3662  * @async: Flag to wait for completion or not
3663  *
3664  * Issue Port Enable MPI request through admin queue and if the
3665  * async flag is not set wait for the completion of the port
3666  * enable or time out.
3667  *
3668  * Return: 0 on success, non-zero on failures.
3669  */
int mpi3mr_issue_port_enable(struct mpi3mr_ioc *mrioc, u8 async)
{
	struct mpi3_port_enable_request pe_req;
	int retval = 0;
	u32 pe_timeout = MPI3MR_PORTENABLE_TIMEOUT;

	memset(&pe_req, 0, sizeof(pe_req));
	/* init_cmds is a single-use command tracker; bail out if it is busy */
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "Issue PortEnable: Init command is in use\n");
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	/* async: completion is delivered via callback instead of waiting */
	if (async) {
		mrioc->init_cmds.is_waiting = 0;
		mrioc->init_cmds.callback = mpi3mr_port_enable_complete;
	} else {
		mrioc->init_cmds.is_waiting = 1;
		mrioc->init_cmds.callback = NULL;
		init_completion(&mrioc->init_cmds.done);
	}
	pe_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	pe_req.function = MPI3_FUNCTION_PORT_ENABLE;

	retval = mpi3mr_admin_request_post(mrioc, &pe_req, sizeof(pe_req), 1);
	if (retval) {
		ioc_err(mrioc, "Issue PortEnable: Admin Post failed\n");
		goto out_unlock;
	}
	/* Async mode: leave the command pending; the callback finishes it */
	if (async) {
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}

	wait_for_completion_timeout(&mrioc->init_cmds.done, (pe_timeout * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		ioc_err(mrioc, "port enable timed out\n");
		retval = -1;
		mpi3mr_check_rh_fault_ioc(mrioc, MPI3MR_RESET_FROM_PE_TIMEOUT);
		goto out_unlock;
	}
	/* Synchronous path: record scan result via the same completion hook */
	mpi3mr_port_enable_complete(mrioc, &mrioc->init_cmds);

out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);
out:
	return retval;
}
3721 
/*
 * Protocol type to name mapper structure: maps MPI3 IOCFacts
 * protocol flag bits to human readable names, used by
 * mpi3mr_print_ioc_info() to build the printed protocol list.
 */
static const struct {
	u8 protocol;
	char *name;
} mpi3mr_protocols[] = {
	{ MPI3_IOCFACTS_PROTOCOL_SCSI_INITIATOR, "Initiator" },
	{ MPI3_IOCFACTS_PROTOCOL_SCSI_TARGET, "Target" },
	{ MPI3_IOCFACTS_PROTOCOL_NVME, "NVMe attachment" },
};
3731 
/*
 * Capability to name mapper structure: maps MPI3 IOCFacts
 * capability bits to human readable names, used by
 * mpi3mr_print_ioc_info() to build the printed capability list.
 */
static const struct {
	u32 capability;
	char *name;
} mpi3mr_capabilities[] = {
	{ MPI3_IOCFACTS_CAPABILITY_RAID_SUPPORTED, "RAID" },
	{ MPI3_IOCFACTS_CAPABILITY_MULTIPATH_SUPPORTED, "MultiPath" },
};
3740 
3741 /**
3742  * mpi3mr_repost_diag_bufs - repost host diag buffers
3743  * @mrioc: Adapter instance reference
3744  *
3745  * repost firmware and trace diag buffers based on global
3746  * trigger flag from driver page 2
3747  *
3748  * Return: 0 on success, non-zero on failures.
3749  */
3750 static int mpi3mr_repost_diag_bufs(struct mpi3mr_ioc *mrioc)
3751 {
3752 	u64 global_trigger;
3753 	union mpi3mr_trigger_data prev_trigger_data;
3754 	struct diag_buffer_desc *trace_hdb = NULL;
3755 	struct diag_buffer_desc *fw_hdb = NULL;
3756 	int retval = 0;
3757 	bool trace_repost_needed = false;
3758 	bool fw_repost_needed = false;
3759 	u8 prev_trigger_type;
3760 
3761 	retval = mpi3mr_refresh_trigger(mrioc, MPI3_CONFIG_ACTION_READ_CURRENT);
3762 	if (retval)
3763 		return -1;
3764 
3765 	trace_hdb = mpi3mr_diag_buffer_for_type(mrioc,
3766 	    MPI3_DIAG_BUFFER_TYPE_TRACE);
3767 
3768 	if (trace_hdb &&
3769 	    trace_hdb->status != MPI3MR_HDB_BUFSTATUS_NOT_ALLOCATED &&
3770 	    trace_hdb->trigger_type != MPI3MR_HDB_TRIGGER_TYPE_GLOBAL &&
3771 	    trace_hdb->trigger_type != MPI3MR_HDB_TRIGGER_TYPE_ELEMENT)
3772 		trace_repost_needed = true;
3773 
3774 	fw_hdb = mpi3mr_diag_buffer_for_type(mrioc, MPI3_DIAG_BUFFER_TYPE_FW);
3775 
3776 	if (fw_hdb && fw_hdb->status != MPI3MR_HDB_BUFSTATUS_NOT_ALLOCATED &&
3777 	    fw_hdb->trigger_type != MPI3MR_HDB_TRIGGER_TYPE_GLOBAL &&
3778 	    fw_hdb->trigger_type != MPI3MR_HDB_TRIGGER_TYPE_ELEMENT)
3779 		fw_repost_needed = true;
3780 
3781 	if (trace_repost_needed || fw_repost_needed) {
3782 		global_trigger = le64_to_cpu(mrioc->driver_pg2->global_trigger);
3783 		if (global_trigger &
3784 		      MPI3_DRIVER2_GLOBALTRIGGER_POST_DIAG_TRACE_DISABLED)
3785 			trace_repost_needed = false;
3786 		if (global_trigger &
3787 		     MPI3_DRIVER2_GLOBALTRIGGER_POST_DIAG_FW_DISABLED)
3788 			fw_repost_needed = false;
3789 	}
3790 
3791 	if (trace_repost_needed) {
3792 		prev_trigger_type = trace_hdb->trigger_type;
3793 		memcpy(&prev_trigger_data, &trace_hdb->trigger_data,
3794 		    sizeof(trace_hdb->trigger_data));
3795 		retval = mpi3mr_issue_diag_buf_post(mrioc, trace_hdb);
3796 		if (!retval) {
3797 			dprint_init(mrioc, "trace diag buffer reposted");
3798 			mpi3mr_set_trigger_data_in_hdb(trace_hdb,
3799 				    MPI3MR_HDB_TRIGGER_TYPE_UNKNOWN, NULL, 1);
3800 		} else {
3801 			trace_hdb->trigger_type = prev_trigger_type;
3802 			memcpy(&trace_hdb->trigger_data, &prev_trigger_data,
3803 			    sizeof(prev_trigger_data));
3804 			ioc_err(mrioc, "trace diag buffer repost failed");
3805 			return -1;
3806 		}
3807 	}
3808 
3809 	if (fw_repost_needed) {
3810 		prev_trigger_type = fw_hdb->trigger_type;
3811 		memcpy(&prev_trigger_data, &fw_hdb->trigger_data,
3812 		    sizeof(fw_hdb->trigger_data));
3813 		retval = mpi3mr_issue_diag_buf_post(mrioc, fw_hdb);
3814 		if (!retval) {
3815 			dprint_init(mrioc, "firmware diag buffer reposted");
3816 			mpi3mr_set_trigger_data_in_hdb(fw_hdb,
3817 				    MPI3MR_HDB_TRIGGER_TYPE_UNKNOWN, NULL, 1);
3818 		} else {
3819 			fw_hdb->trigger_type = prev_trigger_type;
3820 			memcpy(&fw_hdb->trigger_data, &prev_trigger_data,
3821 			    sizeof(prev_trigger_data));
3822 			ioc_err(mrioc, "firmware diag buffer repost failed");
3823 			return -1;
3824 		}
3825 	}
3826 	return retval;
3827 }
3828 
3829 /**
3830  * mpi3mr_print_ioc_info - Display controller information
3831  * @mrioc: Adapter instance reference
3832  *
3833  * Display controller personality, capability, supported
3834  * protocols etc.
3835  *
3836  * Return: Nothing
3837  */
3838 static void
3839 mpi3mr_print_ioc_info(struct mpi3mr_ioc *mrioc)
3840 {
3841 	int i = 0, bytes_written = 0;
3842 	const char *personality;
3843 	char protocol[50] = {0};
3844 	char capabilities[100] = {0};
3845 	struct mpi3mr_compimg_ver *fwver = &mrioc->facts.fw_ver;
3846 
3847 	switch (mrioc->facts.personality) {
3848 	case MPI3_IOCFACTS_FLAGS_PERSONALITY_EHBA:
3849 		personality = "Enhanced HBA";
3850 		break;
3851 	case MPI3_IOCFACTS_FLAGS_PERSONALITY_RAID_DDR:
3852 		personality = "RAID";
3853 		break;
3854 	default:
3855 		personality = "Unknown";
3856 		break;
3857 	}
3858 
3859 	ioc_info(mrioc, "Running in %s Personality", personality);
3860 
3861 	ioc_info(mrioc, "FW version(%d.%d.%d.%d.%d.%d)\n",
3862 	    fwver->gen_major, fwver->gen_minor, fwver->ph_major,
3863 	    fwver->ph_minor, fwver->cust_id, fwver->build_num);
3864 
3865 	for (i = 0; i < ARRAY_SIZE(mpi3mr_protocols); i++) {
3866 		if (mrioc->facts.protocol_flags &
3867 		    mpi3mr_protocols[i].protocol) {
3868 			bytes_written += scnprintf(protocol + bytes_written,
3869 				    sizeof(protocol) - bytes_written, "%s%s",
3870 				    bytes_written ? "," : "",
3871 				    mpi3mr_protocols[i].name);
3872 		}
3873 	}
3874 
3875 	bytes_written = 0;
3876 	for (i = 0; i < ARRAY_SIZE(mpi3mr_capabilities); i++) {
3877 		if (mrioc->facts.protocol_flags &
3878 		    mpi3mr_capabilities[i].capability) {
3879 			bytes_written += scnprintf(capabilities + bytes_written,
3880 				    sizeof(capabilities) - bytes_written, "%s%s",
3881 				    bytes_written ? "," : "",
3882 				    mpi3mr_capabilities[i].name);
3883 		}
3884 	}
3885 
3886 	ioc_info(mrioc, "Protocol=(%s), Capabilities=(%s)\n",
3887 		 protocol, capabilities);
3888 }
3889 
3890 /**
3891  * mpi3mr_cleanup_resources - Free PCI resources
3892  * @mrioc: Adapter instance reference
3893  *
3894  * Unmap PCI device memory and disable PCI device.
3895  *
3896  * Return: 0 on success and non-zero on failure.
3897  */
3898 void mpi3mr_cleanup_resources(struct mpi3mr_ioc *mrioc)
3899 {
3900 	struct pci_dev *pdev = mrioc->pdev;
3901 
3902 	mpi3mr_cleanup_isr(mrioc);
3903 
3904 	if (mrioc->sysif_regs) {
3905 		iounmap((void __iomem *)mrioc->sysif_regs);
3906 		mrioc->sysif_regs = NULL;
3907 	}
3908 
3909 	if (pci_is_enabled(pdev)) {
3910 		if (mrioc->bars)
3911 			pci_release_selected_regions(pdev, mrioc->bars);
3912 		pci_disable_device(pdev);
3913 	}
3914 }
3915 
3916 /**
3917  * mpi3mr_setup_resources - Enable PCI resources
3918  * @mrioc: Adapter instance reference
3919  *
3920  * Enable PCI device memory, MSI-x registers and set DMA mask.
3921  *
3922  * Return: 0 on success and non-zero on failure.
3923  */
3924 int mpi3mr_setup_resources(struct mpi3mr_ioc *mrioc)
3925 {
3926 	struct pci_dev *pdev = mrioc->pdev;
3927 	u32 memap_sz = 0;
3928 	int i, retval = 0, capb = 0;
3929 	u16 message_control;
3930 	u64 dma_mask = mrioc->dma_mask ? mrioc->dma_mask :
3931 	    ((sizeof(dma_addr_t) > 4) ? DMA_BIT_MASK(64) : DMA_BIT_MASK(32));
3932 
3933 	if (pci_enable_device_mem(pdev)) {
3934 		ioc_err(mrioc, "pci_enable_device_mem: failed\n");
3935 		retval = -ENODEV;
3936 		goto out_failed;
3937 	}
3938 
3939 	capb = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
3940 	if (!capb) {
3941 		ioc_err(mrioc, "Unable to find MSI-X Capabilities\n");
3942 		retval = -ENODEV;
3943 		goto out_failed;
3944 	}
3945 	mrioc->bars = pci_select_bars(pdev, IORESOURCE_MEM);
3946 
3947 	if (pci_request_selected_regions(pdev, mrioc->bars,
3948 	    mrioc->driver_name)) {
3949 		ioc_err(mrioc, "pci_request_selected_regions: failed\n");
3950 		retval = -ENODEV;
3951 		goto out_failed;
3952 	}
3953 
3954 	for (i = 0; (i < DEVICE_COUNT_RESOURCE); i++) {
3955 		if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
3956 			mrioc->sysif_regs_phys = pci_resource_start(pdev, i);
3957 			memap_sz = pci_resource_len(pdev, i);
3958 			mrioc->sysif_regs =
3959 			    ioremap(mrioc->sysif_regs_phys, memap_sz);
3960 			break;
3961 		}
3962 	}
3963 
3964 	pci_set_master(pdev);
3965 
3966 	retval = dma_set_mask_and_coherent(&pdev->dev, dma_mask);
3967 	if (retval) {
3968 		if (dma_mask != DMA_BIT_MASK(32)) {
3969 			ioc_warn(mrioc, "Setting 64 bit DMA mask failed\n");
3970 			dma_mask = DMA_BIT_MASK(32);
3971 			retval = dma_set_mask_and_coherent(&pdev->dev,
3972 			    dma_mask);
3973 		}
3974 		if (retval) {
3975 			mrioc->dma_mask = 0;
3976 			ioc_err(mrioc, "Setting 32 bit DMA mask also failed\n");
3977 			goto out_failed;
3978 		}
3979 	}
3980 	mrioc->dma_mask = dma_mask;
3981 
3982 	if (!mrioc->sysif_regs) {
3983 		ioc_err(mrioc,
3984 		    "Unable to map adapter memory or resource not found\n");
3985 		retval = -EINVAL;
3986 		goto out_failed;
3987 	}
3988 
3989 	pci_read_config_word(pdev, capb + 2, &message_control);
3990 	mrioc->msix_count = (message_control & 0x3FF) + 1;
3991 
3992 	pci_save_state(pdev);
3993 
3994 	pci_set_drvdata(pdev, mrioc->shost);
3995 
3996 	mpi3mr_ioc_disable_intr(mrioc);
3997 
3998 	ioc_info(mrioc, "iomem(0x%016llx), mapped(0x%p), size(%d)\n",
3999 	    (unsigned long long)mrioc->sysif_regs_phys,
4000 	    mrioc->sysif_regs, memap_sz);
4001 	ioc_info(mrioc, "Number of MSI-X vectors found in capabilities: (%d)\n",
4002 	    mrioc->msix_count);
4003 
4004 	if (!reset_devices && poll_queues > 0)
4005 		mrioc->requested_poll_qcount = min_t(int, poll_queues,
4006 				mrioc->msix_count - 2);
4007 	return retval;
4008 
4009 out_failed:
4010 	mpi3mr_cleanup_resources(mrioc);
4011 	return retval;
4012 }
4013 
4014 /**
4015  * mpi3mr_enable_events - Enable required events
4016  * @mrioc: Adapter instance reference
4017  *
4018  * This routine unmasks the events required by the driver by
4019  * sennding appropriate event mask bitmapt through an event
4020  * notification request.
4021  *
4022  * Return: 0 on success and non-zero on failure.
4023  */
4024 static int mpi3mr_enable_events(struct mpi3mr_ioc *mrioc)
4025 {
4026 	int retval = 0;
4027 	u32  i;
4028 
4029 	for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
4030 		mrioc->event_masks[i] = -1;
4031 
4032 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_ADDED);
4033 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_INFO_CHANGED);
4034 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_STATUS_CHANGE);
4035 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE);
4036 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENCL_DEVICE_ADDED);
4037 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
4038 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_DISCOVERY);
4039 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR);
4040 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_BROADCAST_PRIMITIVE);
4041 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST);
4042 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_PCIE_ENUMERATION);
4043 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_PREPARE_FOR_RESET);
4044 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_CABLE_MGMT);
4045 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENERGY_PACK_CHANGE);
4046 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_DIAGNOSTIC_BUFFER_STATUS_CHANGE);
4047 
4048 	retval = mpi3mr_issue_event_notification(mrioc);
4049 	if (retval)
4050 		ioc_err(mrioc, "failed to issue event notification %d\n",
4051 		    retval);
4052 	return retval;
4053 }
4054 
4055 /**
4056  * mpi3mr_init_ioc - Initialize the controller
4057  * @mrioc: Adapter instance reference
4058  *
4059  * This the controller initialization routine, executed either
4060  * after soft reset or from pci probe callback.
4061  * Setup the required resources, memory map the controller
4062  * registers, create admin and operational reply queue pairs,
4063  * allocate required memory for reply pool, sense buffer pool,
4064  * issue IOC init request to the firmware, unmask the events and
4065  * issue port enable to discover SAS/SATA/NVMe devies and RAID
4066  * volumes.
4067  *
4068  * Return: 0 on success and non-zero on failure.
4069  */
4070 int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc)
4071 {
4072 	int retval = 0;
4073 	u8 retry = 0;
4074 	struct mpi3_ioc_facts_data facts_data;
4075 	u32 sz;
4076 
4077 retry_init:
4078 	retval = mpi3mr_bring_ioc_ready(mrioc);
4079 	if (retval) {
4080 		ioc_err(mrioc, "Failed to bring ioc ready: error %d\n",
4081 		    retval);
4082 		goto out_failed_noretry;
4083 	}
4084 
4085 	retval = mpi3mr_setup_isr(mrioc, 1);
4086 	if (retval) {
4087 		ioc_err(mrioc, "Failed to setup ISR error %d\n",
4088 		    retval);
4089 		goto out_failed_noretry;
4090 	}
4091 
4092 	retval = mpi3mr_issue_iocfacts(mrioc, &facts_data);
4093 	if (retval) {
4094 		ioc_err(mrioc, "Failed to Issue IOC Facts %d\n",
4095 		    retval);
4096 		goto out_failed;
4097 	}
4098 
4099 	mrioc->max_host_ios = mrioc->facts.max_reqs - MPI3MR_INTERNAL_CMDS_RESVD;
4100 	mrioc->shost->max_sectors = mrioc->facts.max_data_length / 512;
4101 	mrioc->num_io_throttle_group = mrioc->facts.max_io_throttle_group;
4102 	atomic_set(&mrioc->pend_large_data_sz, 0);
4103 
4104 	if (reset_devices)
4105 		mrioc->max_host_ios = min_t(int, mrioc->max_host_ios,
4106 		    MPI3MR_HOST_IOS_KDUMP);
4107 
4108 	if (!(mrioc->facts.ioc_capabilities &
4109 	    MPI3_IOCFACTS_CAPABILITY_MULTIPATH_SUPPORTED)) {
4110 		mrioc->sas_transport_enabled = 1;
4111 		mrioc->scsi_device_channel = 1;
4112 		mrioc->shost->max_channel = 1;
4113 		mrioc->shost->transportt = mpi3mr_transport_template;
4114 	}
4115 
4116 	mrioc->reply_sz = mrioc->facts.reply_sz;
4117 
4118 	retval = mpi3mr_check_reset_dma_mask(mrioc);
4119 	if (retval) {
4120 		ioc_err(mrioc, "Resetting dma mask failed %d\n",
4121 		    retval);
4122 		goto out_failed_noretry;
4123 	}
4124 
4125 	mpi3mr_print_ioc_info(mrioc);
4126 
4127 	if (!mrioc->cfg_page) {
4128 		dprint_init(mrioc, "allocating config page buffers\n");
4129 		mrioc->cfg_page_sz = MPI3MR_DEFAULT_CFG_PAGE_SZ;
4130 		mrioc->cfg_page = dma_alloc_coherent(&mrioc->pdev->dev,
4131 		    mrioc->cfg_page_sz, &mrioc->cfg_page_dma, GFP_KERNEL);
4132 		if (!mrioc->cfg_page) {
4133 			retval = -1;
4134 			goto out_failed_noretry;
4135 		}
4136 	}
4137 
4138 	dprint_init(mrioc, "allocating host diag buffers\n");
4139 	mpi3mr_alloc_diag_bufs(mrioc);
4140 
4141 	dprint_init(mrioc, "allocating ioctl dma buffers\n");
4142 	mpi3mr_alloc_ioctl_dma_memory(mrioc);
4143 
4144 	dprint_init(mrioc, "posting host diag buffers\n");
4145 	retval = mpi3mr_post_diag_bufs(mrioc);
4146 
4147 	if (retval)
4148 		ioc_warn(mrioc, "failed to post host diag buffers\n");
4149 
4150 	if (!mrioc->init_cmds.reply) {
4151 		retval = mpi3mr_alloc_reply_sense_bufs(mrioc);
4152 		if (retval) {
4153 			ioc_err(mrioc,
4154 			    "%s :Failed to allocated reply sense buffers %d\n",
4155 			    __func__, retval);
4156 			goto out_failed_noretry;
4157 		}
4158 	}
4159 
4160 	if (!mrioc->chain_sgl_list) {
4161 		retval = mpi3mr_alloc_chain_bufs(mrioc);
4162 		if (retval) {
4163 			ioc_err(mrioc, "Failed to allocated chain buffers %d\n",
4164 			    retval);
4165 			goto out_failed_noretry;
4166 		}
4167 	}
4168 
4169 	retval = mpi3mr_issue_iocinit(mrioc);
4170 	if (retval) {
4171 		ioc_err(mrioc, "Failed to Issue IOC Init %d\n",
4172 		    retval);
4173 		goto out_failed;
4174 	}
4175 
4176 	retval = mpi3mr_print_pkg_ver(mrioc);
4177 	if (retval) {
4178 		ioc_err(mrioc, "failed to get package version\n");
4179 		goto out_failed;
4180 	}
4181 
4182 	retval = mpi3mr_setup_isr(mrioc, 0);
4183 	if (retval) {
4184 		ioc_err(mrioc, "Failed to re-setup ISR, error %d\n",
4185 		    retval);
4186 		goto out_failed_noretry;
4187 	}
4188 
4189 	retval = mpi3mr_create_op_queues(mrioc);
4190 	if (retval) {
4191 		ioc_err(mrioc, "Failed to create OpQueues error %d\n",
4192 		    retval);
4193 		goto out_failed;
4194 	}
4195 
4196 	if (!mrioc->pel_seqnum_virt) {
4197 		dprint_init(mrioc, "allocating memory for pel_seqnum_virt\n");
4198 		mrioc->pel_seqnum_sz = sizeof(struct mpi3_pel_seq);
4199 		mrioc->pel_seqnum_virt = dma_alloc_coherent(&mrioc->pdev->dev,
4200 		    mrioc->pel_seqnum_sz, &mrioc->pel_seqnum_dma,
4201 		    GFP_KERNEL);
4202 		if (!mrioc->pel_seqnum_virt) {
4203 			retval = -ENOMEM;
4204 			goto out_failed_noretry;
4205 		}
4206 	}
4207 
4208 	if (!mrioc->throttle_groups && mrioc->num_io_throttle_group) {
4209 		dprint_init(mrioc, "allocating memory for throttle groups\n");
4210 		sz = sizeof(struct mpi3mr_throttle_group_info);
4211 		mrioc->throttle_groups = kcalloc(mrioc->num_io_throttle_group, sz, GFP_KERNEL);
4212 		if (!mrioc->throttle_groups) {
4213 			retval = -1;
4214 			goto out_failed_noretry;
4215 		}
4216 	}
4217 
4218 	retval = mpi3mr_enable_events(mrioc);
4219 	if (retval) {
4220 		ioc_err(mrioc, "failed to enable events %d\n",
4221 		    retval);
4222 		goto out_failed;
4223 	}
4224 
4225 	retval = mpi3mr_refresh_trigger(mrioc, MPI3_CONFIG_ACTION_READ_CURRENT);
4226 	if (retval) {
4227 		ioc_err(mrioc, "failed to refresh triggers\n");
4228 		goto out_failed;
4229 	}
4230 
4231 	ioc_info(mrioc, "controller initialization completed successfully\n");
4232 	return retval;
4233 out_failed:
4234 	if (retry < 2) {
4235 		retry++;
4236 		ioc_warn(mrioc, "retrying controller initialization, retry_count:%d\n",
4237 		    retry);
4238 		mpi3mr_memset_buffers(mrioc);
4239 		goto retry_init;
4240 	}
4241 	retval = -1;
4242 out_failed_noretry:
4243 	ioc_err(mrioc, "controller initialization failed\n");
4244 	mpi3mr_issue_reset(mrioc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
4245 	    MPI3MR_RESET_FROM_CTLR_CLEANUP);
4246 	mrioc->unrecoverable = 1;
4247 	return retval;
4248 }
4249 
4250 /**
4251  * mpi3mr_reinit_ioc - Re-Initialize the controller
4252  * @mrioc: Adapter instance reference
4253  * @is_resume: Called from resume or reset path
4254  *
4255  * This the controller re-initialization routine, executed from
4256  * the soft reset handler or resume callback. Creates
4257  * operational reply queue pairs, allocate required memory for
4258  * reply pool, sense buffer pool, issue IOC init request to the
4259  * firmware, unmask the events and issue port enable to discover
4260  * SAS/SATA/NVMe devices and RAID volumes.
4261  *
4262  * Return: 0 on success and non-zero on failure.
4263  */
4264 int mpi3mr_reinit_ioc(struct mpi3mr_ioc *mrioc, u8 is_resume)
4265 {
4266 	int retval = 0;
4267 	u8 retry = 0;
4268 	struct mpi3_ioc_facts_data facts_data;
4269 	u32 pe_timeout, ioc_status;
4270 
4271 retry_init:
4272 	pe_timeout =
4273 	    (MPI3MR_PORTENABLE_TIMEOUT / MPI3MR_PORTENABLE_POLL_INTERVAL);
4274 
4275 	dprint_reset(mrioc, "bringing up the controller to ready state\n");
4276 	retval = mpi3mr_bring_ioc_ready(mrioc);
4277 	if (retval) {
4278 		ioc_err(mrioc, "failed to bring to ready state\n");
4279 		goto out_failed_noretry;
4280 	}
4281 
4282 	if (is_resume || mrioc->block_on_pci_err) {
4283 		dprint_reset(mrioc, "setting up single ISR\n");
4284 		retval = mpi3mr_setup_isr(mrioc, 1);
4285 		if (retval) {
4286 			ioc_err(mrioc, "failed to setup ISR\n");
4287 			goto out_failed_noretry;
4288 		}
4289 	} else
4290 		mpi3mr_ioc_enable_intr(mrioc);
4291 
4292 	dprint_reset(mrioc, "getting ioc_facts\n");
4293 	retval = mpi3mr_issue_iocfacts(mrioc, &facts_data);
4294 	if (retval) {
4295 		ioc_err(mrioc, "failed to get ioc_facts\n");
4296 		goto out_failed;
4297 	}
4298 
4299 	dprint_reset(mrioc, "validating ioc_facts\n");
4300 	retval = mpi3mr_revalidate_factsdata(mrioc);
4301 	if (retval) {
4302 		ioc_err(mrioc, "failed to revalidate ioc_facts data\n");
4303 		goto out_failed_noretry;
4304 	}
4305 
4306 	mpi3mr_print_ioc_info(mrioc);
4307 
4308 	if (is_resume) {
4309 		dprint_reset(mrioc, "posting host diag buffers\n");
4310 		retval = mpi3mr_post_diag_bufs(mrioc);
4311 		if (retval)
4312 			ioc_warn(mrioc, "failed to post host diag buffers\n");
4313 	} else {
4314 		retval = mpi3mr_repost_diag_bufs(mrioc);
4315 		if (retval)
4316 			ioc_warn(mrioc, "failed to re post host diag buffers\n");
4317 	}
4318 
4319 	dprint_reset(mrioc, "sending ioc_init\n");
4320 	retval = mpi3mr_issue_iocinit(mrioc);
4321 	if (retval) {
4322 		ioc_err(mrioc, "failed to send ioc_init\n");
4323 		goto out_failed;
4324 	}
4325 
4326 	dprint_reset(mrioc, "getting package version\n");
4327 	retval = mpi3mr_print_pkg_ver(mrioc);
4328 	if (retval) {
4329 		ioc_err(mrioc, "failed to get package version\n");
4330 		goto out_failed;
4331 	}
4332 
4333 	if (is_resume || mrioc->block_on_pci_err) {
4334 		dprint_reset(mrioc, "setting up multiple ISR\n");
4335 		retval = mpi3mr_setup_isr(mrioc, 0);
4336 		if (retval) {
4337 			ioc_err(mrioc, "failed to re-setup ISR\n");
4338 			goto out_failed_noretry;
4339 		}
4340 	}
4341 
4342 	dprint_reset(mrioc, "creating operational queue pairs\n");
4343 	retval = mpi3mr_create_op_queues(mrioc);
4344 	if (retval) {
4345 		ioc_err(mrioc, "failed to create operational queue pairs\n");
4346 		goto out_failed;
4347 	}
4348 
4349 	if (!mrioc->pel_seqnum_virt) {
4350 		dprint_reset(mrioc, "allocating memory for pel_seqnum_virt\n");
4351 		mrioc->pel_seqnum_sz = sizeof(struct mpi3_pel_seq);
4352 		mrioc->pel_seqnum_virt = dma_alloc_coherent(&mrioc->pdev->dev,
4353 		    mrioc->pel_seqnum_sz, &mrioc->pel_seqnum_dma,
4354 		    GFP_KERNEL);
4355 		if (!mrioc->pel_seqnum_virt) {
4356 			retval = -ENOMEM;
4357 			goto out_failed_noretry;
4358 		}
4359 	}
4360 
4361 	if (mrioc->shost->nr_hw_queues > mrioc->num_op_reply_q) {
4362 		ioc_err(mrioc,
4363 		    "cannot create minimum number of operational queues expected:%d created:%d\n",
4364 		    mrioc->shost->nr_hw_queues, mrioc->num_op_reply_q);
4365 		retval = -1;
4366 		goto out_failed_noretry;
4367 	}
4368 
4369 	dprint_reset(mrioc, "enabling events\n");
4370 	retval = mpi3mr_enable_events(mrioc);
4371 	if (retval) {
4372 		ioc_err(mrioc, "failed to enable events\n");
4373 		goto out_failed;
4374 	}
4375 
4376 	mrioc->device_refresh_on = 1;
4377 	mpi3mr_add_event_wait_for_device_refresh(mrioc);
4378 
4379 	ioc_info(mrioc, "sending port enable\n");
4380 	retval = mpi3mr_issue_port_enable(mrioc, 1);
4381 	if (retval) {
4382 		ioc_err(mrioc, "failed to issue port enable\n");
4383 		goto out_failed;
4384 	}
4385 	do {
4386 		ssleep(MPI3MR_PORTENABLE_POLL_INTERVAL);
4387 		if (mrioc->init_cmds.state == MPI3MR_CMD_NOTUSED)
4388 			break;
4389 		if (!pci_device_is_present(mrioc->pdev))
4390 			mrioc->unrecoverable = 1;
4391 		if (mrioc->unrecoverable) {
4392 			retval = -1;
4393 			goto out_failed_noretry;
4394 		}
4395 		ioc_status = readl(&mrioc->sysif_regs->ioc_status);
4396 		if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) ||
4397 		    (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) {
4398 			mpi3mr_print_fault_info(mrioc);
4399 			mrioc->init_cmds.is_waiting = 0;
4400 			mrioc->init_cmds.callback = NULL;
4401 			mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
4402 			goto out_failed;
4403 		}
4404 	} while (--pe_timeout);
4405 
4406 	if (!pe_timeout) {
4407 		ioc_err(mrioc, "port enable timed out\n");
4408 		mpi3mr_check_rh_fault_ioc(mrioc,
4409 		    MPI3MR_RESET_FROM_PE_TIMEOUT);
4410 		mrioc->init_cmds.is_waiting = 0;
4411 		mrioc->init_cmds.callback = NULL;
4412 		mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
4413 		goto out_failed;
4414 	} else if (mrioc->scan_failed) {
4415 		ioc_err(mrioc,
4416 		    "port enable failed with status=0x%04x\n",
4417 		    mrioc->scan_failed);
4418 	} else
4419 		ioc_info(mrioc, "port enable completed successfully\n");
4420 
4421 	ioc_info(mrioc, "controller %s completed successfully\n",
4422 	    (is_resume)?"resume":"re-initialization");
4423 	return retval;
4424 out_failed:
4425 	if (retry < 2) {
4426 		retry++;
4427 		ioc_warn(mrioc, "retrying controller %s, retry_count:%d\n",
4428 		    (is_resume)?"resume":"re-initialization", retry);
4429 		mpi3mr_memset_buffers(mrioc);
4430 		goto retry_init;
4431 	}
4432 	retval = -1;
4433 out_failed_noretry:
4434 	ioc_err(mrioc, "controller %s is failed\n",
4435 	    (is_resume)?"resume":"re-initialization");
4436 	mpi3mr_issue_reset(mrioc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
4437 	    MPI3MR_RESET_FROM_CTLR_CLEANUP);
4438 	mrioc->unrecoverable = 1;
4439 	return retval;
4440 }
4441 
4442 /**
4443  * mpi3mr_memset_op_reply_q_buffers - memset the operational reply queue's
4444  *					segments
4445  * @mrioc: Adapter instance reference
4446  * @qidx: Operational reply queue index
4447  *
4448  * Return: Nothing.
4449  */
4450 static void mpi3mr_memset_op_reply_q_buffers(struct mpi3mr_ioc *mrioc, u16 qidx)
4451 {
4452 	struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx;
4453 	struct segments *segments;
4454 	int i, size;
4455 
4456 	if (!op_reply_q->q_segments)
4457 		return;
4458 
4459 	size = op_reply_q->segment_qd * mrioc->op_reply_desc_sz;
4460 	segments = op_reply_q->q_segments;
4461 	for (i = 0; i < op_reply_q->num_segments; i++)
4462 		memset(segments[i].segment, 0, size);
4463 }
4464 
4465 /**
4466  * mpi3mr_memset_op_req_q_buffers - memset the operational request queue's
4467  *					segments
4468  * @mrioc: Adapter instance reference
4469  * @qidx: Operational request queue index
4470  *
4471  * Return: Nothing.
4472  */
4473 static void mpi3mr_memset_op_req_q_buffers(struct mpi3mr_ioc *mrioc, u16 qidx)
4474 {
4475 	struct op_req_qinfo *op_req_q = mrioc->req_qinfo + qidx;
4476 	struct segments *segments;
4477 	int i, size;
4478 
4479 	if (!op_req_q->q_segments)
4480 		return;
4481 
4482 	size = op_req_q->segment_qd * mrioc->facts.op_req_sz;
4483 	segments = op_req_q->q_segments;
4484 	for (i = 0; i < op_req_q->num_segments; i++)
4485 		memset(segments[i].segment, 0, size);
4486 }
4487 
4488 /**
4489  * mpi3mr_memset_buffers - memset memory for a controller
4490  * @mrioc: Adapter instance reference
4491  *
4492  * clear all the memory allocated for a controller, typically
4493  * called post reset to reuse the memory allocated during the
4494  * controller init.
4495  *
4496  * Return: Nothing.
4497  */
4498 void mpi3mr_memset_buffers(struct mpi3mr_ioc *mrioc)
4499 {
4500 	u16 i;
4501 	struct mpi3mr_throttle_group_info *tg;
4502 
4503 	mrioc->change_count = 0;
4504 	mrioc->active_poll_qcount = 0;
4505 	mrioc->default_qcount = 0;
4506 	if (mrioc->admin_req_base)
4507 		memset(mrioc->admin_req_base, 0, mrioc->admin_req_q_sz);
4508 	if (mrioc->admin_reply_base)
4509 		memset(mrioc->admin_reply_base, 0, mrioc->admin_reply_q_sz);
4510 	atomic_set(&mrioc->admin_reply_q_in_use, 0);
4511 
4512 	if (mrioc->init_cmds.reply) {
4513 		memset(mrioc->init_cmds.reply, 0, sizeof(*mrioc->init_cmds.reply));
4514 		memset(mrioc->bsg_cmds.reply, 0,
4515 		    sizeof(*mrioc->bsg_cmds.reply));
4516 		memset(mrioc->host_tm_cmds.reply, 0,
4517 		    sizeof(*mrioc->host_tm_cmds.reply));
4518 		memset(mrioc->pel_cmds.reply, 0,
4519 		    sizeof(*mrioc->pel_cmds.reply));
4520 		memset(mrioc->pel_abort_cmd.reply, 0,
4521 		    sizeof(*mrioc->pel_abort_cmd.reply));
4522 		memset(mrioc->transport_cmds.reply, 0,
4523 		    sizeof(*mrioc->transport_cmds.reply));
4524 		for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++)
4525 			memset(mrioc->dev_rmhs_cmds[i].reply, 0,
4526 			    sizeof(*mrioc->dev_rmhs_cmds[i].reply));
4527 		for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++)
4528 			memset(mrioc->evtack_cmds[i].reply, 0,
4529 			    sizeof(*mrioc->evtack_cmds[i].reply));
4530 		bitmap_clear(mrioc->removepend_bitmap, 0,
4531 			     mrioc->dev_handle_bitmap_bits);
4532 		bitmap_clear(mrioc->devrem_bitmap, 0, MPI3MR_NUM_DEVRMCMD);
4533 		bitmap_clear(mrioc->evtack_cmds_bitmap, 0,
4534 			     MPI3MR_NUM_EVTACKCMD);
4535 	}
4536 
4537 	for (i = 0; i < mrioc->num_queues; i++) {
4538 		mrioc->op_reply_qinfo[i].qid = 0;
4539 		mrioc->op_reply_qinfo[i].ci = 0;
4540 		mrioc->op_reply_qinfo[i].num_replies = 0;
4541 		mrioc->op_reply_qinfo[i].ephase = 0;
4542 		atomic_set(&mrioc->op_reply_qinfo[i].pend_ios, 0);
4543 		atomic_set(&mrioc->op_reply_qinfo[i].in_use, 0);
4544 		mpi3mr_memset_op_reply_q_buffers(mrioc, i);
4545 
4546 		mrioc->req_qinfo[i].ci = 0;
4547 		mrioc->req_qinfo[i].pi = 0;
4548 		mrioc->req_qinfo[i].num_requests = 0;
4549 		mrioc->req_qinfo[i].qid = 0;
4550 		mrioc->req_qinfo[i].reply_qid = 0;
4551 		spin_lock_init(&mrioc->req_qinfo[i].q_lock);
4552 		mpi3mr_memset_op_req_q_buffers(mrioc, i);
4553 	}
4554 
4555 	atomic_set(&mrioc->pend_large_data_sz, 0);
4556 	if (mrioc->throttle_groups) {
4557 		tg = mrioc->throttle_groups;
4558 		for (i = 0; i < mrioc->num_io_throttle_group; i++, tg++) {
4559 			tg->id = 0;
4560 			tg->fw_qd = 0;
4561 			tg->modified_qd = 0;
4562 			tg->io_divert = 0;
4563 			tg->need_qd_reduction = 0;
4564 			tg->high = 0;
4565 			tg->low = 0;
4566 			tg->qd_reduction = 0;
4567 			atomic_set(&tg->pend_large_data_sz, 0);
4568 		}
4569 	}
4570 }
4571 
4572 /**
4573  * mpi3mr_free_mem - Free memory allocated for a controller
4574  * @mrioc: Adapter instance reference
4575  *
4576  * Free all the memory allocated for a controller.
4577  *
4578  * Return: Nothing.
4579  */
void mpi3mr_free_mem(struct mpi3mr_ioc *mrioc)
{
	u16 i;
	struct mpi3mr_intr_info *intr_info;
	struct diag_buffer_desc *diag_buffer;

	mpi3mr_free_enclosure_list(mrioc);
	mpi3mr_free_ioctl_dma_memory(mrioc);

	/*
	 * Sense buffer and sense buffer queue: release the pool element
	 * first, then destroy the pool itself.
	 */
	if (mrioc->sense_buf_pool) {
		if (mrioc->sense_buf)
			dma_pool_free(mrioc->sense_buf_pool, mrioc->sense_buf,
			    mrioc->sense_buf_dma);
		dma_pool_destroy(mrioc->sense_buf_pool);
		mrioc->sense_buf = NULL;
		mrioc->sense_buf_pool = NULL;
	}
	if (mrioc->sense_buf_q_pool) {
		if (mrioc->sense_buf_q)
			dma_pool_free(mrioc->sense_buf_q_pool,
			    mrioc->sense_buf_q, mrioc->sense_buf_q_dma);
		dma_pool_destroy(mrioc->sense_buf_q_pool);
		mrioc->sense_buf_q = NULL;
		mrioc->sense_buf_q_pool = NULL;
	}

	/* Reply buffers and reply free queue: same free-then-destroy order */
	if (mrioc->reply_buf_pool) {
		if (mrioc->reply_buf)
			dma_pool_free(mrioc->reply_buf_pool, mrioc->reply_buf,
			    mrioc->reply_buf_dma);
		dma_pool_destroy(mrioc->reply_buf_pool);
		mrioc->reply_buf = NULL;
		mrioc->reply_buf_pool = NULL;
	}
	if (mrioc->reply_free_q_pool) {
		if (mrioc->reply_free_q)
			dma_pool_free(mrioc->reply_free_q_pool,
			    mrioc->reply_free_q, mrioc->reply_free_q_dma);
		dma_pool_destroy(mrioc->reply_free_q_pool);
		mrioc->reply_free_q = NULL;
		mrioc->reply_free_q_pool = NULL;
	}

	/* Operational request and reply queue segments */
	for (i = 0; i < mrioc->num_op_req_q; i++)
		mpi3mr_free_op_req_q_segments(mrioc, i);

	for (i = 0; i < mrioc->num_op_reply_q; i++)
		mpi3mr_free_op_reply_q_segments(mrioc, i);

	/* Drop stale operational reply queue pointers from the irq info */
	for (i = 0; i < mrioc->intr_info_count; i++) {
		intr_info = mrioc->intr_info + i;
		intr_info->op_reply_q = NULL;
	}

	kfree(mrioc->req_qinfo);
	mrioc->req_qinfo = NULL;
	mrioc->num_op_req_q = 0;

	kfree(mrioc->op_reply_qinfo);
	mrioc->op_reply_qinfo = NULL;
	mrioc->num_op_reply_q = 0;

	/* Reply buffers of the internal driver command trackers */
	kfree(mrioc->init_cmds.reply);
	mrioc->init_cmds.reply = NULL;

	kfree(mrioc->bsg_cmds.reply);
	mrioc->bsg_cmds.reply = NULL;

	kfree(mrioc->host_tm_cmds.reply);
	mrioc->host_tm_cmds.reply = NULL;

	kfree(mrioc->pel_cmds.reply);
	mrioc->pel_cmds.reply = NULL;

	kfree(mrioc->pel_abort_cmd.reply);
	mrioc->pel_abort_cmd.reply = NULL;

	for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) {
		kfree(mrioc->evtack_cmds[i].reply);
		mrioc->evtack_cmds[i].reply = NULL;
	}

	/* Tracking bitmaps */
	bitmap_free(mrioc->removepend_bitmap);
	mrioc->removepend_bitmap = NULL;

	bitmap_free(mrioc->devrem_bitmap);
	mrioc->devrem_bitmap = NULL;

	bitmap_free(mrioc->evtack_cmds_bitmap);
	mrioc->evtack_cmds_bitmap = NULL;

	bitmap_free(mrioc->chain_bitmap);
	mrioc->chain_bitmap = NULL;

	kfree(mrioc->transport_cmds.reply);
	mrioc->transport_cmds.reply = NULL;

	for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
		kfree(mrioc->dev_rmhs_cmds[i].reply);
		mrioc->dev_rmhs_cmds[i].reply = NULL;
	}

	/* Chain SGL buffers: free each element before destroying the pool */
	if (mrioc->chain_buf_pool) {
		for (i = 0; i < mrioc->chain_buf_count; i++) {
			if (mrioc->chain_sgl_list[i].addr) {
				dma_pool_free(mrioc->chain_buf_pool,
				    mrioc->chain_sgl_list[i].addr,
				    mrioc->chain_sgl_list[i].dma_addr);
				mrioc->chain_sgl_list[i].addr = NULL;
			}
		}
		dma_pool_destroy(mrioc->chain_buf_pool);
		mrioc->chain_buf_pool = NULL;
	}

	kfree(mrioc->chain_sgl_list);
	mrioc->chain_sgl_list = NULL;

	/* Admin queues and other coherent DMA allocations */
	if (mrioc->admin_reply_base) {
		dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_reply_q_sz,
		    mrioc->admin_reply_base, mrioc->admin_reply_dma);
		mrioc->admin_reply_base = NULL;
	}
	if (mrioc->admin_req_base) {
		dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_req_q_sz,
		    mrioc->admin_req_base, mrioc->admin_req_dma);
		mrioc->admin_req_base = NULL;
	}
	if (mrioc->cfg_page) {
		dma_free_coherent(&mrioc->pdev->dev, mrioc->cfg_page_sz,
		    mrioc->cfg_page, mrioc->cfg_page_dma);
		mrioc->cfg_page = NULL;
	}
	if (mrioc->pel_seqnum_virt) {
		dma_free_coherent(&mrioc->pdev->dev, mrioc->pel_seqnum_sz,
		    mrioc->pel_seqnum_virt, mrioc->pel_seqnum_dma);
		mrioc->pel_seqnum_virt = NULL;
	}

	/* Host diag buffers: free and fully reset each descriptor */
	for (i = 0; i < MPI3MR_MAX_NUM_HDB; i++) {
		diag_buffer = &mrioc->diag_buffers[i];
		if (diag_buffer->addr) {
			dma_free_coherent(&mrioc->pdev->dev,
			    diag_buffer->size, diag_buffer->addr,
			    diag_buffer->dma_addr);
			diag_buffer->addr = NULL;
			diag_buffer->size = 0;
			diag_buffer->type = 0;
			diag_buffer->status = 0;
		}
	}

	kfree(mrioc->throttle_groups);
	mrioc->throttle_groups = NULL;

	kfree(mrioc->logdata_buf);
	mrioc->logdata_buf = NULL;

}
4739 
4740 /**
4741  * mpi3mr_issue_ioc_shutdown - shutdown controller
4742  * @mrioc: Adapter instance reference
4743  *
 * Send shutdown notification to the controller and wait for the
4745  * shutdown_timeout for it to be completed.
4746  *
4747  * Return: Nothing.
4748  */
4749 static void mpi3mr_issue_ioc_shutdown(struct mpi3mr_ioc *mrioc)
4750 {
4751 	u32 ioc_config, ioc_status;
4752 	u8 retval = 1;
4753 	u32 timeout = MPI3MR_DEFAULT_SHUTDOWN_TIME * 10;
4754 
4755 	ioc_info(mrioc, "Issuing shutdown Notification\n");
4756 	if (mrioc->unrecoverable) {
4757 		ioc_warn(mrioc,
4758 		    "IOC is unrecoverable shutdown is not issued\n");
4759 		return;
4760 	}
4761 	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
4762 	if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
4763 	    == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_IN_PROGRESS) {
4764 		ioc_info(mrioc, "shutdown already in progress\n");
4765 		return;
4766 	}
4767 
4768 	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
4769 	ioc_config |= MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_NORMAL;
4770 	ioc_config |= MPI3_SYSIF_IOC_CONFIG_DEVICE_SHUTDOWN_SEND_REQ;
4771 
4772 	writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);
4773 
4774 	if (mrioc->facts.shutdown_timeout)
4775 		timeout = mrioc->facts.shutdown_timeout * 10;
4776 
4777 	do {
4778 		ioc_status = readl(&mrioc->sysif_regs->ioc_status);
4779 		if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
4780 		    == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_COMPLETE) {
4781 			retval = 0;
4782 			break;
4783 		}
4784 		msleep(100);
4785 	} while (--timeout);
4786 
4787 	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
4788 	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
4789 
4790 	if (retval) {
4791 		if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
4792 		    == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_IN_PROGRESS)
4793 			ioc_warn(mrioc,
4794 			    "shutdown still in progress after timeout\n");
4795 	}
4796 
4797 	ioc_info(mrioc,
4798 	    "Base IOC Sts/Config after %s shutdown is (0x%x)/(0x%x)\n",
4799 	    (!retval) ? "successful" : "failed", ioc_status,
4800 	    ioc_config);
4801 }
4802 
4803 /**
4804  * mpi3mr_cleanup_ioc - Cleanup controller
4805  * @mrioc: Adapter instance reference
4806  *
4807  * controller cleanup handler, Message unit reset or soft reset
4808  * and shutdown notification is issued to the controller.
4809  *
4810  * Return: Nothing.
4811  */
4812 void mpi3mr_cleanup_ioc(struct mpi3mr_ioc *mrioc)
4813 {
4814 	enum mpi3mr_iocstate ioc_state;
4815 
4816 	dprint_exit(mrioc, "cleaning up the controller\n");
4817 	mpi3mr_ioc_disable_intr(mrioc);
4818 
4819 	ioc_state = mpi3mr_get_iocstate(mrioc);
4820 
4821 	if (!mrioc->unrecoverable && !mrioc->reset_in_progress &&
4822 	    !mrioc->pci_err_recovery &&
4823 	    (ioc_state == MRIOC_STATE_READY)) {
4824 		if (mpi3mr_issue_and_process_mur(mrioc,
4825 		    MPI3MR_RESET_FROM_CTLR_CLEANUP))
4826 			mpi3mr_issue_reset(mrioc,
4827 			    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET,
4828 			    MPI3MR_RESET_FROM_MUR_FAILURE);
4829 		mpi3mr_issue_ioc_shutdown(mrioc);
4830 	}
4831 	dprint_exit(mrioc, "controller cleanup completed\n");
4832 }
4833 
4834 /**
 * mpi3mr_drv_cmd_comp_reset - Flush an internal driver command
4836  * @mrioc: Adapter instance reference
4837  * @cmdptr: Internal command tracker
4838  *
4839  * Complete an internal driver commands with state indicating it
4840  * is completed due to reset.
4841  *
4842  * Return: Nothing.
4843  */
4844 static inline void mpi3mr_drv_cmd_comp_reset(struct mpi3mr_ioc *mrioc,
4845 	struct mpi3mr_drv_cmd *cmdptr)
4846 {
4847 	if (cmdptr->state & MPI3MR_CMD_PENDING) {
4848 		cmdptr->state |= MPI3MR_CMD_RESET;
4849 		cmdptr->state &= ~MPI3MR_CMD_PENDING;
4850 		if (cmdptr->is_waiting) {
4851 			complete(&cmdptr->done);
4852 			cmdptr->is_waiting = 0;
4853 		} else if (cmdptr->callback)
4854 			cmdptr->callback(mrioc, cmdptr);
4855 	}
4856 }
4857 
4858 /**
 * mpi3mr_flush_drv_cmds - Flush internal driver commands
4860  * @mrioc: Adapter instance reference
4861  *
4862  * Flush all internal driver commands post reset
4863  *
4864  * Return: Nothing.
4865  */
4866 void mpi3mr_flush_drv_cmds(struct mpi3mr_ioc *mrioc)
4867 {
4868 	struct mpi3mr_drv_cmd *cmdptr;
4869 	u8 i;
4870 
4871 	cmdptr = &mrioc->init_cmds;
4872 	mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
4873 
4874 	cmdptr = &mrioc->cfg_cmds;
4875 	mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
4876 
4877 	cmdptr = &mrioc->bsg_cmds;
4878 	mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
4879 	cmdptr = &mrioc->host_tm_cmds;
4880 	mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
4881 
4882 	for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
4883 		cmdptr = &mrioc->dev_rmhs_cmds[i];
4884 		mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
4885 	}
4886 
4887 	for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) {
4888 		cmdptr = &mrioc->evtack_cmds[i];
4889 		mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
4890 	}
4891 
4892 	cmdptr = &mrioc->pel_cmds;
4893 	mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
4894 
4895 	cmdptr = &mrioc->pel_abort_cmd;
4896 	mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
4897 
4898 	cmdptr = &mrioc->transport_cmds;
4899 	mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
4900 }
4901 
4902 /**
4903  * mpi3mr_pel_wait_post - Issue PEL Wait
4904  * @mrioc: Adapter instance reference
4905  * @drv_cmd: Internal command tracker
4906  *
4907  * Issue PEL Wait MPI request through admin queue and return.
4908  *
4909  * Return: Nothing.
4910  */
4911 static void mpi3mr_pel_wait_post(struct mpi3mr_ioc *mrioc,
4912 	struct mpi3mr_drv_cmd *drv_cmd)
4913 {
4914 	struct mpi3_pel_req_action_wait pel_wait;
4915 
4916 	mrioc->pel_abort_requested = false;
4917 
4918 	memset(&pel_wait, 0, sizeof(pel_wait));
4919 	drv_cmd->state = MPI3MR_CMD_PENDING;
4920 	drv_cmd->is_waiting = 0;
4921 	drv_cmd->callback = mpi3mr_pel_wait_complete;
4922 	drv_cmd->ioc_status = 0;
4923 	drv_cmd->ioc_loginfo = 0;
4924 	pel_wait.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_PEL_WAIT);
4925 	pel_wait.function = MPI3_FUNCTION_PERSISTENT_EVENT_LOG;
4926 	pel_wait.action = MPI3_PEL_ACTION_WAIT;
4927 	pel_wait.starting_sequence_number = cpu_to_le32(mrioc->pel_newest_seqnum);
4928 	pel_wait.locale = cpu_to_le16(mrioc->pel_locale);
4929 	pel_wait.class = cpu_to_le16(mrioc->pel_class);
4930 	pel_wait.wait_time = MPI3_PEL_WAITTIME_INFINITE_WAIT;
4931 	dprint_bsg_info(mrioc, "sending pel_wait seqnum(%d), class(%d), locale(0x%08x)\n",
4932 	    mrioc->pel_newest_seqnum, mrioc->pel_class, mrioc->pel_locale);
4933 
4934 	if (mpi3mr_admin_request_post(mrioc, &pel_wait, sizeof(pel_wait), 0)) {
4935 		dprint_bsg_err(mrioc,
4936 			    "Issuing PELWait: Admin post failed\n");
4937 		drv_cmd->state = MPI3MR_CMD_NOTUSED;
4938 		drv_cmd->callback = NULL;
4939 		drv_cmd->retry_count = 0;
4940 		mrioc->pel_enabled = false;
4941 	}
4942 }
4943 
4944 /**
4945  * mpi3mr_pel_get_seqnum_post - Issue PEL Get Sequence number
4946  * @mrioc: Adapter instance reference
4947  * @drv_cmd: Internal command tracker
4948  *
4949  * Issue PEL get sequence number MPI request through admin queue
4950  * and return.
4951  *
4952  * Return: 0 on success, non-zero on failure.
4953  */
int mpi3mr_pel_get_seqnum_post(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_drv_cmd *drv_cmd)
{
	struct mpi3_pel_req_action_get_sequence_numbers pel_getseq_req;
	u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
	int retval = 0;

	/*
	 * The request is always tracked through mrioc->pel_cmds and the
	 * firmware writes the sequence numbers into the preallocated
	 * pel_seqnum DMA buffer; completion is handled asynchronously by
	 * mpi3mr_pel_get_seqnum_complete().
	 */
	memset(&pel_getseq_req, 0, sizeof(pel_getseq_req));
	mrioc->pel_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->pel_cmds.is_waiting = 0;
	mrioc->pel_cmds.ioc_status = 0;
	mrioc->pel_cmds.ioc_loginfo = 0;
	mrioc->pel_cmds.callback = mpi3mr_pel_get_seqnum_complete;
	pel_getseq_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_PEL_WAIT);
	pel_getseq_req.function = MPI3_FUNCTION_PERSISTENT_EVENT_LOG;
	pel_getseq_req.action = MPI3_PEL_ACTION_GET_SEQNUM;
	mpi3mr_add_sg_single(&pel_getseq_req.sgl, sgl_flags,
	    mrioc->pel_seqnum_sz, mrioc->pel_seqnum_dma);

	retval = mpi3mr_admin_request_post(mrioc, &pel_getseq_req,
			sizeof(pel_getseq_req), 0);
	if (retval) {
		/*
		 * NOTE(review): the pending state set above lives on
		 * mrioc->pel_cmds, yet on failure only *drv_cmd (when
		 * non-NULL) is reset. When called with drv_cmd == NULL,
		 * pel_cmds appears to remain MPI3MR_CMD_PENDING — confirm
		 * whether that is intentional.
		 */
		if (drv_cmd) {
			drv_cmd->state = MPI3MR_CMD_NOTUSED;
			drv_cmd->callback = NULL;
			drv_cmd->retry_count = 0;
		}
		/* A failed post disables further PEL processing */
		mrioc->pel_enabled = false;
	}

	return retval;
}
4986 
4987 /**
4988  * mpi3mr_pel_wait_complete - PELWait Completion callback
4989  * @mrioc: Adapter instance reference
4990  * @drv_cmd: Internal command tracker
4991  *
4992  * This is a callback handler for the PELWait request and
4993  * firmware completes a PELWait request when it is aborted or a
4994  * new PEL entry is available. This sends AEN to the application
4995  * and if the PELwait completion is not due to PELAbort then
4996  * this will send a request for new PEL Sequence number
4997  *
4998  * Return: Nothing.
4999  */
5000 static void mpi3mr_pel_wait_complete(struct mpi3mr_ioc *mrioc,
5001 	struct mpi3mr_drv_cmd *drv_cmd)
5002 {
5003 	struct mpi3_pel_reply *pel_reply = NULL;
5004 	u16 ioc_status, pe_log_status;
5005 	bool do_retry = false;
5006 
5007 	if (drv_cmd->state & MPI3MR_CMD_RESET)
5008 		goto cleanup_drv_cmd;
5009 
5010 	ioc_status = drv_cmd->ioc_status & MPI3_IOCSTATUS_STATUS_MASK;
5011 	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
5012 		ioc_err(mrioc, "%s: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
5013 			__func__, ioc_status, drv_cmd->ioc_loginfo);
5014 		dprint_bsg_err(mrioc,
5015 		    "pel_wait: failed with ioc_status(0x%04x), log_info(0x%08x)\n",
5016 		    ioc_status, drv_cmd->ioc_loginfo);
5017 		do_retry = true;
5018 	}
5019 
5020 	if (drv_cmd->state & MPI3MR_CMD_REPLY_VALID)
5021 		pel_reply = (struct mpi3_pel_reply *)drv_cmd->reply;
5022 
5023 	if (!pel_reply) {
5024 		dprint_bsg_err(mrioc,
5025 		    "pel_wait: failed due to no reply\n");
5026 		goto out_failed;
5027 	}
5028 
5029 	pe_log_status = le16_to_cpu(pel_reply->pe_log_status);
5030 	if ((pe_log_status != MPI3_PEL_STATUS_SUCCESS) &&
5031 	    (pe_log_status != MPI3_PEL_STATUS_ABORTED)) {
5032 		ioc_err(mrioc, "%s: Failed pe_log_status(0x%04x)\n",
5033 			__func__, pe_log_status);
5034 		dprint_bsg_err(mrioc,
5035 		    "pel_wait: failed due to pel_log_status(0x%04x)\n",
5036 		    pe_log_status);
5037 		do_retry = true;
5038 	}
5039 
5040 	if (do_retry) {
5041 		if (drv_cmd->retry_count < MPI3MR_PEL_RETRY_COUNT) {
5042 			drv_cmd->retry_count++;
5043 			dprint_bsg_err(mrioc, "pel_wait: retrying(%d)\n",
5044 			    drv_cmd->retry_count);
5045 			mpi3mr_pel_wait_post(mrioc, drv_cmd);
5046 			return;
5047 		}
5048 		dprint_bsg_err(mrioc,
5049 		    "pel_wait: failed after all retries(%d)\n",
5050 		    drv_cmd->retry_count);
5051 		goto out_failed;
5052 	}
5053 	atomic64_inc(&event_counter);
5054 	if (!mrioc->pel_abort_requested) {
5055 		mrioc->pel_cmds.retry_count = 0;
5056 		mpi3mr_pel_get_seqnum_post(mrioc, &mrioc->pel_cmds);
5057 	}
5058 
5059 	return;
5060 out_failed:
5061 	mrioc->pel_enabled = false;
5062 cleanup_drv_cmd:
5063 	drv_cmd->state = MPI3MR_CMD_NOTUSED;
5064 	drv_cmd->callback = NULL;
5065 	drv_cmd->retry_count = 0;
5066 }
5067 
5068 /**
5069  * mpi3mr_pel_get_seqnum_complete - PELGetSeqNum Completion callback
5070  * @mrioc: Adapter instance reference
5071  * @drv_cmd: Internal command tracker
5072  *
5073  * This is a callback handler for the PEL get sequence number
5074  * request and a new PEL wait request will be issued to the
5075  * firmware from this
5076  *
5077  * Return: Nothing.
5078  */
void mpi3mr_pel_get_seqnum_complete(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_drv_cmd *drv_cmd)
{
	struct mpi3_pel_reply *pel_reply = NULL;
	struct mpi3_pel_seq *pel_seqnum_virt;
	u16 ioc_status;
	bool do_retry = false;

	/* Firmware DMA'd the sequence numbers into this preallocated buffer */
	pel_seqnum_virt = (struct mpi3_pel_seq *)mrioc->pel_seqnum_virt;

	/* Command flushed by a controller reset: just release the tracker */
	if (drv_cmd->state & MPI3MR_CMD_RESET)
		goto cleanup_drv_cmd;

	ioc_status = drv_cmd->ioc_status & MPI3_IOCSTATUS_STATUS_MASK;
	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
		dprint_bsg_err(mrioc,
		    "pel_get_seqnum: failed with ioc_status(0x%04x), log_info(0x%08x)\n",
		    ioc_status, drv_cmd->ioc_loginfo);
		do_retry = true;
	}

	if (drv_cmd->state & MPI3MR_CMD_REPLY_VALID)
		pel_reply = (struct mpi3_pel_reply *)drv_cmd->reply;
	if (!pel_reply) {
		dprint_bsg_err(mrioc,
		    "pel_get_seqnum: failed due to no reply\n");
		goto out_failed;
	}

	if (le16_to_cpu(pel_reply->pe_log_status) != MPI3_PEL_STATUS_SUCCESS) {
		dprint_bsg_err(mrioc,
		    "pel_get_seqnum: failed due to pel_log_status(0x%04x)\n",
		    le16_to_cpu(pel_reply->pe_log_status));
		do_retry = true;
	}

	/* Retry up to MPI3MR_PEL_RETRY_COUNT times before disabling PEL */
	if (do_retry) {
		if (drv_cmd->retry_count < MPI3MR_PEL_RETRY_COUNT) {
			drv_cmd->retry_count++;
			dprint_bsg_err(mrioc,
			    "pel_get_seqnum: retrying(%d)\n",
			    drv_cmd->retry_count);
			mpi3mr_pel_get_seqnum_post(mrioc, drv_cmd);
			return;
		}

		dprint_bsg_err(mrioc,
		    "pel_get_seqnum: failed after all retries(%d)\n",
		    drv_cmd->retry_count);
		goto out_failed;
	}
	/* Wait for entries strictly newer than the newest logged one */
	mrioc->pel_newest_seqnum = le32_to_cpu(pel_seqnum_virt->newest) + 1;
	drv_cmd->retry_count = 0;
	mpi3mr_pel_wait_post(mrioc, drv_cmd);

	return;
out_failed:
	mrioc->pel_enabled = false;
cleanup_drv_cmd:
	drv_cmd->state = MPI3MR_CMD_NOTUSED;
	drv_cmd->callback = NULL;
	drv_cmd->retry_count = 0;
}
5142 
5143 /**
5144  * mpi3mr_soft_reset_handler - Reset the controller
5145  * @mrioc: Adapter instance reference
5146  * @reset_reason: Reset reason code
5147  * @snapdump: Flag to generate snapdump in firmware or not
5148  *
 * This is a handler for recovering the controller by issuing a soft
 * reset or a diag fault reset.  This is a blocking function and
5151  * when one reset is executed if any other resets they will be
5152  * blocked. All BSG requests will be blocked during the reset. If
5153  * controller reset is successful then the controller will be
 * reinitialized, otherwise the controller will be marked as not
5155  * recoverable
5156  *
 * If the snapdump bit is set, the controller is issued with diag
5158  * fault reset so that the firmware can create a snap dump and
5159  * post that the firmware will result in F000 fault and the
5160  * driver will issue soft reset to recover from that.
5161  *
5162  * Return: 0 on success, non-zero on failure.
5163  */
int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc,
	u16 reset_reason, u8 snapdump)
{
	int retval = 0, i;
	unsigned long flags;
	u32 host_diagnostic, timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10;
	union mpi3mr_trigger_data trigger_data;

	/* Block the reset handler until any in-flight diag save finishes */
	dprint_reset(mrioc,
	    "soft_reset_handler: check and block on diagsave_timeout(%d)\n",
	    mrioc->diagsave_timeout);
	while (mrioc->diagsave_timeout)
		ssleep(1);
	/*
	 * Block new resets until the currently executing one is finished and
	 * return the status of the existing reset for all blocked resets
	 */
	dprint_reset(mrioc, "soft_reset_handler: acquiring reset_mutex\n");
	if (!mutex_trylock(&mrioc->reset_mutex)) {
		ioc_info(mrioc,
		    "controller reset triggered by %s is blocked due to another reset in progress\n",
		    mpi3mr_reset_rc_name(reset_reason));
		do {
			ssleep(1);
		} while (mrioc->reset_in_progress == 1);
		ioc_info(mrioc,
		    "returning previous reset result(%d) for the reset triggered by %s\n",
		    mrioc->prev_reset_result,
		    mpi3mr_reset_rc_name(reset_reason));
		return mrioc->prev_reset_result;
	}
	ioc_info(mrioc, "controller reset is triggered by %s\n",
	    mpi3mr_reset_rc_name(reset_reason));

	/* Mark the reset in progress and quiesce BSG traffic */
	mrioc->device_refresh_on = 0;
	mrioc->reset_in_progress = 1;
	mrioc->stop_bsgs = 1;
	mrioc->prev_reset_result = -1;
	memset(&trigger_data, 0, sizeof(trigger_data));

	/*
	 * For host-initiated (non-fault) resets without snapdump, release
	 * the diag buffers and mask all events before taking the
	 * controller down.
	 */
	if ((!snapdump) && (reset_reason != MPI3MR_RESET_FROM_FAULT_WATCH) &&
	    (reset_reason != MPI3MR_RESET_FROM_FIRMWARE) &&
	    (reset_reason != MPI3MR_RESET_FROM_CIACTIV_FAULT)) {
		mpi3mr_set_trigger_data_in_all_hdb(mrioc,
		    MPI3MR_HDB_TRIGGER_TYPE_SOFT_RESET, NULL, 0);
		dprint_reset(mrioc,
		    "soft_reset_handler: releasing host diagnostic buffers\n");
		mpi3mr_release_diag_bufs(mrioc, 0);
		for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
			mrioc->event_masks[i] = -1;

		dprint_reset(mrioc, "soft_reset_handler: masking events\n");
		mpi3mr_issue_event_notification(mrioc);
	}

	mpi3mr_wait_for_host_io(mrioc, MPI3MR_RESET_HOST_IOWAIT_TIMEOUT);

	mpi3mr_ioc_disable_intr(mrioc);

	/*
	 * When a snapdump is requested, first issue a diag fault reset so
	 * the firmware captures its state, then wait for the diag save to
	 * complete before proceeding with the soft reset.
	 */
	if (snapdump) {
		mpi3mr_set_diagsave(mrioc);
		retval = mpi3mr_issue_reset(mrioc,
		    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, reset_reason);
		if (!retval) {
			trigger_data.fault = (readl(&mrioc->sysif_regs->fault) &
				      MPI3_SYSIF_FAULT_CODE_MASK);
			do {
				host_diagnostic =
				    readl(&mrioc->sysif_regs->host_diagnostic);
				if (!(host_diagnostic &
				    MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS))
					break;
				msleep(100);
			} while (--timeout);
			mpi3mr_set_trigger_data_in_all_hdb(mrioc,
			    MPI3MR_HDB_TRIGGER_TYPE_FAULT, &trigger_data, 0);
		}
	}

	retval = mpi3mr_issue_reset(mrioc,
	    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET, reset_reason);
	if (retval) {
		ioc_err(mrioc, "Failed to issue soft reset to the ioc\n");
		goto out;
	}
	/* The throttle group count must survive the reset unchanged */
	if (mrioc->num_io_throttle_group !=
	    mrioc->facts.max_io_throttle_group) {
		ioc_err(mrioc,
		    "max io throttle group doesn't match old(%d), new(%d)\n",
		    mrioc->num_io_throttle_group,
		    mrioc->facts.max_io_throttle_group);
		retval = -EPERM;
		goto out;
	}

	/* Flush everything that was outstanding across the reset */
	mpi3mr_flush_delayed_cmd_lists(mrioc);
	mpi3mr_flush_drv_cmds(mrioc);
	bitmap_clear(mrioc->devrem_bitmap, 0, MPI3MR_NUM_DEVRMCMD);
	bitmap_clear(mrioc->removepend_bitmap, 0,
		     mrioc->dev_handle_bitmap_bits);
	bitmap_clear(mrioc->evtack_cmds_bitmap, 0, MPI3MR_NUM_EVTACKCMD);
	mpi3mr_flush_host_io(mrioc);
	mpi3mr_cleanup_fwevt_list(mrioc);
	mpi3mr_invalidate_devhandles(mrioc);
	mpi3mr_free_enclosure_list(mrioc);

	if (mrioc->prepare_for_reset) {
		mrioc->prepare_for_reset = 0;
		mrioc->prepare_for_reset_timeout_counter = 0;
	}
	mpi3mr_memset_buffers(mrioc);
	mpi3mr_release_diag_bufs(mrioc, 1);
	mrioc->fw_release_trigger_active = false;
	mrioc->trace_release_trigger_active = false;
	mrioc->snapdump_trigger_active = false;
	mpi3mr_set_trigger_data_in_all_hdb(mrioc,
	    MPI3MR_HDB_TRIGGER_TYPE_SOFT_RESET, NULL, 0);

	/* Bring the controller back up */
	dprint_reset(mrioc,
	    "soft_reset_handler: reinitializing the controller\n");
	retval = mpi3mr_reinit_ioc(mrioc, 0);
	if (retval) {
		pr_err(IOCNAME "reinit after soft reset failed: reason %d\n",
		    mrioc->name, reset_reason);
		goto out;
	}
	ssleep(MPI3MR_RESET_TOPOLOGY_SETTLE_TIME);

out:
	if (!retval) {
		/* Success: re-enable PEL, watchdog and BSG traffic */
		mrioc->diagsave_timeout = 0;
		mrioc->reset_in_progress = 0;
		mrioc->pel_abort_requested = 0;
		if (mrioc->pel_enabled) {
			mrioc->pel_cmds.retry_count = 0;
			mpi3mr_pel_wait_post(mrioc, &mrioc->pel_cmds);
		}

		mrioc->device_refresh_on = 0;

		mrioc->ts_update_counter = 0;
		spin_lock_irqsave(&mrioc->watchdog_lock, flags);
		if (mrioc->watchdog_work_q)
			queue_delayed_work(mrioc->watchdog_work_q,
			    &mrioc->watchdog_work,
			    msecs_to_jiffies(MPI3MR_WATCHDOG_INTERVAL));
		spin_unlock_irqrestore(&mrioc->watchdog_lock, flags);
		mrioc->stop_bsgs = 0;
		if (mrioc->pel_enabled)
			atomic64_inc(&event_counter);
	} else {
		/* Failure: fault the IOC and mark it unrecoverable */
		mpi3mr_issue_reset(mrioc,
		    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, reset_reason);
		mrioc->device_refresh_on = 0;
		mrioc->unrecoverable = 1;
		mrioc->reset_in_progress = 0;
		mrioc->stop_bsgs = 0;
		retval = -1;
		mpi3mr_flush_cmds_for_unrecovered_controller(mrioc);
	}
	mrioc->prev_reset_result = retval;
	mutex_unlock(&mrioc->reset_mutex);
	ioc_info(mrioc, "controller reset is %s\n",
	    ((retval == 0) ? "successful" : "failed"));
	return retval;
}
5331 
5332 
5333 /**
5334  * mpi3mr_free_config_dma_memory - free memory for config page
5335  * @mrioc: Adapter instance reference
5336  * @mem_desc: memory descriptor structure
5337  *
5338  * Check whether the size of the buffer specified by the memory
5339  * descriptor is greater than the default page size if so then
5340  * free the memory pointed by the descriptor.
5341  *
5342  * Return: Nothing.
5343  */
5344 static void mpi3mr_free_config_dma_memory(struct mpi3mr_ioc *mrioc,
5345 	struct dma_memory_desc *mem_desc)
5346 {
5347 	if ((mem_desc->size > mrioc->cfg_page_sz) && mem_desc->addr) {
5348 		dma_free_coherent(&mrioc->pdev->dev, mem_desc->size,
5349 		    mem_desc->addr, mem_desc->dma_addr);
5350 		mem_desc->addr = NULL;
5351 	}
5352 }
5353 
5354 /**
5355  * mpi3mr_alloc_config_dma_memory - Alloc memory for config page
5356  * @mrioc: Adapter instance reference
5357  * @mem_desc: Memory descriptor to hold dma memory info
5358  *
5359  * This function allocates new dmaable memory or provides the
5360  * default config page dmaable memory based on the memory size
5361  * described by the descriptor.
5362  *
5363  * Return: 0 on success, non-zero on failure.
5364  */
5365 static int mpi3mr_alloc_config_dma_memory(struct mpi3mr_ioc *mrioc,
5366 	struct dma_memory_desc *mem_desc)
5367 {
5368 	if (mem_desc->size > mrioc->cfg_page_sz) {
5369 		mem_desc->addr = dma_alloc_coherent(&mrioc->pdev->dev,
5370 		    mem_desc->size, &mem_desc->dma_addr, GFP_KERNEL);
5371 		if (!mem_desc->addr)
5372 			return -ENOMEM;
5373 	} else {
5374 		mem_desc->addr = mrioc->cfg_page;
5375 		mem_desc->dma_addr = mrioc->cfg_page_dma;
5376 		memset(mem_desc->addr, 0, mrioc->cfg_page_sz);
5377 	}
5378 	return 0;
5379 }
5380 
5381 /**
5382  * mpi3mr_post_cfg_req - Issue config requests and wait
5383  * @mrioc: Adapter instance reference
5384  * @cfg_req: Configuration request
5385  * @timeout: Timeout in seconds
5386  * @ioc_status: Pointer to return ioc status
5387  *
5388  * A generic function for posting MPI3 configuration request to
5389  * the firmware. This blocks for the completion of request for
5390  * timeout seconds and if the request times out this function
5391  * faults the controller with proper reason code.
5392  *
5393  * On successful completion of the request this function returns
5394  * appropriate ioc status from the firmware back to the caller.
5395  *
5396  * Return: 0 on success, non-zero on failure.
5397  */
static int mpi3mr_post_cfg_req(struct mpi3mr_ioc *mrioc,
	struct mpi3_config_request *cfg_req, int timeout, u16 *ioc_status)
{
	int retval = 0;

	/* cfg_cmds is a single-slot tracker; serialize users via its mutex */
	mutex_lock(&mrioc->cfg_cmds.mutex);
	if (mrioc->cfg_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "sending config request failed due to command in use\n");
		mutex_unlock(&mrioc->cfg_cmds.mutex);
		goto out;
	}
	mrioc->cfg_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->cfg_cmds.is_waiting = 1;
	mrioc->cfg_cmds.callback = NULL;
	mrioc->cfg_cmds.ioc_status = 0;
	mrioc->cfg_cmds.ioc_loginfo = 0;

	cfg_req->host_tag = cpu_to_le16(MPI3MR_HOSTTAG_CFG_CMDS);
	cfg_req->function = MPI3_FUNCTION_CONFIG;

	init_completion(&mrioc->cfg_cmds.done);
	dprint_cfg_info(mrioc, "posting config request\n");
	if (mrioc->logging_level & MPI3_DEBUG_CFG_INFO)
		dprint_dump(cfg_req, sizeof(struct mpi3_config_request),
		    "mpi3_cfg_req");
	retval = mpi3mr_admin_request_post(mrioc, cfg_req, sizeof(*cfg_req), 1);
	if (retval) {
		ioc_err(mrioc, "posting config request failed\n");
		goto out_unlock;
	}
	/* Block for up to 'timeout' seconds; fault the IOC on a timeout */
	wait_for_completion_timeout(&mrioc->cfg_cmds.done, (timeout * HZ));
	if (!(mrioc->cfg_cmds.state & MPI3MR_CMD_COMPLETE)) {
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_CFG_REQ_TIMEOUT);
		ioc_err(mrioc, "config request timed out\n");
		retval = -1;
		goto out_unlock;
	}
	/* Hand the firmware's ioc_status back to the caller */
	*ioc_status = mrioc->cfg_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK;
	if ((*ioc_status) != MPI3_IOCSTATUS_SUCCESS)
		dprint_cfg_err(mrioc,
		    "cfg_page request returned with ioc_status(0x%04x), log_info(0x%08x)\n",
		    *ioc_status, mrioc->cfg_cmds.ioc_loginfo);

out_unlock:
	mrioc->cfg_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->cfg_cmds.mutex);

out:
	return retval;
}
5450 
5451 /**
5452  * mpi3mr_process_cfg_req - config page request processor
5453  * @mrioc: Adapter instance reference
5454  * @cfg_req: Configuration request
5455  * @cfg_hdr: Configuration page header
5456  * @timeout: Timeout in seconds
5457  * @ioc_status: Pointer to return ioc status
5458  * @cfg_buf: Memory pointer to copy config page or header
5459  * @cfg_buf_sz: Size of the memory to get config page or header
5460  *
5461  * This is handler for config page read, write and config page
5462  * header read operations.
5463  *
5464  * This function expects the cfg_req to be populated with page
5465  * type, page number, action for the header read and with page
5466  * address for all other operations.
5467  *
5468  * The cfg_hdr can be passed as null for reading required header
5469  * details for read/write pages the cfg_hdr should point valid
5470  * configuration page header.
5471  *
5472  * This allocates dmaable memory based on the size of the config
5473  * buffer and set the SGE of the cfg_req.
5474  *
5475  * For write actions, the config page data has to be passed in
5476  * the cfg_buf and size of the data has to be mentioned in the
5477  * cfg_buf_sz.
5478  *
5479  * For read/header actions, on successful completion of the
5480  * request with successful ioc_status the data will be copied
5481  * into the cfg_buf limited to a minimum of actual page size and
5482  * cfg_buf_sz
5483  *
5484  *
5485  * Return: 0 on success, non-zero on failure.
5486  */
static int mpi3mr_process_cfg_req(struct mpi3mr_ioc *mrioc,
	struct mpi3_config_request *cfg_req,
	struct mpi3_config_page_header *cfg_hdr, int timeout, u16 *ioc_status,
	void *cfg_buf, u32 cfg_buf_sz)
{
	struct dma_memory_desc mem_desc;
	int retval = -1;
	u8 invalid_action = 0;
	u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;

	memset(&mem_desc, 0, sizeof(struct dma_memory_desc));

	/*
	 * Header reads need only a header-sized buffer; all other actions
	 * require a caller-supplied header to size the page and to check
	 * that the action is legal for the page's attribute.
	 */
	if (cfg_req->action == MPI3_CONFIG_ACTION_PAGE_HEADER)
		mem_desc.size = sizeof(struct mpi3_config_page_header);
	else {
		if (!cfg_hdr) {
			ioc_err(mrioc, "null config header passed for config action(%d), page_type(0x%02x), page_num(%d)\n",
			    cfg_req->action, cfg_req->page_type,
			    cfg_req->page_number);
			goto out;
		}
		/* Reject read/write actions the page attribute forbids */
		switch (cfg_hdr->page_attribute & MPI3_CONFIG_PAGEATTR_MASK) {
		case MPI3_CONFIG_PAGEATTR_READ_ONLY:
			if (cfg_req->action
			    != MPI3_CONFIG_ACTION_READ_CURRENT)
				invalid_action = 1;
			break;
		case MPI3_CONFIG_PAGEATTR_CHANGEABLE:
			if ((cfg_req->action ==
			     MPI3_CONFIG_ACTION_READ_PERSISTENT) ||
			    (cfg_req->action ==
			     MPI3_CONFIG_ACTION_WRITE_PERSISTENT))
				invalid_action = 1;
			break;
		case MPI3_CONFIG_PAGEATTR_PERSISTENT:
		default:
			break;
		}
		if (invalid_action) {
			ioc_err(mrioc,
			    "config action(%d) is not allowed for page_type(0x%02x), page_num(%d) with page_attribute(0x%02x)\n",
			    cfg_req->action, cfg_req->page_type,
			    cfg_req->page_number, cfg_hdr->page_attribute);
			goto out;
		}
		/* page_length is in 4-byte units */
		mem_desc.size = le16_to_cpu(cfg_hdr->page_length) * 4;
		cfg_req->page_length = cfg_hdr->page_length;
		cfg_req->page_version = cfg_hdr->page_version;
	}
	if (mpi3mr_alloc_config_dma_memory(mrioc, &mem_desc))
		goto out;

	mpi3mr_add_sg_single(&cfg_req->sgl, sgl_flags, mem_desc.size,
	    mem_desc.dma_addr);

	/* Writes: stage the caller's page data into the DMA buffer */
	if ((cfg_req->action == MPI3_CONFIG_ACTION_WRITE_PERSISTENT) ||
	    (cfg_req->action == MPI3_CONFIG_ACTION_WRITE_CURRENT)) {
		memcpy(mem_desc.addr, cfg_buf, min_t(u16, mem_desc.size,
		    cfg_buf_sz));
		dprint_cfg_info(mrioc, "config buffer to be written\n");
		if (mrioc->logging_level & MPI3_DEBUG_CFG_INFO)
			dprint_dump(mem_desc.addr, mem_desc.size, "cfg_buf");
	}

	if (mpi3mr_post_cfg_req(mrioc, cfg_req, timeout, ioc_status))
		goto out;

	/*
	 * Reads/header actions: on firmware success, copy back at most
	 * min(actual page size, caller buffer size) bytes.
	 */
	retval = 0;
	if ((*ioc_status == MPI3_IOCSTATUS_SUCCESS) &&
	    (cfg_req->action != MPI3_CONFIG_ACTION_WRITE_PERSISTENT) &&
	    (cfg_req->action != MPI3_CONFIG_ACTION_WRITE_CURRENT)) {
		memcpy(cfg_buf, mem_desc.addr, min_t(u16, mem_desc.size,
		    cfg_buf_sz));
		dprint_cfg_info(mrioc, "config buffer read\n");
		if (mrioc->logging_level & MPI3_DEBUG_CFG_INFO)
			dprint_dump(mem_desc.addr, mem_desc.size, "cfg_buf");
	}

out:
	mpi3mr_free_config_dma_memory(mrioc, &mem_desc);
	return retval;
}
5569 
5570 /**
5571  * mpi3mr_cfg_get_dev_pg0 - Read current device page0
5572  * @mrioc: Adapter instance reference
5573  * @ioc_status: Pointer to return ioc status
5574  * @dev_pg0: Pointer to return device page 0
5575  * @pg_sz: Size of the memory allocated to the page pointer
5576  * @form: The form to be used for addressing the page
5577  * @form_spec: Form specific information like device handle
5578  *
5579  * This is handler for config page read for a specific device
5580  * page0. The ioc_status has the controller returned ioc_status.
5581  * This routine doesn't check ioc_status to decide whether the
5582  * page read is success or not and it is the callers
5583  * responsibility.
5584  *
5585  * Return: 0 on success, non-zero on failure.
5586  */
5587 int mpi3mr_cfg_get_dev_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
5588 	struct mpi3_device_page0 *dev_pg0, u16 pg_sz, u32 form, u32 form_spec)
5589 {
5590 	struct mpi3_config_page_header cfg_hdr;
5591 	struct mpi3_config_request cfg_req;
5592 	u32 page_address;
5593 
5594 	memset(dev_pg0, 0, pg_sz);
5595 	memset(&cfg_hdr, 0, sizeof(cfg_hdr));
5596 	memset(&cfg_req, 0, sizeof(cfg_req));
5597 
5598 	cfg_req.function = MPI3_FUNCTION_CONFIG;
5599 	cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
5600 	cfg_req.page_type = MPI3_CONFIG_PAGETYPE_DEVICE;
5601 	cfg_req.page_number = 0;
5602 	cfg_req.page_address = 0;
5603 
5604 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
5605 	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
5606 		ioc_err(mrioc, "device page0 header read failed\n");
5607 		goto out_failed;
5608 	}
5609 	if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) {
5610 		ioc_err(mrioc, "device page0 header read failed with ioc_status(0x%04x)\n",
5611 		    *ioc_status);
5612 		goto out_failed;
5613 	}
5614 	cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
5615 	page_address = ((form & MPI3_DEVICE_PGAD_FORM_MASK) |
5616 	    (form_spec & MPI3_DEVICE_PGAD_HANDLE_MASK));
5617 	cfg_req.page_address = cpu_to_le32(page_address);
5618 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
5619 	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, dev_pg0, pg_sz)) {
5620 		ioc_err(mrioc, "device page0 read failed\n");
5621 		goto out_failed;
5622 	}
5623 	return 0;
5624 out_failed:
5625 	return -1;
5626 }
5627 
5628 
5629 /**
5630  * mpi3mr_cfg_get_sas_phy_pg0 - Read current SAS Phy page0
5631  * @mrioc: Adapter instance reference
5632  * @ioc_status: Pointer to return ioc status
5633  * @phy_pg0: Pointer to return SAS Phy page 0
5634  * @pg_sz: Size of the memory allocated to the page pointer
5635  * @form: The form to be used for addressing the page
5636  * @form_spec: Form specific information like phy number
5637  *
5638  * This is handler for config page read for a specific SAS Phy
5639  * page0. The ioc_status has the controller returned ioc_status.
5640  * This routine doesn't check ioc_status to decide whether the
5641  * page read is success or not and it is the callers
5642  * responsibility.
5643  *
5644  * Return: 0 on success, non-zero on failure.
5645  */
5646 int mpi3mr_cfg_get_sas_phy_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
5647 	struct mpi3_sas_phy_page0 *phy_pg0, u16 pg_sz, u32 form,
5648 	u32 form_spec)
5649 {
5650 	struct mpi3_config_page_header cfg_hdr;
5651 	struct mpi3_config_request cfg_req;
5652 	u32 page_address;
5653 
5654 	memset(phy_pg0, 0, pg_sz);
5655 	memset(&cfg_hdr, 0, sizeof(cfg_hdr));
5656 	memset(&cfg_req, 0, sizeof(cfg_req));
5657 
5658 	cfg_req.function = MPI3_FUNCTION_CONFIG;
5659 	cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
5660 	cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_PHY;
5661 	cfg_req.page_number = 0;
5662 	cfg_req.page_address = 0;
5663 
5664 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
5665 	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
5666 		ioc_err(mrioc, "sas phy page0 header read failed\n");
5667 		goto out_failed;
5668 	}
5669 	if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) {
5670 		ioc_err(mrioc, "sas phy page0 header read failed with ioc_status(0x%04x)\n",
5671 		    *ioc_status);
5672 		goto out_failed;
5673 	}
5674 	cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
5675 	page_address = ((form & MPI3_SAS_PHY_PGAD_FORM_MASK) |
5676 	    (form_spec & MPI3_SAS_PHY_PGAD_PHY_NUMBER_MASK));
5677 	cfg_req.page_address = cpu_to_le32(page_address);
5678 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
5679 	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, phy_pg0, pg_sz)) {
5680 		ioc_err(mrioc, "sas phy page0 read failed\n");
5681 		goto out_failed;
5682 	}
5683 	return 0;
5684 out_failed:
5685 	return -1;
5686 }
5687 
5688 /**
5689  * mpi3mr_cfg_get_sas_phy_pg1 - Read current SAS Phy page1
5690  * @mrioc: Adapter instance reference
5691  * @ioc_status: Pointer to return ioc status
5692  * @phy_pg1: Pointer to return SAS Phy page 1
5693  * @pg_sz: Size of the memory allocated to the page pointer
5694  * @form: The form to be used for addressing the page
5695  * @form_spec: Form specific information like phy number
5696  *
5697  * This is handler for config page read for a specific SAS Phy
5698  * page1. The ioc_status has the controller returned ioc_status.
5699  * This routine doesn't check ioc_status to decide whether the
5700  * page read is success or not and it is the callers
5701  * responsibility.
5702  *
5703  * Return: 0 on success, non-zero on failure.
5704  */
5705 int mpi3mr_cfg_get_sas_phy_pg1(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
5706 	struct mpi3_sas_phy_page1 *phy_pg1, u16 pg_sz, u32 form,
5707 	u32 form_spec)
5708 {
5709 	struct mpi3_config_page_header cfg_hdr;
5710 	struct mpi3_config_request cfg_req;
5711 	u32 page_address;
5712 
5713 	memset(phy_pg1, 0, pg_sz);
5714 	memset(&cfg_hdr, 0, sizeof(cfg_hdr));
5715 	memset(&cfg_req, 0, sizeof(cfg_req));
5716 
5717 	cfg_req.function = MPI3_FUNCTION_CONFIG;
5718 	cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
5719 	cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_PHY;
5720 	cfg_req.page_number = 1;
5721 	cfg_req.page_address = 0;
5722 
5723 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
5724 	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
5725 		ioc_err(mrioc, "sas phy page1 header read failed\n");
5726 		goto out_failed;
5727 	}
5728 	if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) {
5729 		ioc_err(mrioc, "sas phy page1 header read failed with ioc_status(0x%04x)\n",
5730 		    *ioc_status);
5731 		goto out_failed;
5732 	}
5733 	cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
5734 	page_address = ((form & MPI3_SAS_PHY_PGAD_FORM_MASK) |
5735 	    (form_spec & MPI3_SAS_PHY_PGAD_PHY_NUMBER_MASK));
5736 	cfg_req.page_address = cpu_to_le32(page_address);
5737 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
5738 	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, phy_pg1, pg_sz)) {
5739 		ioc_err(mrioc, "sas phy page1 read failed\n");
5740 		goto out_failed;
5741 	}
5742 	return 0;
5743 out_failed:
5744 	return -1;
5745 }
5746 
5747 
5748 /**
5749  * mpi3mr_cfg_get_sas_exp_pg0 - Read current SAS Expander page0
5750  * @mrioc: Adapter instance reference
5751  * @ioc_status: Pointer to return ioc status
5752  * @exp_pg0: Pointer to return SAS Expander page 0
5753  * @pg_sz: Size of the memory allocated to the page pointer
5754  * @form: The form to be used for addressing the page
5755  * @form_spec: Form specific information like device handle
5756  *
5757  * This is handler for config page read for a specific SAS
5758  * Expander page0. The ioc_status has the controller returned
5759  * ioc_status. This routine doesn't check ioc_status to decide
5760  * whether the page read is success or not and it is the callers
5761  * responsibility.
5762  *
5763  * Return: 0 on success, non-zero on failure.
5764  */
5765 int mpi3mr_cfg_get_sas_exp_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
5766 	struct mpi3_sas_expander_page0 *exp_pg0, u16 pg_sz, u32 form,
5767 	u32 form_spec)
5768 {
5769 	struct mpi3_config_page_header cfg_hdr;
5770 	struct mpi3_config_request cfg_req;
5771 	u32 page_address;
5772 
5773 	memset(exp_pg0, 0, pg_sz);
5774 	memset(&cfg_hdr, 0, sizeof(cfg_hdr));
5775 	memset(&cfg_req, 0, sizeof(cfg_req));
5776 
5777 	cfg_req.function = MPI3_FUNCTION_CONFIG;
5778 	cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
5779 	cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_EXPANDER;
5780 	cfg_req.page_number = 0;
5781 	cfg_req.page_address = 0;
5782 
5783 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
5784 	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
5785 		ioc_err(mrioc, "expander page0 header read failed\n");
5786 		goto out_failed;
5787 	}
5788 	if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) {
5789 		ioc_err(mrioc, "expander page0 header read failed with ioc_status(0x%04x)\n",
5790 		    *ioc_status);
5791 		goto out_failed;
5792 	}
5793 	cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
5794 	page_address = ((form & MPI3_SAS_EXPAND_PGAD_FORM_MASK) |
5795 	    (form_spec & (MPI3_SAS_EXPAND_PGAD_PHYNUM_MASK |
5796 	    MPI3_SAS_EXPAND_PGAD_HANDLE_MASK)));
5797 	cfg_req.page_address = cpu_to_le32(page_address);
5798 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
5799 	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, exp_pg0, pg_sz)) {
5800 		ioc_err(mrioc, "expander page0 read failed\n");
5801 		goto out_failed;
5802 	}
5803 	return 0;
5804 out_failed:
5805 	return -1;
5806 }
5807 
5808 /**
5809  * mpi3mr_cfg_get_sas_exp_pg1 - Read current SAS Expander page1
5810  * @mrioc: Adapter instance reference
5811  * @ioc_status: Pointer to return ioc status
5812  * @exp_pg1: Pointer to return SAS Expander page 1
5813  * @pg_sz: Size of the memory allocated to the page pointer
5814  * @form: The form to be used for addressing the page
5815  * @form_spec: Form specific information like phy number
5816  *
5817  * This is handler for config page read for a specific SAS
5818  * Expander page1. The ioc_status has the controller returned
5819  * ioc_status. This routine doesn't check ioc_status to decide
5820  * whether the page read is success or not and it is the callers
5821  * responsibility.
5822  *
5823  * Return: 0 on success, non-zero on failure.
5824  */
5825 int mpi3mr_cfg_get_sas_exp_pg1(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
5826 	struct mpi3_sas_expander_page1 *exp_pg1, u16 pg_sz, u32 form,
5827 	u32 form_spec)
5828 {
5829 	struct mpi3_config_page_header cfg_hdr;
5830 	struct mpi3_config_request cfg_req;
5831 	u32 page_address;
5832 
5833 	memset(exp_pg1, 0, pg_sz);
5834 	memset(&cfg_hdr, 0, sizeof(cfg_hdr));
5835 	memset(&cfg_req, 0, sizeof(cfg_req));
5836 
5837 	cfg_req.function = MPI3_FUNCTION_CONFIG;
5838 	cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
5839 	cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_EXPANDER;
5840 	cfg_req.page_number = 1;
5841 	cfg_req.page_address = 0;
5842 
5843 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
5844 	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
5845 		ioc_err(mrioc, "expander page1 header read failed\n");
5846 		goto out_failed;
5847 	}
5848 	if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) {
5849 		ioc_err(mrioc, "expander page1 header read failed with ioc_status(0x%04x)\n",
5850 		    *ioc_status);
5851 		goto out_failed;
5852 	}
5853 	cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
5854 	page_address = ((form & MPI3_SAS_EXPAND_PGAD_FORM_MASK) |
5855 	    (form_spec & (MPI3_SAS_EXPAND_PGAD_PHYNUM_MASK |
5856 	    MPI3_SAS_EXPAND_PGAD_HANDLE_MASK)));
5857 	cfg_req.page_address = cpu_to_le32(page_address);
5858 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
5859 	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, exp_pg1, pg_sz)) {
5860 		ioc_err(mrioc, "expander page1 read failed\n");
5861 		goto out_failed;
5862 	}
5863 	return 0;
5864 out_failed:
5865 	return -1;
5866 }
5867 
5868 /**
5869  * mpi3mr_cfg_get_enclosure_pg0 - Read current Enclosure page0
5870  * @mrioc: Adapter instance reference
5871  * @ioc_status: Pointer to return ioc status
5872  * @encl_pg0: Pointer to return Enclosure page 0
5873  * @pg_sz: Size of the memory allocated to the page pointer
5874  * @form: The form to be used for addressing the page
5875  * @form_spec: Form specific information like device handle
5876  *
5877  * This is handler for config page read for a specific Enclosure
5878  * page0. The ioc_status has the controller returned ioc_status.
5879  * This routine doesn't check ioc_status to decide whether the
5880  * page read is success or not and it is the callers
5881  * responsibility.
5882  *
5883  * Return: 0 on success, non-zero on failure.
5884  */
5885 int mpi3mr_cfg_get_enclosure_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
5886 	struct mpi3_enclosure_page0 *encl_pg0, u16 pg_sz, u32 form,
5887 	u32 form_spec)
5888 {
5889 	struct mpi3_config_page_header cfg_hdr;
5890 	struct mpi3_config_request cfg_req;
5891 	u32 page_address;
5892 
5893 	memset(encl_pg0, 0, pg_sz);
5894 	memset(&cfg_hdr, 0, sizeof(cfg_hdr));
5895 	memset(&cfg_req, 0, sizeof(cfg_req));
5896 
5897 	cfg_req.function = MPI3_FUNCTION_CONFIG;
5898 	cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
5899 	cfg_req.page_type = MPI3_CONFIG_PAGETYPE_ENCLOSURE;
5900 	cfg_req.page_number = 0;
5901 	cfg_req.page_address = 0;
5902 
5903 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
5904 	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
5905 		ioc_err(mrioc, "enclosure page0 header read failed\n");
5906 		goto out_failed;
5907 	}
5908 	if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) {
5909 		ioc_err(mrioc, "enclosure page0 header read failed with ioc_status(0x%04x)\n",
5910 		    *ioc_status);
5911 		goto out_failed;
5912 	}
5913 	cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
5914 	page_address = ((form & MPI3_ENCLOS_PGAD_FORM_MASK) |
5915 	    (form_spec & MPI3_ENCLOS_PGAD_HANDLE_MASK));
5916 	cfg_req.page_address = cpu_to_le32(page_address);
5917 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
5918 	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, encl_pg0, pg_sz)) {
5919 		ioc_err(mrioc, "enclosure page0 read failed\n");
5920 		goto out_failed;
5921 	}
5922 	return 0;
5923 out_failed:
5924 	return -1;
5925 }
5926 
5927 
5928 /**
5929  * mpi3mr_cfg_get_sas_io_unit_pg0 - Read current SASIOUnit page0
5930  * @mrioc: Adapter instance reference
5931  * @sas_io_unit_pg0: Pointer to return SAS IO Unit page 0
5932  * @pg_sz: Size of the memory allocated to the page pointer
5933  *
5934  * This is handler for config page read for the SAS IO Unit
5935  * page0. This routine checks ioc_status to decide whether the
5936  * page read is success or not.
5937  *
5938  * Return: 0 on success, non-zero on failure.
5939  */
5940 int mpi3mr_cfg_get_sas_io_unit_pg0(struct mpi3mr_ioc *mrioc,
5941 	struct mpi3_sas_io_unit_page0 *sas_io_unit_pg0, u16 pg_sz)
5942 {
5943 	struct mpi3_config_page_header cfg_hdr;
5944 	struct mpi3_config_request cfg_req;
5945 	u16 ioc_status = 0;
5946 
5947 	memset(sas_io_unit_pg0, 0, pg_sz);
5948 	memset(&cfg_hdr, 0, sizeof(cfg_hdr));
5949 	memset(&cfg_req, 0, sizeof(cfg_req));
5950 
5951 	cfg_req.function = MPI3_FUNCTION_CONFIG;
5952 	cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
5953 	cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_IO_UNIT;
5954 	cfg_req.page_number = 0;
5955 	cfg_req.page_address = 0;
5956 
5957 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
5958 	    MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
5959 		ioc_err(mrioc, "sas io unit page0 header read failed\n");
5960 		goto out_failed;
5961 	}
5962 	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
5963 		ioc_err(mrioc, "sas io unit page0 header read failed with ioc_status(0x%04x)\n",
5964 		    ioc_status);
5965 		goto out_failed;
5966 	}
5967 	cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
5968 
5969 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
5970 	    MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, sas_io_unit_pg0, pg_sz)) {
5971 		ioc_err(mrioc, "sas io unit page0 read failed\n");
5972 		goto out_failed;
5973 	}
5974 	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
5975 		ioc_err(mrioc, "sas io unit page0 read failed with ioc_status(0x%04x)\n",
5976 		    ioc_status);
5977 		goto out_failed;
5978 	}
5979 	return 0;
5980 out_failed:
5981 	return -1;
5982 }
5983 
5984 /**
5985  * mpi3mr_cfg_get_sas_io_unit_pg1 - Read current SASIOUnit page1
5986  * @mrioc: Adapter instance reference
5987  * @sas_io_unit_pg1: Pointer to return SAS IO Unit page 1
5988  * @pg_sz: Size of the memory allocated to the page pointer
5989  *
5990  * This is handler for config page read for the SAS IO Unit
5991  * page1. This routine checks ioc_status to decide whether the
5992  * page read is success or not.
5993  *
5994  * Return: 0 on success, non-zero on failure.
5995  */
5996 int mpi3mr_cfg_get_sas_io_unit_pg1(struct mpi3mr_ioc *mrioc,
5997 	struct mpi3_sas_io_unit_page1 *sas_io_unit_pg1, u16 pg_sz)
5998 {
5999 	struct mpi3_config_page_header cfg_hdr;
6000 	struct mpi3_config_request cfg_req;
6001 	u16 ioc_status = 0;
6002 
6003 	memset(sas_io_unit_pg1, 0, pg_sz);
6004 	memset(&cfg_hdr, 0, sizeof(cfg_hdr));
6005 	memset(&cfg_req, 0, sizeof(cfg_req));
6006 
6007 	cfg_req.function = MPI3_FUNCTION_CONFIG;
6008 	cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
6009 	cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_IO_UNIT;
6010 	cfg_req.page_number = 1;
6011 	cfg_req.page_address = 0;
6012 
6013 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
6014 	    MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
6015 		ioc_err(mrioc, "sas io unit page1 header read failed\n");
6016 		goto out_failed;
6017 	}
6018 	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
6019 		ioc_err(mrioc, "sas io unit page1 header read failed with ioc_status(0x%04x)\n",
6020 		    ioc_status);
6021 		goto out_failed;
6022 	}
6023 	cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
6024 
6025 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
6026 	    MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, sas_io_unit_pg1, pg_sz)) {
6027 		ioc_err(mrioc, "sas io unit page1 read failed\n");
6028 		goto out_failed;
6029 	}
6030 	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
6031 		ioc_err(mrioc, "sas io unit page1 read failed with ioc_status(0x%04x)\n",
6032 		    ioc_status);
6033 		goto out_failed;
6034 	}
6035 	return 0;
6036 out_failed:
6037 	return -1;
6038 }
6039 
6040 /**
6041  * mpi3mr_cfg_set_sas_io_unit_pg1 - Write SASIOUnit page1
6042  * @mrioc: Adapter instance reference
6043  * @sas_io_unit_pg1: Pointer to the SAS IO Unit page 1 to write
6044  * @pg_sz: Size of the memory allocated to the page pointer
6045  *
6046  * This is handler for config page write for the SAS IO Unit
6047  * page1. This routine checks ioc_status to decide whether the
6048  * page read is success or not. This will modify both current
6049  * and persistent page.
6050  *
6051  * Return: 0 on success, non-zero on failure.
6052  */
6053 int mpi3mr_cfg_set_sas_io_unit_pg1(struct mpi3mr_ioc *mrioc,
6054 	struct mpi3_sas_io_unit_page1 *sas_io_unit_pg1, u16 pg_sz)
6055 {
6056 	struct mpi3_config_page_header cfg_hdr;
6057 	struct mpi3_config_request cfg_req;
6058 	u16 ioc_status = 0;
6059 
6060 	memset(&cfg_hdr, 0, sizeof(cfg_hdr));
6061 	memset(&cfg_req, 0, sizeof(cfg_req));
6062 
6063 	cfg_req.function = MPI3_FUNCTION_CONFIG;
6064 	cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
6065 	cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_IO_UNIT;
6066 	cfg_req.page_number = 1;
6067 	cfg_req.page_address = 0;
6068 
6069 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
6070 	    MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
6071 		ioc_err(mrioc, "sas io unit page1 header read failed\n");
6072 		goto out_failed;
6073 	}
6074 	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
6075 		ioc_err(mrioc, "sas io unit page1 header read failed with ioc_status(0x%04x)\n",
6076 		    ioc_status);
6077 		goto out_failed;
6078 	}
6079 	cfg_req.action = MPI3_CONFIG_ACTION_WRITE_CURRENT;
6080 
6081 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
6082 	    MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, sas_io_unit_pg1, pg_sz)) {
6083 		ioc_err(mrioc, "sas io unit page1 write current failed\n");
6084 		goto out_failed;
6085 	}
6086 	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
6087 		ioc_err(mrioc, "sas io unit page1 write current failed with ioc_status(0x%04x)\n",
6088 		    ioc_status);
6089 		goto out_failed;
6090 	}
6091 
6092 	cfg_req.action = MPI3_CONFIG_ACTION_WRITE_PERSISTENT;
6093 
6094 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
6095 	    MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, sas_io_unit_pg1, pg_sz)) {
6096 		ioc_err(mrioc, "sas io unit page1 write persistent failed\n");
6097 		goto out_failed;
6098 	}
6099 	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
6100 		ioc_err(mrioc, "sas io unit page1 write persistent failed with ioc_status(0x%04x)\n",
6101 		    ioc_status);
6102 		goto out_failed;
6103 	}
6104 	return 0;
6105 out_failed:
6106 	return -1;
6107 }
6108 
6109 /**
6110  * mpi3mr_cfg_get_driver_pg1 - Read current Driver page1
6111  * @mrioc: Adapter instance reference
6112  * @driver_pg1: Pointer to return Driver page 1
6113  * @pg_sz: Size of the memory allocated to the page pointer
6114  *
6115  * This is handler for config page read for the Driver page1.
6116  * This routine checks ioc_status to decide whether the page
6117  * read is success or not.
6118  *
6119  * Return: 0 on success, non-zero on failure.
6120  */
6121 int mpi3mr_cfg_get_driver_pg1(struct mpi3mr_ioc *mrioc,
6122 	struct mpi3_driver_page1 *driver_pg1, u16 pg_sz)
6123 {
6124 	struct mpi3_config_page_header cfg_hdr;
6125 	struct mpi3_config_request cfg_req;
6126 	u16 ioc_status = 0;
6127 
6128 	memset(driver_pg1, 0, pg_sz);
6129 	memset(&cfg_hdr, 0, sizeof(cfg_hdr));
6130 	memset(&cfg_req, 0, sizeof(cfg_req));
6131 
6132 	cfg_req.function = MPI3_FUNCTION_CONFIG;
6133 	cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
6134 	cfg_req.page_type = MPI3_CONFIG_PAGETYPE_DRIVER;
6135 	cfg_req.page_number = 1;
6136 	cfg_req.page_address = 0;
6137 
6138 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
6139 	    MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
6140 		ioc_err(mrioc, "driver page1 header read failed\n");
6141 		goto out_failed;
6142 	}
6143 	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
6144 		ioc_err(mrioc, "driver page1 header read failed with ioc_status(0x%04x)\n",
6145 		    ioc_status);
6146 		goto out_failed;
6147 	}
6148 	cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
6149 
6150 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
6151 	    MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, driver_pg1, pg_sz)) {
6152 		ioc_err(mrioc, "driver page1 read failed\n");
6153 		goto out_failed;
6154 	}
6155 	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
6156 		ioc_err(mrioc, "driver page1 read failed with ioc_status(0x%04x)\n",
6157 		    ioc_status);
6158 		goto out_failed;
6159 	}
6160 	return 0;
6161 out_failed:
6162 	return -1;
6163 }
6164 
6165 /**
6166  * mpi3mr_cfg_get_driver_pg2 - Read current driver page2
6167  * @mrioc: Adapter instance reference
6168  * @driver_pg2: Pointer to return driver page 2
6169  * @pg_sz: Size of the memory allocated to the page pointer
6170  * @page_action: Page action
6171  *
6172  * This is handler for config page read for the driver page2.
6173  * This routine checks ioc_status to decide whether the page
6174  * read is success or not.
6175  *
6176  * Return: 0 on success, non-zero on failure.
6177  */
6178 int mpi3mr_cfg_get_driver_pg2(struct mpi3mr_ioc *mrioc,
6179 	struct mpi3_driver_page2 *driver_pg2, u16 pg_sz, u8 page_action)
6180 {
6181 	struct mpi3_config_page_header cfg_hdr;
6182 	struct mpi3_config_request cfg_req;
6183 	u16 ioc_status = 0;
6184 
6185 	memset(driver_pg2, 0, pg_sz);
6186 	memset(&cfg_hdr, 0, sizeof(cfg_hdr));
6187 	memset(&cfg_req, 0, sizeof(cfg_req));
6188 
6189 	cfg_req.function = MPI3_FUNCTION_CONFIG;
6190 	cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
6191 	cfg_req.page_type = MPI3_CONFIG_PAGETYPE_DRIVER;
6192 	cfg_req.page_number = 2;
6193 	cfg_req.page_address = 0;
6194 	cfg_req.page_version = MPI3_DRIVER2_PAGEVERSION;
6195 
6196 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
6197 	    MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
6198 		ioc_err(mrioc, "driver page2 header read failed\n");
6199 		goto out_failed;
6200 	}
6201 	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
6202 		ioc_err(mrioc, "driver page2 header read failed with\n"
6203 			       "ioc_status(0x%04x)\n",
6204 		    ioc_status);
6205 		goto out_failed;
6206 	}
6207 	cfg_req.action = page_action;
6208 
6209 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
6210 	    MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, driver_pg2, pg_sz)) {
6211 		ioc_err(mrioc, "driver page2 read failed\n");
6212 		goto out_failed;
6213 	}
6214 	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
6215 		ioc_err(mrioc, "driver page2 read failed with\n"
6216 			       "ioc_status(0x%04x)\n",
6217 		    ioc_status);
6218 		goto out_failed;
6219 	}
6220 	return 0;
6221 out_failed:
6222 	return -1;
6223 }
6224 
6225