xref: /freebsd/sys/dev/mpi3mr/mpi3mr.c (revision 2008043f386721d58158e37e0d7e50df8095942d)
1 /*
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2016-2023, Broadcom Inc. All rights reserved.
5  * Support: <fbsd-storage-driver.pdl@broadcom.com>
6  *
7  * Authors: Sumit Saxena <sumit.saxena@broadcom.com>
8  *	    Chandrakanth Patil <chandrakanth.patil@broadcom.com>
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions are
12  * met:
13  *
14  * 1. Redistributions of source code must retain the above copyright notice,
15  *    this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright notice,
17  *    this list of conditions and the following disclaimer in the documentation and/or other
18  *    materials provided with the distribution.
19  * 3. Neither the name of the Broadcom Inc. nor the names of its contributors
20  *    may be used to endorse or promote products derived from this software without
21  *    specific prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
24  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
27  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
30  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
31  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
32  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
33  * POSSIBILITY OF SUCH DAMAGE.
34  *
35  * The views and conclusions contained in the software and documentation are
36  * those of the authors and should not be interpreted as representing
37  * official policies,either expressed or implied, of the FreeBSD Project.
38  *
39  * Mail to: Broadcom Inc 1320 Ridder Park Dr, San Jose, CA 95131
40  *
41  * Broadcom Inc. (Broadcom) MPI3MR Adapter FreeBSD
42  */
43 
44 #include <sys/cdefs.h>
45 #include <sys/types.h>
46 #include <sys/param.h>
47 #include <sys/systm.h>
48 #include <sys/kernel.h>
49 #include <sys/module.h>
50 #include <sys/bus.h>
51 #include <sys/conf.h>
52 #include <sys/malloc.h>
53 #include <sys/sysctl.h>
54 #include <sys/uio.h>
55 
56 #include <machine/bus.h>
57 #include <machine/resource.h>
58 #include <sys/rman.h>
59 
60 #include <dev/pci/pcireg.h>
61 #include <dev/pci/pcivar.h>
62 #include <dev/pci/pci_private.h>
63 
64 #include <cam/cam.h>
65 #include <cam/cam_ccb.h>
66 #include <cam/cam_debug.h>
67 #include <cam/cam_sim.h>
68 #include <cam/cam_xpt_sim.h>
69 #include <cam/cam_xpt_periph.h>
70 #include <cam/cam_periph.h>
71 #include <cam/scsi/scsi_all.h>
72 #include <cam/scsi/scsi_message.h>
73 #include <cam/scsi/smp_all.h>
74 #include <sys/queue.h>
75 #include <sys/kthread.h>
76 #include "mpi3mr.h"
77 #include "mpi3mr_cam.h"
78 #include "mpi3mr_app.h"
79 
80 static void mpi3mr_repost_reply_buf(struct mpi3mr_softc *sc,
81 	U64 reply_dma);
82 static int mpi3mr_complete_admin_cmd(struct mpi3mr_softc *sc);
83 static void mpi3mr_port_enable_complete(struct mpi3mr_softc *sc,
84 	struct mpi3mr_drvr_cmd *drvrcmd);
85 static void mpi3mr_flush_io(struct mpi3mr_softc *sc);
86 static int mpi3mr_issue_reset(struct mpi3mr_softc *sc, U16 reset_type,
87 	U32 reset_reason);
88 static void mpi3mr_dev_rmhs_send_tm(struct mpi3mr_softc *sc, U16 handle,
89 	struct mpi3mr_drvr_cmd *cmdparam, U8 iou_rc);
90 static void mpi3mr_dev_rmhs_complete_iou(struct mpi3mr_softc *sc,
91 	struct mpi3mr_drvr_cmd *drv_cmd);
92 static void mpi3mr_dev_rmhs_complete_tm(struct mpi3mr_softc *sc,
93 	struct mpi3mr_drvr_cmd *drv_cmd);
94 static void mpi3mr_send_evt_ack(struct mpi3mr_softc *sc, U8 event,
95 	struct mpi3mr_drvr_cmd *cmdparam, U32 event_ctx);
96 static void mpi3mr_print_fault_info(struct mpi3mr_softc *sc);
97 static inline void mpi3mr_set_diagsave(struct mpi3mr_softc *sc);
98 static const char *mpi3mr_reset_rc_name(enum mpi3mr_reset_reason reason_code);
99 
100 void
101 mpi3mr_hexdump(void *buf, int sz, int format)
102 {
103         int i;
104         U32 *buf_loc = (U32 *)buf;
105 
106         for (i = 0; i < (sz / sizeof(U32)); i++) {
107                 if ((i % format) == 0) {
108                         if (i != 0)
109                                 printf("\n");
110                         printf("%08x: ", (i * 4));
111                 }
112                 printf("%08x ", buf_loc[i]);
113         }
114         printf("\n");
115 }
116 
117 void
118 init_completion(struct completion *completion)
119 {
120 	completion->done = 0;
121 }
122 
/*
 * Mark a completion object as done and issue a wakeup.
 */
void
complete(struct completion *completion)
{
	completion->done = 1;
	/*
	 * NOTE(review): wakeup() is given the address of the complete()
	 * function itself, not the 'completion' argument.  No msleep() in
	 * this file sleeps on either channel (the TM wait path sleeps on
	 * &sc->tm_chan), so this wakeup appears to be a no-op here —
	 * confirm the intended wait channel before changing it.
	 */
	wakeup(complete);
}
129 
130 void wait_for_completion_timeout(struct completion *completion,
131 	    U32 timeout)
132 {
133 	U32 count = timeout * 1000;
134 
135 	while ((completion->done == 0) && count) {
136                 DELAY(1000);
137 		count--;
138 	}
139 
140 	if (completion->done == 0) {
141 		printf("%s: Command is timedout\n", __func__);
142 		completion->done = 1;
143 	}
144 }
/*
 * Wait for a task-management command's completion object to be
 * signalled, sleeping on sc->tm_chan under sc->mpi3mr_mtx; on expiry
 * log a message and force-complete the object.
 */
void wait_for_completion_timeout_tm(struct completion *completion,
	    U32 timeout, struct mpi3mr_softc *sc)
{
	U32 count = timeout * 1000;

	while ((completion->done == 0) && count) {
		/* Each iteration blocks until a wakeup on tm_chan or a
		 * 1-second tick, whichever comes first. */
		msleep(&sc->tm_chan, &sc->mpi3mr_mtx, PRIBIO,
		       "TM command", 1 * hz);
		count--;
	}

	/*
	 * NOTE(review): 'count' is timeout*1000 but every msleep() above
	 * can block for up to a full second, so the worst-case wait is
	 * ~1000x the requested timeout — confirm the intended units.
	 */
	if (completion->done == 0) {
		printf("%s: Command is timedout\n", __func__);
		completion->done = 1;
	}
}
161 
162 
163 void
164 poll_for_command_completion(struct mpi3mr_softc *sc,
165        struct mpi3mr_drvr_cmd *cmd, U16 wait)
166 {
167 	int wait_time = wait * 1000;
168        while (wait_time) {
169                mpi3mr_complete_admin_cmd(sc);
170                if (cmd->state & MPI3MR_CMD_COMPLETE)
171                        break;
172 	       DELAY(1000);
173                wait_time--;
174        }
175 }
176 
177 /**
178  * mpi3mr_trigger_snapdump - triggers firmware snapdump
179  * @sc: Adapter instance reference
180  * @reason_code: reason code for the fault.
181  *
182  * This routine will trigger the snapdump and wait for it to
183  * complete or timeout before it returns.
 * This will be called during initialization time faults/resets/timeouts
185  * before soft reset invocation.
186  *
187  * Return:  None.
188  */
189 static void
190 mpi3mr_trigger_snapdump(struct mpi3mr_softc *sc, U32 reason_code)
191 {
192 	U32 host_diagnostic, timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10;
193 
194 	mpi3mr_dprint(sc, MPI3MR_INFO, "snapdump triggered: reason code: %s\n",
195 	    mpi3mr_reset_rc_name(reason_code));
196 
197 	mpi3mr_set_diagsave(sc);
198 	mpi3mr_issue_reset(sc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
199 			   reason_code);
200 
201 	do {
202 		host_diagnostic = mpi3mr_regread(sc, MPI3_SYSIF_HOST_DIAG_OFFSET);
203 		if (!(host_diagnostic & MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS))
204 			break;
205                 DELAY(100 * 1000);
206 	} while (--timeout);
207 
208 	return;
209 }
210 
211 /**
212  * mpi3mr_check_rh_fault_ioc - check reset history and fault
213  * controller
214  * @sc: Adapter instance reference
 * @reason_code: reason code for the fault.
216  *
217  * This routine will fault the controller with
218  * the given reason code if it is not already in the fault or
 * not asynchronously reset. This will be used to handle
 * initialization time faults/resets/timeout as in those cases
221  * immediate soft reset invocation is not required.
222  *
223  * Return:  None.
224  */
225 static void mpi3mr_check_rh_fault_ioc(struct mpi3mr_softc *sc, U32 reason_code)
226 {
227 	U32 ioc_status;
228 
229 	if (sc->unrecoverable) {
230 		mpi3mr_dprint(sc, MPI3MR_ERROR, "controller is unrecoverable\n");
231 		return;
232 	}
233 
234 	ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);
235 	if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) ||
236 	    (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) {
237 		mpi3mr_print_fault_info(sc);
238 		return;
239 	}
240 
241 	mpi3mr_trigger_snapdump(sc, reason_code);
242 
243 	return;
244 }
245 
246 static void * mpi3mr_get_reply_virt_addr(struct mpi3mr_softc *sc,
247     bus_addr_t phys_addr)
248 {
249 	if (!phys_addr)
250 		return NULL;
251 	if ((phys_addr < sc->reply_buf_dma_min_address) ||
252 	    (phys_addr > sc->reply_buf_dma_max_address))
253 		return NULL;
254 
255 	return sc->reply_buf + (phys_addr - sc->reply_buf_phys);
256 }
257 
258 static void * mpi3mr_get_sensebuf_virt_addr(struct mpi3mr_softc *sc,
259     bus_addr_t phys_addr)
260 {
261 	if (!phys_addr)
262 		return NULL;
263 	return sc->sense_buf + (phys_addr - sc->sense_buf_phys);
264 }
265 
266 static void mpi3mr_repost_reply_buf(struct mpi3mr_softc *sc,
267     U64 reply_dma)
268 {
269 	U32 old_idx = 0;
270 
271 	mtx_lock_spin(&sc->reply_free_q_lock);
272 	old_idx  =  sc->reply_free_q_host_index;
273 	sc->reply_free_q_host_index = ((sc->reply_free_q_host_index ==
274 	    (sc->reply_free_q_sz - 1)) ? 0 :
275 	    (sc->reply_free_q_host_index + 1));
276 	sc->reply_free_q[old_idx] = reply_dma;
277 	mpi3mr_regwrite(sc, MPI3_SYSIF_REPLY_FREE_HOST_INDEX_OFFSET,
278 		sc->reply_free_q_host_index);
279 	mtx_unlock_spin(&sc->reply_free_q_lock);
280 }
281 
282 static void mpi3mr_repost_sense_buf(struct mpi3mr_softc *sc,
283     U64 sense_buf_phys)
284 {
285 	U32 old_idx = 0;
286 
287 	mtx_lock_spin(&sc->sense_buf_q_lock);
288 	old_idx  =  sc->sense_buf_q_host_index;
289 	sc->sense_buf_q_host_index = ((sc->sense_buf_q_host_index ==
290 	    (sc->sense_buf_q_sz - 1)) ? 0 :
291 	    (sc->sense_buf_q_host_index + 1));
292 	sc->sense_buf_q[old_idx] = sense_buf_phys;
293 	mpi3mr_regwrite(sc, MPI3_SYSIF_SENSE_BUF_FREE_HOST_INDEX_OFFSET,
294 		sc->sense_buf_q_host_index);
295 	mtx_unlock_spin(&sc->sense_buf_q_lock);
296 
297 }
298 
299 void mpi3mr_set_io_divert_for_all_vd_in_tg(struct mpi3mr_softc *sc,
300 	struct mpi3mr_throttle_group_info *tg, U8 divert_value)
301 {
302 	struct mpi3mr_target *target;
303 
304 	mtx_lock_spin(&sc->target_lock);
305 	TAILQ_FOREACH(target, &sc->cam_sc->tgt_list, tgt_next) {
306 		if (target->throttle_group == tg)
307 			target->io_divert = divert_value;
308 	}
309 	mtx_unlock_spin(&sc->target_lock);
310 }
311 
312 /**
313  * mpi3mr_submit_admin_cmd - Submit request to admin queue
 * @sc: Adapter instance reference
315  * @admin_req: MPI3 request
316  * @admin_req_sz: Request size
317  *
318  * Post the MPI3 request into admin request queue and
319  * inform the controller, if the queue is full return
320  * appropriate error.
321  *
322  * Return: 0 on success, non-zero on failure.
323  */
324 int mpi3mr_submit_admin_cmd(struct mpi3mr_softc *sc, void *admin_req,
325     U16 admin_req_sz)
326 {
327 	U16 areq_pi = 0, areq_ci = 0, max_entries = 0;
328 	int retval = 0;
329 	U8 *areq_entry;
330 
331 	mtx_lock_spin(&sc->admin_req_lock);
332 	areq_pi = sc->admin_req_pi;
333 	areq_ci = sc->admin_req_ci;
334 	max_entries = sc->num_admin_reqs;
335 
336 	if (sc->unrecoverable)
337 		return -EFAULT;
338 
339 	if ((areq_ci == (areq_pi + 1)) || ((!areq_ci) &&
340 					   (areq_pi == (max_entries - 1)))) {
341 		printf(IOCNAME "AdminReqQ full condition detected\n",
342 		    sc->name);
343 		retval = -EAGAIN;
344 		goto out;
345 	}
346 	areq_entry = (U8 *)sc->admin_req + (areq_pi *
347 						     MPI3MR_AREQ_FRAME_SZ);
348 	memset(areq_entry, 0, MPI3MR_AREQ_FRAME_SZ);
349 	memcpy(areq_entry, (U8 *)admin_req, admin_req_sz);
350 
351 	if (++areq_pi == max_entries)
352 		areq_pi = 0;
353 	sc->admin_req_pi = areq_pi;
354 
355 	mpi3mr_regwrite(sc, MPI3_SYSIF_ADMIN_REQ_Q_PI_OFFSET, sc->admin_req_pi);
356 
357 out:
358 	mtx_unlock_spin(&sc->admin_req_lock);
359 	return retval;
360 }
361 
362 /**
363  * mpi3mr_check_req_qfull - Check request queue is full or not
364  * @op_req_q: Operational reply queue info
365  *
366  * Return: true when queue full, false otherwise.
367  */
368 static inline bool
369 mpi3mr_check_req_qfull(struct mpi3mr_op_req_queue *op_req_q)
370 {
371 	U16 pi, ci, max_entries;
372 	bool is_qfull = false;
373 
374 	pi = op_req_q->pi;
375 	ci = op_req_q->ci;
376 	max_entries = op_req_q->num_reqs;
377 
378 	if ((ci == (pi + 1)) || ((!ci) && (pi == (max_entries - 1))))
379 		is_qfull = true;
380 
381 	return is_qfull;
382 }
383 
384 /**
385  * mpi3mr_submit_io - Post IO command to firmware
386  * @sc:		      Adapter instance reference
387  * @op_req_q:	      Operational Request queue reference
388  * @req:	      MPT request data
389  *
390  * This function submits IO command to firmware.
391  *
 * Return: 0 on success, non-zero (-EBUSY) when the queue stays full.
393  */
int mpi3mr_submit_io(struct mpi3mr_softc *sc,
    struct mpi3mr_op_req_queue *op_req_q, U8 *req)
{
	U16 pi, max_entries;
	int retval = 0;
	U8 *req_entry;
	U16 req_sz = sc->facts.op_req_sz;
	struct mpi3mr_irq_context *irq_ctx;

	mtx_lock_spin(&op_req_q->q_lock);

	pi = op_req_q->pi;
	max_entries = op_req_q->num_reqs;
	/*
	 * If the request queue looks full, drain completions for the
	 * paired reply queue once and re-check before giving up.
	 */
	if (mpi3mr_check_req_qfull(op_req_q)) {
		irq_ctx = &sc->irq_ctx[op_req_q->reply_qid - 1];
		mpi3mr_complete_io_cmd(sc, irq_ctx);

		if (mpi3mr_check_req_qfull(op_req_q)) {
			printf(IOCNAME "OpReqQ full condition detected\n",
				sc->name);
			retval = -EBUSY;
			goto out;
		}
	}

	/*
	 * Zero the whole frame (req_sz) but copy only
	 * MPI3MR_AREQ_FRAME_SZ bytes of request data.
	 * NOTE(review): this assumes the caller's request never exceeds
	 * MPI3MR_AREQ_FRAME_SZ -- confirm against callers.
	 */
	req_entry = (U8 *)op_req_q->q_base + (pi * req_sz);
	memset(req_entry, 0, req_sz);
	memcpy(req_entry, req, MPI3MR_AREQ_FRAME_SZ);
	if (++pi == max_entries)
		pi = 0;	/* wrap the producer index */
	op_req_q->pi = pi;

	/* Account the in-flight IO against the paired reply queue. */
	mpi3mr_atomic_inc(&sc->op_reply_q[op_req_q->reply_qid - 1].pend_ios);

	/* Ring the doorbell: publish the new PI to the controller. */
	mpi3mr_regwrite(sc, MPI3_SYSIF_OPER_REQ_Q_N_PI_OFFSET(op_req_q->qid), op_req_q->pi);
	if (sc->mpi3mr_debug & MPI3MR_TRACE) {
		device_printf(sc->mpi3mr_dev, "IO submission: QID:%d PI:0x%x\n", op_req_q->qid, op_req_q->pi);
		mpi3mr_hexdump(req_entry, MPI3MR_AREQ_FRAME_SZ, 8);
	}

out:
	mtx_unlock_spin(&op_req_q->q_lock);
	return retval;
}
438 
439 inline void
440 mpi3mr_add_sg_single(void *paddr, U8 flags, U32 length,
441 		     bus_addr_t dma_addr)
442 {
443 	Mpi3SGESimple_t *sgel = paddr;
444 
445 	sgel->Flags = flags;
446 	sgel->Length = (length);
447 	sgel->Address = (U64)dma_addr;
448 }
449 
450 void mpi3mr_build_zero_len_sge(void *paddr)
451 {
452 	U8 sgl_flags = (MPI3_SGE_FLAGS_ELEMENT_TYPE_SIMPLE |
453 		MPI3_SGE_FLAGS_DLAS_SYSTEM | MPI3_SGE_FLAGS_END_OF_LIST);
454 
455 	mpi3mr_add_sg_single(paddr, sgl_flags, 0, -1);
456 
457 }
458 
459 void mpi3mr_enable_interrupts(struct mpi3mr_softc *sc)
460 {
461 	sc->intr_enabled = 1;
462 }
463 
464 void mpi3mr_disable_interrupts(struct mpi3mr_softc *sc)
465 {
466 	sc->intr_enabled = 0;
467 }
468 
469 void
470 mpi3mr_memaddr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
471 {
472 	bus_addr_t *addr;
473 
474 	addr = arg;
475 	*addr = segs[0].ds_addr;
476 }
477 
478 static int mpi3mr_delete_op_reply_queue(struct mpi3mr_softc *sc, U16 qid)
479 {
480 	Mpi3DeleteReplyQueueRequest_t delq_req;
481 	struct mpi3mr_op_reply_queue *op_reply_q;
482 	int retval = 0;
483 
484 
485 	op_reply_q = &sc->op_reply_q[qid - 1];
486 
487 	if (!op_reply_q->qid)
488 	{
489 		retval = -1;
490 		printf(IOCNAME "Issue DelRepQ: called with invalid Reply QID\n",
491 		    sc->name);
492 		goto out;
493 	}
494 
495 	memset(&delq_req, 0, sizeof(delq_req));
496 
497 	mtx_lock(&sc->init_cmds.completion.lock);
498 	if (sc->init_cmds.state & MPI3MR_CMD_PENDING) {
499 		retval = -1;
500 		printf(IOCNAME "Issue DelRepQ: Init command is in use\n",
501 		    sc->name);
502 		mtx_unlock(&sc->init_cmds.completion.lock);
503 		goto out;
504 	}
505 
506 	if (sc->init_cmds.state & MPI3MR_CMD_PENDING) {
507 		retval = -1;
508 		printf(IOCNAME "Issue DelRepQ: Init command is in use\n",
509 		    sc->name);
510 		goto out;
511 	}
512 	sc->init_cmds.state = MPI3MR_CMD_PENDING;
513 	sc->init_cmds.is_waiting = 1;
514 	sc->init_cmds.callback = NULL;
515 	delq_req.HostTag = MPI3MR_HOSTTAG_INITCMDS;
516 	delq_req.Function = MPI3_FUNCTION_DELETE_REPLY_QUEUE;
517 	delq_req.QueueID = qid;
518 
519 	init_completion(&sc->init_cmds.completion);
520 	retval = mpi3mr_submit_admin_cmd(sc, &delq_req, sizeof(delq_req));
521 	if (retval) {
522 		printf(IOCNAME "Issue DelRepQ: Admin Post failed\n",
523 		    sc->name);
524 		goto out_unlock;
525 	}
526 	wait_for_completion_timeout(&sc->init_cmds.completion,
527 	    (MPI3MR_INTADMCMD_TIMEOUT));
528 	if (!(sc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
529 		printf(IOCNAME "Issue DelRepQ: command timed out\n",
530 		    sc->name);
531 		mpi3mr_check_rh_fault_ioc(sc,
532 		    MPI3MR_RESET_FROM_DELREPQ_TIMEOUT);
533 		sc->unrecoverable = 1;
534 
535 		retval = -1;
536 		goto out_unlock;
537 	}
538 	if ((sc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
539 	     != MPI3_IOCSTATUS_SUCCESS ) {
540 		printf(IOCNAME "Issue DelRepQ: Failed IOCStatus(0x%04x) "
541 		    " Loginfo(0x%08x) \n" , sc->name,
542 		    (sc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
543 		    sc->init_cmds.ioc_loginfo);
544 		retval = -1;
545 		goto out_unlock;
546 	}
547 	sc->irq_ctx[qid - 1].op_reply_q = NULL;
548 
549 	if (sc->op_reply_q[qid - 1].q_base_phys != 0)
550 		bus_dmamap_unload(sc->op_reply_q[qid - 1].q_base_tag, sc->op_reply_q[qid - 1].q_base_dmamap);
551 	if (sc->op_reply_q[qid - 1].q_base != NULL)
552 		bus_dmamem_free(sc->op_reply_q[qid - 1].q_base_tag, sc->op_reply_q[qid - 1].q_base, sc->op_reply_q[qid - 1].q_base_dmamap);
553 	if (sc->op_reply_q[qid - 1].q_base_tag != NULL)
554 		bus_dma_tag_destroy(sc->op_reply_q[qid - 1].q_base_tag);
555 
556 	sc->op_reply_q[qid - 1].q_base = NULL;
557 	sc->op_reply_q[qid - 1].qid = 0;
558 out_unlock:
559 	sc->init_cmds.state = MPI3MR_CMD_NOTUSED;
560 	mtx_unlock(&sc->init_cmds.completion.lock);
561 out:
562 	return retval;
563 }
564 
565 /**
566  * mpi3mr_create_op_reply_queue - create operational reply queue
567  * @sc: Adapter instance reference
568  * @qid: operational reply queue id
569  *
 * Create operational reply queue by issuing MPI request
571  * through admin queue.
572  *
573  * Return:  0 on success, non-zero on failure.
574  */
static int mpi3mr_create_op_reply_queue(struct mpi3mr_softc *sc, U16 qid)
{
	Mpi3CreateReplyQueueRequest_t create_req;
	struct mpi3mr_op_reply_queue *op_reply_q;
	int retval = 0;
	char q_lock_name[32];

	op_reply_q = &sc->op_reply_q[qid - 1];

	/* Guard against creating a queue that already exists. */
	if (op_reply_q->qid)
	{
		retval = -1;
		printf(IOCNAME "CreateRepQ: called for duplicate qid %d\n",
		    sc->name, op_reply_q->qid);
		return retval;
	}

	op_reply_q->ci = 0;
	/* A0-revision SAS4116 controllers use a reduced queue depth. */
	if (pci_get_revid(sc->mpi3mr_dev) == SAS4116_CHIP_REV_A0)
		op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD_A0;
	else
		op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD;

	op_reply_q->qsz = op_reply_q->num_replies * sc->op_reply_sz;
	/* Expected phase bit starts at 1 for a freshly created queue. */
	op_reply_q->ephase = 1;

	/* Allocate queue DMA memory only on first creation; on re-create
	 * (e.g. after a reset) the existing allocation is reused. */
        if (!op_reply_q->q_base) {
		snprintf(q_lock_name, 32, "Reply Queue Lock[%d]", qid);
		mtx_init(&op_reply_q->q_lock, q_lock_name, NULL, MTX_SPIN);

		if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,    /* parent */
					4, 0,			/* algnmnt, boundary */
					BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
					BUS_SPACE_MAXADDR,	/* highaddr */
					NULL, NULL,		/* filter, filterarg */
					op_reply_q->qsz,		/* maxsize */
					1,			/* nsegments */
					op_reply_q->qsz,		/* maxsegsize */
					0,			/* flags */
					NULL, NULL,		/* lockfunc, lockarg */
					&op_reply_q->q_base_tag)) {
			mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate Operational reply DMA tag\n");
			return (ENOMEM);
		}

		if (bus_dmamem_alloc(op_reply_q->q_base_tag, (void **)&op_reply_q->q_base,
		    BUS_DMA_NOWAIT, &op_reply_q->q_base_dmamap)) {
			mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate replies memory\n");
			return (ENOMEM);
		}
		bzero(op_reply_q->q_base, op_reply_q->qsz);
		bus_dmamap_load(op_reply_q->q_base_tag, op_reply_q->q_base_dmamap, op_reply_q->q_base, op_reply_q->qsz,
		    mpi3mr_memaddr_cb, &op_reply_q->q_base_phys, 0);
		mpi3mr_dprint(sc, MPI3MR_XINFO, "Operational Reply queue ID: %d phys addr= %#016jx virt_addr: %pa size= %d\n",
		    qid, (uintmax_t)op_reply_q->q_base_phys, op_reply_q->q_base, op_reply_q->qsz);

		/* NOTE(review): q_base was successfully allocated a few
		 * lines above, so this check appears unreachable here. */
		if (!op_reply_q->q_base)
		{
			retval = -1;
			printf(IOCNAME "CreateRepQ: memory alloc failed for qid %d\n",
			    sc->name, qid);
			goto out;
		}
	}

	memset(&create_req, 0, sizeof(create_req));

	/* Serialize use of the shared init_cmds tracker. */
	mtx_lock(&sc->init_cmds.completion.lock);
	if (sc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		printf(IOCNAME "CreateRepQ: Init command is in use\n",
		    sc->name);
		mtx_unlock(&sc->init_cmds.completion.lock);
		goto out;
	}

	sc->init_cmds.state = MPI3MR_CMD_PENDING;
	sc->init_cmds.is_waiting = 1;
	sc->init_cmds.callback = NULL;
	create_req.HostTag = MPI3MR_HOSTTAG_INITCMDS;
	create_req.Function = MPI3_FUNCTION_CREATE_REPLY_QUEUE;
	create_req.QueueID = qid;
	create_req.Flags = MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_ENABLE;
	create_req.MSIxIndex = sc->irq_ctx[qid - 1].msix_index;
	create_req.BaseAddress = (U64)op_reply_q->q_base_phys;
	create_req.Size = op_reply_q->num_replies;

	init_completion(&sc->init_cmds.completion);
	retval = mpi3mr_submit_admin_cmd(sc, &create_req,
	    sizeof(create_req));
	if (retval) {
		printf(IOCNAME "CreateRepQ: Admin Post failed\n",
		    sc->name);
		goto out_unlock;
	}

	wait_for_completion_timeout(&sc->init_cmds.completion,
	  	MPI3MR_INTADMCMD_TIMEOUT);
	if (!(sc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		printf(IOCNAME "CreateRepQ: command timed out\n",
		    sc->name);
		/* Controller never answered: snapdump and mark dead. */
		mpi3mr_check_rh_fault_ioc(sc,
		    MPI3MR_RESET_FROM_CREATEREPQ_TIMEOUT);
		sc->unrecoverable = 1;
		retval = -1;
		goto out_unlock;
	}

	if ((sc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	     != MPI3_IOCSTATUS_SUCCESS ) {
		printf(IOCNAME "CreateRepQ: Failed IOCStatus(0x%04x) "
		    " Loginfo(0x%08x) \n" , sc->name,
		    (sc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    sc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}
	op_reply_q->qid = qid;
	/* Hook the queue to its interrupt context for completion routing. */
	sc->irq_ctx[qid - 1].op_reply_q = op_reply_q;

out_unlock:
	sc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mtx_unlock(&sc->init_cmds.completion.lock);
out:
	/* On any failure release the queue's DMA resources. */
	if (retval) {
		if (op_reply_q->q_base_phys != 0)
			bus_dmamap_unload(op_reply_q->q_base_tag, op_reply_q->q_base_dmamap);
		if (op_reply_q->q_base != NULL)
			bus_dmamem_free(op_reply_q->q_base_tag, op_reply_q->q_base, op_reply_q->q_base_dmamap);
		if (op_reply_q->q_base_tag != NULL)
			bus_dma_tag_destroy(op_reply_q->q_base_tag);
		op_reply_q->q_base = NULL;
		op_reply_q->qid = 0;
	}

	return retval;
}
712 
713 /**
714  * mpi3mr_create_op_req_queue - create operational request queue
715  * @sc: Adapter instance reference
716  * @req_qid: operational request queue id
717  * @reply_qid: Reply queue ID
718  *
 * Create operational request queue by issuing MPI request
720  * through admin queue.
721  *
722  * Return:  0 on success, non-zero on failure.
723  */
static int mpi3mr_create_op_req_queue(struct mpi3mr_softc *sc, U16 req_qid, U8 reply_qid)
{
	Mpi3CreateRequestQueueRequest_t create_req;
	struct mpi3mr_op_req_queue *op_req_q;
	int retval = 0;
	char q_lock_name[32];

	op_req_q = &sc->op_req_q[req_qid - 1];

	/* Guard against creating a queue that already exists. */
	if (op_req_q->qid)
	{
		retval = -1;
		printf(IOCNAME "CreateReqQ: called for duplicate qid %d\n",
		    sc->name, op_req_q->qid);
		return retval;
	}

	op_req_q->ci = 0;
	op_req_q->pi = 0;
	op_req_q->num_reqs = MPI3MR_OP_REQ_Q_QD;
	op_req_q->qsz = op_req_q->num_reqs * sc->facts.op_req_sz;
	/* Completions for this request queue arrive on 'reply_qid'. */
	op_req_q->reply_qid = reply_qid;

	/* Allocate queue DMA memory only on first creation; on re-create
	 * (e.g. after a reset) the existing allocation is reused. */
	if (!op_req_q->q_base) {
		snprintf(q_lock_name, 32, "Request Queue Lock[%d]", req_qid);
		mtx_init(&op_req_q->q_lock, q_lock_name, NULL, MTX_SPIN);

		if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,    /* parent */
					4, 0,			/* algnmnt, boundary */
					BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
					BUS_SPACE_MAXADDR,	/* highaddr */
					NULL, NULL,		/* filter, filterarg */
					op_req_q->qsz,		/* maxsize */
					1,			/* nsegments */
					op_req_q->qsz,		/* maxsegsize */
					0,			/* flags */
					NULL, NULL,		/* lockfunc, lockarg */
					&op_req_q->q_base_tag)) {
			mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate request DMA tag\n");
			return (ENOMEM);
		}

		if (bus_dmamem_alloc(op_req_q->q_base_tag, (void **)&op_req_q->q_base,
		    BUS_DMA_NOWAIT, &op_req_q->q_base_dmamap)) {
			mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate replies memory\n");
			return (ENOMEM);
		}

		bzero(op_req_q->q_base, op_req_q->qsz);

		bus_dmamap_load(op_req_q->q_base_tag, op_req_q->q_base_dmamap, op_req_q->q_base, op_req_q->qsz,
		    mpi3mr_memaddr_cb, &op_req_q->q_base_phys, 0);

		mpi3mr_dprint(sc, MPI3MR_XINFO, "Operational Request QID: %d phys addr= %#016jx virt addr= %pa size= %d associated Reply QID: %d\n",
		    req_qid, (uintmax_t)op_req_q->q_base_phys, op_req_q->q_base, op_req_q->qsz, reply_qid);

		/* NOTE(review): q_base was successfully allocated a few
		 * lines above, so this check appears unreachable here. */
		if (!op_req_q->q_base) {
			retval = -1;
			printf(IOCNAME "CreateReqQ: memory alloc failed for qid %d\n",
			    sc->name, req_qid);
			goto out;
		}
	}

	memset(&create_req, 0, sizeof(create_req));

	/* Serialize use of the shared init_cmds tracker. */
	mtx_lock(&sc->init_cmds.completion.lock);
	if (sc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		printf(IOCNAME "CreateReqQ: Init command is in use\n",
		    sc->name);
		mtx_unlock(&sc->init_cmds.completion.lock);
		goto out;
	}

	sc->init_cmds.state = MPI3MR_CMD_PENDING;
	sc->init_cmds.is_waiting = 1;
	sc->init_cmds.callback = NULL;
	create_req.HostTag = MPI3MR_HOSTTAG_INITCMDS;
	create_req.Function = MPI3_FUNCTION_CREATE_REQUEST_QUEUE;
	create_req.QueueID = req_qid;
	create_req.Flags = 0;
	create_req.ReplyQueueID = reply_qid;
	create_req.BaseAddress = (U64)op_req_q->q_base_phys;
	create_req.Size = op_req_q->num_reqs;

	init_completion(&sc->init_cmds.completion);
	retval = mpi3mr_submit_admin_cmd(sc, &create_req,
	    sizeof(create_req));
	if (retval) {
		printf(IOCNAME "CreateReqQ: Admin Post failed\n",
		    sc->name);
		goto out_unlock;
	}

	wait_for_completion_timeout(&sc->init_cmds.completion,
	    (MPI3MR_INTADMCMD_TIMEOUT));

	if (!(sc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		printf(IOCNAME "CreateReqQ: command timed out\n",
		    sc->name);
		/* Controller never answered: snapdump and mark dead. */
		mpi3mr_check_rh_fault_ioc(sc,
			MPI3MR_RESET_FROM_CREATEREQQ_TIMEOUT);
		sc->unrecoverable = 1;
		retval = -1;
		goto out_unlock;
	}

	if ((sc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	     != MPI3_IOCSTATUS_SUCCESS ) {
		printf(IOCNAME "CreateReqQ: Failed IOCStatus(0x%04x) "
		    " Loginfo(0x%08x) \n" , sc->name,
		    (sc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    sc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}
	op_req_q->qid = req_qid;

out_unlock:
	sc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mtx_unlock(&sc->init_cmds.completion.lock);
out:
	/* On any failure release the queue's DMA resources. */
	if (retval) {
		if (op_req_q->q_base_phys != 0)
			bus_dmamap_unload(op_req_q->q_base_tag, op_req_q->q_base_dmamap);
		if (op_req_q->q_base != NULL)
			bus_dmamem_free(op_req_q->q_base_tag, op_req_q->q_base, op_req_q->q_base_dmamap);
		if (op_req_q->q_base_tag != NULL)
			bus_dma_tag_destroy(op_req_q->q_base_tag);
		op_req_q->q_base = NULL;
		op_req_q->qid = 0;
	}
	return retval;
}
859 
860 /**
861  * mpi3mr_create_op_queues - create operational queues
862  * @sc: Adapter instance reference
863  *
 * Create operational queues (request queues and reply queues).
865  * Return:  0 on success, non-zero on failure.
866  */
867 static int mpi3mr_create_op_queues(struct mpi3mr_softc *sc)
868 {
869 	int retval = 0;
870 	U16 num_queues = 0, i = 0, qid;
871 
872 	num_queues = min(sc->facts.max_op_reply_q,
873 	    sc->facts.max_op_req_q);
874 	num_queues = min(num_queues, sc->msix_count);
875 
876 	/*
877 	 * During reset set the num_queues to the number of queues
878 	 * that was set before the reset.
879 	 */
880 	if (sc->num_queues)
881 		num_queues = sc->num_queues;
882 
883 	mpi3mr_dprint(sc, MPI3MR_XINFO, "Trying to create %d Operational Q pairs\n",
884 	    num_queues);
885 
886 	if (!sc->op_req_q) {
887 		sc->op_req_q = malloc(sizeof(struct mpi3mr_op_req_queue) *
888 		    num_queues, M_MPI3MR, M_NOWAIT | M_ZERO);
889 
890 		if (!sc->op_req_q) {
891 			mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to alloc memory for Request queue info\n");
892 			retval = -1;
893 			goto out_failed;
894 		}
895 	}
896 
897 	if (!sc->op_reply_q) {
898 		sc->op_reply_q = malloc(sizeof(struct mpi3mr_op_reply_queue) * num_queues,
899 			M_MPI3MR, M_NOWAIT | M_ZERO);
900 
901 		if (!sc->op_reply_q) {
902 			mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to alloc memory for Reply queue info\n");
903 			retval = -1;
904 			goto out_failed;
905 		}
906 	}
907 
908 	sc->num_hosttag_op_req_q = (sc->max_host_ios + 1) / num_queues;
909 
910 	/*Operational Request and reply queue ID starts with 1*/
911 	for (i = 0; i < num_queues; i++) {
912 		qid = i + 1;
913 		if (mpi3mr_create_op_reply_queue(sc, qid)) {
914 			mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to create Reply queue %d\n",
915 			    qid);
916 			break;
917 		}
918 		if (mpi3mr_create_op_req_queue(sc, qid,
919 		    sc->op_reply_q[qid - 1].qid)) {
920 			mpi3mr_delete_op_reply_queue(sc, qid);
921 			mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to create Request queue %d\n",
922 			    qid);
923 			break;
924 		}
925 
926 	}
927 
928 	/* Not even one queue is created successfully*/
929         if (i == 0) {
930                 retval = -1;
931                 goto out_failed;
932         }
933 
934 	if (!sc->num_queues) {
935 		sc->num_queues = i;
936 	} else {
937 		if (num_queues != i) {
938 			mpi3mr_dprint(sc, MPI3MR_ERROR, "Number of queues (%d) post reset are not same as"
939 					"queues allocated (%d) during driver init\n", i, num_queues);
940 			goto out_failed;
941 		}
942 	}
943 
944 	mpi3mr_dprint(sc, MPI3MR_INFO, "Successfully created %d Operational Queue pairs\n",
945 	    sc->num_queues);
946 	mpi3mr_dprint(sc, MPI3MR_INFO, "Request Queue QD: %d Reply queue QD: %d\n",
947 	    sc->op_req_q[0].num_reqs, sc->op_reply_q[0].num_replies);
948 
949 	return retval;
950 out_failed:
951 	if (sc->op_req_q) {
952 		free(sc->op_req_q, M_MPI3MR);
953 		sc->op_req_q = NULL;
954 	}
955 	if (sc->op_reply_q) {
956 		free(sc->op_reply_q, M_MPI3MR);
957 		sc->op_reply_q = NULL;
958 	}
959 	return retval;
960 }
961 
962 /**
963  * mpi3mr_setup_admin_qpair - Setup admin queue pairs
964  * @sc: Adapter instance reference
965  *
966  * Allocation and setup admin queues(request queues and reply queues).
967  * Return:  0 on success, non-zero on failure.
968  */
969 static int mpi3mr_setup_admin_qpair(struct mpi3mr_softc *sc)
970 {
971 	int retval = 0;
972 	U32 num_adm_entries = 0;
973 
974 	sc->admin_req_q_sz = MPI3MR_AREQQ_SIZE;
975 	sc->num_admin_reqs = sc->admin_req_q_sz / MPI3MR_AREQ_FRAME_SZ;
976 	sc->admin_req_ci = sc->admin_req_pi = 0;
977 
978 	sc->admin_reply_q_sz = MPI3MR_AREPQ_SIZE;
979 	sc->num_admin_replies = sc->admin_reply_q_sz/ MPI3MR_AREP_FRAME_SZ;
980 	sc->admin_reply_ci = 0;
981 	sc->admin_reply_ephase = 1;
982 
983 	if (!sc->admin_req) {
984 		if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,    /* parent */
985 					4, 0,			/* algnmnt, boundary */
986 					BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
987 					BUS_SPACE_MAXADDR,	/* highaddr */
988 					NULL, NULL,		/* filter, filterarg */
989 					sc->admin_req_q_sz,	/* maxsize */
990 					1,			/* nsegments */
991 					sc->admin_req_q_sz,	/* maxsegsize */
992 					0,			/* flags */
993 					NULL, NULL,		/* lockfunc, lockarg */
994 					&sc->admin_req_tag)) {
995 			mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate request DMA tag\n");
996 			return (ENOMEM);
997 		}
998 
999 		if (bus_dmamem_alloc(sc->admin_req_tag, (void **)&sc->admin_req,
1000 		    BUS_DMA_NOWAIT, &sc->admin_req_dmamap)) {
1001 			mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate replies memory\n");
1002 			return (ENOMEM);
1003 		}
1004 		bzero(sc->admin_req, sc->admin_req_q_sz);
1005 		bus_dmamap_load(sc->admin_req_tag, sc->admin_req_dmamap, sc->admin_req, sc->admin_req_q_sz,
1006 		    mpi3mr_memaddr_cb, &sc->admin_req_phys, 0);
1007 		mpi3mr_dprint(sc, MPI3MR_XINFO, "Admin Req queue phys addr= %#016jx size= %d\n",
1008 		    (uintmax_t)sc->admin_req_phys, sc->admin_req_q_sz);
1009 
1010 		if (!sc->admin_req)
1011 		{
1012 			retval = -1;
1013 			printf(IOCNAME "Memory alloc for AdminReqQ: failed\n",
1014 			    sc->name);
1015 			goto out_failed;
1016 		}
1017 	}
1018 
1019 	if (!sc->admin_reply) {
1020 		mtx_init(&sc->admin_reply_lock, "Admin Reply Queue Lock", NULL, MTX_SPIN);
1021 
1022 		if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,    /* parent */
1023 					4, 0,			/* algnmnt, boundary */
1024 					BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
1025 					BUS_SPACE_MAXADDR,	/* highaddr */
1026 					NULL, NULL,		/* filter, filterarg */
1027 					sc->admin_reply_q_sz,	/* maxsize */
1028 					1,			/* nsegments */
1029 					sc->admin_reply_q_sz,	/* maxsegsize */
1030 					0,			/* flags */
1031 					NULL, NULL,		/* lockfunc, lockarg */
1032 					&sc->admin_reply_tag)) {
1033 			mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate reply DMA tag\n");
1034 			return (ENOMEM);
1035 		}
1036 
1037 		if (bus_dmamem_alloc(sc->admin_reply_tag, (void **)&sc->admin_reply,
1038 		    BUS_DMA_NOWAIT, &sc->admin_reply_dmamap)) {
1039 			mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate replies memory\n");
1040 			return (ENOMEM);
1041 		}
1042 		bzero(sc->admin_reply, sc->admin_reply_q_sz);
1043 		bus_dmamap_load(sc->admin_reply_tag, sc->admin_reply_dmamap, sc->admin_reply, sc->admin_reply_q_sz,
1044 		    mpi3mr_memaddr_cb, &sc->admin_reply_phys, 0);
1045 		mpi3mr_dprint(sc, MPI3MR_XINFO, "Admin Reply queue phys addr= %#016jx size= %d\n",
1046 		    (uintmax_t)sc->admin_reply_phys, sc->admin_req_q_sz);
1047 
1048 
1049 		if (!sc->admin_reply)
1050 		{
1051 			retval = -1;
1052 			printf(IOCNAME "Memory alloc for AdminRepQ: failed\n",
1053 			    sc->name);
1054 			goto out_failed;
1055 		}
1056 	}
1057 
1058 	num_adm_entries = (sc->num_admin_replies << 16) |
1059 				(sc->num_admin_reqs);
1060 	mpi3mr_regwrite(sc, MPI3_SYSIF_ADMIN_Q_NUM_ENTRIES_OFFSET, num_adm_entries);
1061 	mpi3mr_regwrite64(sc, MPI3_SYSIF_ADMIN_REQ_Q_ADDR_LOW_OFFSET, sc->admin_req_phys);
1062 	mpi3mr_regwrite64(sc, MPI3_SYSIF_ADMIN_REPLY_Q_ADDR_LOW_OFFSET, sc->admin_reply_phys);
1063 	mpi3mr_regwrite(sc, MPI3_SYSIF_ADMIN_REQ_Q_PI_OFFSET, sc->admin_req_pi);
1064 	mpi3mr_regwrite(sc, MPI3_SYSIF_ADMIN_REPLY_Q_CI_OFFSET, sc->admin_reply_ci);
1065 
1066 	return retval;
1067 
1068 out_failed:
1069 	/* Free Admin reply*/
1070 	if (sc->admin_reply_phys)
1071 		bus_dmamap_unload(sc->admin_reply_tag, sc->admin_reply_dmamap);
1072 
1073 	if (sc->admin_reply != NULL)
1074 		bus_dmamem_free(sc->admin_reply_tag, sc->admin_reply,
1075 		    sc->admin_reply_dmamap);
1076 
1077 	if (sc->admin_reply_tag != NULL)
1078 		bus_dma_tag_destroy(sc->admin_reply_tag);
1079 
1080 	/* Free Admin request*/
1081 	if (sc->admin_req_phys)
1082 		bus_dmamap_unload(sc->admin_req_tag, sc->admin_req_dmamap);
1083 
1084 	if (sc->admin_req != NULL)
1085 		bus_dmamem_free(sc->admin_req_tag, sc->admin_req,
1086 		    sc->admin_req_dmamap);
1087 
1088 	if (sc->admin_req_tag != NULL)
1089 		bus_dma_tag_destroy(sc->admin_req_tag);
1090 
1091 	return retval;
1092 }
1093 
1094 /**
1095  * mpi3mr_print_fault_info - Display fault information
1096  * @sc: Adapter instance reference
1097  *
1098  * Display the controller fault information if there is a
1099  * controller fault.
1100  *
1101  * Return: Nothing.
1102  */
1103 static void mpi3mr_print_fault_info(struct mpi3mr_softc *sc)
1104 {
1105 	U32 ioc_status, code, code1, code2, code3;
1106 
1107 	ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);
1108 
1109 	if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) {
1110 		code = mpi3mr_regread(sc, MPI3_SYSIF_FAULT_OFFSET) &
1111 			MPI3_SYSIF_FAULT_CODE_MASK;
1112 		code1 = mpi3mr_regread(sc, MPI3_SYSIF_FAULT_INFO0_OFFSET);
1113 		code2 = mpi3mr_regread(sc, MPI3_SYSIF_FAULT_INFO1_OFFSET);
1114 		code3 = mpi3mr_regread(sc, MPI3_SYSIF_FAULT_INFO2_OFFSET);
1115 		printf(IOCNAME "fault codes 0x%04x:0x%04x:0x%04x:0x%04x\n",
1116 		    sc->name, code, code1, code2, code3);
1117 	}
1118 }
1119 
1120 enum mpi3mr_iocstate mpi3mr_get_iocstate(struct mpi3mr_softc *sc)
1121 {
1122 	U32 ioc_status, ioc_control;
1123 	U8 ready, enabled;
1124 
1125 	ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);
1126 	ioc_control = mpi3mr_regread(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET);
1127 
1128 	if(sc->unrecoverable)
1129 		return MRIOC_STATE_UNRECOVERABLE;
1130 	if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)
1131 		return MRIOC_STATE_FAULT;
1132 
1133 	ready = (ioc_status & MPI3_SYSIF_IOC_STATUS_READY);
1134 	enabled = (ioc_control & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC);
1135 
1136 	if (ready && enabled)
1137 		return MRIOC_STATE_READY;
1138 	if ((!ready) && (!enabled))
1139 		return MRIOC_STATE_RESET;
1140 	if ((!ready) && (enabled))
1141 		return MRIOC_STATE_BECOMING_READY;
1142 
1143 	return MRIOC_STATE_RESET_REQUESTED;
1144 }
1145 
1146 static inline void mpi3mr_clear_resethistory(struct mpi3mr_softc *sc)
1147 {
1148         U32 ioc_status;
1149 
1150 	ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);
1151         if (ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)
1152 		mpi3mr_regwrite(sc, MPI3_SYSIF_IOC_STATUS_OFFSET, ioc_status);
1153 
1154 }
1155 
1156 /**
1157  * mpi3mr_mur_ioc - Message unit Reset handler
1158  * @sc: Adapter instance reference
1159  * @reset_reason: Reset reason code
1160  *
1161  * Issue Message unit Reset to the controller and wait for it to
1162  * be complete.
1163  *
1164  * Return: 0 on success, -1 on failure.
1165  */
1166 static int mpi3mr_mur_ioc(struct mpi3mr_softc *sc, U32 reset_reason)
1167 {
1168         U32 ioc_config, timeout, ioc_status;
1169         int retval = -1;
1170 
1171         mpi3mr_dprint(sc, MPI3MR_INFO, "Issuing Message Unit Reset(MUR)\n");
1172         if (sc->unrecoverable) {
1173                 mpi3mr_dprint(sc, MPI3MR_ERROR, "IOC is unrecoverable MUR not issued\n");
1174                 return retval;
1175         }
1176         mpi3mr_clear_resethistory(sc);
1177 	mpi3mr_regwrite(sc, MPI3_SYSIF_SCRATCHPAD0_OFFSET, reset_reason);
1178 	ioc_config = mpi3mr_regread(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET);
1179         ioc_config &= ~MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC;
1180 	mpi3mr_regwrite(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET, ioc_config);
1181 
1182         timeout = MPI3MR_MUR_TIMEOUT * 10;
1183         do {
1184 		ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);
1185                 if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)) {
1186                         mpi3mr_clear_resethistory(sc);
1187 			ioc_config =
1188 				mpi3mr_regread(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET);
1189                         if (!((ioc_status & MPI3_SYSIF_IOC_STATUS_READY) ||
1190                             (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) ||
1191                             (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC))) {
1192                                 retval = 0;
1193                                 break;
1194                         }
1195                 }
1196                 DELAY(100 * 1000);
1197         } while (--timeout);
1198 
1199 	ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);
1200 	ioc_config = mpi3mr_regread(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET);
1201 
1202         mpi3mr_dprint(sc, MPI3MR_INFO, "IOC Status/Config after %s MUR is (0x%x)/(0x%x)\n",
1203                 !retval ? "successful":"failed", ioc_status, ioc_config);
1204         return retval;
1205 }
1206 
1207 /**
1208  * mpi3mr_bring_ioc_ready - Bring controller to ready state
1209  * @sc: Adapter instance reference
1210  *
1211  * Set Enable IOC bit in IOC configuration register and wait for
1212  * the controller to become ready.
1213  *
1214  * Return: 0 on success, appropriate error on failure.
1215  */
1216 static int mpi3mr_bring_ioc_ready(struct mpi3mr_softc *sc)
1217 {
1218         U32 ioc_config, timeout;
1219         enum mpi3mr_iocstate current_state;
1220 
1221 	ioc_config = mpi3mr_regread(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET);
1222         ioc_config |= MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC;
1223 	mpi3mr_regwrite(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET, ioc_config);
1224 
1225         timeout = sc->ready_timeout * 10;
1226         do {
1227                 current_state = mpi3mr_get_iocstate(sc);
1228                 if (current_state == MRIOC_STATE_READY)
1229                         return 0;
1230                 DELAY(100 * 1000);
1231         } while (--timeout);
1232 
1233         return -1;
1234 }
1235 
1236 static const struct {
1237 	enum mpi3mr_iocstate value;
1238 	char *name;
1239 } mrioc_states[] = {
1240 	{ MRIOC_STATE_READY, "ready" },
1241 	{ MRIOC_STATE_FAULT, "fault" },
1242 	{ MRIOC_STATE_RESET, "reset" },
1243 	{ MRIOC_STATE_BECOMING_READY, "becoming ready" },
1244 	{ MRIOC_STATE_RESET_REQUESTED, "reset requested" },
1245 	{ MRIOC_STATE_COUNT, "Count" },
1246 };
1247 
1248 static const char *mpi3mr_iocstate_name(enum mpi3mr_iocstate mrioc_state)
1249 {
1250 	int i;
1251 	char *name = NULL;
1252 
1253 	for (i = 0; i < MRIOC_STATE_COUNT; i++) {
1254 		if (mrioc_states[i].value == mrioc_state){
1255 			name = mrioc_states[i].name;
1256 			break;
1257 		}
1258 	}
1259 	return name;
1260 }
1261 
1262 /* Reset reason to name mapper structure*/
1263 static const struct {
1264 	enum mpi3mr_reset_reason value;
1265 	char *name;
1266 } mpi3mr_reset_reason_codes[] = {
1267 	{ MPI3MR_RESET_FROM_BRINGUP, "timeout in bringup" },
1268 	{ MPI3MR_RESET_FROM_FAULT_WATCH, "fault" },
1269 	{ MPI3MR_RESET_FROM_IOCTL, "application" },
1270 	{ MPI3MR_RESET_FROM_EH_HOS, "error handling" },
1271 	{ MPI3MR_RESET_FROM_TM_TIMEOUT, "TM timeout" },
1272 	{ MPI3MR_RESET_FROM_IOCTL_TIMEOUT, "IOCTL timeout" },
1273 	{ MPI3MR_RESET_FROM_SCSIIO_TIMEOUT, "SCSIIO timeout" },
1274 	{ MPI3MR_RESET_FROM_MUR_FAILURE, "MUR failure" },
1275 	{ MPI3MR_RESET_FROM_CTLR_CLEANUP, "timeout in controller cleanup" },
1276 	{ MPI3MR_RESET_FROM_CIACTIV_FAULT, "component image activation fault" },
1277 	{ MPI3MR_RESET_FROM_PE_TIMEOUT, "port enable timeout" },
1278 	{ MPI3MR_RESET_FROM_TSU_TIMEOUT, "time stamp update timeout" },
1279 	{ MPI3MR_RESET_FROM_DELREQQ_TIMEOUT, "delete request queue timeout" },
1280 	{ MPI3MR_RESET_FROM_DELREPQ_TIMEOUT, "delete reply queue timeout" },
1281 	{
1282 		MPI3MR_RESET_FROM_CREATEREPQ_TIMEOUT,
1283 		"create request queue timeout"
1284 	},
1285 	{
1286 		MPI3MR_RESET_FROM_CREATEREQQ_TIMEOUT,
1287 		"create reply queue timeout"
1288 	},
1289 	{ MPI3MR_RESET_FROM_IOCFACTS_TIMEOUT, "IOC facts timeout" },
1290 	{ MPI3MR_RESET_FROM_IOCINIT_TIMEOUT, "IOC init timeout" },
1291 	{ MPI3MR_RESET_FROM_EVTNOTIFY_TIMEOUT, "event notify timeout" },
1292 	{ MPI3MR_RESET_FROM_EVTACK_TIMEOUT, "event acknowledgment timeout" },
1293 	{
1294 		MPI3MR_RESET_FROM_CIACTVRST_TIMER,
1295 		"component image activation timeout"
1296 	},
1297 	{
1298 		MPI3MR_RESET_FROM_GETPKGVER_TIMEOUT,
1299 		"get package version timeout"
1300 	},
1301 	{
1302 		MPI3MR_RESET_FROM_PELABORT_TIMEOUT,
1303 		"persistent event log abort timeout"
1304 	},
1305 	{ MPI3MR_RESET_FROM_SYSFS, "sysfs invocation" },
1306 	{ MPI3MR_RESET_FROM_SYSFS_TIMEOUT, "sysfs TM timeout" },
1307 	{
1308 		MPI3MR_RESET_FROM_DIAG_BUFFER_POST_TIMEOUT,
1309 		"diagnostic buffer post timeout"
1310 	},
1311 	{ MPI3MR_RESET_FROM_FIRMWARE, "firmware asynchronus reset" },
1312 	{ MPI3MR_RESET_REASON_COUNT, "Reset reason count" },
1313 };
1314 
1315 /**
1316  * mpi3mr_reset_rc_name - get reset reason code name
1317  * @reason_code: reset reason code value
1318  *
1319  * Map reset reason to an NULL terminated ASCII string
1320  *
1321  * Return: Name corresponding to reset reason value or NULL.
1322  */
1323 static const char *mpi3mr_reset_rc_name(enum mpi3mr_reset_reason reason_code)
1324 {
1325 	int i;
1326 	char *name = NULL;
1327 
1328 	for (i = 0; i < MPI3MR_RESET_REASON_COUNT; i++) {
1329 		if (mpi3mr_reset_reason_codes[i].value == reason_code) {
1330 			name = mpi3mr_reset_reason_codes[i].name;
1331 			break;
1332 		}
1333 	}
1334 	return name;
1335 }
1336 
1337 #define MAX_RESET_TYPE 3
1338 /* Reset type to name mapper structure*/
1339 static const struct {
1340 	U16 reset_type;
1341 	char *name;
1342 } mpi3mr_reset_types[] = {
1343 	{ MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET, "soft" },
1344 	{ MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, "diag fault" },
1345 	{ MAX_RESET_TYPE, "count"}
1346 };
1347 
1348 /**
1349  * mpi3mr_reset_type_name - get reset type name
1350  * @reset_type: reset type value
1351  *
1352  * Map reset type to an NULL terminated ASCII string
1353  *
1354  * Return: Name corresponding to reset type value or NULL.
1355  */
1356 static const char *mpi3mr_reset_type_name(U16 reset_type)
1357 {
1358 	int i;
1359 	char *name = NULL;
1360 
1361 	for (i = 0; i < MAX_RESET_TYPE; i++) {
1362 		if (mpi3mr_reset_types[i].reset_type == reset_type) {
1363 			name = mpi3mr_reset_types[i].name;
1364 			break;
1365 		}
1366 	}
1367 	return name;
1368 }
1369 
1370 /**
1371  * mpi3mr_soft_reset_success - Check softreset is success or not
1372  * @ioc_status: IOC status register value
1373  * @ioc_config: IOC config register value
1374  *
1375  * Check whether the soft reset is successful or not based on
1376  * IOC status and IOC config register values.
1377  *
1378  * Return: True when the soft reset is success, false otherwise.
1379  */
1380 static inline bool
1381 mpi3mr_soft_reset_success(U32 ioc_status, U32 ioc_config)
1382 {
1383 	if (!((ioc_status & MPI3_SYSIF_IOC_STATUS_READY) ||
1384 	    (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) ||
1385 	    (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC)))
1386 		return true;
1387 	return false;
1388 }
1389 
1390 /**
1391  * mpi3mr_diagfault_success - Check diag fault is success or not
1392  * @sc: Adapter reference
1393  * @ioc_status: IOC status register value
1394  *
1395  * Check whether the controller hit diag reset fault code.
1396  *
1397  * Return: True when there is diag fault, false otherwise.
1398  */
1399 static inline bool mpi3mr_diagfault_success(struct mpi3mr_softc *sc,
1400 	U32 ioc_status)
1401 {
1402 	U32 fault;
1403 
1404 	if (!(ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT))
1405 		return false;
1406 	fault = mpi3mr_regread(sc, MPI3_SYSIF_FAULT_OFFSET) & MPI3_SYSIF_FAULT_CODE_MASK;
1407 	if (fault == MPI3_SYSIF_FAULT_CODE_DIAG_FAULT_RESET)
1408 		return true;
1409 	return false;
1410 }
1411 
1412 /**
1413  * mpi3mr_issue_iocfacts - Send IOC Facts
1414  * @sc: Adapter instance reference
1415  * @facts_data: Cached IOC facts data
1416  *
1417  * Issue IOC Facts MPI request through admin queue and wait for
1418  * the completion of it or time out.
1419  *
1420  * Return: 0 on success, non-zero on failures.
1421  */
1422 static int mpi3mr_issue_iocfacts(struct mpi3mr_softc *sc,
1423     Mpi3IOCFactsData_t *facts_data)
1424 {
1425 	Mpi3IOCFactsRequest_t iocfacts_req;
1426 	bus_dma_tag_t data_tag = NULL;
1427 	bus_dmamap_t data_map = NULL;
1428 	bus_addr_t data_phys = 0;
1429 	void *data = NULL;
1430 	U32 data_len = sizeof(*facts_data);
1431 	int retval = 0;
1432 
1433 	U8 sgl_flags = (MPI3_SGE_FLAGS_ELEMENT_TYPE_SIMPLE |
1434                 	MPI3_SGE_FLAGS_DLAS_SYSTEM |
1435 			MPI3_SGE_FLAGS_END_OF_LIST);
1436 
1437 
1438         if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,    /* parent */
1439 				4, 0,			/* algnmnt, boundary */
1440 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
1441 				BUS_SPACE_MAXADDR,	/* highaddr */
1442 				NULL, NULL,		/* filter, filterarg */
1443                                 data_len,		/* maxsize */
1444                                 1,			/* nsegments */
1445                                 data_len,		/* maxsegsize */
1446                                 0,			/* flags */
1447                                 NULL, NULL,		/* lockfunc, lockarg */
1448                                 &data_tag)) {
1449 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate request DMA tag\n");
1450 		return (ENOMEM);
1451         }
1452 
1453         if (bus_dmamem_alloc(data_tag, (void **)&data,
1454 	    BUS_DMA_NOWAIT, &data_map)) {
1455 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Func: %s line: %d Data  DMA mem alloc failed\n",
1456 			__func__, __LINE__);
1457 		return (ENOMEM);
1458         }
1459 
1460         bzero(data, data_len);
1461         bus_dmamap_load(data_tag, data_map, data, data_len,
1462 	    mpi3mr_memaddr_cb, &data_phys, 0);
1463 	mpi3mr_dprint(sc, MPI3MR_XINFO, "Func: %s line: %d IOCfacts data phys addr= %#016jx size= %d\n",
1464 	    __func__, __LINE__, (uintmax_t)data_phys, data_len);
1465 
1466 	if (!data)
1467 	{
1468 		retval = -1;
1469 		printf(IOCNAME "Memory alloc for IOCFactsData: failed\n",
1470 		    sc->name);
1471 		goto out;
1472 	}
1473 
1474 	mtx_lock(&sc->init_cmds.completion.lock);
1475 	memset(&iocfacts_req, 0, sizeof(iocfacts_req));
1476 
1477 	if (sc->init_cmds.state & MPI3MR_CMD_PENDING) {
1478 		retval = -1;
1479 		printf(IOCNAME "Issue IOCFacts: Init command is in use\n",
1480 		    sc->name);
1481 		mtx_unlock(&sc->init_cmds.completion.lock);
1482 		goto out;
1483 	}
1484 
1485 	sc->init_cmds.state = MPI3MR_CMD_PENDING;
1486 	sc->init_cmds.is_waiting = 1;
1487 	sc->init_cmds.callback = NULL;
1488 	iocfacts_req.HostTag = (MPI3MR_HOSTTAG_INITCMDS);
1489 	iocfacts_req.Function = MPI3_FUNCTION_IOC_FACTS;
1490 
1491 	mpi3mr_add_sg_single(&iocfacts_req.SGL, sgl_flags, data_len,
1492 	    data_phys);
1493 
1494 	init_completion(&sc->init_cmds.completion);
1495 
1496 	retval = mpi3mr_submit_admin_cmd(sc, &iocfacts_req,
1497 	    sizeof(iocfacts_req));
1498 
1499 	if (retval) {
1500 		printf(IOCNAME "Issue IOCFacts: Admin Post failed\n",
1501 		    sc->name);
1502 		goto out_unlock;
1503 	}
1504 
1505 	wait_for_completion_timeout(&sc->init_cmds.completion,
1506 	    (MPI3MR_INTADMCMD_TIMEOUT));
1507 	if (!(sc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
1508 		printf(IOCNAME "Issue IOCFacts: command timed out\n",
1509 		    sc->name);
1510 		mpi3mr_check_rh_fault_ioc(sc,
1511 		    MPI3MR_RESET_FROM_IOCFACTS_TIMEOUT);
1512 		sc->unrecoverable = 1;
1513 		retval = -1;
1514 		goto out_unlock;
1515 	}
1516 
1517 	if ((sc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
1518 	     != MPI3_IOCSTATUS_SUCCESS ) {
1519 		printf(IOCNAME "Issue IOCFacts: Failed IOCStatus(0x%04x) "
1520 		    " Loginfo(0x%08x) \n" , sc->name,
1521 		    (sc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
1522 		    sc->init_cmds.ioc_loginfo);
1523 		retval = -1;
1524 		goto out_unlock;
1525 	}
1526 
1527 	memcpy(facts_data, (U8 *)data, data_len);
1528 out_unlock:
1529 	sc->init_cmds.state = MPI3MR_CMD_NOTUSED;
1530 	mtx_unlock(&sc->init_cmds.completion.lock);
1531 
1532 out:
1533 	if (data_phys != 0)
1534 		bus_dmamap_unload(data_tag, data_map);
1535 	if (data != NULL)
1536 		bus_dmamem_free(data_tag, data, data_map);
1537 	if (data_tag != NULL)
1538 		bus_dma_tag_destroy(data_tag);
1539 	return retval;
1540 }
1541 
1542 /**
1543  * mpi3mr_process_factsdata - Process IOC facts data
1544  * @sc: Adapter instance reference
1545  * @facts_data: Cached IOC facts data
1546  *
1547  * Convert IOC facts data into cpu endianness and cache it in
1548  * the driver .
1549  *
1550  * Return: Nothing.
1551  */
1552 static int mpi3mr_process_factsdata(struct mpi3mr_softc *sc,
1553     Mpi3IOCFactsData_t *facts_data)
1554 {
1555 	int retval = 0;
1556 	U32 ioc_config, req_sz, facts_flags;
1557 
1558 	if (le16toh(facts_data->IOCFactsDataLength) !=
1559 	    (sizeof(*facts_data) / 4)) {
1560 		mpi3mr_dprint(sc, MPI3MR_INFO, "IOCFacts data length mismatch "
1561 		    " driver_sz(%ld) firmware_sz(%d) \n",
1562 		    sizeof(*facts_data),
1563 		    facts_data->IOCFactsDataLength);
1564 	}
1565 
1566 	ioc_config = mpi3mr_regread(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET);
1567         req_sz = 1 << ((ioc_config & MPI3_SYSIF_IOC_CONFIG_OPER_REQ_ENT_SZ) >>
1568                   MPI3_SYSIF_IOC_CONFIG_OPER_REQ_ENT_SZ_SHIFT);
1569 
1570 	if (facts_data->IOCRequestFrameSize != (req_sz/4)) {
1571 		 mpi3mr_dprint(sc, MPI3MR_INFO, "IOCFacts data reqFrameSize mismatch "
1572 		    " hw_size(%d) firmware_sz(%d) \n" , req_sz/4,
1573 		    facts_data->IOCRequestFrameSize);
1574 	}
1575 
1576 	memset(&sc->facts, 0, sizeof(sc->facts));
1577 
1578 	facts_flags = le32toh(facts_data->Flags);
1579 	sc->facts.op_req_sz = req_sz;
1580 	sc->op_reply_sz = 1 << ((ioc_config &
1581                                   MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ) >>
1582                                   MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ_SHIFT);
1583 
1584 	sc->facts.ioc_num = facts_data->IOCNumber;
1585         sc->facts.who_init = facts_data->WhoInit;
1586         sc->facts.max_msix_vectors = facts_data->MaxMSIxVectors;
1587 	sc->facts.personality = (facts_flags &
1588 	    MPI3_IOCFACTS_FLAGS_PERSONALITY_MASK);
1589 	sc->facts.dma_mask = (facts_flags &
1590 	    MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_MASK) >>
1591 	    MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_SHIFT;
1592         sc->facts.protocol_flags = facts_data->ProtocolFlags;
1593         sc->facts.mpi_version = (facts_data->MPIVersion.Word);
1594         sc->facts.max_reqs = (facts_data->MaxOutstandingRequests);
1595         sc->facts.product_id = (facts_data->ProductID);
1596 	sc->facts.reply_sz = (facts_data->ReplyFrameSize) * 4;
1597         sc->facts.exceptions = (facts_data->IOCExceptions);
1598         sc->facts.max_perids = (facts_data->MaxPersistentID);
1599         sc->facts.max_vds = (facts_data->MaxVDs);
1600         sc->facts.max_hpds = (facts_data->MaxHostPDs);
1601         sc->facts.max_advhpds = (facts_data->MaxAdvHostPDs);
1602         sc->facts.max_raidpds = (facts_data->MaxRAIDPDs);
1603         sc->facts.max_nvme = (facts_data->MaxNVMe);
1604         sc->facts.max_pcieswitches =
1605                 (facts_data->MaxPCIeSwitches);
1606         sc->facts.max_sasexpanders =
1607                 (facts_data->MaxSASExpanders);
1608         sc->facts.max_sasinitiators =
1609                 (facts_data->MaxSASInitiators);
1610         sc->facts.max_enclosures = (facts_data->MaxEnclosures);
1611         sc->facts.min_devhandle = (facts_data->MinDevHandle);
1612         sc->facts.max_devhandle = (facts_data->MaxDevHandle);
1613 	sc->facts.max_op_req_q =
1614                 (facts_data->MaxOperationalRequestQueues);
1615 	sc->facts.max_op_reply_q =
1616                 (facts_data->MaxOperationalReplyQueues);
1617         sc->facts.ioc_capabilities =
1618                 (facts_data->IOCCapabilities);
1619         sc->facts.fw_ver.build_num =
1620                 (facts_data->FWVersion.BuildNum);
1621         sc->facts.fw_ver.cust_id =
1622                 (facts_data->FWVersion.CustomerID);
1623         sc->facts.fw_ver.ph_minor = facts_data->FWVersion.PhaseMinor;
1624         sc->facts.fw_ver.ph_major = facts_data->FWVersion.PhaseMajor;
1625         sc->facts.fw_ver.gen_minor = facts_data->FWVersion.GenMinor;
1626         sc->facts.fw_ver.gen_major = facts_data->FWVersion.GenMajor;
1627         sc->max_msix_vectors = min(sc->max_msix_vectors,
1628             sc->facts.max_msix_vectors);
1629         sc->facts.sge_mod_mask = facts_data->SGEModifierMask;
1630         sc->facts.sge_mod_value = facts_data->SGEModifierValue;
1631         sc->facts.sge_mod_shift = facts_data->SGEModifierShift;
1632         sc->facts.shutdown_timeout =
1633                 (facts_data->ShutdownTimeout);
1634 	sc->facts.max_dev_per_tg = facts_data->MaxDevicesPerThrottleGroup;
1635 	sc->facts.io_throttle_data_length =
1636 	    facts_data->IOThrottleDataLength;
1637 	sc->facts.max_io_throttle_group =
1638 	    facts_data->MaxIOThrottleGroup;
1639 	sc->facts.io_throttle_low = facts_data->IOThrottleLow;
1640 	sc->facts.io_throttle_high = facts_data->IOThrottleHigh;
1641 
1642 	/*Store in 512b block count*/
1643 	if (sc->facts.io_throttle_data_length)
1644 		sc->io_throttle_data_length =
1645 		    (sc->facts.io_throttle_data_length * 2 * 4);
1646 	else
1647 		/* set the length to 1MB + 1K to disable throttle*/
1648 		sc->io_throttle_data_length = MPI3MR_MAX_SECTORS + 2;
1649 
1650 	sc->io_throttle_high = (sc->facts.io_throttle_high * 2 * 1024);
1651 	sc->io_throttle_low = (sc->facts.io_throttle_low * 2 * 1024);
1652 
1653 	mpi3mr_dprint(sc, MPI3MR_INFO, "ioc_num(%d), maxopQ(%d), maxopRepQ(%d), maxdh(%d),"
1654             "maxreqs(%d), mindh(%d) maxPDs(%d) maxvectors(%d) maxperids(%d)\n",
1655 	    sc->facts.ioc_num, sc->facts.max_op_req_q,
1656 	    sc->facts.max_op_reply_q, sc->facts.max_devhandle,
1657             sc->facts.max_reqs, sc->facts.min_devhandle,
1658             sc->facts.max_pds, sc->facts.max_msix_vectors,
1659             sc->facts.max_perids);
1660         mpi3mr_dprint(sc, MPI3MR_INFO, "SGEModMask 0x%x SGEModVal 0x%x SGEModShift 0x%x\n",
1661             sc->facts.sge_mod_mask, sc->facts.sge_mod_value,
1662             sc->facts.sge_mod_shift);
1663 	mpi3mr_dprint(sc, MPI3MR_INFO,
1664 	    "max_dev_per_throttle_group(%d), max_throttle_groups(%d), io_throttle_data_len(%dKiB), io_throttle_high(%dMiB), io_throttle_low(%dMiB)\n",
1665 	    sc->facts.max_dev_per_tg, sc->facts.max_io_throttle_group,
1666 	    sc->facts.io_throttle_data_length * 4,
1667 	    sc->facts.io_throttle_high, sc->facts.io_throttle_low);
1668 
1669 	sc->max_host_ios = sc->facts.max_reqs -
1670 	    (MPI3MR_INTERNALCMDS_RESVD + 1);
1671 
1672 	return retval;
1673 }
1674 
1675 static inline void mpi3mr_setup_reply_free_queues(struct mpi3mr_softc *sc)
1676 {
1677 	int i;
1678 	bus_addr_t phys_addr;
1679 
1680 	/* initialize Reply buffer Queue */
1681 	for (i = 0, phys_addr = sc->reply_buf_phys;
1682 	    i < sc->num_reply_bufs; i++, phys_addr += sc->reply_sz)
1683 		sc->reply_free_q[i] = phys_addr;
1684 	sc->reply_free_q[i] = (0);
1685 
1686 	/* initialize Sense Buffer Queue */
1687 	for (i = 0, phys_addr = sc->sense_buf_phys;
1688 	    i < sc->num_sense_bufs; i++, phys_addr += MPI3MR_SENSEBUF_SZ)
1689 		sc->sense_buf_q[i] = phys_addr;
1690 	sc->sense_buf_q[i] = (0);
1691 
1692 }
1693 
1694 static int mpi3mr_reply_dma_alloc(struct mpi3mr_softc *sc)
1695 {
1696 	U32 sz;
1697 
1698 	sc->num_reply_bufs = sc->facts.max_reqs + MPI3MR_NUM_EVTREPLIES;
1699 	sc->reply_free_q_sz = sc->num_reply_bufs + 1;
1700 	sc->num_sense_bufs = sc->facts.max_reqs / MPI3MR_SENSEBUF_FACTOR;
1701 	sc->sense_buf_q_sz = sc->num_sense_bufs + 1;
1702 
1703 	sz = sc->num_reply_bufs * sc->reply_sz;
1704 
1705 	if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,  /* parent */
1706 				16, 0,			/* algnmnt, boundary */
1707 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
1708 				BUS_SPACE_MAXADDR,	/* highaddr */
1709 				NULL, NULL,		/* filter, filterarg */
1710                                 sz,			/* maxsize */
1711                                 1,			/* nsegments */
1712                                 sz,			/* maxsegsize */
1713                                 0,			/* flags */
1714                                 NULL, NULL,		/* lockfunc, lockarg */
1715                                 &sc->reply_buf_tag)) {
1716 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate request DMA tag\n");
1717 		return (ENOMEM);
1718         }
1719 
1720 	if (bus_dmamem_alloc(sc->reply_buf_tag, (void **)&sc->reply_buf,
1721 	    BUS_DMA_NOWAIT, &sc->reply_buf_dmamap)) {
1722 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Func: %s line: %d  DMA mem alloc failed\n",
1723 			__func__, __LINE__);
1724 		return (ENOMEM);
1725         }
1726 
1727 	bzero(sc->reply_buf, sz);
1728         bus_dmamap_load(sc->reply_buf_tag, sc->reply_buf_dmamap, sc->reply_buf, sz,
1729 	    mpi3mr_memaddr_cb, &sc->reply_buf_phys, 0);
1730 
1731 	sc->reply_buf_dma_min_address = sc->reply_buf_phys;
1732 	sc->reply_buf_dma_max_address = sc->reply_buf_phys + sz;
1733 	mpi3mr_dprint(sc, MPI3MR_XINFO, "reply buf (0x%p): depth(%d), frame_size(%d), "
1734 	    "pool_size(%d kB), reply_buf_dma(0x%llx)\n",
1735 	    sc->reply_buf, sc->num_reply_bufs, sc->reply_sz,
1736 	    (sz / 1024), (unsigned long long)sc->reply_buf_phys);
1737 
1738 	/* reply free queue, 8 byte align */
1739 	sz = sc->reply_free_q_sz * 8;
1740 
1741         if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,    /* parent */
1742 				8, 0,			/* algnmnt, boundary */
1743 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
1744 				BUS_SPACE_MAXADDR,	/* highaddr */
1745 				NULL, NULL,		/* filter, filterarg */
1746                                 sz,			/* maxsize */
1747                                 1,			/* nsegments */
1748                                 sz,			/* maxsegsize */
1749                                 0,			/* flags */
1750                                 NULL, NULL,		/* lockfunc, lockarg */
1751                                 &sc->reply_free_q_tag)) {
1752 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate reply free queue DMA tag\n");
1753 		return (ENOMEM);
1754         }
1755 
1756         if (bus_dmamem_alloc(sc->reply_free_q_tag, (void **)&sc->reply_free_q,
1757 	    BUS_DMA_NOWAIT, &sc->reply_free_q_dmamap)) {
1758 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Func: %s line: %d  DMA mem alloc failed\n",
1759 			__func__, __LINE__);
1760 		return (ENOMEM);
1761         }
1762 
1763 	bzero(sc->reply_free_q, sz);
1764         bus_dmamap_load(sc->reply_free_q_tag, sc->reply_free_q_dmamap, sc->reply_free_q, sz,
1765 	    mpi3mr_memaddr_cb, &sc->reply_free_q_phys, 0);
1766 
1767 	mpi3mr_dprint(sc, MPI3MR_XINFO, "reply_free_q (0x%p): depth(%d), frame_size(%d), "
1768 	    "pool_size(%d kB), reply_free_q_dma(0x%llx)\n",
1769 	    sc->reply_free_q, sc->reply_free_q_sz, 8, (sz / 1024),
1770 	    (unsigned long long)sc->reply_free_q_phys);
1771 
1772 	/* sense buffer pool,  4 byte align */
1773 	sz = sc->num_sense_bufs * MPI3MR_SENSEBUF_SZ;
1774 
1775         if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,    /* parent */
1776 				4, 0,			/* algnmnt, boundary */
1777 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
1778 				BUS_SPACE_MAXADDR,	/* highaddr */
1779 				NULL, NULL,		/* filter, filterarg */
1780                                 sz,			/* maxsize */
1781                                 1,			/* nsegments */
1782                                 sz,			/* maxsegsize */
1783                                 0,			/* flags */
1784                                 NULL, NULL,		/* lockfunc, lockarg */
1785                                 &sc->sense_buf_tag)) {
1786 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate Sense buffer DMA tag\n");
1787 		return (ENOMEM);
1788         }
1789 
1790 	if (bus_dmamem_alloc(sc->sense_buf_tag, (void **)&sc->sense_buf,
1791 	    BUS_DMA_NOWAIT, &sc->sense_buf_dmamap)) {
1792 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Func: %s line: %d  DMA mem alloc failed\n",
1793 			__func__, __LINE__);
1794 		return (ENOMEM);
1795         }
1796 
1797 	bzero(sc->sense_buf, sz);
1798         bus_dmamap_load(sc->sense_buf_tag, sc->sense_buf_dmamap, sc->sense_buf, sz,
1799 	    mpi3mr_memaddr_cb, &sc->sense_buf_phys, 0);
1800 
1801 	mpi3mr_dprint(sc, MPI3MR_XINFO, "sense_buf (0x%p): depth(%d), frame_size(%d), "
1802 	    "pool_size(%d kB), sense_dma(0x%llx)\n",
1803 	    sc->sense_buf, sc->num_sense_bufs, MPI3MR_SENSEBUF_SZ,
1804 	    (sz / 1024), (unsigned long long)sc->sense_buf_phys);
1805 
1806 	/* sense buffer queue, 8 byte align */
1807 	sz = sc->sense_buf_q_sz * 8;
1808 
1809         if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,    /* parent */
1810 				8, 0,			/* algnmnt, boundary */
1811 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
1812 				BUS_SPACE_MAXADDR,	/* highaddr */
1813 				NULL, NULL,		/* filter, filterarg */
1814                                 sz,			/* maxsize */
1815                                 1,			/* nsegments */
1816                                 sz,			/* maxsegsize */
1817                                 0,			/* flags */
1818                                 NULL, NULL,		/* lockfunc, lockarg */
1819                                 &sc->sense_buf_q_tag)) {
1820 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate Sense buffer Queue DMA tag\n");
1821 		return (ENOMEM);
1822         }
1823 
1824 	if (bus_dmamem_alloc(sc->sense_buf_q_tag, (void **)&sc->sense_buf_q,
1825 	    BUS_DMA_NOWAIT, &sc->sense_buf_q_dmamap)) {
1826 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Func: %s line: %d  DMA mem alloc failed\n",
1827 			__func__, __LINE__);
1828 		return (ENOMEM);
1829         }
1830 
1831 	bzero(sc->sense_buf_q, sz);
1832         bus_dmamap_load(sc->sense_buf_q_tag, sc->sense_buf_q_dmamap, sc->sense_buf_q, sz,
1833 	    mpi3mr_memaddr_cb, &sc->sense_buf_q_phys, 0);
1834 
1835 	mpi3mr_dprint(sc, MPI3MR_XINFO, "sense_buf_q (0x%p): depth(%d), frame_size(%d), "
1836 	    "pool_size(%d kB), sense_dma(0x%llx)\n",
1837 	    sc->sense_buf_q, sc->sense_buf_q_sz, 8, (sz / 1024),
1838 	    (unsigned long long)sc->sense_buf_q_phys);
1839 
1840 	return 0;
1841 }
1842 
1843 static int mpi3mr_reply_alloc(struct mpi3mr_softc *sc)
1844 {
1845 	int retval = 0;
1846 	U32 i;
1847 
1848 	if (sc->init_cmds.reply)
1849 		goto post_reply_sbuf;
1850 
1851 	sc->init_cmds.reply = malloc(sc->reply_sz,
1852 		M_MPI3MR, M_NOWAIT | M_ZERO);
1853 
1854 	if (!sc->init_cmds.reply) {
1855 		printf(IOCNAME "Cannot allocate memory for init_cmds.reply\n",
1856 		    sc->name);
1857 		goto out_failed;
1858 	}
1859 
1860 	sc->ioctl_cmds.reply = malloc(sc->reply_sz, M_MPI3MR, M_NOWAIT | M_ZERO);
1861 	if (!sc->ioctl_cmds.reply) {
1862 		printf(IOCNAME "Cannot allocate memory for ioctl_cmds.reply\n",
1863 		    sc->name);
1864 		goto out_failed;
1865 	}
1866 
1867 	sc->host_tm_cmds.reply = malloc(sc->reply_sz, M_MPI3MR, M_NOWAIT | M_ZERO);
1868 	if (!sc->host_tm_cmds.reply) {
1869 		printf(IOCNAME "Cannot allocate memory for host_tm.reply\n",
1870 		    sc->name);
1871 		goto out_failed;
1872 	}
1873 	for (i=0; i<MPI3MR_NUM_DEVRMCMD; i++) {
1874 		sc->dev_rmhs_cmds[i].reply = malloc(sc->reply_sz,
1875 		    M_MPI3MR, M_NOWAIT | M_ZERO);
1876 		if (!sc->dev_rmhs_cmds[i].reply) {
1877 			printf(IOCNAME "Cannot allocate memory for"
1878 			    " dev_rmhs_cmd[%d].reply\n",
1879 			    sc->name, i);
1880 			goto out_failed;
1881 		}
1882 	}
1883 
1884 	for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) {
1885 		sc->evtack_cmds[i].reply = malloc(sc->reply_sz,
1886 			M_MPI3MR, M_NOWAIT | M_ZERO);
1887 		if (!sc->evtack_cmds[i].reply)
1888 			goto out_failed;
1889 	}
1890 
1891 	sc->dev_handle_bitmap_sz = MPI3MR_DIV_ROUND_UP(sc->facts.max_devhandle, 8);
1892 
1893 	sc->removepend_bitmap = malloc(sc->dev_handle_bitmap_sz,
1894 	    M_MPI3MR, M_NOWAIT | M_ZERO);
1895 	if (!sc->removepend_bitmap) {
1896 		printf(IOCNAME "Cannot alloc memory for remove pend bitmap\n",
1897 		    sc->name);
1898 		goto out_failed;
1899 	}
1900 
1901 	sc->devrem_bitmap_sz = MPI3MR_DIV_ROUND_UP(MPI3MR_NUM_DEVRMCMD, 8);
1902 	sc->devrem_bitmap = malloc(sc->devrem_bitmap_sz,
1903 	    M_MPI3MR, M_NOWAIT | M_ZERO);
1904 	if (!sc->devrem_bitmap) {
1905 		printf(IOCNAME "Cannot alloc memory for dev remove bitmap\n",
1906 		    sc->name);
1907 		goto out_failed;
1908 	}
1909 
1910 	sc->evtack_cmds_bitmap_sz = MPI3MR_DIV_ROUND_UP(MPI3MR_NUM_EVTACKCMD, 8);
1911 
1912 	sc->evtack_cmds_bitmap = malloc(sc->evtack_cmds_bitmap_sz,
1913 		M_MPI3MR, M_NOWAIT | M_ZERO);
1914 	if (!sc->evtack_cmds_bitmap)
1915 		goto out_failed;
1916 
1917 	if (mpi3mr_reply_dma_alloc(sc)) {
1918 		printf(IOCNAME "func:%s line:%d DMA memory allocation failed\n",
1919 		    sc->name, __func__, __LINE__);
1920 		goto out_failed;
1921 	}
1922 
1923 post_reply_sbuf:
1924 	mpi3mr_setup_reply_free_queues(sc);
1925 	return retval;
1926 out_failed:
1927 	mpi3mr_cleanup_interrupts(sc);
1928 	mpi3mr_free_mem(sc);
1929 	retval = -1;
1930 	return retval;
1931 }
1932 
1933 static void
1934 mpi3mr_print_fw_pkg_ver(struct mpi3mr_softc *sc)
1935 {
1936 	int retval = 0;
1937 	void *fw_pkg_ver = NULL;
1938 	bus_dma_tag_t fw_pkg_ver_tag;
1939 	bus_dmamap_t fw_pkg_ver_map;
1940 	bus_addr_t fw_pkg_ver_dma;
1941 	Mpi3CIUploadRequest_t ci_upload;
1942 	Mpi3ComponentImageHeader_t *ci_header;
1943 	U32 fw_pkg_ver_len = sizeof(*ci_header);
1944 	U8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
1945 
1946 	if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,  /* parent */
1947 				4, 0,			/* algnmnt, boundary */
1948 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
1949 				BUS_SPACE_MAXADDR,	/* highaddr */
1950 				NULL, NULL,		/* filter, filterarg */
1951 				fw_pkg_ver_len,		/* maxsize */
1952 				1,			/* nsegments */
1953 				fw_pkg_ver_len,		/* maxsegsize */
1954 				0,			/* flags */
1955 				NULL, NULL,		/* lockfunc, lockarg */
1956 				&fw_pkg_ver_tag)) {
1957 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate fw package version request DMA tag\n");
1958 		return;
1959 	}
1960 
1961 	if (bus_dmamem_alloc(fw_pkg_ver_tag, (void **)&fw_pkg_ver, BUS_DMA_NOWAIT, &fw_pkg_ver_map)) {
1962 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Func: %s line: %d fw package version DMA mem alloc failed\n",
1963 			      __func__, __LINE__);
1964 		return;
1965 	}
1966 
1967 	bzero(fw_pkg_ver, fw_pkg_ver_len);
1968 
1969 	bus_dmamap_load(fw_pkg_ver_tag, fw_pkg_ver_map, fw_pkg_ver, fw_pkg_ver_len, mpi3mr_memaddr_cb, &fw_pkg_ver_dma, 0);
1970 
1971 	mpi3mr_dprint(sc, MPI3MR_XINFO, "Func: %s line: %d fw package version phys addr= %#016jx size= %d\n",
1972 		      __func__, __LINE__, (uintmax_t)fw_pkg_ver_dma, fw_pkg_ver_len);
1973 
1974 	if (!fw_pkg_ver) {
1975 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Memory alloc for fw package version failed\n");
1976 		goto out;
1977 	}
1978 
1979 	memset(&ci_upload, 0, sizeof(ci_upload));
1980 	mtx_lock(&sc->init_cmds.completion.lock);
1981 	if (sc->init_cmds.state & MPI3MR_CMD_PENDING) {
1982 		mpi3mr_dprint(sc, MPI3MR_INFO,"Issue CI Header Upload: command is in use\n");
1983 		mtx_unlock(&sc->init_cmds.completion.lock);
1984 		goto out;
1985 	}
1986 	sc->init_cmds.state = MPI3MR_CMD_PENDING;
1987 	sc->init_cmds.is_waiting = 1;
1988 	sc->init_cmds.callback = NULL;
1989 	ci_upload.HostTag = htole16(MPI3MR_HOSTTAG_INITCMDS);
1990 	ci_upload.Function = MPI3_FUNCTION_CI_UPLOAD;
1991 	ci_upload.MsgFlags = MPI3_CI_UPLOAD_MSGFLAGS_LOCATION_PRIMARY;
1992 	ci_upload.ImageOffset = MPI3_IMAGE_HEADER_SIGNATURE0_OFFSET;
1993 	ci_upload.SegmentSize = MPI3_IMAGE_HEADER_SIZE;
1994 
1995 	mpi3mr_add_sg_single(&ci_upload.SGL, sgl_flags, fw_pkg_ver_len,
1996 	    fw_pkg_ver_dma);
1997 
1998 	init_completion(&sc->init_cmds.completion);
1999 	if ((retval = mpi3mr_submit_admin_cmd(sc, &ci_upload, sizeof(ci_upload)))) {
2000 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Issue CI Header Upload: Admin Post failed\n");
2001 		goto out_unlock;
2002 	}
2003 	wait_for_completion_timeout(&sc->init_cmds.completion,
2004 		(MPI3MR_INTADMCMD_TIMEOUT));
2005 	if (!(sc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
2006 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Issue CI Header Upload: command timed out\n");
2007 		sc->init_cmds.is_waiting = 0;
2008 		if (!(sc->init_cmds.state & MPI3MR_CMD_RESET))
2009 			mpi3mr_check_rh_fault_ioc(sc,
2010 				MPI3MR_RESET_FROM_GETPKGVER_TIMEOUT);
2011 		goto out_unlock;
2012 	}
2013 	if ((GET_IOC_STATUS(sc->init_cmds.ioc_status)) != MPI3_IOCSTATUS_SUCCESS) {
2014 		mpi3mr_dprint(sc, MPI3MR_ERROR,
2015 			      "Issue CI Header Upload: Failed IOCStatus(0x%04x) Loginfo(0x%08x)\n",
2016 			      GET_IOC_STATUS(sc->init_cmds.ioc_status), sc->init_cmds.ioc_loginfo);
2017 		goto out_unlock;
2018 	}
2019 
2020 	ci_header = (Mpi3ComponentImageHeader_t *) fw_pkg_ver;
2021 	mpi3mr_dprint(sc, MPI3MR_XINFO,
2022 		      "Issue CI Header Upload:EnvVariableOffset(0x%x) \
2023 		      HeaderSize(0x%x) Signature1(0x%x)\n",
2024 		      ci_header->EnvironmentVariableOffset,
2025 		      ci_header->HeaderSize,
2026 		      ci_header->Signature1);
2027 	mpi3mr_dprint(sc, MPI3MR_INFO, "FW Package Version: %02d.%02d.%02d.%02d\n",
2028 		      ci_header->ComponentImageVersion.GenMajor,
2029 		      ci_header->ComponentImageVersion.GenMinor,
2030 		      ci_header->ComponentImageVersion.PhaseMajor,
2031 		      ci_header->ComponentImageVersion.PhaseMinor);
2032 out_unlock:
2033 	sc->init_cmds.state = MPI3MR_CMD_NOTUSED;
2034 	mtx_unlock(&sc->init_cmds.completion.lock);
2035 
2036 out:
2037 	if (fw_pkg_ver_dma != 0)
2038 		bus_dmamap_unload(fw_pkg_ver_tag, fw_pkg_ver_map);
2039 	if (fw_pkg_ver)
2040 		bus_dmamem_free(fw_pkg_ver_tag, fw_pkg_ver, fw_pkg_ver_map);
2041 	if (fw_pkg_ver_tag)
2042 		bus_dma_tag_destroy(fw_pkg_ver_tag);
2043 
2044 }
2045 
2046 /**
2047  * mpi3mr_issue_iocinit - Send IOC Init
2048  * @sc: Adapter instance reference
2049  *
2050  * Issue IOC Init MPI request through admin queue and wait for
2051  * the completion of it or time out.
2052  *
2053  * Return: 0 on success, non-zero on failures.
2054  */
2055 static int mpi3mr_issue_iocinit(struct mpi3mr_softc *sc)
2056 {
2057 	Mpi3IOCInitRequest_t iocinit_req;
2058 	Mpi3DriverInfoLayout_t *drvr_info = NULL;
2059 	bus_dma_tag_t drvr_info_tag;
2060 	bus_dmamap_t drvr_info_map;
2061 	bus_addr_t drvr_info_phys;
2062 	U32 drvr_info_len = sizeof(*drvr_info);
2063 	int retval = 0;
2064 	struct timeval now;
2065 	uint64_t time_in_msec;
2066 
2067 	if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,  /* parent */
2068 				4, 0,			/* algnmnt, boundary */
2069 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
2070 				BUS_SPACE_MAXADDR,	/* highaddr */
2071 				NULL, NULL,		/* filter, filterarg */
2072                                 drvr_info_len,		/* maxsize */
2073                                 1,			/* nsegments */
2074                                 drvr_info_len,		/* maxsegsize */
2075                                 0,			/* flags */
2076                                 NULL, NULL,		/* lockfunc, lockarg */
2077                                 &drvr_info_tag)) {
2078 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate request DMA tag\n");
2079 		return (ENOMEM);
2080         }
2081 
2082 	if (bus_dmamem_alloc(drvr_info_tag, (void **)&drvr_info,
2083 	    BUS_DMA_NOWAIT, &drvr_info_map)) {
2084 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Func: %s line: %d Data  DMA mem alloc failed\n",
2085 			__func__, __LINE__);
2086 		return (ENOMEM);
2087         }
2088 
2089 	bzero(drvr_info, drvr_info_len);
2090         bus_dmamap_load(drvr_info_tag, drvr_info_map, drvr_info, drvr_info_len,
2091 	    mpi3mr_memaddr_cb, &drvr_info_phys, 0);
2092 	mpi3mr_dprint(sc, MPI3MR_XINFO, "Func: %s line: %d IOCfacts drvr_info phys addr= %#016jx size= %d\n",
2093 	    __func__, __LINE__, (uintmax_t)drvr_info_phys, drvr_info_len);
2094 
2095 	if (!drvr_info)
2096 	{
2097 		retval = -1;
2098 		printf(IOCNAME "Memory alloc for Driver Info failed\n",
2099 		    sc->name);
2100 		goto out;
2101 	}
2102 	drvr_info->InformationLength = (drvr_info_len);
2103 	strcpy(drvr_info->DriverSignature, "Broadcom");
2104 	strcpy(drvr_info->OsName, "FreeBSD");
2105 	strcpy(drvr_info->OsVersion, fmt_os_ver);
2106 	strcpy(drvr_info->DriverName, MPI3MR_DRIVER_NAME);
2107 	strcpy(drvr_info->DriverVersion, MPI3MR_DRIVER_VERSION);
2108 	strcpy(drvr_info->DriverReleaseDate, MPI3MR_DRIVER_RELDATE);
2109 	drvr_info->DriverCapabilities = 0;
2110 	memcpy((U8 *)&sc->driver_info, (U8 *)drvr_info, sizeof(sc->driver_info));
2111 
2112 	memset(&iocinit_req, 0, sizeof(iocinit_req));
2113 	mtx_lock(&sc->init_cmds.completion.lock);
2114 	if (sc->init_cmds.state & MPI3MR_CMD_PENDING) {
2115 		retval = -1;
2116 		printf(IOCNAME "Issue IOCInit: Init command is in use\n",
2117 		    sc->name);
2118 		mtx_unlock(&sc->init_cmds.completion.lock);
2119 		goto out;
2120 	}
2121 	sc->init_cmds.state = MPI3MR_CMD_PENDING;
2122 	sc->init_cmds.is_waiting = 1;
2123 	sc->init_cmds.callback = NULL;
2124         iocinit_req.HostTag = MPI3MR_HOSTTAG_INITCMDS;
2125         iocinit_req.Function = MPI3_FUNCTION_IOC_INIT;
2126         iocinit_req.MPIVersion.Struct.Dev = MPI3_VERSION_DEV;
2127         iocinit_req.MPIVersion.Struct.Unit = MPI3_VERSION_UNIT;
2128         iocinit_req.MPIVersion.Struct.Major = MPI3_VERSION_MAJOR;
2129         iocinit_req.MPIVersion.Struct.Minor = MPI3_VERSION_MINOR;
2130         iocinit_req.WhoInit = MPI3_WHOINIT_HOST_DRIVER;
2131         iocinit_req.ReplyFreeQueueDepth = sc->reply_free_q_sz;
2132         iocinit_req.ReplyFreeQueueAddress =
2133                 sc->reply_free_q_phys;
2134         iocinit_req.SenseBufferLength = MPI3MR_SENSEBUF_SZ;
2135         iocinit_req.SenseBufferFreeQueueDepth =
2136                 sc->sense_buf_q_sz;
2137         iocinit_req.SenseBufferFreeQueueAddress =
2138                 sc->sense_buf_q_phys;
2139         iocinit_req.DriverInformationAddress = drvr_info_phys;
2140 
2141 	getmicrotime(&now);
2142 	time_in_msec = (now.tv_sec * 1000 + now.tv_usec/1000);
2143 	iocinit_req.TimeStamp = htole64(time_in_msec);
2144 
2145 	init_completion(&sc->init_cmds.completion);
2146 	retval = mpi3mr_submit_admin_cmd(sc, &iocinit_req,
2147 	    sizeof(iocinit_req));
2148 
2149 	if (retval) {
2150 		printf(IOCNAME "Issue IOCInit: Admin Post failed\n",
2151 		    sc->name);
2152 		goto out_unlock;
2153 	}
2154 
2155 	wait_for_completion_timeout(&sc->init_cmds.completion,
2156 	    (MPI3MR_INTADMCMD_TIMEOUT));
2157 	if (!(sc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
2158 		printf(IOCNAME "Issue IOCInit: command timed out\n",
2159 		    sc->name);
2160 		mpi3mr_check_rh_fault_ioc(sc,
2161 		    MPI3MR_RESET_FROM_IOCINIT_TIMEOUT);
2162 		sc->unrecoverable = 1;
2163 		retval = -1;
2164 		goto out_unlock;
2165 	}
2166 
2167 	if ((sc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
2168 	     != MPI3_IOCSTATUS_SUCCESS ) {
2169 		printf(IOCNAME "Issue IOCInit: Failed IOCStatus(0x%04x) "
2170 		    " Loginfo(0x%08x) \n" , sc->name,
2171 		    (sc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
2172 		    sc->init_cmds.ioc_loginfo);
2173 		retval = -1;
2174 		goto out_unlock;
2175 	}
2176 
2177 out_unlock:
2178 	sc->init_cmds.state = MPI3MR_CMD_NOTUSED;
2179 	mtx_unlock(&sc->init_cmds.completion.lock);
2180 
2181 out:
2182 	if (drvr_info_phys != 0)
2183 		bus_dmamap_unload(drvr_info_tag, drvr_info_map);
2184 	if (drvr_info != NULL)
2185 		bus_dmamem_free(drvr_info_tag, drvr_info, drvr_info_map);
2186 	if (drvr_info_tag != NULL)
2187 		bus_dma_tag_destroy(drvr_info_tag);
2188 	return retval;
2189 }
2190 
2191 static void
2192 mpi3mr_display_ioc_info(struct mpi3mr_softc *sc)
2193 {
2194         int i = 0;
2195         char personality[16];
2196         struct mpi3mr_compimg_ver *fwver = &sc->facts.fw_ver;
2197 
2198         switch (sc->facts.personality) {
2199         case MPI3_IOCFACTS_FLAGS_PERSONALITY_EHBA:
2200                 strcpy(personality, "Enhanced HBA");
2201                 break;
2202         case MPI3_IOCFACTS_FLAGS_PERSONALITY_RAID_DDR:
2203                 strcpy(personality, "RAID");
2204                 break;
2205         default:
2206                 strcpy(personality, "Unknown");
2207                 break;
2208         }
2209 
2210 	mpi3mr_dprint(sc, MPI3MR_INFO, "Current Personality: %s\n", personality);
2211 
2212 	mpi3mr_dprint(sc, MPI3MR_INFO, "FW Version: %d.%d.%d.%d.%05d-%05d\n",
2213 		      fwver->gen_major, fwver->gen_minor, fwver->ph_major,
2214 		      fwver->ph_minor, fwver->cust_id, fwver->build_num);
2215 
2216         mpi3mr_dprint(sc, MPI3MR_INFO, "Protocol=(");
2217 
2218         if (sc->facts.protocol_flags &
2219             MPI3_IOCFACTS_PROTOCOL_SCSI_INITIATOR) {
2220                 printf("Initiator");
2221                 i++;
2222         }
2223 
2224         if (sc->facts.protocol_flags &
2225             MPI3_IOCFACTS_PROTOCOL_SCSI_TARGET) {
2226                 printf("%sTarget", i ? "," : "");
2227                 i++;
2228         }
2229 
2230         if (sc->facts.protocol_flags &
2231             MPI3_IOCFACTS_PROTOCOL_NVME) {
2232                 printf("%sNVMe attachment", i ? "," : "");
2233                 i++;
2234         }
2235         i = 0;
2236         printf("), ");
2237         printf("Capabilities=(");
2238 
2239         if (sc->facts.ioc_capabilities &
2240             MPI3_IOCFACTS_CAPABILITY_RAID_CAPABLE) {
2241                 printf("RAID");
2242                 i++;
2243         }
2244 
2245         printf(")\n");
2246 }
2247 
2248 /**
2249  * mpi3mr_unmask_events - Unmask events in event mask bitmap
2250  * @sc: Adapter instance reference
2251  * @event: MPI event ID
2252  *
 * Unmask the specific event by resetting the event_mask
2254  * bitmap.
2255  *
2256  * Return: None.
2257  */
2258 static void mpi3mr_unmask_events(struct mpi3mr_softc *sc, U16 event)
2259 {
2260 	U32 desired_event;
2261 
2262 	if (event >= 128)
2263 		return;
2264 
2265 	desired_event = (1 << (event % 32));
2266 
2267 	if (event < 32)
2268 		sc->event_masks[0] &= ~desired_event;
2269 	else if (event < 64)
2270 		sc->event_masks[1] &= ~desired_event;
2271 	else if (event < 96)
2272 		sc->event_masks[2] &= ~desired_event;
2273 	else if (event < 128)
2274 		sc->event_masks[3] &= ~desired_event;
2275 }
2276 
2277 static void mpi3mr_set_events_mask(struct mpi3mr_softc *sc)
2278 {
2279 	int i;
2280 	for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
2281 		sc->event_masks[i] = -1;
2282 
2283         mpi3mr_unmask_events(sc, MPI3_EVENT_DEVICE_ADDED);
2284         mpi3mr_unmask_events(sc, MPI3_EVENT_DEVICE_INFO_CHANGED);
2285         mpi3mr_unmask_events(sc, MPI3_EVENT_DEVICE_STATUS_CHANGE);
2286 
2287         mpi3mr_unmask_events(sc, MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE);
2288 
2289         mpi3mr_unmask_events(sc, MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
2290         mpi3mr_unmask_events(sc, MPI3_EVENT_SAS_DISCOVERY);
2291         mpi3mr_unmask_events(sc, MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR);
2292         mpi3mr_unmask_events(sc, MPI3_EVENT_SAS_BROADCAST_PRIMITIVE);
2293 
2294         mpi3mr_unmask_events(sc, MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST);
2295         mpi3mr_unmask_events(sc, MPI3_EVENT_PCIE_ENUMERATION);
2296 
2297         mpi3mr_unmask_events(sc, MPI3_EVENT_PREPARE_FOR_RESET);
2298         mpi3mr_unmask_events(sc, MPI3_EVENT_CABLE_MGMT);
2299         mpi3mr_unmask_events(sc, MPI3_EVENT_ENERGY_PACK_CHANGE);
2300 }
2301 
2302 /**
2303  * mpi3mr_issue_event_notification - Send event notification
2304  * @sc: Adapter instance reference
2305  *
2306  * Issue event notification MPI request through admin queue and
2307  * wait for the completion of it or time out.
2308  *
2309  * Return: 0 on success, non-zero on failures.
2310  */
int mpi3mr_issue_event_notification(struct mpi3mr_softc *sc)
{
	Mpi3EventNotificationRequest_t evtnotify_req;
	int retval = 0;
	U8 i;

	memset(&evtnotify_req, 0, sizeof(evtnotify_req));
	/* The single init_cmds slot serializes all driver-internal commands. */
	mtx_lock(&sc->init_cmds.completion.lock);
	if (sc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		printf(IOCNAME "Issue EvtNotify: Init command is in use\n",
		    sc->name);
		mtx_unlock(&sc->init_cmds.completion.lock);
		goto out;
	}
	/* Claim the slot; no callback means the issuer waits synchronously. */
	sc->init_cmds.state = MPI3MR_CMD_PENDING;
	sc->init_cmds.is_waiting = 1;
	sc->init_cmds.callback = NULL;
	evtnotify_req.HostTag = (MPI3MR_HOSTTAG_INITCMDS);
	evtnotify_req.Function = MPI3_FUNCTION_EVENT_NOTIFICATION;
	/* Copy the mask words prepared by mpi3mr_set_events_mask(). */
	for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
		evtnotify_req.EventMasks[i] =
		    (sc->event_masks[i]);
	init_completion(&sc->init_cmds.completion);
	retval = mpi3mr_submit_admin_cmd(sc, &evtnotify_req,
	    sizeof(evtnotify_req));
	if (retval) {
		printf(IOCNAME "Issue EvtNotify: Admin Post failed\n",
		    sc->name);
		goto out_unlock;
	}

	/* Polled wait (not interrupt-driven) for the admin completion. */
	poll_for_command_completion(sc,
				    &sc->init_cmds,
				    (MPI3MR_INTADMCMD_TIMEOUT));
	if (!(sc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		printf(IOCNAME "Issue EvtNotify: command timed out\n",
		    sc->name);
		/* On timeout, check the IOC for a fault state. */
		mpi3mr_check_rh_fault_ioc(sc,
		    MPI3MR_RESET_FROM_EVTNOTIFY_TIMEOUT);
		retval = -1;
		goto out_unlock;
	}

	if ((sc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	     != MPI3_IOCSTATUS_SUCCESS ) {
		printf(IOCNAME "Issue EvtNotify: Failed IOCStatus(0x%04x) "
		    " Loginfo(0x%08x) \n" , sc->name,
		    (sc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    sc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}

out_unlock:
	/* Release the command slot before dropping the lock. */
	sc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mtx_unlock(&sc->init_cmds.completion.lock);

out:
	return retval;
}
2372 
2373 int
2374 mpi3mr_register_events(struct mpi3mr_softc *sc)
2375 {
2376 	int error;
2377 
2378 	mpi3mr_set_events_mask(sc);
2379 
2380 	error = mpi3mr_issue_event_notification(sc);
2381 
2382 	if (error) {
2383 		printf(IOCNAME "Failed to issue event notification %d\n",
2384 		    sc->name, error);
2385 	}
2386 
2387 	return error;
2388 }
2389 
2390 /**
2391  * mpi3mr_process_event_ack - Process event acknowledgment
2392  * @sc: Adapter instance reference
2393  * @event: MPI3 event ID
2394  * @event_ctx: Event context
2395  *
2396  * Send event acknowledgement through admin queue and wait for
2397  * it to complete.
2398  *
2399  * Return: 0 on success, non-zero on failures.
2400  */
int mpi3mr_process_event_ack(struct mpi3mr_softc *sc, U8 event,
	U32 event_ctx)
{
	Mpi3EventAckRequest_t evtack_req;
	int retval = 0;

	memset(&evtack_req, 0, sizeof(evtack_req));
	/* The single init_cmds slot serializes all driver-internal commands. */
	mtx_lock(&sc->init_cmds.completion.lock);
	if (sc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		printf(IOCNAME "Issue EvtAck: Init command is in use\n",
		    sc->name);
		mtx_unlock(&sc->init_cmds.completion.lock);
		goto out;
	}
	/* Claim the slot; no callback means the issuer waits synchronously. */
	sc->init_cmds.state = MPI3MR_CMD_PENDING;
	sc->init_cmds.is_waiting = 1;
	sc->init_cmds.callback = NULL;
	evtack_req.HostTag = htole16(MPI3MR_HOSTTAG_INITCMDS);
	evtack_req.Function = MPI3_FUNCTION_EVENT_ACK;
	/* Echo back the event and context being acknowledged. */
	evtack_req.Event = event;
	evtack_req.EventContext = htole32(event_ctx);

	init_completion(&sc->init_cmds.completion);
	retval = mpi3mr_submit_admin_cmd(sc, &evtack_req,
	    sizeof(evtack_req));
	if (retval) {
		printf(IOCNAME "Issue EvtAck: Admin Post failed\n",
		    sc->name);
		goto out_unlock;
	}

	wait_for_completion_timeout(&sc->init_cmds.completion,
	    (MPI3MR_INTADMCMD_TIMEOUT));
	if (!(sc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		printf(IOCNAME "Issue EvtAck: command timed out\n",
		    sc->name);
		retval = -1;
		goto out_unlock;
	}

	if ((sc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	     != MPI3_IOCSTATUS_SUCCESS ) {
		printf(IOCNAME "Issue EvtAck: Failed IOCStatus(0x%04x) "
		    " Loginfo(0x%08x) \n" , sc->name,
		    (sc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    sc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}

out_unlock:
	/* Release the command slot before dropping the lock. */
	sc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mtx_unlock(&sc->init_cmds.completion.lock);

out:
	return retval;
}
2459 
2460 
2461 static int mpi3mr_alloc_chain_bufs(struct mpi3mr_softc *sc)
2462 {
2463 	int retval = 0;
2464 	U32 sz, i;
2465 	U16 num_chains;
2466 
2467 	num_chains = sc->max_host_ios;
2468 
2469 	sc->chain_buf_count = num_chains;
2470 	sz = sizeof(struct mpi3mr_chain) * num_chains;
2471 
2472 	sc->chain_sgl_list = malloc(sz, M_MPI3MR, M_NOWAIT | M_ZERO);
2473 
2474 	if (!sc->chain_sgl_list) {
2475 		printf(IOCNAME "Cannot allocate memory for chain SGL list\n",
2476 		    sc->name);
2477 		retval = -1;
2478 		goto out_failed;
2479 	}
2480 
2481 	sz = MPI3MR_CHAINSGE_SIZE;
2482 
2483         if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,  /* parent */
2484 				4096, 0,		/* algnmnt, boundary */
2485 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
2486 				BUS_SPACE_MAXADDR,	/* highaddr */
2487 				NULL, NULL,		/* filter, filterarg */
2488                                 sz,			/* maxsize */
2489                                 1,			/* nsegments */
2490                                 sz,			/* maxsegsize */
2491                                 0,			/* flags */
2492                                 NULL, NULL,		/* lockfunc, lockarg */
2493                                 &sc->chain_sgl_list_tag)) {
2494 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate Chain buffer DMA tag\n");
2495 		return (ENOMEM);
2496         }
2497 
2498 	for (i = 0; i < num_chains; i++) {
2499 		if (bus_dmamem_alloc(sc->chain_sgl_list_tag, (void **)&sc->chain_sgl_list[i].buf,
2500 		    BUS_DMA_NOWAIT, &sc->chain_sgl_list[i].buf_dmamap)) {
2501 			mpi3mr_dprint(sc, MPI3MR_ERROR, "Func: %s line: %d  DMA mem alloc failed\n",
2502 				__func__, __LINE__);
2503 			return (ENOMEM);
2504 		}
2505 
2506 		bzero(sc->chain_sgl_list[i].buf, sz);
2507 		bus_dmamap_load(sc->chain_sgl_list_tag, sc->chain_sgl_list[i].buf_dmamap, sc->chain_sgl_list[i].buf, sz,
2508 		    mpi3mr_memaddr_cb, &sc->chain_sgl_list[i].buf_phys, 0);
2509 		mpi3mr_dprint(sc, MPI3MR_XINFO, "Func: %s line: %d phys addr= %#016jx size= %d\n",
2510 		    __func__, __LINE__, (uintmax_t)sc->chain_sgl_list[i].buf_phys, sz);
2511 	}
2512 
2513 	sc->chain_bitmap_sz = MPI3MR_DIV_ROUND_UP(num_chains, 8);
2514 
2515 	sc->chain_bitmap = malloc(sc->chain_bitmap_sz, M_MPI3MR, M_NOWAIT | M_ZERO);
2516 	if (!sc->chain_bitmap) {
2517 		mpi3mr_dprint(sc, MPI3MR_INFO, "Cannot alloc memory for chain bitmap\n");
2518 		retval = -1;
2519 		goto out_failed;
2520 	}
2521 	return retval;
2522 
2523 out_failed:
2524 	for (i = 0; i < num_chains; i++) {
2525 		if (sc->chain_sgl_list[i].buf_phys != 0)
2526 			bus_dmamap_unload(sc->chain_sgl_list_tag, sc->chain_sgl_list[i].buf_dmamap);
2527 		if (sc->chain_sgl_list[i].buf != NULL)
2528 			bus_dmamem_free(sc->chain_sgl_list_tag, sc->chain_sgl_list[i].buf, sc->chain_sgl_list[i].buf_dmamap);
2529 	}
2530 	if (sc->chain_sgl_list_tag != NULL)
2531 		bus_dma_tag_destroy(sc->chain_sgl_list_tag);
2532 	return retval;
2533 }
2534 
2535 static int mpi3mr_pel_alloc(struct mpi3mr_softc *sc)
2536 {
2537 	int retval = 0;
2538 
2539 	if (!sc->pel_cmds.reply) {
2540 		sc->pel_cmds.reply = malloc(sc->reply_sz, M_MPI3MR, M_NOWAIT | M_ZERO);
2541 		if (!sc->pel_cmds.reply) {
2542 			printf(IOCNAME "Cannot allocate memory for pel_cmds.reply\n",
2543 			    sc->name);
2544 			goto out_failed;
2545 		}
2546 	}
2547 
2548 	if (!sc->pel_abort_cmd.reply) {
2549 		sc->pel_abort_cmd.reply = malloc(sc->reply_sz, M_MPI3MR, M_NOWAIT | M_ZERO);
2550 		if (!sc->pel_abort_cmd.reply) {
2551 			printf(IOCNAME "Cannot allocate memory for pel_abort_cmd.reply\n",
2552 			    sc->name);
2553 			goto out_failed;
2554 		}
2555 	}
2556 
2557 	if (!sc->pel_seq_number) {
2558 		sc->pel_seq_number_sz = sizeof(Mpi3PELSeq_t);
2559 		if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,   /* parent */
2560 				 4, 0,                           /* alignment, boundary */
2561 				 BUS_SPACE_MAXADDR_32BIT,        /* lowaddr */
2562 				 BUS_SPACE_MAXADDR,              /* highaddr */
2563 				 NULL, NULL,                     /* filter, filterarg */
2564 				 sc->pel_seq_number_sz,		 /* maxsize */
2565 				 1,                              /* nsegments */
2566 				 sc->pel_seq_number_sz,          /* maxsegsize */
2567 				 0,                              /* flags */
2568 				 NULL, NULL,                     /* lockfunc, lockarg */
2569 				 &sc->pel_seq_num_dmatag)) {
2570 			 mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot create PEL seq number dma memory tag\n");
2571 			 retval = -ENOMEM;
2572 			 goto out_failed;
2573 		}
2574 
2575 		if (bus_dmamem_alloc(sc->pel_seq_num_dmatag, (void **)&sc->pel_seq_number,
2576 		    BUS_DMA_NOWAIT, &sc->pel_seq_num_dmamap)) {
2577 			mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate PEL seq number kernel buffer dma memory\n");
2578 			retval = -ENOMEM;
2579 			goto out_failed;
2580 		}
2581 
2582 		bzero(sc->pel_seq_number, sc->pel_seq_number_sz);
2583 
2584 		bus_dmamap_load(sc->pel_seq_num_dmatag, sc->pel_seq_num_dmamap, sc->pel_seq_number,
2585 		    sc->pel_seq_number_sz, mpi3mr_memaddr_cb, &sc->pel_seq_number_dma, 0);
2586 
2587 		if (!sc->pel_seq_number) {
2588 			printf(IOCNAME "%s:%d Cannot load PEL seq number dma memory for size: %d\n", sc->name,
2589 				__func__, __LINE__, sc->pel_seq_number_sz);
2590 			retval = -ENOMEM;
2591 			goto out_failed;
2592 		}
2593 	}
2594 
2595 out_failed:
2596 	return retval;
2597 }
2598 
2599 /**
2600  * mpi3mr_validate_fw_update - validate IOCFacts post adapter reset
2601  * @sc: Adapter instance reference
2602  *
2603  * Return zero if the new IOCFacts is compatible with previous values
2604  * else return appropriate error
2605  */
2606 static int
2607 mpi3mr_validate_fw_update(struct mpi3mr_softc *sc)
2608 {
2609 	U16 dev_handle_bitmap_sz;
2610 	U8 *removepend_bitmap;
2611 
2612 	if (sc->facts.reply_sz > sc->reply_sz) {
2613 		mpi3mr_dprint(sc, MPI3MR_ERROR,
2614 		    "Cannot increase reply size from %d to %d\n",
2615 		    sc->reply_sz, sc->reply_sz);
2616 		return -EPERM;
2617 	}
2618 
2619 	if (sc->num_io_throttle_group != sc->facts.max_io_throttle_group) {
2620 		mpi3mr_dprint(sc, MPI3MR_ERROR,
2621 		    "max io throttle group doesn't match old(%d), new(%d)\n",
2622 		    sc->num_io_throttle_group,
2623 		    sc->facts.max_io_throttle_group);
2624 		return -EPERM;
2625 	}
2626 
2627 	if (sc->facts.max_op_reply_q < sc->num_queues) {
2628 		mpi3mr_dprint(sc, MPI3MR_ERROR,
2629 		    "Cannot reduce number of operational reply queues from %d to %d\n",
2630 		    sc->num_queues,
2631 		    sc->facts.max_op_reply_q);
2632 		return -EPERM;
2633 	}
2634 
2635 	if (sc->facts.max_op_req_q < sc->num_queues) {
2636 		mpi3mr_dprint(sc, MPI3MR_ERROR,
2637 		    "Cannot reduce number of operational request queues from %d to %d\n",
2638 		    sc->num_queues, sc->facts.max_op_req_q);
2639 		return -EPERM;
2640 	}
2641 
2642 	dev_handle_bitmap_sz = MPI3MR_DIV_ROUND_UP(sc->facts.max_devhandle, 8);
2643 
2644 	if (dev_handle_bitmap_sz > sc->dev_handle_bitmap_sz) {
2645 		removepend_bitmap = realloc(sc->removepend_bitmap,
2646 		    dev_handle_bitmap_sz, M_MPI3MR, M_NOWAIT);
2647 
2648 		if (!removepend_bitmap) {
2649 			mpi3mr_dprint(sc, MPI3MR_ERROR,
2650 			    "failed to increase removepend_bitmap sz from: %d to %d\n",
2651 			    sc->dev_handle_bitmap_sz, dev_handle_bitmap_sz);
2652 			return -ENOMEM;
2653 		}
2654 
2655 		memset(removepend_bitmap + sc->dev_handle_bitmap_sz, 0,
2656 		    dev_handle_bitmap_sz - sc->dev_handle_bitmap_sz);
2657 		sc->removepend_bitmap = removepend_bitmap;
2658 		mpi3mr_dprint(sc, MPI3MR_INFO,
2659 		    "increased dev_handle_bitmap_sz from %d to %d\n",
2660 		    sc->dev_handle_bitmap_sz, dev_handle_bitmap_sz);
2661 		sc->dev_handle_bitmap_sz = dev_handle_bitmap_sz;
2662 	}
2663 
2664 	return 0;
2665 }
2666 
2667 /*
2668  * mpi3mr_initialize_ioc - Controller initialization
2669  * @dev: pointer to device struct
2670  *
2671  * This function allocates the controller wide resources and brings
2672  * the controller to operational state
2673  *
2674  * Return: 0 on success and proper error codes on failure
2675  */
2676 int mpi3mr_initialize_ioc(struct mpi3mr_softc *sc, U8 init_type)
2677 {
2678 	int retval = 0;
2679 	enum mpi3mr_iocstate ioc_state;
2680 	U64 ioc_info;
2681 	U32 ioc_status, ioc_control, i, timeout;
2682 	Mpi3IOCFactsData_t facts_data;
2683 	char str[32];
2684 	U32 size;
2685 
2686 	sc->cpu_count = mp_ncpus;
2687 
2688 	ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);
2689 	ioc_control = mpi3mr_regread(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET);
2690 	ioc_info = mpi3mr_regread64(sc, MPI3_SYSIF_IOC_INFO_LOW_OFFSET);
2691 
2692 	mpi3mr_dprint(sc, MPI3MR_INFO, "SOD ioc_status: 0x%x ioc_control: 0x%x "
2693 	    "ioc_info: 0x%lx\n", ioc_status, ioc_control, ioc_info);
2694 
2695         /*The timeout value is in 2sec unit, changing it to seconds*/
2696 	sc->ready_timeout =
2697                 ((ioc_info & MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_MASK) >>
2698                     MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_SHIFT) * 2;
2699 
2700 	ioc_state = mpi3mr_get_iocstate(sc);
2701 
2702 	mpi3mr_dprint(sc, MPI3MR_INFO, "IOC state: %s   IOC ready timeout: %d\n",
2703 	    mpi3mr_iocstate_name(ioc_state), sc->ready_timeout);
2704 
2705 	if (ioc_state == MRIOC_STATE_BECOMING_READY ||
2706 	    ioc_state == MRIOC_STATE_RESET_REQUESTED) {
2707 		timeout = sc->ready_timeout * 10;
2708 		do {
2709 			DELAY(1000 * 100);
2710 		} while (--timeout);
2711 
2712 		ioc_state = mpi3mr_get_iocstate(sc);
2713 		mpi3mr_dprint(sc, MPI3MR_INFO,
2714 			"IOC in %s state after waiting for reset time\n",
2715 			mpi3mr_iocstate_name(ioc_state));
2716 	}
2717 
2718 	if (ioc_state == MRIOC_STATE_READY) {
2719                 retval = mpi3mr_mur_ioc(sc, MPI3MR_RESET_FROM_BRINGUP);
2720                 if (retval) {
2721                         mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to MU reset IOC, error 0x%x\n",
2722                                 retval);
2723                 }
2724                 ioc_state = mpi3mr_get_iocstate(sc);
2725         }
2726 
2727         if (ioc_state != MRIOC_STATE_RESET) {
2728                 mpi3mr_print_fault_info(sc);
2729 		 mpi3mr_dprint(sc, MPI3MR_ERROR, "issuing soft reset to bring to reset state\n");
2730                  retval = mpi3mr_issue_reset(sc,
2731                      MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET,
2732                      MPI3MR_RESET_FROM_BRINGUP);
2733                 if (retval) {
2734                         mpi3mr_dprint(sc, MPI3MR_ERROR,
2735                             "%s :Failed to soft reset IOC, error 0x%d\n",
2736                             __func__, retval);
2737                         goto out_failed;
2738                 }
2739         }
2740 
2741 	ioc_state = mpi3mr_get_iocstate(sc);
2742 
2743         if (ioc_state != MRIOC_STATE_RESET) {
2744 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot bring IOC to reset state\n");
2745 		goto out_failed;
2746         }
2747 
2748 	retval = mpi3mr_setup_admin_qpair(sc);
2749 	if (retval) {
2750 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to setup Admin queues, error 0x%x\n",
2751 		    retval);
2752 		goto out_failed;
2753 	}
2754 
2755 	retval = mpi3mr_bring_ioc_ready(sc);
2756 	if (retval) {
2757 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to bring IOC ready, error 0x%x\n",
2758 		    retval);
2759 		goto out_failed;
2760 	}
2761 
2762 	if (init_type == MPI3MR_INIT_TYPE_INIT) {
2763 		retval = mpi3mr_alloc_interrupts(sc, 1);
2764 		if (retval) {
2765 			mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to allocate interrupts, error 0x%x\n",
2766 			    retval);
2767 			goto out_failed;
2768 		}
2769 
2770 		retval = mpi3mr_setup_irqs(sc);
2771 		if (retval) {
2772 			mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to setup ISR, error 0x%x\n",
2773 			    retval);
2774 			goto out_failed;
2775 		}
2776 	}
2777 
2778 	mpi3mr_enable_interrupts(sc);
2779 
2780 	if (init_type == MPI3MR_INIT_TYPE_INIT) {
2781 		mtx_init(&sc->mpi3mr_mtx, "SIM lock", NULL, MTX_DEF);
2782 		mtx_init(&sc->io_lock, "IO lock", NULL, MTX_DEF);
2783 		mtx_init(&sc->admin_req_lock, "Admin Request Queue lock", NULL, MTX_SPIN);
2784 		mtx_init(&sc->reply_free_q_lock, "Reply free Queue lock", NULL, MTX_SPIN);
2785 		mtx_init(&sc->sense_buf_q_lock, "Sense buffer Queue lock", NULL, MTX_SPIN);
2786 		mtx_init(&sc->chain_buf_lock, "Chain buffer lock", NULL, MTX_SPIN);
2787 		mtx_init(&sc->cmd_pool_lock, "Command pool lock", NULL, MTX_DEF);
2788 //		mtx_init(&sc->fwevt_lock, "Firmware Event lock", NULL, MTX_SPIN);
2789 		mtx_init(&sc->fwevt_lock, "Firmware Event lock", NULL, MTX_DEF);
2790 		mtx_init(&sc->target_lock, "Target lock", NULL, MTX_SPIN);
2791 		mtx_init(&sc->reset_mutex, "Reset lock", NULL, MTX_DEF);
2792 
2793 		mtx_init(&sc->init_cmds.completion.lock, "Init commands lock", NULL, MTX_DEF);
2794 		sc->init_cmds.reply = NULL;
2795 		sc->init_cmds.state = MPI3MR_CMD_NOTUSED;
2796 		sc->init_cmds.dev_handle = MPI3MR_INVALID_DEV_HANDLE;
2797 		sc->init_cmds.host_tag = MPI3MR_HOSTTAG_INITCMDS;
2798 
2799 		mtx_init(&sc->ioctl_cmds.completion.lock, "IOCTL commands lock", NULL, MTX_DEF);
2800 		sc->ioctl_cmds.reply = NULL;
2801 		sc->ioctl_cmds.state = MPI3MR_CMD_NOTUSED;
2802 		sc->ioctl_cmds.dev_handle = MPI3MR_INVALID_DEV_HANDLE;
2803 		sc->ioctl_cmds.host_tag = MPI3MR_HOSTTAG_IOCTLCMDS;
2804 
2805 		mtx_init(&sc->pel_abort_cmd.completion.lock, "PEL Abort command lock", NULL, MTX_DEF);
2806 		sc->pel_abort_cmd.reply = NULL;
2807 		sc->pel_abort_cmd.state = MPI3MR_CMD_NOTUSED;
2808 		sc->pel_abort_cmd.dev_handle = MPI3MR_INVALID_DEV_HANDLE;
2809 		sc->pel_abort_cmd.host_tag = MPI3MR_HOSTTAG_PELABORT;
2810 
2811 		mtx_init(&sc->host_tm_cmds.completion.lock, "TM commands lock", NULL, MTX_DEF);
2812 		sc->host_tm_cmds.reply = NULL;
2813 		sc->host_tm_cmds.state = MPI3MR_CMD_NOTUSED;
2814 		sc->host_tm_cmds.dev_handle = MPI3MR_INVALID_DEV_HANDLE;
2815 		sc->host_tm_cmds.host_tag = MPI3MR_HOSTTAG_TMS;
2816 
2817 		TAILQ_INIT(&sc->cmd_list_head);
2818 		TAILQ_INIT(&sc->event_list);
2819 		TAILQ_INIT(&sc->delayed_rmhs_list);
2820 		TAILQ_INIT(&sc->delayed_evtack_cmds_list);
2821 
2822 		for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
2823 			snprintf(str, 32, "Dev REMHS commands lock[%d]", i);
2824 			mtx_init(&sc->dev_rmhs_cmds[i].completion.lock, str, NULL, MTX_DEF);
2825 			sc->dev_rmhs_cmds[i].reply = NULL;
2826 			sc->dev_rmhs_cmds[i].state = MPI3MR_CMD_NOTUSED;
2827 			sc->dev_rmhs_cmds[i].dev_handle = MPI3MR_INVALID_DEV_HANDLE;
2828 			sc->dev_rmhs_cmds[i].host_tag = MPI3MR_HOSTTAG_DEVRMCMD_MIN
2829 							    + i;
2830 		}
2831 	}
2832 
2833 	retval = mpi3mr_issue_iocfacts(sc, &facts_data);
2834 	if (retval) {
2835 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to Issue IOC Facts, retval: 0x%x\n",
2836 		    retval);
2837 		goto out_failed;
2838 	}
2839 
2840 	retval = mpi3mr_process_factsdata(sc, &facts_data);
2841 	if (retval) {
2842 		mpi3mr_dprint(sc, MPI3MR_ERROR, "IOC Facts data processing failedi, retval: 0x%x\n",
2843 		    retval);
2844 		goto out_failed;
2845 	}
2846 
2847 	sc->num_io_throttle_group = sc->facts.max_io_throttle_group;
2848 	mpi3mr_atomic_set(&sc->pend_large_data_sz, 0);
2849 
2850 	if (init_type == MPI3MR_INIT_TYPE_RESET) {
2851 		retval = mpi3mr_validate_fw_update(sc);
2852 		if (retval)
2853 			goto out_failed;
2854 	} else {
2855 		sc->reply_sz = sc->facts.reply_sz;
2856 	}
2857 
2858 
2859 	mpi3mr_display_ioc_info(sc);
2860 
2861 	retval = mpi3mr_reply_alloc(sc);
2862 	if (retval) {
2863 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to allocated reply and sense buffers, retval: 0x%x\n",
2864 		    retval);
2865 		goto out_failed;
2866 	}
2867 
2868 	if (init_type == MPI3MR_INIT_TYPE_INIT) {
2869 		retval = mpi3mr_alloc_chain_bufs(sc);
2870 		if (retval) {
2871 			mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to allocated chain buffers, retval: 0x%x\n",
2872 			    retval);
2873 			goto out_failed;
2874 		}
2875 	}
2876 
2877 	retval = mpi3mr_issue_iocinit(sc);
2878 	if (retval) {
2879 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to Issue IOC Init, retval: 0x%x\n",
2880 		    retval);
2881 		goto out_failed;
2882 	}
2883 
2884 	mpi3mr_print_fw_pkg_ver(sc);
2885 
2886 	sc->reply_free_q_host_index = sc->num_reply_bufs;
2887 	mpi3mr_regwrite(sc, MPI3_SYSIF_REPLY_FREE_HOST_INDEX_OFFSET,
2888 		sc->reply_free_q_host_index);
2889 
2890 	sc->sense_buf_q_host_index = sc->num_sense_bufs;
2891 
2892 	mpi3mr_regwrite(sc, MPI3_SYSIF_SENSE_BUF_FREE_HOST_INDEX_OFFSET,
2893 		sc->sense_buf_q_host_index);
2894 
2895 	if (init_type == MPI3MR_INIT_TYPE_INIT) {
2896 		retval = mpi3mr_alloc_interrupts(sc, 0);
2897 		if (retval) {
2898 			mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to allocate interrupts, retval: 0x%x\n",
2899 			    retval);
2900 			goto out_failed;
2901 		}
2902 
2903 		retval = mpi3mr_setup_irqs(sc);
2904 		if (retval) {
2905 			printf(IOCNAME "Failed to setup ISR, error: 0x%x\n",
2906 			    sc->name, retval);
2907 			goto out_failed;
2908 		}
2909 
2910 		mpi3mr_enable_interrupts(sc);
2911 
2912 	} else
2913 		mpi3mr_enable_interrupts(sc);
2914 
2915 	retval = mpi3mr_create_op_queues(sc);
2916 
2917 	if (retval) {
2918 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to create operational queues, error: %d\n",
2919 		    retval);
2920 		goto out_failed;
2921 	}
2922 
2923 	if (!sc->throttle_groups && sc->num_io_throttle_group) {
2924 		mpi3mr_dprint(sc, MPI3MR_ERROR, "allocating memory for throttle groups\n");
2925 		size = sizeof(struct mpi3mr_throttle_group_info);
2926 		sc->throttle_groups = (struct mpi3mr_throttle_group_info *)
2927 					  malloc(sc->num_io_throttle_group *
2928 					      size, M_MPI3MR, M_NOWAIT | M_ZERO);
2929 		if (!sc->throttle_groups)
2930 			goto out_failed;
2931 	}
2932 
2933 	if (init_type == MPI3MR_INIT_TYPE_RESET) {
2934 		mpi3mr_dprint(sc, MPI3MR_INFO, "Re-register events\n");
2935 		retval = mpi3mr_register_events(sc);
2936 		if (retval) {
2937 			mpi3mr_dprint(sc, MPI3MR_INFO, "Failed to re-register events, retval: 0x%x\n",
2938 			    retval);
2939 			goto out_failed;
2940 		}
2941 
2942 		mpi3mr_dprint(sc, MPI3MR_INFO, "Issuing Port Enable\n");
2943 		retval = mpi3mr_issue_port_enable(sc, 0);
2944 		if (retval) {
2945 			mpi3mr_dprint(sc, MPI3MR_INFO, "Failed to issue port enable, retval: 0x%x\n",
2946 			    retval);
2947 			goto out_failed;
2948 		}
2949 	}
2950 	retval = mpi3mr_pel_alloc(sc);
2951 	if (retval) {
2952 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to allocate memory for PEL, retval: 0x%x\n",
2953 		    retval);
2954 		goto out_failed;
2955 	}
2956 
2957 	return retval;
2958 
2959 out_failed:
2960 	retval = -1;
2961 	return retval;
2962 }
2963 
2964 static void mpi3mr_port_enable_complete(struct mpi3mr_softc *sc,
2965     struct mpi3mr_drvr_cmd *drvrcmd)
2966 {
2967 	drvrcmd->state = MPI3MR_CMD_NOTUSED;
2968 	drvrcmd->callback = NULL;
2969 	printf(IOCNAME "Completing Port Enable Request\n", sc->name);
2970 	sc->mpi3mr_flags |= MPI3MR_FLAGS_PORT_ENABLE_DONE;
2971 	mpi3mr_startup_decrement(sc->cam_sc);
2972 }
2973 
/*
 * mpi3mr_issue_port_enable - Issue Port Enable to the firmware
 * @sc: Adapter instance reference
 * @async: non-zero to complete via mpi3mr_port_enable_complete()
 *	   callback; zero to wait inline for completion
 *
 * Sends an MPI3_FUNCTION_PORT_ENABLE admin request through the
 * single-slot init_cmds tracker.
 *
 * Return: 0 on success, -1 if the tracker is busy, the admin post
 * fails, or the synchronous wait times out.
 */
int mpi3mr_issue_port_enable(struct mpi3mr_softc *sc, U8 async)
{
	Mpi3PortEnableRequest_t pe_req;
	int retval = 0;

	memset(&pe_req, 0, sizeof(pe_req));
	mtx_lock(&sc->init_cmds.completion.lock);
	/* init_cmds is a single-slot tracker; bail out if it is in use. */
	if (sc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		printf(IOCNAME "Issue PortEnable: Init command is in use\n", sc->name);
		mtx_unlock(&sc->init_cmds.completion.lock);
		goto out;
	}

	sc->init_cmds.state = MPI3MR_CMD_PENDING;

	if (async) {
		/* Completion will be reported through the callback. */
		sc->init_cmds.is_waiting = 0;
		sc->init_cmds.callback = mpi3mr_port_enable_complete;
	} else {
		/* Synchronous mode: sleep on the completion below. */
		sc->init_cmds.is_waiting = 1;
		sc->init_cmds.callback = NULL;
		init_completion(&sc->init_cmds.completion);
	}
	pe_req.HostTag = MPI3MR_HOSTTAG_INITCMDS;
	pe_req.Function = MPI3_FUNCTION_PORT_ENABLE;

	printf(IOCNAME "Sending Port Enable Request\n", sc->name);
	retval = mpi3mr_submit_admin_cmd(sc, &pe_req, sizeof(pe_req));
	if (retval) {
		printf(IOCNAME "Issue PortEnable: Admin Post failed\n",
		    sc->name);
		goto out_unlock;
	}

	if (!async) {
		wait_for_completion_timeout(&sc->init_cmds.completion,
		    MPI3MR_PORTENABLE_TIMEOUT);
		if (!(sc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
			printf(IOCNAME "Issue PortEnable: command timed out\n",
			    sc->name);
			retval = -1;
			/* Timeout: check for a controller fault and recover. */
			mpi3mr_check_rh_fault_ioc(sc, MPI3MR_RESET_FROM_PE_TIMEOUT);
			goto out_unlock;
		}
		mpi3mr_port_enable_complete(sc, &sc->init_cmds);
	}
out_unlock:
	mtx_unlock(&sc->init_cmds.completion.lock);

out:
	return retval;
}
3027 
/*
 * mpi3mr_watchdog_thread - Controller health monitoring kernel thread.
 *
 * Wakes every second (or when watchdog_chan is signalled) while holding
 * reset_mutex and, depending on the observed IOC state: escalates to a
 * soft reset, marks the controller unrecoverable, or waits out a
 * firmware diagnostic save. Exits on driver shutdown or when the
 * controller becomes unrecoverable.
 */
void
mpi3mr_watchdog_thread(void *arg)
{
	struct mpi3mr_softc *sc;
	enum mpi3mr_iocstate ioc_state;
	U32 fault, host_diagnostic, ioc_status;

	sc = (struct mpi3mr_softc *)arg;

	mpi3mr_dprint(sc, MPI3MR_XINFO, "%s\n", __func__);

	sc->watchdog_thread_active = 1;
	mtx_lock(&sc->reset_mutex);
	for (;;) {
		/* Sleep for 1 second and check the queue status */
		msleep(&sc->watchdog_chan, &sc->reset_mutex, PRIBIO,
		    "mpi3mr_watchdog", 1 * hz);
		if (sc->mpi3mr_flags & MPI3MR_FLAGS_SHUTDOWN ||
		    (sc->unrecoverable == 1)) {
			mpi3mr_dprint(sc, MPI3MR_INFO,
			    "Exit due to %s from %s\n",
			   sc->mpi3mr_flags & MPI3MR_FLAGS_SHUTDOWN ? "Shutdown" :
			    "Hardware critical error", __func__);
			break;
		}

		/*
		 * Firmware asked the host to prepare for reset; force the
		 * reset once the grace period has expired.
		 */
		if ((sc->prepare_for_reset) &&
		    ((sc->prepare_for_reset_timeout_counter++) >=
		     MPI3MR_PREPARE_FOR_RESET_TIMEOUT)) {
			mpi3mr_soft_reset_handler(sc,
			    MPI3MR_RESET_FROM_CIACTVRST_TIMER, 1);
			continue;
		}

		ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);

		/* Reset-history bit set means the firmware reset on its own. */
		if (ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) {
			mpi3mr_soft_reset_handler(sc, MPI3MR_RESET_FROM_FIRMWARE, 0);
			continue;
		}

		ioc_state = mpi3mr_get_iocstate(sc);
		if (ioc_state == MRIOC_STATE_FAULT) {
			fault = mpi3mr_regread(sc, MPI3_SYSIF_FAULT_OFFSET) &
			    MPI3_SYSIF_FAULT_CODE_MASK;

			host_diagnostic = mpi3mr_regread(sc, MPI3_SYSIF_HOST_DIAG_OFFSET);
			if (host_diagnostic & MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS) {
				/*
				 * Let an in-progress diagnostic save finish
				 * before resetting, up to the save timeout.
				 */
				if (!sc->diagsave_timeout) {
					mpi3mr_print_fault_info(sc);
					mpi3mr_dprint(sc, MPI3MR_INFO,
						"diag save in progress\n");
				}
				if ((sc->diagsave_timeout++) <= MPI3_SYSIF_DIAG_SAVE_TIMEOUT)
					continue;
			}
			mpi3mr_print_fault_info(sc);
			sc->diagsave_timeout = 0;

			/* These faults cannot be recovered by a soft reset. */
			if ((fault == MPI3_SYSIF_FAULT_CODE_POWER_CYCLE_REQUIRED) ||
			    (fault == MPI3_SYSIF_FAULT_CODE_COMPLETE_RESET_NEEDED)) {
				mpi3mr_dprint(sc, MPI3MR_INFO,
				    "Controller requires system power cycle or complete reset is needed,"
				    "fault code: 0x%x. marking controller as unrecoverable\n", fault);
				sc->unrecoverable = 1;
				goto out;
			}
			/* A reset is already underway elsewhere; stand down. */
			if ((fault == MPI3_SYSIF_FAULT_CODE_DIAG_FAULT_RESET)
			    || (fault == MPI3_SYSIF_FAULT_CODE_SOFT_RESET_IN_PROGRESS)
			    || (sc->reset_in_progress))
				goto out;
			if (fault == MPI3_SYSIF_FAULT_CODE_CI_ACTIVATION_RESET)
				mpi3mr_soft_reset_handler(sc,
				    MPI3MR_RESET_FROM_CIACTIV_FAULT, 0);
			else
				mpi3mr_soft_reset_handler(sc,
				    MPI3MR_RESET_FROM_FAULT_WATCH, 0);

		}

		/* A soft reset was requested from elsewhere in the driver. */
		if (sc->reset.type == MPI3MR_TRIGGER_SOFT_RESET) {
			mpi3mr_print_fault_info(sc);
			mpi3mr_soft_reset_handler(sc, sc->reset.reason, 1);
		}
	}
out:
	mtx_unlock(&sc->reset_mutex);
	sc->watchdog_thread_active = 0;
	mpi3mr_kproc_exit(0);
}
3118 
/*
 * mpi3mr_display_event_data - Log a human-readable name for a firmware
 * event notification. Events with interesting payload (device add/info/
 * status change, SAS discovery, PCIe enumeration) are decoded inline
 * and return early; all other known events map to a description string
 * printed at the end. Unknown events are silently ignored.
 */
static void mpi3mr_display_event_data(struct mpi3mr_softc *sc,
	Mpi3EventNotificationReply_t *event_rep)
{
	char *desc = NULL;
	U16 event;

	event = event_rep->Event;

	switch (event) {
	case MPI3_EVENT_LOG_DATA:
		desc = "Log Data";
		break;
	case MPI3_EVENT_CHANGE:
		desc = "Event Change";
		break;
	case MPI3_EVENT_GPIO_INTERRUPT:
		desc = "GPIO Interrupt";
		break;
	case MPI3_EVENT_CABLE_MGMT:
		desc = "Cable Management";
		break;
	case MPI3_EVENT_ENERGY_PACK_CHANGE:
		desc = "Energy Pack Change";
		break;
	case MPI3_EVENT_DEVICE_ADDED:
	{
		Mpi3DevicePage0_t *event_data =
		    (Mpi3DevicePage0_t *)event_rep->EventData;
		mpi3mr_dprint(sc, MPI3MR_EVENT, "Device Added: Dev=0x%04x Form=0x%x Perst id: 0x%x\n",
			event_data->DevHandle, event_data->DeviceForm, event_data->PersistentID);
		return;
	}
	case MPI3_EVENT_DEVICE_INFO_CHANGED:
	{
		Mpi3DevicePage0_t *event_data =
		    (Mpi3DevicePage0_t *)event_rep->EventData;
		mpi3mr_dprint(sc, MPI3MR_EVENT, "Device Info Changed: Dev=0x%04x Form=0x%x\n",
			event_data->DevHandle, event_data->DeviceForm);
		return;
	}
	case MPI3_EVENT_DEVICE_STATUS_CHANGE:
	{
		Mpi3EventDataDeviceStatusChange_t *event_data =
		    (Mpi3EventDataDeviceStatusChange_t *)event_rep->EventData;
		mpi3mr_dprint(sc, MPI3MR_EVENT, "Device Status Change: Dev=0x%04x RC=0x%x\n",
			event_data->DevHandle, event_data->ReasonCode);
		return;
	}
	case MPI3_EVENT_SAS_DISCOVERY:
	{
		Mpi3EventDataSasDiscovery_t *event_data =
		    (Mpi3EventDataSasDiscovery_t *)event_rep->EventData;
		/*
		 * The line is built piecemeal without a trailing newline so
		 * the optional discovery status can be appended; the raw
		 * printf calls below are gated on the same MPI3MR_EVENT
		 * debug flag that mpi3mr_dprint honours.
		 */
		mpi3mr_dprint(sc, MPI3MR_EVENT, "SAS Discovery: (%s)",
			(event_data->ReasonCode == MPI3_EVENT_SAS_DISC_RC_STARTED) ?
		    "start" : "stop");
		if (event_data->DiscoveryStatus &&
		    (sc->mpi3mr_debug & MPI3MR_EVENT)) {
			printf("discovery_status(0x%08x)",
			    event_data->DiscoveryStatus);

		}

		if (sc->mpi3mr_debug & MPI3MR_EVENT)
			printf("\n");
		return;
	}
	case MPI3_EVENT_SAS_BROADCAST_PRIMITIVE:
		desc = "SAS Broadcast Primitive";
		break;
	case MPI3_EVENT_SAS_NOTIFY_PRIMITIVE:
		desc = "SAS Notify Primitive";
		break;
	case MPI3_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE:
		desc = "SAS Init Device Status Change";
		break;
	case MPI3_EVENT_SAS_INIT_TABLE_OVERFLOW:
		desc = "SAS Init Table Overflow";
		break;
	case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
		desc = "SAS Topology Change List";
		break;
	case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE:
		desc = "Enclosure Device Status Change";
		break;
	case MPI3_EVENT_HARD_RESET_RECEIVED:
		desc = "Hard Reset Received";
		break;
	case MPI3_EVENT_SAS_PHY_COUNTER:
		desc = "SAS PHY Counter";
		break;
	case MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
		desc = "SAS Device Discovery Error";
		break;
	case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
		desc = "PCIE Topology Change List";
		break;
	case MPI3_EVENT_PCIE_ENUMERATION:
	{
		Mpi3EventDataPcieEnumeration_t *event_data =
			(Mpi3EventDataPcieEnumeration_t *)event_rep->EventData;
		mpi3mr_dprint(sc, MPI3MR_EVENT, "PCIE Enumeration: (%s)",
			(event_data->ReasonCode ==
			    MPI3_EVENT_PCIE_ENUM_RC_STARTED) ? "start" :
			    "stop");
		if (event_data->EnumerationStatus)
			mpi3mr_dprint(sc, MPI3MR_EVENT, "enumeration_status(0x%08x)",
			   event_data->EnumerationStatus);
		if (sc->mpi3mr_debug & MPI3MR_EVENT)
			printf("\n");
		return;
	}
	case MPI3_EVENT_PREPARE_FOR_RESET:
		desc = "Prepare For Reset";
		break;
	}

	/* Unknown event code: nothing to print. */
	if (!desc)
		return;

	mpi3mr_dprint(sc, MPI3MR_EVENT, "%s\n", desc);
}
3240 
3241 struct mpi3mr_target *
3242 mpi3mr_find_target_by_per_id(struct mpi3mr_cam_softc *cam_sc,
3243     uint16_t per_id)
3244 {
3245 	struct mpi3mr_target *target = NULL;
3246 
3247 	mtx_lock_spin(&cam_sc->sc->target_lock);
3248 	TAILQ_FOREACH(target, &cam_sc->tgt_list, tgt_next) {
3249 		if (target->per_id == per_id)
3250 			break;
3251 	}
3252 
3253 	mtx_unlock_spin(&cam_sc->sc->target_lock);
3254 	return target;
3255 }
3256 
3257 struct mpi3mr_target *
3258 mpi3mr_find_target_by_dev_handle(struct mpi3mr_cam_softc *cam_sc,
3259     uint16_t handle)
3260 {
3261 	struct mpi3mr_target *target = NULL;
3262 
3263 	mtx_lock_spin(&cam_sc->sc->target_lock);
3264 	TAILQ_FOREACH(target, &cam_sc->tgt_list, tgt_next) {
3265 		if (target->dev_handle == handle)
3266 			break;
3267 
3268 	}
3269 	mtx_unlock_spin(&cam_sc->sc->target_lock);
3270 	return target;
3271 }
3272 
/*
 * mpi3mr_update_device - Refresh a driver target from Device Page 0.
 * @sc: Adapter instance reference
 * @tgtdev: Target to populate
 * @dev_pg0: Device page 0 data from the firmware
 * @is_added: true when called for a newly discovered device; some
 *	      fields (io_throttle_enabled, throttle group fw_qd) are
 *	      only captured on add.
 *
 * Copies the generic fields, then decodes the form-specific union
 * (SAS/SATA, PCIe, VD) and decides whether the device should stay
 * hidden from the OS.
 */
void mpi3mr_update_device(struct mpi3mr_softc *sc,
    struct mpi3mr_target *tgtdev, Mpi3DevicePage0_t *dev_pg0,
    bool is_added)
{
	U16 flags = 0;

	tgtdev->per_id = (dev_pg0->PersistentID);
	tgtdev->dev_handle = (dev_pg0->DevHandle);
	tgtdev->dev_type = dev_pg0->DeviceForm;
	tgtdev->encl_handle = (dev_pg0->EnclosureHandle);
	tgtdev->parent_handle = (dev_pg0->ParentDevHandle);
	tgtdev->slot = (dev_pg0->Slot);
	/*
	 * NOTE(review): qdepth here and q_depth in the PCIE case below are
	 * two distinct softc fields both loaded from QueueDepth — confirm
	 * whether both are intentional.
	 */
	tgtdev->qdepth = (dev_pg0->QueueDepth);
	tgtdev->wwid = (dev_pg0->WWID);

	flags = (dev_pg0->Flags);
	tgtdev->is_hidden = (flags & MPI3_DEVICE0_FLAGS_HIDDEN);
	if (is_added == true)
		tgtdev->io_throttle_enabled =
		    (flags & MPI3_DEVICE0_FLAGS_IO_THROTTLING_REQUIRED) ? 1 : 0;

	/* Any access status other than these transitional/OK ones hides the device. */
	switch (dev_pg0->AccessStatus) {
	case MPI3_DEVICE0_ASTATUS_NO_ERRORS:
	case MPI3_DEVICE0_ASTATUS_PREPARE:
	case MPI3_DEVICE0_ASTATUS_NEEDS_INITIALIZATION:
	case MPI3_DEVICE0_ASTATUS_DEVICE_MISSING_DELAY:
		break;
	default:
		tgtdev->is_hidden = 1;
		break;
	}

	switch (tgtdev->dev_type) {
	case MPI3_DEVICE_DEVFORM_SAS_SATA:
	{
		Mpi3Device0SasSataFormat_t *sasinf =
		    &dev_pg0->DeviceSpecific.SasSataFormat;
		U16 dev_info = (sasinf->DeviceInfo);
		tgtdev->dev_spec.sassata_inf.dev_info = dev_info;
		tgtdev->dev_spec.sassata_inf.sas_address =
		    (sasinf->SASAddress);
		/* Only SSP/STP-SATA end devices are exposed to the OS. */
		if ((dev_info & MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_MASK) !=
		    MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_END_DEVICE)
			tgtdev->is_hidden = 1;
		else if (!(dev_info & (MPI3_SAS_DEVICE_INFO_STP_SATA_TARGET |
			    MPI3_SAS_DEVICE_INFO_SSP_TARGET)))
			tgtdev->is_hidden = 1;
		break;
	}
	case MPI3_DEVICE_DEVFORM_PCIE:
	{
		Mpi3Device0PcieFormat_t *pcieinf =
		    &dev_pg0->DeviceSpecific.PcieFormat;
		U16 dev_info = (pcieinf->DeviceInfo);

		tgtdev->q_depth = dev_pg0->QueueDepth;
		tgtdev->dev_spec.pcie_inf.dev_info = dev_info;
		tgtdev->dev_spec.pcie_inf.capb =
		    (pcieinf->Capabilities);
		/* Transfer-size/timeout details are only valid when access is clean. */
		tgtdev->dev_spec.pcie_inf.mdts = MPI3MR_DEFAULT_MDTS;
		if (dev_pg0->AccessStatus == MPI3_DEVICE0_ASTATUS_NO_ERRORS) {
			tgtdev->dev_spec.pcie_inf.mdts =
			    (pcieinf->MaximumDataTransferSize);
			tgtdev->dev_spec.pcie_inf.pgsz = pcieinf->PageSize;
			tgtdev->dev_spec.pcie_inf.reset_to =
				pcieinf->ControllerResetTO;
			tgtdev->dev_spec.pcie_inf.abort_to =
				pcieinf->NVMeAbortTO;
		}
		/* Clamp maximum data transfer size to 1MB. */
		if (tgtdev->dev_spec.pcie_inf.mdts > (1024 * 1024))
			tgtdev->dev_spec.pcie_inf.mdts = (1024 * 1024);

		/* Only NVMe and PCIe-SCSI device types are exposed. */
		if (((dev_info & MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) !=
		    MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) &&
		    ((dev_info & MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) !=
		    MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_SCSI_DEVICE))
			tgtdev->is_hidden = 1;

		break;
	}
	case MPI3_DEVICE_DEVFORM_VD:
	{
		Mpi3Device0VdFormat_t *vdinf =
		    &dev_pg0->DeviceSpecific.VdFormat;
		struct mpi3mr_throttle_group_info *tg = NULL;

		tgtdev->dev_spec.vol_inf.state = vdinf->VdState;
		if (vdinf->VdState == MPI3_DEVICE0_VD_STATE_OFFLINE)
			tgtdev->is_hidden = 1;
		tgtdev->dev_spec.vol_inf.tg_id = vdinf->IOThrottleGroup;
		/* Firmware reports thresholds in 2048-sector units. */
		tgtdev->dev_spec.vol_inf.tg_high =
			vdinf->IOThrottleGroupHigh * 2048;
		tgtdev->dev_spec.vol_inf.tg_low =
			vdinf->IOThrottleGroupLow * 2048;
		if (vdinf->IOThrottleGroup < sc->num_io_throttle_group) {
			tg = sc->throttle_groups + vdinf->IOThrottleGroup;
			tg->id = vdinf->IOThrottleGroup;
			tg->high = tgtdev->dev_spec.vol_inf.tg_high;
			tg->low = tgtdev->dev_spec.vol_inf.tg_low;
			if (is_added == true)
				tg->fw_qd = tgtdev->q_depth;
			tg->modified_qd = tgtdev->q_depth;
		}
		/* tg stays NULL if the group index is out of range. */
		tgtdev->dev_spec.vol_inf.tg = tg;
		tgtdev->throttle_group = tg;
		break;
	}
	default:
		goto out;
	}

out:
	return;
}
3387 
3388 int mpi3mr_create_device(struct mpi3mr_softc *sc,
3389     Mpi3DevicePage0_t *dev_pg0)
3390 {
3391 	int retval = 0;
3392 	struct mpi3mr_target *target = NULL;
3393 	U16 per_id = 0;
3394 
3395 	per_id = dev_pg0->PersistentID;
3396 
3397 	mtx_lock_spin(&sc->target_lock);
3398 	TAILQ_FOREACH(target, &sc->cam_sc->tgt_list, tgt_next) {
3399 		if (target->per_id == per_id) {
3400 			target->state = MPI3MR_DEV_CREATED;
3401 			break;
3402 		}
3403 	}
3404 	mtx_unlock_spin(&sc->target_lock);
3405 
3406 	if (target) {
3407 			mpi3mr_update_device(sc, target, dev_pg0, true);
3408 	} else {
3409 			target = malloc(sizeof(*target), M_MPI3MR,
3410 				 M_NOWAIT | M_ZERO);
3411 
3412 			if (target == NULL) {
3413 				retval = -1;
3414 				goto out;
3415 			}
3416 
3417 			target->exposed_to_os = 0;
3418 			mpi3mr_update_device(sc, target, dev_pg0, true);
3419 			mtx_lock_spin(&sc->target_lock);
3420 			TAILQ_INSERT_TAIL(&sc->cam_sc->tgt_list, target, tgt_next);
3421 			target->state = MPI3MR_DEV_CREATED;
3422 			mtx_unlock_spin(&sc->target_lock);
3423 	}
3424 out:
3425 	return retval;
3426 }
3427 
3428 /**
3429  * mpi3mr_dev_rmhs_complete_iou - Device removal IOUC completion
3430  * @sc: Adapter instance reference
3431  * @drv_cmd: Internal command tracker
3432  *
3433  * Issues a target reset TM to the firmware from the device
3434  * removal TM pend list or retry the removal handshake sequence
3435  * based on the IOU control request IOC status.
3436  *
3437  * Return: Nothing
3438  */
static void mpi3mr_dev_rmhs_complete_iou(struct mpi3mr_softc *sc,
	struct mpi3mr_drvr_cmd *drv_cmd)
{
	/* Host tags for dev-removal commands are contiguous from DEVRMCMD_MIN. */
	U16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
	struct delayed_dev_rmhs_node *delayed_dev_rmhs = NULL;

	mpi3mr_dprint(sc, MPI3MR_EVENT,
	    "%s :dev_rmhs_iouctrl_complete:handle(0x%04x), ioc_status(0x%04x), loginfo(0x%08x)\n",
	    __func__, drv_cmd->dev_handle, drv_cmd->ioc_status,
	    drv_cmd->ioc_loginfo);
	if (drv_cmd->ioc_status != MPI3_IOCSTATUS_SUCCESS) {
		/* IOU control failed: restart the whole TM+IOU handshake,
		 * up to MPI3MR_DEVRMHS_RETRYCOUNT attempts. */
		if (drv_cmd->retry_count < MPI3MR_DEVRMHS_RETRYCOUNT) {
			drv_cmd->retry_count++;
			mpi3mr_dprint(sc, MPI3MR_EVENT,
			    "%s :dev_rmhs_iouctrl_complete: handle(0x%04x)retrying handshake retry=%d\n",
			    __func__, drv_cmd->dev_handle,
			    drv_cmd->retry_count);
			mpi3mr_dev_rmhs_send_tm(sc, drv_cmd->dev_handle,
			    drv_cmd, drv_cmd->iou_rc);
			return;
		}
		mpi3mr_dprint(sc, MPI3MR_ERROR,
		    "%s :dev removal handshake failed after all retries: handle(0x%04x)\n",
		    __func__, drv_cmd->dev_handle);
	} else {
		mpi3mr_dprint(sc, MPI3MR_INFO,
		    "%s :dev removal handshake completed successfully: handle(0x%04x)\n",
		    __func__, drv_cmd->dev_handle);
		/* Handle is no longer removal-pending. */
		mpi3mr_clear_bit(drv_cmd->dev_handle, sc->removepend_bitmap);
	}

	/*
	 * Reuse this tracker immediately for the oldest postponed removal,
	 * if any, instead of returning it to the pool.
	 */
	if (!TAILQ_EMPTY(&sc->delayed_rmhs_list)) {
		delayed_dev_rmhs = TAILQ_FIRST(&sc->delayed_rmhs_list);
		drv_cmd->dev_handle = delayed_dev_rmhs->handle;
		drv_cmd->retry_count = 0;
		drv_cmd->iou_rc = delayed_dev_rmhs->iou_rc;
		mpi3mr_dprint(sc, MPI3MR_EVENT,
		    "%s :dev_rmhs_iouctrl_complete: processing delayed TM: handle(0x%04x)\n",
		    __func__, drv_cmd->dev_handle);
		mpi3mr_dev_rmhs_send_tm(sc, drv_cmd->dev_handle, drv_cmd,
		    drv_cmd->iou_rc);
		TAILQ_REMOVE(&sc->delayed_rmhs_list, delayed_dev_rmhs, list);
		free(delayed_dev_rmhs, M_MPI3MR);
		return;
	}
	/* Nothing pending: return the tracker to its idle state. */
	drv_cmd->state = MPI3MR_CMD_NOTUSED;
	drv_cmd->callback = NULL;
	drv_cmd->retry_count = 0;
	drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
	mpi3mr_clear_bit(cmd_idx, sc->devrem_bitmap);
}
3490 
3491 /**
3492  * mpi3mr_dev_rmhs_complete_tm - Device removal TM completion
3493  * @sc: Adapter instance reference
3494  * @drv_cmd: Internal command tracker
3495  *
3496  * Issues a target reset TM to the firmware from the device
3497  * removal TM pend list or issue IO Unit control request as
3498  * part of device removal or hidden acknowledgment handshake.
3499  *
3500  * Return: Nothing
3501  */
static void mpi3mr_dev_rmhs_complete_tm(struct mpi3mr_softc *sc,
	struct mpi3mr_drvr_cmd *drv_cmd)
{
	Mpi3IoUnitControlRequest_t iou_ctrl;
	/* Host tags for dev-removal commands are contiguous from DEVRMCMD_MIN. */
	U16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
	Mpi3SCSITaskMgmtReply_t *tm_reply = NULL;
	int retval;

	if (drv_cmd->state & MPI3MR_CMD_REPLYVALID)
		tm_reply = (Mpi3SCSITaskMgmtReply_t *)drv_cmd->reply;

	if (tm_reply)
		printf(IOCNAME
		    "dev_rmhs_tr_complete:handle(0x%04x), ioc_status(0x%04x), loginfo(0x%08x), term_count(%d)\n",
		    sc->name, drv_cmd->dev_handle, drv_cmd->ioc_status,
		    drv_cmd->ioc_loginfo,
		    le32toh(tm_reply->TerminationCount));

	printf(IOCNAME "Issuing IOU CTL: handle(0x%04x) dev_rmhs idx(%d)\n",
	    sc->name, drv_cmd->dev_handle, cmd_idx);

	memset(&iou_ctrl, 0, sizeof(iou_ctrl));

	/*
	 * Second stage of the removal handshake: the target reset TM is
	 * done, now issue the IO Unit control request; the same tracker
	 * continues, completing in mpi3mr_dev_rmhs_complete_iou().
	 */
	drv_cmd->state = MPI3MR_CMD_PENDING;
	drv_cmd->is_waiting = 0;
	drv_cmd->callback = mpi3mr_dev_rmhs_complete_iou;
	iou_ctrl.Operation = drv_cmd->iou_rc;
	iou_ctrl.Param16[0] = htole16(drv_cmd->dev_handle);
	iou_ctrl.HostTag = htole16(drv_cmd->host_tag);
	iou_ctrl.Function = MPI3_FUNCTION_IO_UNIT_CONTROL;

	retval = mpi3mr_submit_admin_cmd(sc, &iou_ctrl, sizeof(iou_ctrl));
	if (retval) {
		printf(IOCNAME "Issue DevRmHsTMIOUCTL: Admin post failed\n",
		    sc->name);
		goto out_failed;
	}

	return;
out_failed:
	/* Abandon the handshake and return the tracker to the pool. */
	drv_cmd->state = MPI3MR_CMD_NOTUSED;
	drv_cmd->callback = NULL;
	drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
	drv_cmd->retry_count = 0;
	mpi3mr_clear_bit(cmd_idx, sc->devrem_bitmap);
}
3548 
3549 /**
3550  * mpi3mr_dev_rmhs_send_tm - Issue TM for device removal
3551  * @sc: Adapter instance reference
3552  * @handle: Device handle
3553  * @cmdparam: Internal command tracker
3554  * @iou_rc: IO Unit reason code
3555  *
3556  * Issues a target reset TM to the firmware or add it to a pend
3557  * list as part of device removal or hidden acknowledgment
3558  * handshake.
3559  *
3560  * Return: Nothing
3561  */
static void mpi3mr_dev_rmhs_send_tm(struct mpi3mr_softc *sc, U16 handle,
	struct mpi3mr_drvr_cmd *cmdparam, U8 iou_rc)
{
	Mpi3SCSITaskMgmtRequest_t tm_req;
	int retval = 0;
	U16 cmd_idx = MPI3MR_NUM_DEVRMCMD;
	U8 retrycount = 5;
	struct mpi3mr_drvr_cmd *drv_cmd = cmdparam;
	struct delayed_dev_rmhs_node *delayed_dev_rmhs = NULL;
	struct mpi3mr_target *tgtdev = NULL;

	/*
	 * For a device-remove handshake, mark the matching target so other
	 * paths can see that the removal handshake has started.
	 */
	mtx_lock_spin(&sc->target_lock);
	TAILQ_FOREACH(tgtdev, &sc->cam_sc->tgt_list, tgt_next) {
		if ((tgtdev->dev_handle == handle) &&
		    (iou_rc == MPI3_CTRL_OP_REMOVE_DEVICE)) {
			tgtdev->state = MPI3MR_DEV_REMOVE_HS_STARTED;
			break;
		}
	}
	mtx_unlock_spin(&sc->target_lock);

	/* A caller-supplied command tracker is reused as-is. */
	if (drv_cmd)
		goto issue_cmd;
	/*
	 * Otherwise try (a bounded number of times) to claim a free
	 * device-removal command slot from the bitmap.
	 */
	do {
		cmd_idx = mpi3mr_find_first_zero_bit(sc->devrem_bitmap,
		    MPI3MR_NUM_DEVRMCMD);
		if (cmd_idx < MPI3MR_NUM_DEVRMCMD) {
			if (!mpi3mr_test_and_set_bit(cmd_idx, sc->devrem_bitmap))
				break;
			cmd_idx = MPI3MR_NUM_DEVRMCMD;
		}
	} while (retrycount--);

	/*
	 * No free slot: park the request on the delayed list to be retried
	 * when an in-flight removal handshake completes.  M_NOWAIT because
	 * this may run from a non-sleepable context; on allocation failure
	 * the request is dropped.
	 */
	if (cmd_idx >= MPI3MR_NUM_DEVRMCMD) {
		delayed_dev_rmhs = malloc(sizeof(*delayed_dev_rmhs),M_MPI3MR,
		     M_ZERO|M_NOWAIT);

		if (!delayed_dev_rmhs)
			return;
		delayed_dev_rmhs->handle = handle;
		delayed_dev_rmhs->iou_rc = iou_rc;
		TAILQ_INSERT_TAIL(&(sc->delayed_rmhs_list), delayed_dev_rmhs, list);
		mpi3mr_dprint(sc, MPI3MR_EVENT, "%s :DevRmHs: tr:handle(0x%04x) is postponed\n",
		    __func__, handle);


		return;
	}
	drv_cmd = &sc->dev_rmhs_cmds[cmd_idx];

issue_cmd:
	/* Recompute the slot index from the tracker's own host tag. */
	cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
	mpi3mr_dprint(sc, MPI3MR_EVENT,
	    "%s :Issuing TR TM: for devhandle 0x%04x with dev_rmhs %d\n",
	    __func__, handle, cmd_idx);

	memset(&tm_req, 0, sizeof(tm_req));
	if (drv_cmd->state & MPI3MR_CMD_PENDING) {
		mpi3mr_dprint(sc, MPI3MR_EVENT, "%s :Issue TM: Command is in use\n", __func__);
		goto out;
	}
	/*
	 * Issue a non-blocking target reset TM; the handshake continues in
	 * mpi3mr_dev_rmhs_complete_tm when the reply arrives.
	 */
	drv_cmd->state = MPI3MR_CMD_PENDING;
	drv_cmd->is_waiting = 0;
	drv_cmd->callback = mpi3mr_dev_rmhs_complete_tm;
	drv_cmd->dev_handle = handle;
	drv_cmd->iou_rc = iou_rc;
	tm_req.DevHandle = htole16(handle);
	tm_req.TaskType = MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
	tm_req.HostTag = htole16(drv_cmd->host_tag);
	tm_req.TaskHostTag = htole16(MPI3MR_HOSTTAG_INVALID);
	tm_req.Function = MPI3_FUNCTION_SCSI_TASK_MGMT;

	/* Record that this handle has a removal handshake outstanding. */
	mpi3mr_set_bit(handle, sc->removepend_bitmap);
	retval = mpi3mr_submit_admin_cmd(sc, &tm_req, sizeof(tm_req));
	if (retval) {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "%s :Issue DevRmHsTM: Admin Post failed\n",
		    __func__);
		goto out_failed;
	}
out:
	return;
out_failed:
	/* Roll the tracker back so its slot can be reused. */
	drv_cmd->state = MPI3MR_CMD_NOTUSED;
	drv_cmd->callback = NULL;
	drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
	drv_cmd->retry_count = 0;
	mpi3mr_clear_bit(cmd_idx, sc->devrem_bitmap);
}
3650 
3651 /**
3652  * mpi3mr_complete_evt_ack - Event ack request completion
3653  * @sc: Adapter instance reference
3654  * @drv_cmd: Internal command tracker
3655  *
3656  * This is the completion handler for non blocking event
3657  * acknowledgment sent to the firmware and this will issue any
3658  * pending event acknowledgment request.
3659  *
3660  * Return: Nothing
3661  */
3662 static void mpi3mr_complete_evt_ack(struct mpi3mr_softc *sc,
3663 	struct mpi3mr_drvr_cmd *drv_cmd)
3664 {
3665 	U16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN;
3666 	struct delayed_evtack_node *delayed_evtack = NULL;
3667 
3668 	if (drv_cmd->ioc_status != MPI3_IOCSTATUS_SUCCESS) {
3669 		mpi3mr_dprint(sc, MPI3MR_EVENT,
3670 		    "%s: Failed IOCStatus(0x%04x) Loginfo(0x%08x)\n", __func__,
3671 		    (drv_cmd->ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
3672 		    drv_cmd->ioc_loginfo);
3673 	}
3674 
3675 	if (!TAILQ_EMPTY(&sc->delayed_evtack_cmds_list)) {
3676 		delayed_evtack = TAILQ_FIRST(&sc->delayed_evtack_cmds_list);
3677 		mpi3mr_dprint(sc, MPI3MR_EVENT,
3678 		    "%s: processing delayed event ack for event %d\n",
3679 		    __func__, delayed_evtack->event);
3680 		mpi3mr_send_evt_ack(sc, delayed_evtack->event, drv_cmd,
3681 		    delayed_evtack->event_ctx);
3682 		TAILQ_REMOVE(&sc->delayed_evtack_cmds_list, delayed_evtack, list);
3683 		free(delayed_evtack, M_MPI3MR);
3684 		return;
3685 	}
3686 	drv_cmd->state = MPI3MR_CMD_NOTUSED;
3687 	drv_cmd->callback = NULL;
3688 	mpi3mr_clear_bit(cmd_idx, sc->evtack_cmds_bitmap);
3689 }
3690 
3691 /**
 * mpi3mr_send_evt_ack - Issue event acknowledgment request
3693  * @sc: Adapter instance reference
3694  * @event: MPI3 event id
3695  * @cmdparam: Internal command tracker
3696  * @event_ctx: Event context
3697  *
 * Issues an event acknowledgment request to the firmware if
 * there is a free command to send the event ack, else adds it
 * to a pending list so that it will be processed on completion
 * of a prior event acknowledgment.
3702  *
3703  * Return: Nothing
3704  */
static void mpi3mr_send_evt_ack(struct mpi3mr_softc *sc, U8 event,
	struct mpi3mr_drvr_cmd *cmdparam, U32 event_ctx)
{
	Mpi3EventAckRequest_t evtack_req;
	int retval = 0;
	U8 retrycount = 5;
	U16 cmd_idx = MPI3MR_NUM_EVTACKCMD;
	struct mpi3mr_drvr_cmd *drv_cmd = cmdparam;
	struct delayed_evtack_node *delayed_evtack = NULL;

	/* A caller-supplied command tracker is reused as-is. */
	if (drv_cmd)
		goto issue_cmd;
	/*
	 * Otherwise try (a bounded number of times) to claim a free
	 * event-ack command slot from the bitmap.
	 */
	do {
		cmd_idx = mpi3mr_find_first_zero_bit(sc->evtack_cmds_bitmap,
		    MPI3MR_NUM_EVTACKCMD);
		if (cmd_idx < MPI3MR_NUM_EVTACKCMD) {
			if (!mpi3mr_test_and_set_bit(cmd_idx,
			    sc->evtack_cmds_bitmap))
				break;
			cmd_idx = MPI3MR_NUM_EVTACKCMD;
		}
	} while (retrycount--);

	/*
	 * No free slot: park the ack on the delayed list; it is sent from
	 * mpi3mr_complete_evt_ack when a prior ack finishes.  M_NOWAIT
	 * because this may run from a non-sleepable context; on allocation
	 * failure the ack is dropped.
	 */
	if (cmd_idx >= MPI3MR_NUM_EVTACKCMD) {
		delayed_evtack = malloc(sizeof(*delayed_evtack),M_MPI3MR,
		     M_ZERO | M_NOWAIT);
		if (!delayed_evtack)
			return;
		delayed_evtack->event = event;
		delayed_evtack->event_ctx = event_ctx;
		TAILQ_INSERT_TAIL(&(sc->delayed_evtack_cmds_list), delayed_evtack, list);
		mpi3mr_dprint(sc, MPI3MR_EVENT, "%s : Event ack for event:%d is postponed\n",
		    __func__, event);
		return;
	}
	drv_cmd = &sc->evtack_cmds[cmd_idx];

issue_cmd:
	/* Recompute the slot index from the tracker's own host tag. */
	cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN;

	memset(&evtack_req, 0, sizeof(evtack_req));
	if (drv_cmd->state & MPI3MR_CMD_PENDING) {
		mpi3mr_dprint(sc, MPI3MR_EVENT, "%s: Command is in use\n", __func__);
		goto out;
	}
	/*
	 * Issue the ack without waiting; mpi3mr_complete_evt_ack runs on
	 * completion and drains any delayed acks.
	 */
	drv_cmd->state = MPI3MR_CMD_PENDING;
	drv_cmd->is_waiting = 0;
	drv_cmd->callback = mpi3mr_complete_evt_ack;
	evtack_req.HostTag = htole16(drv_cmd->host_tag);
	evtack_req.Function = MPI3_FUNCTION_EVENT_ACK;
	evtack_req.Event = event;
	evtack_req.EventContext = htole32(event_ctx);
	retval = mpi3mr_submit_admin_cmd(sc, &evtack_req,
	    sizeof(evtack_req));

	if (retval) {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "%s: Admin Post failed\n", __func__);
		goto out_failed;
	}
out:
	return;
out_failed:
	/* Roll the tracker back so its slot can be reused. */
	drv_cmd->state = MPI3MR_CMD_NOTUSED;
	drv_cmd->callback = NULL;
	mpi3mr_clear_bit(cmd_idx, sc->evtack_cmds_bitmap);
}
3771 
/**
3773  * mpi3mr_pcietopochg_evt_th - PCIETopologyChange evt tophalf
3774  * @sc: Adapter instance reference
3775  * @event_reply: Event data
3776  *
3777  * Checks for the reason code and based on that either block I/O
3778  * to device, or unblock I/O to the device, or start the device
3779  * removal handshake with reason as remove with the firmware for
3780  * PCIe devices.
3781  *
3782  * Return: Nothing
3783  */
3784 static void mpi3mr_pcietopochg_evt_th(struct mpi3mr_softc *sc,
3785 	Mpi3EventNotificationReply_t *event_reply)
3786 {
3787 	Mpi3EventDataPcieTopologyChangeList_t *topo_evt =
3788 	    (Mpi3EventDataPcieTopologyChangeList_t *) event_reply->EventData;
3789 	int i;
3790 	U16 handle;
3791 	U8 reason_code;
3792 	struct mpi3mr_target *tgtdev = NULL;
3793 
3794 	for (i = 0; i < topo_evt->NumEntries; i++) {
3795 		handle = le16toh(topo_evt->PortEntry[i].AttachedDevHandle);
3796 		if (!handle)
3797 			continue;
3798 		reason_code = topo_evt->PortEntry[i].PortStatus;
3799 		tgtdev = mpi3mr_find_target_by_dev_handle(sc->cam_sc, handle);
3800 		switch (reason_code) {
3801 		case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
3802 			if (tgtdev) {
3803 				tgtdev->dev_removed = 1;
3804 				tgtdev->dev_removedelay = 0;
3805 				mpi3mr_atomic_set(&tgtdev->block_io, 0);
3806 			}
3807 			mpi3mr_dev_rmhs_send_tm(sc, handle, NULL,
3808 			    MPI3_CTRL_OP_REMOVE_DEVICE);
3809 			break;
3810 		case MPI3_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING:
3811 			if (tgtdev) {
3812 				tgtdev->dev_removedelay = 1;
3813 				mpi3mr_atomic_inc(&tgtdev->block_io);
3814 			}
3815 			break;
3816 		case MPI3_EVENT_PCIE_TOPO_PS_RESPONDING:
3817 			if (tgtdev &&
3818 			    tgtdev->dev_removedelay) {
3819 				tgtdev->dev_removedelay = 0;
3820 				if (mpi3mr_atomic_read(&tgtdev->block_io) > 0)
3821 					mpi3mr_atomic_dec(&tgtdev->block_io);
3822 			}
3823 			break;
3824 		case MPI3_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
3825 		default:
3826 			break;
3827 		}
3828 	}
3829 }
3830 
3831 /**
3832  * mpi3mr_sastopochg_evt_th - SASTopologyChange evt tophalf
3833  * @sc: Adapter instance reference
3834  * @event_reply: Event data
3835  *
3836  * Checks for the reason code and based on that either block I/O
3837  * to device, or unblock I/O to the device, or start the device
3838  * removal handshake with reason as remove with the firmware for
3839  * SAS/SATA devices.
3840  *
3841  * Return: Nothing
3842  */
3843 static void mpi3mr_sastopochg_evt_th(struct mpi3mr_softc *sc,
3844 	Mpi3EventNotificationReply_t *event_reply)
3845 {
3846 	Mpi3EventDataSasTopologyChangeList_t *topo_evt =
3847 	    (Mpi3EventDataSasTopologyChangeList_t *)event_reply->EventData;
3848 	int i;
3849 	U16 handle;
3850 	U8 reason_code;
3851 	struct mpi3mr_target *tgtdev = NULL;
3852 
3853 	for (i = 0; i < topo_evt->NumEntries; i++) {
3854 		handle = le16toh(topo_evt->PhyEntry[i].AttachedDevHandle);
3855 		if (!handle)
3856 			continue;
3857 		reason_code = topo_evt->PhyEntry[i].Status &
3858 		    MPI3_EVENT_SAS_TOPO_PHY_RC_MASK;
3859 		tgtdev = mpi3mr_find_target_by_dev_handle(sc->cam_sc, handle);
3860 		switch (reason_code) {
3861 		case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING:
3862 			if (tgtdev) {
3863 				tgtdev->dev_removed = 1;
3864 				tgtdev->dev_removedelay = 0;
3865 				mpi3mr_atomic_set(&tgtdev->block_io, 0);
3866 			}
3867 			mpi3mr_dev_rmhs_send_tm(sc, handle, NULL,
3868 			    MPI3_CTRL_OP_REMOVE_DEVICE);
3869 			break;
3870 		case MPI3_EVENT_SAS_TOPO_PHY_RC_DELAY_NOT_RESPONDING:
3871 			if (tgtdev) {
3872 				tgtdev->dev_removedelay = 1;
3873 				mpi3mr_atomic_inc(&tgtdev->block_io);
3874 			}
3875 			break;
3876 		case MPI3_EVENT_SAS_TOPO_PHY_RC_RESPONDING:
3877 			if (tgtdev &&
3878 			    tgtdev->dev_removedelay) {
3879 				tgtdev->dev_removedelay = 0;
3880 				if (mpi3mr_atomic_read(&tgtdev->block_io) > 0)
3881 					mpi3mr_atomic_dec(&tgtdev->block_io);
3882 			}
3883 		case MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED:
3884 		default:
3885 			break;
3886 		}
3887 	}
3888 
3889 }
3890 /**
3891  * mpi3mr_devstatuschg_evt_th - DeviceStatusChange evt tophalf
3892  * @sc: Adapter instance reference
3893  * @event_reply: Event data
3894  *
3895  * Checks for the reason code and based on that either block I/O
3896  * to device, or unblock I/O to the device, or start the device
3897  * removal handshake with reason as remove/hide acknowledgment
3898  * with the firmware.
3899  *
3900  * Return: Nothing
3901  */
3902 static void mpi3mr_devstatuschg_evt_th(struct mpi3mr_softc *sc,
3903 	Mpi3EventNotificationReply_t *event_reply)
3904 {
3905 	U16 dev_handle = 0;
3906 	U8 ublock = 0, block = 0, hide = 0, uhide = 0, delete = 0, remove = 0;
3907 	struct mpi3mr_target *tgtdev = NULL;
3908 	Mpi3EventDataDeviceStatusChange_t *evtdata =
3909 	    (Mpi3EventDataDeviceStatusChange_t *) event_reply->EventData;
3910 
3911 	dev_handle = le16toh(evtdata->DevHandle);
3912 
3913 	switch (evtdata->ReasonCode) {
3914 	case MPI3_EVENT_DEV_STAT_RC_INT_DEVICE_RESET_STRT:
3915 	case MPI3_EVENT_DEV_STAT_RC_INT_IT_NEXUS_RESET_STRT:
3916 		block = 1;
3917 		break;
3918 	case MPI3_EVENT_DEV_STAT_RC_HIDDEN:
3919 		delete = 1;
3920 		hide = 1;
3921 		break;
3922 	case MPI3_EVENT_DEV_STAT_RC_NOT_HIDDEN:
3923 		uhide = 1;
3924 		break;
3925 	case MPI3_EVENT_DEV_STAT_RC_VD_NOT_RESPONDING:
3926 		delete = 1;
3927 		remove = 1;
3928 		break;
3929 	case MPI3_EVENT_DEV_STAT_RC_INT_DEVICE_RESET_CMP:
3930 	case MPI3_EVENT_DEV_STAT_RC_INT_IT_NEXUS_RESET_CMP:
3931 		ublock = 1;
3932 		break;
3933 	default:
3934 		break;
3935 	}
3936 
3937 	tgtdev = mpi3mr_find_target_by_dev_handle(sc->cam_sc, dev_handle);
3938 
3939 	if (!tgtdev) {
3940 		mpi3mr_dprint(sc, MPI3MR_ERROR, "%s :target with dev_handle:0x%x not found\n",
3941 		    __func__, dev_handle);
3942 		return;
3943 	}
3944 
3945 	if (block)
3946 		mpi3mr_atomic_inc(&tgtdev->block_io);
3947 
3948 	if (hide)
3949 		tgtdev->is_hidden = hide;
3950 
3951 	if (uhide) {
3952 		tgtdev->is_hidden = 0;
3953 		tgtdev->dev_removed = 0;
3954 	}
3955 
3956 	if (delete)
3957 		tgtdev->dev_removed = 1;
3958 
3959 	if (ublock) {
3960 		if (mpi3mr_atomic_read(&tgtdev->block_io) > 0)
3961 			mpi3mr_atomic_dec(&tgtdev->block_io);
3962 	}
3963 
3964 	if (remove) {
3965 		mpi3mr_dev_rmhs_send_tm(sc, dev_handle, NULL,
3966 					MPI3_CTRL_OP_REMOVE_DEVICE);
3967 	}
3968 	if (hide)
3969 		mpi3mr_dev_rmhs_send_tm(sc, dev_handle, NULL,
3970 					MPI3_CTRL_OP_HIDDEN_ACK);
3971 }
3972 
3973 /**
3974  * mpi3mr_preparereset_evt_th - Prepareforreset evt tophalf
3975  * @sc: Adapter instance reference
3976  * @event_reply: Event data
3977  *
3978  * Blocks and unblocks host level I/O based on the reason code
3979  *
3980  * Return: Nothing
3981  */
3982 static void mpi3mr_preparereset_evt_th(struct mpi3mr_softc *sc,
3983 	Mpi3EventNotificationReply_t *event_reply)
3984 {
3985 	Mpi3EventDataPrepareForReset_t *evtdata =
3986 	    (Mpi3EventDataPrepareForReset_t *)event_reply->EventData;
3987 
3988 	if (evtdata->ReasonCode == MPI3_EVENT_PREPARE_RESET_RC_START) {
3989 		mpi3mr_dprint(sc, MPI3MR_EVENT, "%s :Recieved PrepForReset Event with RC=START\n",
3990 		    __func__);
3991 		if (sc->prepare_for_reset)
3992 			return;
3993 		sc->prepare_for_reset = 1;
3994 		sc->prepare_for_reset_timeout_counter = 0;
3995 	} else if (evtdata->ReasonCode == MPI3_EVENT_PREPARE_RESET_RC_ABORT) {
3996 		mpi3mr_dprint(sc, MPI3MR_EVENT, "%s :Recieved PrepForReset Event with RC=ABORT\n",
3997 		    __func__);
3998 		sc->prepare_for_reset = 0;
3999 		sc->prepare_for_reset_timeout_counter = 0;
4000 	}
4001 	if ((event_reply->MsgFlags & MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_MASK)
4002 	    == MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_REQUIRED)
4003 		mpi3mr_send_evt_ack(sc, event_reply->Event, NULL,
4004 		    le32toh(event_reply->EventContext));
4005 }
4006 
4007 /**
4008  * mpi3mr_energypackchg_evt_th - Energypackchange evt tophalf
4009  * @sc: Adapter instance reference
4010  * @event_reply: Event data
4011  *
4012  * Identifies the new shutdown timeout value and update.
4013  *
4014  * Return: Nothing
4015  */
4016 static void mpi3mr_energypackchg_evt_th(struct mpi3mr_softc *sc,
4017 	Mpi3EventNotificationReply_t *event_reply)
4018 {
4019 	Mpi3EventDataEnergyPackChange_t *evtdata =
4020 	    (Mpi3EventDataEnergyPackChange_t *)event_reply->EventData;
4021 	U16 shutdown_timeout = le16toh(evtdata->ShutdownTimeout);
4022 
4023 	if (shutdown_timeout <= 0) {
4024 		mpi3mr_dprint(sc, MPI3MR_ERROR,
4025 		    "%s :Invalid Shutdown Timeout received = %d\n",
4026 		    __func__, shutdown_timeout);
4027 		return;
4028 	}
4029 
4030 	mpi3mr_dprint(sc, MPI3MR_EVENT,
4031 	    "%s :Previous Shutdown Timeout Value = %d New Shutdown Timeout Value = %d\n",
4032 	    __func__, sc->facts.shutdown_timeout, shutdown_timeout);
4033 	sc->facts.shutdown_timeout = shutdown_timeout;
4034 }
4035 
4036 /**
4037  * mpi3mr_cablemgmt_evt_th - Cable mgmt evt tophalf
4038  * @sc: Adapter instance reference
4039  * @event_reply: Event data
4040  *
 * Displays cable management event details.
4042  *
4043  * Return: Nothing
4044  */
4045 static void mpi3mr_cablemgmt_evt_th(struct mpi3mr_softc *sc,
4046 	Mpi3EventNotificationReply_t *event_reply)
4047 {
4048 	Mpi3EventDataCableManagement_t *evtdata =
4049 	    (Mpi3EventDataCableManagement_t *)event_reply->EventData;
4050 
4051 	switch (evtdata->Status) {
4052 	case MPI3_EVENT_CABLE_MGMT_STATUS_INSUFFICIENT_POWER:
4053 	{
4054 		mpi3mr_dprint(sc, MPI3MR_INFO, "An active cable with ReceptacleID %d cannot be powered.\n"
4055 		    "Devices connected to this cable are not detected.\n"
4056 		    "This cable requires %d mW of power.\n",
4057 		    evtdata->ReceptacleID,
4058 		    le32toh(evtdata->ActiveCablePowerRequirement));
4059 		break;
4060 	}
4061 	case MPI3_EVENT_CABLE_MGMT_STATUS_DEGRADED:
4062 	{
4063 		mpi3mr_dprint(sc, MPI3MR_INFO, "A cable with ReceptacleID %d is not running at optimal speed\n",
4064 		    evtdata->ReceptacleID);
4065 		break;
4066 	}
4067 	default:
4068 		break;
4069 	}
4070 }
4071 
4072 /**
 * mpi3mr_process_events - Event's top-half handler
4074  * @sc: Adapter instance reference
4075  * @event_reply: Event data
4076  *
4077  * Top half of event processing.
4078  *
4079  * Return: Nothing
4080  */
static void mpi3mr_process_events(struct mpi3mr_softc *sc,
    uintptr_t data, Mpi3EventNotificationReply_t *event_reply)
{
	U16 evt_type;
	bool ack_req = 0, process_evt_bh = 0;
	struct mpi3mr_fw_event_work *fw_event;
	U16 sz;

	/* Drop all events once a shutdown is under way. */
	if (sc->mpi3mr_flags & MPI3MR_FLAGS_SHUTDOWN)
		goto out;

	/* Firmware may demand an explicit acknowledgment for this event. */
	if ((event_reply->MsgFlags & MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_MASK)
	    == MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_REQUIRED)
		ack_req = 1;

	evt_type = event_reply->Event;

	/*
	 * Top-half dispatch: run the event-specific handler inline and
	 * decide whether a bottom-half (taskqueue) pass is also needed.
	 */
	switch (evt_type) {
	case MPI3_EVENT_DEVICE_ADDED:
	{
		Mpi3DevicePage0_t *dev_pg0 =
			(Mpi3DevicePage0_t *) event_reply->EventData;
		if (mpi3mr_create_device(sc, dev_pg0))
			mpi3mr_dprint(sc, MPI3MR_ERROR,
			"%s :Failed to add device in the device add event\n",
			__func__);
		else
			process_evt_bh = 1;
		break;
	}

	case MPI3_EVENT_DEVICE_STATUS_CHANGE:
	{
		process_evt_bh = 1;
		mpi3mr_devstatuschg_evt_th(sc, event_reply);
		break;
	}
	case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
	{
		process_evt_bh = 1;
		mpi3mr_sastopochg_evt_th(sc, event_reply);
		break;
	}
	case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
	{
		process_evt_bh = 1;
		mpi3mr_pcietopochg_evt_th(sc, event_reply);
		break;
	}
	case MPI3_EVENT_PREPARE_FOR_RESET:
	{
		/* The top-half handler acks this event itself if required. */
		mpi3mr_preparereset_evt_th(sc, event_reply);
		ack_req = 0;
		break;
	}
	case MPI3_EVENT_DEVICE_INFO_CHANGED:
	case MPI3_EVENT_LOG_DATA:
	{
		process_evt_bh = 1;
		break;
	}
	case MPI3_EVENT_ENERGY_PACK_CHANGE:
	{
		mpi3mr_energypackchg_evt_th(sc, event_reply);
		break;
	}
	case MPI3_EVENT_CABLE_MGMT:
	{
		mpi3mr_cablemgmt_evt_th(sc, event_reply);
		break;
	}

	case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE:
	case MPI3_EVENT_SAS_DISCOVERY:
	case MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
	case MPI3_EVENT_SAS_BROADCAST_PRIMITIVE:
	case MPI3_EVENT_PCIE_ENUMERATION:
		break;
	default:
		mpi3mr_dprint(sc, MPI3MR_INFO, "%s :Event 0x%02x is not handled by driver\n",
		    __func__, evt_type);
		break;
	}

	/*
	 * Queue a work item when bottom-half processing or an explicit ack
	 * is needed.  The event data is copied because the reply buffer is
	 * reposted to the firmware after this routine returns.
	 */
	if (process_evt_bh || ack_req) {
		fw_event = malloc(sizeof(struct mpi3mr_fw_event_work), M_MPI3MR,
		     M_ZERO|M_NOWAIT);

		if (!fw_event) {
			printf("%s: allocate failed for fw_event\n", __func__);
			return;
		}

		/* EventDataLength is in 4-byte (dword) units. */
		sz = le16toh(event_reply->EventDataLength) * 4;
		fw_event->event_data = malloc(sz, M_MPI3MR, M_ZERO|M_NOWAIT);

		if (!fw_event->event_data) {
			printf("%s: allocate failed for event_data\n", __func__);
			free(fw_event, M_MPI3MR);
			return;
		}

		bcopy(event_reply->EventData, fw_event->event_data, sz);
		fw_event->event = event_reply->Event;
		if ((event_reply->Event == MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST ||
		    event_reply->Event == MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST ||
		    event_reply->Event == MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE ) &&
		    sc->track_mapping_events)
			sc->pending_map_events++;

		/*
		 * Events should be processed after Port enable is completed.
		 */
		if ((event_reply->Event == MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST ||
		    event_reply->Event == MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST ) &&
		    !(sc->mpi3mr_flags & MPI3MR_FLAGS_PORT_ENABLE_DONE))
			mpi3mr_startup_increment(sc->cam_sc);

		fw_event->send_ack = ack_req;
		fw_event->event_context = le32toh(event_reply->EventContext);
		fw_event->event_data_size = sz;
		fw_event->process_event = process_evt_bh;

		/* Hand the work item to the firmware-event taskqueue. */
		mtx_lock(&sc->fwevt_lock);
		TAILQ_INSERT_TAIL(&sc->cam_sc->ev_queue, fw_event, ev_link);
		taskqueue_enqueue(sc->cam_sc->ev_tq, &sc->cam_sc->ev_task);
		mtx_unlock(&sc->fwevt_lock);

	}
out:
	return;
}
4213 
4214 static void mpi3mr_handle_events(struct mpi3mr_softc *sc, uintptr_t data,
4215     Mpi3DefaultReply_t *def_reply)
4216 {
4217 	Mpi3EventNotificationReply_t *event_reply =
4218 		(Mpi3EventNotificationReply_t *)def_reply;
4219 
4220 	sc->change_count = event_reply->IOCChangeCount;
4221 	mpi3mr_display_event_data(sc, event_reply);
4222 
4223 	mpi3mr_process_events(sc, data, event_reply);
4224 }
4225 
static void mpi3mr_process_admin_reply_desc(struct mpi3mr_softc *sc,
    Mpi3DefaultReplyDescriptor_t *reply_desc, U64 *reply_dma)
{
	U16 reply_desc_type, host_tag = 0, idx;
	U16 ioc_status = MPI3_IOCSTATUS_SUCCESS;
	U32 ioc_loginfo = 0;
	Mpi3StatusReplyDescriptor_t *status_desc;
	Mpi3AddressReplyDescriptor_t *addr_desc;
	Mpi3SuccessReplyDescriptor_t *success_desc;
	Mpi3DefaultReply_t *def_reply = NULL;
	struct mpi3mr_drvr_cmd *cmdptr = NULL;
	Mpi3SCSIIOReply_t *scsi_reply;
	U8 *sense_buf = NULL;

	/*
	 * Decode the descriptor by type, extracting the host tag, IOC
	 * status/loginfo and (for address replies) the full reply frame.
	 */
	*reply_dma = 0;
	reply_desc_type = reply_desc->ReplyFlags &
			    MPI3_REPLY_DESCRIPT_FLAGS_TYPE_MASK;
	switch (reply_desc_type) {
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_STATUS:
		status_desc = (Mpi3StatusReplyDescriptor_t *)reply_desc;
		host_tag = status_desc->HostTag;
		ioc_status = status_desc->IOCStatus;
		if (ioc_status &
		    MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
			ioc_loginfo = status_desc->IOCLogInfo;
		ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
		break;
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_ADDRESS_REPLY:
		/* The descriptor points at a DMA'd reply frame. */
		addr_desc = (Mpi3AddressReplyDescriptor_t *)reply_desc;
		*reply_dma = addr_desc->ReplyFrameAddress;
		def_reply = mpi3mr_get_reply_virt_addr(sc, *reply_dma);
		if (def_reply == NULL)
			goto out;
		host_tag = def_reply->HostTag;
		ioc_status = def_reply->IOCStatus;
		if (ioc_status &
		    MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
			ioc_loginfo = def_reply->IOCLogInfo;
		ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
		if (def_reply->Function == MPI3_FUNCTION_SCSI_IO) {
			scsi_reply = (Mpi3SCSIIOReply_t *)def_reply;
			sense_buf = mpi3mr_get_sensebuf_virt_addr(sc,
			    scsi_reply->SenseDataBufferAddress);
		}
		break;
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SUCCESS:
		success_desc = (Mpi3SuccessReplyDescriptor_t *)reply_desc;
		host_tag = success_desc->HostTag;
		break;
	default:
		break;
	}
	/* Map the fixed host tags to their dedicated command trackers. */
	switch (host_tag) {
	case MPI3MR_HOSTTAG_INITCMDS:
		cmdptr = &sc->init_cmds;
		break;
	case MPI3MR_HOSTTAG_IOCTLCMDS:
		cmdptr = &sc->ioctl_cmds;
		break;
	case MPI3MR_HOSTTAG_TMS:
		cmdptr = &sc->host_tm_cmds;
		wakeup((void *)&sc->tm_chan);
		break;
	case MPI3MR_HOSTTAG_PELABORT:
		cmdptr = &sc->pel_abort_cmd;
		break;
	case MPI3MR_HOSTTAG_PELWAIT:
		cmdptr = &sc->pel_cmds;
		break;
	case MPI3MR_HOSTTAG_INVALID:
		/* No tracker; unsolicited event notifications land here. */
		if (def_reply && def_reply->Function ==
		    MPI3_FUNCTION_EVENT_NOTIFICATION)
			mpi3mr_handle_events(sc, *reply_dma ,def_reply);
		/* FALLTHROUGH: nothing further to do for an invalid tag. */
	default:
		break;
	}

	/* Device-removal and event-ack tags map into their command arrays. */
	if (host_tag >= MPI3MR_HOSTTAG_DEVRMCMD_MIN &&
	    host_tag <= MPI3MR_HOSTTAG_DEVRMCMD_MAX ) {
		idx = host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
		cmdptr = &sc->dev_rmhs_cmds[idx];
	}

	if (host_tag >= MPI3MR_HOSTTAG_EVTACKCMD_MIN &&
	    host_tag <= MPI3MR_HOSTTAG_EVTACKCMD_MAX) {
		idx = host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN;
		cmdptr = &sc->evtack_cmds[idx];
	}

	/*
	 * Complete the tracker: stash status/reply/sense, then either wake
	 * a sleeping issuer or invoke the registered callback.
	 */
	if (cmdptr) {
		if (cmdptr->state & MPI3MR_CMD_PENDING) {
			cmdptr->state |= MPI3MR_CMD_COMPLETE;
			cmdptr->ioc_loginfo = ioc_loginfo;
			cmdptr->ioc_status = ioc_status;
			cmdptr->state &= ~MPI3MR_CMD_PENDING;
			if (def_reply) {
				cmdptr->state |= MPI3MR_CMD_REPLYVALID;
				memcpy((U8 *)cmdptr->reply, (U8 *)def_reply,
				    sc->reply_sz);
			}
			if (sense_buf && cmdptr->sensebuf) {
				cmdptr->is_senseprst = 1;
				memcpy(cmdptr->sensebuf, sense_buf,
				    MPI3MR_SENSEBUF_SZ);
			}
			if (cmdptr->is_waiting) {
				complete(&cmdptr->completion);
				cmdptr->is_waiting = 0;
			} else if (cmdptr->callback)
				cmdptr->callback(sc, cmdptr);
		}
	}
out:
	/* Return the sense buffer to the firmware's free pool. */
	if (sense_buf != NULL)
		mpi3mr_repost_sense_buf(sc,
		    scsi_reply->SenseDataBufferAddress);
	return;
}
4344 
4345 /*
4346  * mpi3mr_complete_admin_cmd:	ISR routine for admin commands
4347  * @sc:				Adapter's soft instance
4348  *
4349  * This function processes admin command completions.
4350  */
static int mpi3mr_complete_admin_cmd(struct mpi3mr_softc *sc)
{
	U32 exp_phase = sc->admin_reply_ephase;
	U32 adm_reply_ci = sc->admin_reply_ci;
	U32 num_adm_reply = 0;
	U64 reply_dma = 0;
	Mpi3DefaultReplyDescriptor_t *reply_desc;

	/*
	 * Single-consumer guard: only one context may drain the admin reply
	 * queue at a time; others bail out immediately.
	 */
	mtx_lock_spin(&sc->admin_reply_lock);
	if (sc->admin_in_use == false) {
		sc->admin_in_use = true;
		mtx_unlock_spin(&sc->admin_reply_lock);
	} else {
		mtx_unlock_spin(&sc->admin_reply_lock);
		return 0;
	}

	reply_desc = (Mpi3DefaultReplyDescriptor_t *)sc->admin_reply +
		adm_reply_ci;

	/*
	 * The phase bit distinguishes new descriptors from stale ones; a
	 * mismatch at the current CI means there is nothing to process.
	 */
	if ((reply_desc->ReplyFlags &
	     MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase) {
		mtx_lock_spin(&sc->admin_reply_lock);
		sc->admin_in_use = false;
		mtx_unlock_spin(&sc->admin_reply_lock);
		return 0;
	}

	/* Drain every descriptor whose phase bit matches the expected one. */
	do {
		sc->admin_req_ci = reply_desc->RequestQueueCI;
		mpi3mr_process_admin_reply_desc(sc, reply_desc, &reply_dma);
		if (reply_dma)
			mpi3mr_repost_reply_buf(sc, reply_dma);
		num_adm_reply++;
		/* Wrap the CI and flip the expected phase at the ring end. */
		if (++adm_reply_ci == sc->num_admin_replies) {
			adm_reply_ci = 0;
			exp_phase ^= 1;
		}
		reply_desc =
			(Mpi3DefaultReplyDescriptor_t *)sc->admin_reply +
			    adm_reply_ci;
		if ((reply_desc->ReplyFlags &
		     MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
			break;
	} while (1);

	/* Publish the new consumer index to the firmware and save state. */
	mpi3mr_regwrite(sc, MPI3_SYSIF_ADMIN_REPLY_Q_CI_OFFSET, adm_reply_ci);
	sc->admin_reply_ci = adm_reply_ci;
	sc->admin_reply_ephase = exp_phase;
	mtx_lock_spin(&sc->admin_reply_lock);
	sc->admin_in_use = false;
	mtx_unlock_spin(&sc->admin_reply_lock);
	return num_adm_reply;
}
4405 
static void
mpi3mr_cmd_done(struct mpi3mr_softc *sc, struct mpi3mr_cmd *cmd)
{
	/* Tear down DMA mappings before completing the CCB to CAM. */
	mpi3mr_unmap_request(sc, cmd);

	mtx_lock(&sc->mpi3mr_mtx);
	/* Cancel the per-command timeout if we still own the callout. */
	if (cmd->callout_owner) {
		callout_stop(&cmd->callout);
		cmd->callout_owner = false;
	}

	/* A dead controller can't service the device anymore. */
	if (sc->unrecoverable)
		mpi3mr_set_ccbstatus(cmd->ccb, CAM_DEV_NOT_THERE);

	/* Complete the CCB under the driver lock, then recycle the command. */
	xpt_done(cmd->ccb);
	cmd->ccb = NULL;
	mtx_unlock(&sc->mpi3mr_mtx);
	mpi3mr_release_command(cmd);
}
4425 
4426 void mpi3mr_process_op_reply_desc(struct mpi3mr_softc *sc,
4427     Mpi3DefaultReplyDescriptor_t *reply_desc, U64 *reply_dma)
4428 {
4429 	U16 reply_desc_type, host_tag = 0;
4430 	U16 ioc_status = MPI3_IOCSTATUS_SUCCESS;
4431 	U32 ioc_loginfo = 0;
4432 	Mpi3StatusReplyDescriptor_t *status_desc = NULL;
4433 	Mpi3AddressReplyDescriptor_t *addr_desc = NULL;
4434 	Mpi3SuccessReplyDescriptor_t *success_desc = NULL;
4435 	Mpi3SCSIIOReply_t *scsi_reply = NULL;
4436 	U8 *sense_buf = NULL;
4437 	U8 scsi_state = 0, scsi_status = 0, sense_state = 0;
4438 	U32 xfer_count = 0, sense_count =0, resp_data = 0;
4439 	struct mpi3mr_cmd *cm = NULL;
4440 	union ccb *ccb;
4441 	struct ccb_scsiio *csio;
4442 	struct mpi3mr_cam_softc *cam_sc;
4443 	U32 target_id;
4444 	U8 *scsi_cdb;
4445 	struct mpi3mr_target *target = NULL;
4446 	U32 ioc_pend_data_len = 0, tg_pend_data_len = 0, data_len_blks = 0;
4447 	struct mpi3mr_throttle_group_info *tg = NULL;
4448 	U8 throttle_enabled_dev = 0;
4449 	static int ratelimit;
4450 
4451 	*reply_dma = 0;
4452 	reply_desc_type = reply_desc->ReplyFlags &
4453 			    MPI3_REPLY_DESCRIPT_FLAGS_TYPE_MASK;
4454 	switch (reply_desc_type) {
4455 	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_STATUS:
4456 		status_desc = (Mpi3StatusReplyDescriptor_t *)reply_desc;
4457 		host_tag = status_desc->HostTag;
4458 		ioc_status = status_desc->IOCStatus;
4459 		if (ioc_status &
4460 		    MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
4461 			ioc_loginfo = status_desc->IOCLogInfo;
4462 		ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
4463 		break;
4464 	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_ADDRESS_REPLY:
4465 		addr_desc = (Mpi3AddressReplyDescriptor_t *)reply_desc;
4466 		*reply_dma = addr_desc->ReplyFrameAddress;
4467 		scsi_reply = mpi3mr_get_reply_virt_addr(sc,
4468 		    *reply_dma);
4469 		if (scsi_reply == NULL) {
4470 			mpi3mr_dprint(sc, MPI3MR_ERROR, "scsi_reply is NULL, "
4471 			    "this shouldn't happen, reply_desc: %p\n",
4472 			    reply_desc);
4473 			goto out;
4474 		}
4475 
4476 		host_tag = scsi_reply->HostTag;
4477 		ioc_status = scsi_reply->IOCStatus;
4478 		scsi_status = scsi_reply->SCSIStatus;
4479 		scsi_state = scsi_reply->SCSIState;
4480 		sense_state = (scsi_state & MPI3_SCSI_STATE_SENSE_MASK);
4481 		xfer_count = scsi_reply->TransferCount;
4482 		sense_count = scsi_reply->SenseCount;
4483 		resp_data = scsi_reply->ResponseData;
4484 		sense_buf = mpi3mr_get_sensebuf_virt_addr(sc,
4485 		    scsi_reply->SenseDataBufferAddress);
4486 		if (ioc_status &
4487 		    MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
4488 			ioc_loginfo = scsi_reply->IOCLogInfo;
4489 		ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
4490 		if (sense_state == MPI3_SCSI_STATE_SENSE_BUFF_Q_EMPTY)
4491 			mpi3mr_dprint(sc, MPI3MR_ERROR, "Ran out of sense buffers\n");
4492 
4493 		break;
4494 	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SUCCESS:
4495 		success_desc = (Mpi3SuccessReplyDescriptor_t *)reply_desc;
4496 		host_tag = success_desc->HostTag;
4497 
4498 	default:
4499 		break;
4500 	}
4501 
4502 	cm = sc->cmd_list[host_tag];
4503 
4504 	if (cm->state == MPI3MR_CMD_STATE_FREE)
4505 		goto out;
4506 
4507 	cam_sc = sc->cam_sc;
4508 	ccb = cm->ccb;
4509 	csio = &ccb->csio;
4510 	target_id = csio->ccb_h.target_id;
4511 
4512 	scsi_cdb = scsiio_cdb_ptr(csio);
4513 
4514 	target = mpi3mr_find_target_by_per_id(cam_sc, target_id);
4515 	if (sc->iot_enable) {
4516 		data_len_blks = csio->dxfer_len >> 9;
4517 
4518 		if (target) {
4519 			tg = target->throttle_group;
4520 			throttle_enabled_dev =
4521 				target->io_throttle_enabled;
4522 		}
4523 
4524 		if ((data_len_blks >= sc->io_throttle_data_length) &&
4525 		     throttle_enabled_dev) {
4526 			mpi3mr_atomic_sub(&sc->pend_large_data_sz, data_len_blks);
4527 			ioc_pend_data_len = mpi3mr_atomic_read(
4528 			    &sc->pend_large_data_sz);
4529 			if (tg) {
4530 				mpi3mr_atomic_sub(&tg->pend_large_data_sz,
4531 					data_len_blks);
4532 				tg_pend_data_len = mpi3mr_atomic_read(&tg->pend_large_data_sz);
4533 				if (ratelimit % 1000) {
4534 					mpi3mr_dprint(sc, MPI3MR_IOT,
4535 						"large vd_io completion persist_id(%d), handle(0x%04x), data_len(%d),"
4536 						"ioc_pending(%d), tg_pending(%d), ioc_low(%d), tg_low(%d)\n",
4537 						    target->per_id,
4538 						    target->dev_handle,
4539 						    data_len_blks, ioc_pend_data_len,
4540 						    tg_pend_data_len,
4541 						    sc->io_throttle_low,
4542 						    tg->low);
4543 					ratelimit++;
4544 				}
4545 				if (tg->io_divert  && ((ioc_pend_data_len <=
4546 				    sc->io_throttle_low) &&
4547 				    (tg_pend_data_len <= tg->low))) {
4548 					tg->io_divert = 0;
4549 					mpi3mr_dprint(sc, MPI3MR_IOT,
4550 						"VD: Coming out of divert perst_id(%d) tg_id(%d)\n",
4551 						target->per_id, tg->id);
4552 					mpi3mr_set_io_divert_for_all_vd_in_tg(
4553 					    sc, tg, 0);
4554 				}
4555 			} else {
4556 				if (ratelimit % 1000) {
4557 					mpi3mr_dprint(sc, MPI3MR_IOT,
4558 					    "large pd_io completion persist_id(%d), handle(0x%04x), data_len(%d), ioc_pending(%d), ioc_low(%d)\n",
4559 					    target->per_id,
4560 					    target->dev_handle,
4561 					    data_len_blks, ioc_pend_data_len,
4562 					    sc->io_throttle_low);
4563 					ratelimit++;
4564 				}
4565 
4566 				if (ioc_pend_data_len <= sc->io_throttle_low) {
4567 					target->io_divert = 0;
4568 					mpi3mr_dprint(sc, MPI3MR_IOT,
4569 						"PD: Coming out of divert perst_id(%d)\n",
4570 						target->per_id);
4571 				}
4572 			}
4573 
4574 			} else if (target->io_divert) {
4575 			ioc_pend_data_len = mpi3mr_atomic_read(&sc->pend_large_data_sz);
4576 			if (!tg) {
4577 				if (ratelimit % 1000) {
4578 					mpi3mr_dprint(sc, MPI3MR_IOT,
4579 					    "pd_io completion persist_id(%d), handle(0x%04x), data_len(%d), ioc_pending(%d), ioc_low(%d)\n",
4580 					    target->per_id,
4581 					    target->dev_handle,
4582 					    data_len_blks, ioc_pend_data_len,
4583 					    sc->io_throttle_low);
4584 					ratelimit++;
4585 				}
4586 
4587 				if ( ioc_pend_data_len <= sc->io_throttle_low) {
4588 					mpi3mr_dprint(sc, MPI3MR_IOT,
4589 						"PD: Coming out of divert perst_id(%d)\n",
4590 						target->per_id);
4591 					target->io_divert = 0;
4592 				}
4593 
4594 			} else if (ioc_pend_data_len <= sc->io_throttle_low) {
4595 				tg_pend_data_len = mpi3mr_atomic_read(&tg->pend_large_data_sz);
4596 				if (ratelimit % 1000) {
4597 					mpi3mr_dprint(sc, MPI3MR_IOT,
4598 						"vd_io completion persist_id(%d), handle(0x%04x), data_len(%d),"
4599 						"ioc_pending(%d), tg_pending(%d), ioc_low(%d), tg_low(%d)\n",
4600 						    target->per_id,
4601 						    target->dev_handle,
4602 						    data_len_blks, ioc_pend_data_len,
4603 						    tg_pend_data_len,
4604 						    sc->io_throttle_low,
4605 						    tg->low);
4606 					ratelimit++;
4607 				}
4608 				if (tg->io_divert  && (tg_pend_data_len <= tg->low)) {
4609 					tg->io_divert = 0;
4610 					mpi3mr_dprint(sc, MPI3MR_IOT,
4611 						"VD: Coming out of divert perst_id(%d) tg_id(%d)\n",
4612 						target->per_id, tg->id);
4613 					mpi3mr_set_io_divert_for_all_vd_in_tg(
4614 					    sc, tg, 0);
4615 				}
4616 
4617 			}
4618 		}
4619 	}
4620 
4621 	if (success_desc) {
4622 		mpi3mr_set_ccbstatus(ccb, CAM_REQ_CMP);
4623 		goto out_success;
4624 	}
4625 
4626 	if (ioc_status == MPI3_IOCSTATUS_SCSI_DATA_UNDERRUN
4627 	    && xfer_count == 0 && (scsi_status == MPI3_SCSI_STATUS_BUSY ||
4628 	    scsi_status == MPI3_SCSI_STATUS_RESERVATION_CONFLICT ||
4629 	    scsi_status == MPI3_SCSI_STATUS_TASK_SET_FULL))
4630 		ioc_status = MPI3_IOCSTATUS_SUCCESS;
4631 
4632 	if ((sense_state == MPI3_SCSI_STATE_SENSE_VALID) && sense_count
4633 	    && sense_buf) {
4634 		int sense_len, returned_sense_len;
4635 
4636 		returned_sense_len = min(le32toh(sense_count),
4637 		    sizeof(struct scsi_sense_data));
4638 		if (returned_sense_len < csio->sense_len)
4639 			csio->sense_resid = csio->sense_len -
4640 			    returned_sense_len;
4641 		else
4642 			csio->sense_resid = 0;
4643 
4644 		sense_len = min(returned_sense_len,
4645 		    csio->sense_len - csio->sense_resid);
4646 		bzero(&csio->sense_data, sizeof(csio->sense_data));
4647 		bcopy(sense_buf, &csio->sense_data, sense_len);
4648 		ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
4649 	}
4650 
4651 	switch (ioc_status) {
4652 	case MPI3_IOCSTATUS_BUSY:
4653 	case MPI3_IOCSTATUS_INSUFFICIENT_RESOURCES:
4654 		mpi3mr_set_ccbstatus(ccb, CAM_REQUEUE_REQ);
4655 		break;
4656 	case MPI3_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
4657 		/*
4658 		 * If devinfo is 0 this will be a volume.  In that case don't
4659 		 * tell CAM that the volume is not there.  We want volumes to
4660 		 * be enumerated until they are deleted/removed, not just
4661 		 * failed.
4662 		 */
4663 		if (cm->targ->devinfo == 0)
4664 			mpi3mr_set_ccbstatus(ccb, CAM_REQ_CMP);
4665 		else
4666 			mpi3mr_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
4667 		break;
4668 	case MPI3_IOCSTATUS_SCSI_TASK_TERMINATED:
4669 	case MPI3_IOCSTATUS_SCSI_IOC_TERMINATED:
4670 	case MPI3_IOCSTATUS_SCSI_EXT_TERMINATED:
4671 		mpi3mr_set_ccbstatus(ccb, CAM_SCSI_BUSY);
4672 		mpi3mr_dprint(sc, MPI3MR_TRACE,
4673 		    "func: %s line:%d tgt %u Hosttag %u loginfo %x\n",
4674 		    __func__, __LINE__,
4675 		    target_id, cm->hosttag,
4676 		    le32toh(scsi_reply->IOCLogInfo));
4677 		mpi3mr_dprint(sc, MPI3MR_TRACE,
4678 		    "SCSIStatus %x SCSIState %x xfercount %u\n",
4679 		    scsi_reply->SCSIStatus, scsi_reply->SCSIState,
4680 		    le32toh(xfer_count));
4681 		break;
4682 	case MPI3_IOCSTATUS_SCSI_DATA_OVERRUN:
4683 		/* resid is ignored for this condition */
4684 		csio->resid = 0;
4685 		mpi3mr_set_ccbstatus(ccb, CAM_DATA_RUN_ERR);
4686 		break;
4687 	case MPI3_IOCSTATUS_SCSI_DATA_UNDERRUN:
4688 		csio->resid = cm->length - le32toh(xfer_count);
4689 	case MPI3_IOCSTATUS_SCSI_RECOVERED_ERROR:
4690 	case MPI3_IOCSTATUS_SUCCESS:
4691 		if ((scsi_reply->IOCStatus & MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK) ==
4692 		    MPI3_IOCSTATUS_SCSI_RECOVERED_ERROR)
4693 			mpi3mr_dprint(sc, MPI3MR_XINFO, "func: %s line: %d recovered error\n",  __func__, __LINE__);
4694 
4695 		/* Completion failed at the transport level. */
4696 		if (scsi_reply->SCSIState & (MPI3_SCSI_STATE_NO_SCSI_STATUS |
4697 		    MPI3_SCSI_STATE_TERMINATED)) {
4698 			mpi3mr_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
4699 			break;
4700 		}
4701 
4702 		/* In a modern packetized environment, an autosense failure
4703 		 * implies that there's not much else that can be done to
4704 		 * recover the command.
4705 		 */
4706 		if (scsi_reply->SCSIState & MPI3_SCSI_STATE_SENSE_VALID) {
4707 			mpi3mr_set_ccbstatus(ccb, CAM_AUTOSENSE_FAIL);
4708 			break;
4709 		}
4710 
4711 		/*
4712 		 * Intentionally override the normal SCSI status reporting
4713 		 * for these two cases.  These are likely to happen in a
4714 		 * multi-initiator environment, and we want to make sure that
4715 		 * CAM retries these commands rather than fail them.
4716 		 */
4717 		if ((scsi_reply->SCSIStatus == MPI3_SCSI_STATUS_COMMAND_TERMINATED) ||
4718 		    (scsi_reply->SCSIStatus == MPI3_SCSI_STATUS_TASK_ABORTED)) {
4719 			mpi3mr_set_ccbstatus(ccb, CAM_REQ_ABORTED);
4720 			break;
4721 		}
4722 
4723 		/* Handle normal status and sense */
4724 		csio->scsi_status = scsi_reply->SCSIStatus;
4725 		if (scsi_reply->SCSIStatus == MPI3_SCSI_STATUS_GOOD)
4726 			mpi3mr_set_ccbstatus(ccb, CAM_REQ_CMP);
4727 		else
4728 			mpi3mr_set_ccbstatus(ccb, CAM_SCSI_STATUS_ERROR);
4729 
4730 		if (scsi_reply->SCSIState & MPI3_SCSI_STATE_SENSE_VALID) {
4731 			int sense_len, returned_sense_len;
4732 
4733 			returned_sense_len = min(le32toh(scsi_reply->SenseCount),
4734 			    sizeof(struct scsi_sense_data));
4735 			if (returned_sense_len < csio->sense_len)
4736 				csio->sense_resid = csio->sense_len -
4737 				    returned_sense_len;
4738 			else
4739 				csio->sense_resid = 0;
4740 
4741 			sense_len = min(returned_sense_len,
4742 			    csio->sense_len - csio->sense_resid);
4743 			bzero(&csio->sense_data, sizeof(csio->sense_data));
4744 			bcopy(cm->sense, &csio->sense_data, sense_len);
4745 			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
4746 		}
4747 
4748 		break;
4749 	case MPI3_IOCSTATUS_INVALID_SGL:
4750 		mpi3mr_set_ccbstatus(ccb, CAM_UNREC_HBA_ERROR);
4751 		break;
4752 	case MPI3_IOCSTATUS_EEDP_GUARD_ERROR:
4753 	case MPI3_IOCSTATUS_EEDP_REF_TAG_ERROR:
4754 	case MPI3_IOCSTATUS_EEDP_APP_TAG_ERROR:
4755 	case MPI3_IOCSTATUS_SCSI_PROTOCOL_ERROR:
4756 	case MPI3_IOCSTATUS_INVALID_FUNCTION:
4757 	case MPI3_IOCSTATUS_INTERNAL_ERROR:
4758 	case MPI3_IOCSTATUS_INVALID_FIELD:
4759 	case MPI3_IOCSTATUS_INVALID_STATE:
4760 	case MPI3_IOCSTATUS_SCSI_IO_DATA_ERROR:
4761 	case MPI3_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
4762 	case MPI3_IOCSTATUS_INSUFFICIENT_POWER:
4763 	case MPI3_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
4764 	default:
4765 		csio->resid = cm->length;
4766 		mpi3mr_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
4767 		break;
4768 	}
4769 
4770 out_success:
4771 	if (mpi3mr_get_ccbstatus(ccb) != CAM_REQ_CMP) {
4772 		ccb->ccb_h.status |= CAM_DEV_QFRZN;
4773 		xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
4774 	}
4775 
4776 	mpi3mr_atomic_dec(&cm->targ->outstanding);
4777 	mpi3mr_cmd_done(sc, cm);
4778 	mpi3mr_dprint(sc, MPI3MR_TRACE, "Completion IO path :"
4779 		" cdb[0]: %x targetid: 0x%x SMID: %x ioc_status: 0x%x ioc_loginfo: 0x%x scsi_status: 0x%x "
4780 		"scsi_state: 0x%x response_data: 0x%x\n", scsi_cdb[0], target_id, host_tag,
4781 		ioc_status, ioc_loginfo, scsi_status, scsi_state, resp_data);
4782 	mpi3mr_atomic_dec(&sc->fw_outstanding);
4783 out:
4784 
4785 	if (sense_buf)
4786 		mpi3mr_repost_sense_buf(sc,
4787 		    scsi_reply->SenseDataBufferAddress);
4788 	return;
4789 }
4790 
4791 /*
4792  * mpi3mr_complete_io_cmd:	ISR routine for IO commands
4793  * @sc:				Adapter's soft instance
4794  * @irq_ctx:			Driver's internal per IRQ structure
4795  *
4796  * This function processes IO command completions.
4797  */
int mpi3mr_complete_io_cmd(struct mpi3mr_softc *sc,
    struct mpi3mr_irq_context *irq_ctx)
{
	struct mpi3mr_op_reply_queue *op_reply_q = irq_ctx->op_reply_q;
	U32 exp_phase = op_reply_q->ephase;	/* phase bit we expect on valid descriptors */
	U32 reply_ci = op_reply_q->ci;		/* local consumer index cursor */
	U32 num_op_replies = 0;
	U64 reply_dma = 0;
	Mpi3DefaultReplyDescriptor_t *reply_desc;
	U16 req_qid = 0;

	/*
	 * Claim exclusive processing rights on this reply queue via the
	 * in_use flag.  If another context is already draining it, bail
	 * out; that context will pick up any descriptors we would have
	 * seen.  The spin lock only protects the flag handoff.
	 */
	mtx_lock_spin(&op_reply_q->q_lock);
	if (op_reply_q->in_use == false) {
		op_reply_q->in_use = true;
		mtx_unlock_spin(&op_reply_q->q_lock);
	} else {
		mtx_unlock_spin(&op_reply_q->q_lock);
		return 0;
	}

	reply_desc = (Mpi3DefaultReplyDescriptor_t *)op_reply_q->q_base + reply_ci;
	mpi3mr_dprint(sc, MPI3MR_TRACE, "[QID:%d]:reply_desc: (%pa) reply_ci: %x"
		" reply_desc->ReplyFlags: 0x%x\n"
		"reply_q_base_phys: %#016jx reply_q_base: (%pa) exp_phase: %x\n",
		op_reply_q->qid, reply_desc, reply_ci, reply_desc->ReplyFlags, op_reply_q->q_base_phys,
		op_reply_q->q_base, exp_phase);

	/*
	 * A descriptor whose phase bit does not match the expected phase
	 * has not been written by the controller yet — nothing to do.
	 * Also back off for an uninitialized (qid 0) queue.
	 */
	if (((reply_desc->ReplyFlags &
	     MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase) || !op_reply_q->qid) {
		mtx_lock_spin(&op_reply_q->q_lock);
		op_reply_q->in_use = false;
		mtx_unlock_spin(&op_reply_q->q_lock);
		return 0;
	}

	/* Drain every descriptor carrying the expected phase bit. */
	do {
		req_qid = reply_desc->RequestQueueID;
		/* Propagate the controller's request-queue consumer index. */
		sc->op_req_q[req_qid - 1].ci =
		    reply_desc->RequestQueueCI;

		mpi3mr_process_op_reply_desc(sc, reply_desc, &reply_dma);
		mpi3mr_atomic_dec(&op_reply_q->pend_ios);
		/* Return the reply frame (if any) to the free pool. */
		if (reply_dma)
			mpi3mr_repost_reply_buf(sc, reply_dma);
		num_op_replies++;
		/* On wraparound the controller flips the phase bit. */
		if (++reply_ci == op_reply_q->num_replies) {
			reply_ci = 0;
			exp_phase ^= 1;
		}
		reply_desc =
		    (Mpi3DefaultReplyDescriptor_t *)op_reply_q->q_base + reply_ci;
		/* Stop at the first descriptor not yet posted by the IOC. */
		if ((reply_desc->ReplyFlags &
		     MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
			break;
	} while (1);


	/* Publish the new consumer index to the controller register. */
	mpi3mr_regwrite(sc, MPI3_SYSIF_OPER_REPLY_Q_N_CI_OFFSET(op_reply_q->qid), reply_ci);
	op_reply_q->ci = reply_ci;
	op_reply_q->ephase = exp_phase;
	/* Release ownership of the queue. */
	mtx_lock_spin(&op_reply_q->q_lock);
	op_reply_q->in_use = false;
	mtx_unlock_spin(&op_reply_q->q_lock);
	return num_op_replies;
}
4863 
4864 /*
4865  * mpi3mr_isr:			Primary ISR function
4866  * privdata:			Driver's internal per IRQ structure
4867  *
4868  * This is driver's primary ISR function which is being called whenever any admin/IO
4869  * command completion.
4870  */
4871 void mpi3mr_isr(void *privdata)
4872 {
4873 	struct mpi3mr_irq_context *irq_ctx = (struct mpi3mr_irq_context *)privdata;
4874 	struct mpi3mr_softc *sc = irq_ctx->sc;
4875 	U16 msi_idx;
4876 
4877 	if (!irq_ctx)
4878 		return;
4879 
4880 	msi_idx = irq_ctx->msix_index;
4881 
4882 	if (!sc->intr_enabled)
4883 		return;
4884 
4885 	if (!msi_idx)
4886 		mpi3mr_complete_admin_cmd(sc);
4887 
4888 	if (irq_ctx->op_reply_q && irq_ctx->op_reply_q->qid) {
4889 		mpi3mr_complete_io_cmd(sc, irq_ctx);
4890 	}
4891 }
4892 
4893 /*
4894  * mpi3mr_alloc_requests - Allocates host commands
4895  * @sc: Adapter reference
4896  *
4897  * This function allocates controller supported host commands
4898  *
4899  * Return: 0 on success and proper error codes on failure
4900  */
4901 int
4902 mpi3mr_alloc_requests(struct mpi3mr_softc *sc)
4903 {
4904 	struct mpi3mr_cmd *cmd;
4905 	int i, j, nsegs, ret;
4906 
4907 	nsegs = MPI3MR_SG_DEPTH;
4908 	ret = bus_dma_tag_create( sc->mpi3mr_parent_dmat,    /* parent */
4909 				1, 0,			/* algnmnt, boundary */
4910 				BUS_SPACE_MAXADDR,	/* lowaddr */
4911 				BUS_SPACE_MAXADDR,	/* highaddr */
4912 				NULL, NULL,		/* filter, filterarg */
4913 				MAXPHYS,/* maxsize */
4914                                 nsegs,			/* nsegments */
4915 				MAXPHYS,/* maxsegsize */
4916                                 BUS_DMA_ALLOCNOW,	/* flags */
4917                                 busdma_lock_mutex,	/* lockfunc */
4918 				&sc->io_lock,	/* lockarg */
4919 				&sc->buffer_dmat);
4920 	if (ret) {
4921 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate buffer DMA tag ret: %d\n", ret);
4922 		return (ENOMEM);
4923         }
4924 
4925 	/*
4926 	 * sc->cmd_list is an array of struct mpi3mr_cmd pointers.
4927 	 * Allocate the dynamic array first and then allocate individual
4928 	 * commands.
4929 	 */
4930 	sc->cmd_list = malloc(sizeof(struct mpi3mr_cmd *) * sc->max_host_ios,
4931 	    M_MPI3MR, M_NOWAIT | M_ZERO);
4932 
4933 	if (!sc->cmd_list) {
4934 		device_printf(sc->mpi3mr_dev, "Cannot alloc memory for mpt_cmd_list.\n");
4935 		return (ENOMEM);
4936 	}
4937 
4938 	for (i = 0; i < sc->max_host_ios; i++) {
4939 		sc->cmd_list[i] = malloc(sizeof(struct mpi3mr_cmd),
4940 		    M_MPI3MR, M_NOWAIT | M_ZERO);
4941 		if (!sc->cmd_list[i]) {
4942 			for (j = 0; j < i; j++)
4943 				free(sc->cmd_list[j], M_MPI3MR);
4944 			free(sc->cmd_list, M_MPI3MR);
4945 			sc->cmd_list = NULL;
4946 			return (ENOMEM);
4947 		}
4948 	}
4949 
4950 	for (i = 1; i < sc->max_host_ios; i++) {
4951 		cmd = sc->cmd_list[i];
4952 		cmd->hosttag = i;
4953 		cmd->sc = sc;
4954 		cmd->state = MPI3MR_CMD_STATE_BUSY;
4955 		callout_init_mtx(&cmd->callout, &sc->mpi3mr_mtx, 0);
4956 		cmd->ccb = NULL;
4957 		TAILQ_INSERT_TAIL(&(sc->cmd_list_head), cmd, next);
4958 		if (bus_dmamap_create(sc->buffer_dmat, 0, &cmd->dmamap))
4959 			return ENOMEM;
4960 	}
4961 	return (0);
4962 }
4963 
4964 /*
4965  * mpi3mr_get_command:		Get a coomand structure from free command pool
4966  * @sc:				Adapter soft instance
4967  * Return:			MPT command reference
4968  *
4969  * This function returns an MPT command to the caller.
4970  */
4971 struct mpi3mr_cmd *
4972 mpi3mr_get_command(struct mpi3mr_softc *sc)
4973 {
4974 	struct mpi3mr_cmd *cmd = NULL;
4975 
4976 	mtx_lock(&sc->cmd_pool_lock);
4977 	if (!TAILQ_EMPTY(&sc->cmd_list_head)) {
4978 		cmd = TAILQ_FIRST(&sc->cmd_list_head);
4979 		TAILQ_REMOVE(&sc->cmd_list_head, cmd, next);
4980 	} else {
4981 		goto out;
4982 	}
4983 
4984 	mpi3mr_dprint(sc, MPI3MR_TRACE, "Get command SMID: 0x%x\n", cmd->hosttag);
4985 
4986 	memset((uint8_t *)&cmd->io_request, 0, MPI3MR_AREQ_FRAME_SZ);
4987 	cmd->data_dir = 0;
4988 	cmd->ccb = NULL;
4989 	cmd->targ = NULL;
4990 	cmd->max_segs = 0;
4991 	cmd->lun = 0;
4992 	cmd->state = MPI3MR_CMD_STATE_BUSY;
4993 	cmd->data = NULL;
4994 	cmd->length = 0;
4995 	cmd->out_len = 0;
4996 out:
4997 	mtx_unlock(&sc->cmd_pool_lock);
4998 	return cmd;
4999 }
5000 
5001 /*
5002  * mpi3mr_release_command:	Return a cmd to free command pool
5003  * input:			Command packet for return to free command pool
5004  *
5005  * This function returns an MPT command to the free command list.
5006  */
5007 void
5008 mpi3mr_release_command(struct mpi3mr_cmd *cmd)
5009 {
5010 	struct mpi3mr_softc *sc = cmd->sc;
5011 
5012 	mtx_lock(&sc->cmd_pool_lock);
5013 	TAILQ_INSERT_HEAD(&(sc->cmd_list_head), cmd, next);
5014 	cmd->state = MPI3MR_CMD_STATE_FREE;
5015 	cmd->req_qidx = 0;
5016 	mpi3mr_dprint(sc, MPI3MR_TRACE, "Release command SMID: 0x%x\n", cmd->hosttag);
5017 	mtx_unlock(&sc->cmd_pool_lock);
5018 
5019 	return;
5020 }
5021 
5022  /**
5023  * mpi3mr_free_ioctl_dma_memory - free memory for ioctl dma
5024  * @sc: Adapter instance reference
5025  *
5026  * Free the DMA memory allocated for IOCTL handling purpose.
5027  *
5028  * Return: None
5029  */
5030 static void mpi3mr_free_ioctl_dma_memory(struct mpi3mr_softc *sc)
5031 {
5032 	U16 i;
5033 	struct dma_memory_desc *mem_desc;
5034 
5035 	for (i=0; i<MPI3MR_NUM_IOCTL_SGE; i++) {
5036 		mem_desc = &sc->ioctl_sge[i];
5037 		if (mem_desc->addr && mem_desc->dma_addr) {
5038 			bus_dmamap_unload(mem_desc->tag, mem_desc->dmamap);
5039 			bus_dmamem_free(mem_desc->tag, mem_desc->addr, mem_desc->dmamap);
5040 			mem_desc->addr = NULL;
5041 			if (mem_desc->tag != NULL)
5042 				bus_dma_tag_destroy(mem_desc->tag);
5043 		}
5044 	}
5045 
5046 	mem_desc = &sc->ioctl_chain_sge;
5047 	if (mem_desc->addr && mem_desc->dma_addr) {
5048 		bus_dmamap_unload(mem_desc->tag, mem_desc->dmamap);
5049 		bus_dmamem_free(mem_desc->tag, mem_desc->addr, mem_desc->dmamap);
5050 		mem_desc->addr = NULL;
5051 		if (mem_desc->tag != NULL)
5052 			bus_dma_tag_destroy(mem_desc->tag);
5053 	}
5054 
5055 	mem_desc = &sc->ioctl_resp_sge;
5056 	if (mem_desc->addr && mem_desc->dma_addr) {
5057 		bus_dmamap_unload(mem_desc->tag, mem_desc->dmamap);
5058 		bus_dmamem_free(mem_desc->tag, mem_desc->addr, mem_desc->dmamap);
5059 		mem_desc->addr = NULL;
5060 		if (mem_desc->tag != NULL)
5061 			bus_dma_tag_destroy(mem_desc->tag);
5062 	}
5063 
5064 	sc->ioctl_sges_allocated = false;
5065 }
5066 
5067 /**
5068  * mpi3mr_alloc_ioctl_dma_memory - Alloc memory for ioctl dma
5069  * @sc: Adapter instance reference
5070  *
5071  * This function allocates dmaable memory required to handle the
5072  * application issued MPI3 IOCTL requests.
5073  *
5074  * Return: None
5075  */
5076 void mpi3mr_alloc_ioctl_dma_memory(struct mpi3mr_softc *sc)
5077 {
5078 	struct dma_memory_desc *mem_desc;
5079 	U16 i;
5080 
5081 	for (i=0; i<MPI3MR_NUM_IOCTL_SGE; i++) {
5082 		mem_desc = &sc->ioctl_sge[i];
5083 		mem_desc->size = MPI3MR_IOCTL_SGE_SIZE;
5084 
5085 		if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,    /* parent */
5086 					4, 0,			/* algnmnt, boundary */
5087 					BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
5088 					BUS_SPACE_MAXADDR,	/* highaddr */
5089 					NULL, NULL,		/* filter, filterarg */
5090 					mem_desc->size,		/* maxsize */
5091 					1,			/* nsegments */
5092 					mem_desc->size,		/* maxsegsize */
5093 					0,			/* flags */
5094 					NULL, NULL,		/* lockfunc, lockarg */
5095 					&mem_desc->tag)) {
5096 			mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate request DMA tag\n");
5097 			goto out_failed;
5098 		}
5099 
5100 		if (bus_dmamem_alloc(mem_desc->tag, (void **)&mem_desc->addr,
5101 		    BUS_DMA_NOWAIT, &mem_desc->dmamap)) {
5102 			mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate replies memory\n");
5103 			goto out_failed;
5104 		}
5105 		bzero(mem_desc->addr, mem_desc->size);
5106 		bus_dmamap_load(mem_desc->tag, mem_desc->dmamap, mem_desc->addr, mem_desc->size,
5107 		    mpi3mr_memaddr_cb, &mem_desc->dma_addr, 0);
5108 
5109 		if (!mem_desc->addr)
5110 			goto out_failed;
5111 	}
5112 
5113 	mem_desc = &sc->ioctl_chain_sge;
5114 	mem_desc->size = MPI3MR_4K_PGSZ;
5115 	if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,    /* parent */
5116 				4, 0,			/* algnmnt, boundary */
5117 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
5118 				BUS_SPACE_MAXADDR,	/* highaddr */
5119 				NULL, NULL,		/* filter, filterarg */
5120 				mem_desc->size,		/* maxsize */
5121 				1,			/* nsegments */
5122 				mem_desc->size,		/* maxsegsize */
5123 				0,			/* flags */
5124 				NULL, NULL,		/* lockfunc, lockarg */
5125 				&mem_desc->tag)) {
5126 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate request DMA tag\n");
5127 		goto out_failed;
5128 	}
5129 
5130 	if (bus_dmamem_alloc(mem_desc->tag, (void **)&mem_desc->addr,
5131 	    BUS_DMA_NOWAIT, &mem_desc->dmamap)) {
5132 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate replies memory\n");
5133 		goto out_failed;
5134 	}
5135 	bzero(mem_desc->addr, mem_desc->size);
5136 	bus_dmamap_load(mem_desc->tag, mem_desc->dmamap, mem_desc->addr, mem_desc->size,
5137 	    mpi3mr_memaddr_cb, &mem_desc->dma_addr, 0);
5138 
5139 	if (!mem_desc->addr)
5140 		goto out_failed;
5141 
5142 	mem_desc = &sc->ioctl_resp_sge;
5143 	mem_desc->size = MPI3MR_4K_PGSZ;
5144 	if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,    /* parent */
5145 				4, 0,			/* algnmnt, boundary */
5146 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
5147 				BUS_SPACE_MAXADDR,	/* highaddr */
5148 				NULL, NULL,		/* filter, filterarg */
5149 				mem_desc->size,		/* maxsize */
5150 				1,			/* nsegments */
5151 				mem_desc->size,		/* maxsegsize */
5152 				0,			/* flags */
5153 				NULL, NULL,		/* lockfunc, lockarg */
5154 				&mem_desc->tag)) {
5155 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate request DMA tag\n");
5156 		goto out_failed;
5157 	}
5158 
5159 	if (bus_dmamem_alloc(mem_desc->tag, (void **)&mem_desc->addr,
5160 	    BUS_DMA_NOWAIT, &mem_desc->dmamap)) {
5161 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate replies memory\n");
5162 		goto out_failed;
5163 	}
5164 	bzero(mem_desc->addr, mem_desc->size);
5165 	bus_dmamap_load(mem_desc->tag, mem_desc->dmamap, mem_desc->addr, mem_desc->size,
5166 	    mpi3mr_memaddr_cb, &mem_desc->dma_addr, 0);
5167 
5168 	if (!mem_desc->addr)
5169 		goto out_failed;
5170 
5171 	sc->ioctl_sges_allocated = true;
5172 
5173 	return;
5174 out_failed:
5175 	printf("cannot allocate DMA memory for the mpt commands"
5176 	    "  from the applications, application interface for MPT command is disabled\n");
5177 	mpi3mr_free_ioctl_dma_memory(sc);
5178 }
5179 
5180 void
5181 mpi3mr_destory_mtx(struct mpi3mr_softc *sc)
5182 {
5183 	int i;
5184 	struct mpi3mr_op_req_queue *op_req_q;
5185 	struct mpi3mr_op_reply_queue *op_reply_q;
5186 
5187 	if (sc->admin_reply) {
5188 		if (mtx_initialized(&sc->admin_reply_lock))
5189 			mtx_destroy(&sc->admin_reply_lock);
5190 	}
5191 
5192 	if (sc->op_reply_q) {
5193 		for(i = 0; i < sc->num_queues; i++) {
5194 			op_reply_q = sc->op_reply_q + i;
5195 			if (mtx_initialized(&op_reply_q->q_lock))
5196 				mtx_destroy(&op_reply_q->q_lock);
5197 		}
5198 	}
5199 
5200 	if (sc->op_req_q) {
5201 		for(i = 0; i < sc->num_queues; i++) {
5202 			op_req_q = sc->op_req_q + i;
5203 			if (mtx_initialized(&op_req_q->q_lock))
5204 				mtx_destroy(&op_req_q->q_lock);
5205 		}
5206 	}
5207 
5208 	if (mtx_initialized(&sc->init_cmds.completion.lock))
5209 		mtx_destroy(&sc->init_cmds.completion.lock);
5210 
5211 	if (mtx_initialized(&sc->ioctl_cmds.completion.lock))
5212 		mtx_destroy(&sc->ioctl_cmds.completion.lock);
5213 
5214 	if (mtx_initialized(&sc->host_tm_cmds.completion.lock))
5215 		mtx_destroy(&sc->host_tm_cmds.completion.lock);
5216 
5217 	for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
5218 		if (mtx_initialized(&sc->dev_rmhs_cmds[i].completion.lock))
5219 			mtx_destroy(&sc->dev_rmhs_cmds[i].completion.lock);
5220 	}
5221 
5222 	if (mtx_initialized(&sc->reset_mutex))
5223 		mtx_destroy(&sc->reset_mutex);
5224 
5225 	if (mtx_initialized(&sc->target_lock))
5226 		mtx_destroy(&sc->target_lock);
5227 
5228 	if (mtx_initialized(&sc->fwevt_lock))
5229 		mtx_destroy(&sc->fwevt_lock);
5230 
5231 	if (mtx_initialized(&sc->cmd_pool_lock))
5232 		mtx_destroy(&sc->cmd_pool_lock);
5233 
5234 	if (mtx_initialized(&sc->reply_free_q_lock))
5235 		mtx_destroy(&sc->reply_free_q_lock);
5236 
5237 	if (mtx_initialized(&sc->sense_buf_q_lock))
5238 		mtx_destroy(&sc->sense_buf_q_lock);
5239 
5240 	if (mtx_initialized(&sc->chain_buf_lock))
5241 		mtx_destroy(&sc->chain_buf_lock);
5242 
5243 	if (mtx_initialized(&sc->admin_req_lock))
5244 		mtx_destroy(&sc->admin_req_lock);
5245 
5246 	if (mtx_initialized(&sc->mpi3mr_mtx))
5247 		mtx_destroy(&sc->mpi3mr_mtx);
5248 }
5249 
5250 /**
5251  * mpi3mr_free_mem - Freeup adapter level data structures
5252  * @sc: Adapter reference
5253  *
5254  * Return: Nothing.
5255  */
5256 void
5257 mpi3mr_free_mem(struct mpi3mr_softc *sc)
5258 {
5259 	int i;
5260 	struct mpi3mr_op_req_queue *op_req_q;
5261 	struct mpi3mr_op_reply_queue *op_reply_q;
5262 	struct mpi3mr_irq_context *irq_ctx;
5263 
5264 	if (sc->cmd_list) {
5265 		for (i = 0; i < sc->max_host_ios; i++) {
5266 			free(sc->cmd_list[i], M_MPI3MR);
5267 		}
5268 		free(sc->cmd_list, M_MPI3MR);
5269 		sc->cmd_list = NULL;
5270 	}
5271 
5272 	if (sc->pel_seq_number && sc->pel_seq_number_dma) {
5273 		bus_dmamap_unload(sc->pel_seq_num_dmatag, sc->pel_seq_num_dmamap);
5274 		bus_dmamem_free(sc->pel_seq_num_dmatag, sc->pel_seq_number, sc->pel_seq_num_dmamap);
5275 		sc->pel_seq_number = NULL;
5276 		if (sc->pel_seq_num_dmatag != NULL)
5277 			bus_dma_tag_destroy(sc->pel_seq_num_dmatag);
5278 	}
5279 
5280 	if (sc->throttle_groups) {
5281 		free(sc->throttle_groups, M_MPI3MR);
5282 		sc->throttle_groups = NULL;
5283 	}
5284 
5285 	/* Free up operational queues*/
5286 	if (sc->op_req_q) {
5287 		for (i = 0; i < sc->num_queues; i++) {
5288 			op_req_q = sc->op_req_q + i;
5289 			if (op_req_q->q_base && op_req_q->q_base_phys) {
5290 				bus_dmamap_unload(op_req_q->q_base_tag, op_req_q->q_base_dmamap);
5291 				bus_dmamem_free(op_req_q->q_base_tag, op_req_q->q_base, op_req_q->q_base_dmamap);
5292 				op_req_q->q_base = NULL;
5293 				if (op_req_q->q_base_tag != NULL)
5294 					bus_dma_tag_destroy(op_req_q->q_base_tag);
5295 			}
5296 		}
5297 		free(sc->op_req_q, M_MPI3MR);
5298 		sc->op_req_q = NULL;
5299 	}
5300 
5301 	if (sc->op_reply_q) {
5302 		for (i = 0; i < sc->num_queues; i++) {
5303 			op_reply_q = sc->op_reply_q + i;
5304 			if (op_reply_q->q_base && op_reply_q->q_base_phys) {
5305 				bus_dmamap_unload(op_reply_q->q_base_tag, op_reply_q->q_base_dmamap);
5306 				bus_dmamem_free(op_reply_q->q_base_tag, op_reply_q->q_base, op_reply_q->q_base_dmamap);
5307 				op_reply_q->q_base = NULL;
5308 				if (op_reply_q->q_base_tag != NULL)
5309 					bus_dma_tag_destroy(op_reply_q->q_base_tag);
5310 			}
5311 		}
5312 		free(sc->op_reply_q, M_MPI3MR);
5313 		sc->op_reply_q = NULL;
5314 	}
5315 
5316 	/* Free up chain buffers*/
5317 	if (sc->chain_sgl_list) {
5318 		for (i = 0; i < sc->chain_buf_count; i++) {
5319 			if (sc->chain_sgl_list[i].buf && sc->chain_sgl_list[i].buf_phys) {
5320 				bus_dmamap_unload(sc->chain_sgl_list_tag, sc->chain_sgl_list[i].buf_dmamap);
5321 				bus_dmamem_free(sc->chain_sgl_list_tag, sc->chain_sgl_list[i].buf,
5322 						sc->chain_sgl_list[i].buf_dmamap);
5323 				sc->chain_sgl_list[i].buf = NULL;
5324 			}
5325 		}
5326 		if (sc->chain_sgl_list_tag != NULL)
5327 			bus_dma_tag_destroy(sc->chain_sgl_list_tag);
5328 		free(sc->chain_sgl_list, M_MPI3MR);
5329 		sc->chain_sgl_list = NULL;
5330 	}
5331 
5332 	if (sc->chain_bitmap) {
5333 		free(sc->chain_bitmap, M_MPI3MR);
5334 		sc->chain_bitmap = NULL;
5335 	}
5336 
5337 	for (i = 0; i < sc->msix_count; i++) {
5338 		irq_ctx = sc->irq_ctx + i;
5339 		if (irq_ctx)
5340 			irq_ctx->op_reply_q = NULL;
5341 	}
5342 
5343 	/* Free reply_buf_tag */
5344 	if (sc->reply_buf && sc->reply_buf_phys) {
5345 		bus_dmamap_unload(sc->reply_buf_tag, sc->reply_buf_dmamap);
5346 		bus_dmamem_free(sc->reply_buf_tag, sc->reply_buf,
5347 				sc->reply_buf_dmamap);
5348 		sc->reply_buf = NULL;
5349 		if (sc->reply_buf_tag != NULL)
5350 			bus_dma_tag_destroy(sc->reply_buf_tag);
5351 	}
5352 
5353 	/* Free reply_free_q_tag */
5354 	if (sc->reply_free_q && sc->reply_free_q_phys) {
5355 		bus_dmamap_unload(sc->reply_free_q_tag, sc->reply_free_q_dmamap);
5356 		bus_dmamem_free(sc->reply_free_q_tag, sc->reply_free_q,
5357 				sc->reply_free_q_dmamap);
5358 		sc->reply_free_q = NULL;
5359 		if (sc->reply_free_q_tag != NULL)
5360 			bus_dma_tag_destroy(sc->reply_free_q_tag);
5361 	}
5362 
5363 	/* Free sense_buf_tag */
5364 	if (sc->sense_buf && sc->sense_buf_phys) {
5365 		bus_dmamap_unload(sc->sense_buf_tag, sc->sense_buf_dmamap);
5366 		bus_dmamem_free(sc->sense_buf_tag, sc->sense_buf,
5367 				sc->sense_buf_dmamap);
5368 		sc->sense_buf = NULL;
5369 		if (sc->sense_buf_tag != NULL)
5370 			bus_dma_tag_destroy(sc->sense_buf_tag);
5371 	}
5372 
5373 	/* Free sense_buf_q_tag */
5374 	if (sc->sense_buf_q && sc->sense_buf_q_phys) {
5375 		bus_dmamap_unload(sc->sense_buf_q_tag, sc->sense_buf_q_dmamap);
5376 		bus_dmamem_free(sc->sense_buf_q_tag, sc->sense_buf_q,
5377 				sc->sense_buf_q_dmamap);
5378 		sc->sense_buf_q = NULL;
5379 		if (sc->sense_buf_q_tag != NULL)
5380 			bus_dma_tag_destroy(sc->sense_buf_q_tag);
5381 	}
5382 
5383 	/* Free up internal(non-IO) commands*/
5384 	if (sc->init_cmds.reply) {
5385 		free(sc->init_cmds.reply, M_MPI3MR);
5386 		sc->init_cmds.reply = NULL;
5387 	}
5388 
5389 	if (sc->ioctl_cmds.reply) {
5390 		free(sc->ioctl_cmds.reply, M_MPI3MR);
5391 		sc->ioctl_cmds.reply = NULL;
5392 	}
5393 
5394 	if (sc->pel_cmds.reply) {
5395 		free(sc->pel_cmds.reply, M_MPI3MR);
5396 		sc->pel_cmds.reply = NULL;
5397 	}
5398 
5399 	if (sc->pel_abort_cmd.reply) {
5400 		free(sc->pel_abort_cmd.reply, M_MPI3MR);
5401 		sc->pel_abort_cmd.reply = NULL;
5402 	}
5403 
5404 	if (sc->host_tm_cmds.reply) {
5405 		free(sc->host_tm_cmds.reply, M_MPI3MR);
5406 		sc->host_tm_cmds.reply = NULL;
5407 	}
5408 
5409 	if (sc->log_data_buffer) {
5410 		free(sc->log_data_buffer, M_MPI3MR);
5411 		sc->log_data_buffer = NULL;
5412 	}
5413 
5414 	for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
5415 		if (sc->dev_rmhs_cmds[i].reply) {
5416 			free(sc->dev_rmhs_cmds[i].reply, M_MPI3MR);
5417 			sc->dev_rmhs_cmds[i].reply = NULL;
5418 		}
5419 	}
5420 
5421 	for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) {
5422 		if (sc->evtack_cmds[i].reply) {
5423 			free(sc->evtack_cmds[i].reply, M_MPI3MR);
5424 			sc->evtack_cmds[i].reply = NULL;
5425 		}
5426 	}
5427 
5428 	if (sc->removepend_bitmap) {
5429 		free(sc->removepend_bitmap, M_MPI3MR);
5430 		sc->removepend_bitmap = NULL;
5431 	}
5432 
5433 	if (sc->devrem_bitmap) {
5434 		free(sc->devrem_bitmap, M_MPI3MR);
5435 		sc->devrem_bitmap = NULL;
5436 	}
5437 
5438 	if (sc->evtack_cmds_bitmap) {
5439 		free(sc->evtack_cmds_bitmap, M_MPI3MR);
5440 		sc->evtack_cmds_bitmap = NULL;
5441 	}
5442 
5443 	/* Free Admin reply*/
5444 	if (sc->admin_reply && sc->admin_reply_phys) {
5445 		bus_dmamap_unload(sc->admin_reply_tag, sc->admin_reply_dmamap);
5446 		bus_dmamem_free(sc->admin_reply_tag, sc->admin_reply,
5447 				sc->admin_reply_dmamap);
5448 		sc->admin_reply = NULL;
5449 		if (sc->admin_reply_tag != NULL)
5450 			bus_dma_tag_destroy(sc->admin_reply_tag);
5451 	}
5452 
5453 	/* Free Admin request*/
5454 	if (sc->admin_req && sc->admin_req_phys) {
5455 		bus_dmamap_unload(sc->admin_req_tag, sc->admin_req_dmamap);
5456 		bus_dmamem_free(sc->admin_req_tag, sc->admin_req,
5457 				sc->admin_req_dmamap);
5458 		sc->admin_req = NULL;
5459 		if (sc->admin_req_tag != NULL)
5460 			bus_dma_tag_destroy(sc->admin_req_tag);
5461 	}
5462 	mpi3mr_free_ioctl_dma_memory(sc);
5463 
5464 }
5465 
5466 /**
5467  * mpi3mr_drv_cmd_comp_reset - Flush a internal driver command
5468  * @sc: Adapter instance reference
5469  * @cmdptr: Internal command tracker
5470  *
5471  * Complete an internal driver commands with state indicating it
5472  * is completed due to reset.
5473  *
5474  * Return: Nothing.
5475  */
5476 static inline void mpi3mr_drv_cmd_comp_reset(struct mpi3mr_softc *sc,
5477 	struct mpi3mr_drvr_cmd *cmdptr)
5478 {
5479 	if (cmdptr->state & MPI3MR_CMD_PENDING) {
5480 		cmdptr->state |= MPI3MR_CMD_RESET;
5481 		cmdptr->state &= ~MPI3MR_CMD_PENDING;
5482 		if (cmdptr->is_waiting) {
5483 			complete(&cmdptr->completion);
5484 			cmdptr->is_waiting = 0;
5485 		} else if (cmdptr->callback)
5486 			cmdptr->callback(sc, cmdptr);
5487 	}
5488 }
5489 
5490 /**
5491  * mpi3mr_flush_drv_cmds - Flush internal driver commands
5492  * @sc: Adapter instance reference
5493  *
5494  * Flush all internal driver commands post reset
5495  *
5496  * Return: Nothing.
5497  */
5498 static void mpi3mr_flush_drv_cmds(struct mpi3mr_softc *sc)
5499 {
5500 	int i = 0;
5501 	struct mpi3mr_drvr_cmd *cmdptr;
5502 
5503 	cmdptr = &sc->init_cmds;
5504 	mpi3mr_drv_cmd_comp_reset(sc, cmdptr);
5505 
5506 	cmdptr = &sc->ioctl_cmds;
5507 	mpi3mr_drv_cmd_comp_reset(sc, cmdptr);
5508 
5509 	cmdptr = &sc->host_tm_cmds;
5510 	mpi3mr_drv_cmd_comp_reset(sc, cmdptr);
5511 
5512 	for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
5513 		cmdptr = &sc->dev_rmhs_cmds[i];
5514 		mpi3mr_drv_cmd_comp_reset(sc, cmdptr);
5515 	}
5516 
5517 	for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) {
5518 		cmdptr = &sc->evtack_cmds[i];
5519 		mpi3mr_drv_cmd_comp_reset(sc, cmdptr);
5520 	}
5521 
5522 	cmdptr = &sc->pel_cmds;
5523 	mpi3mr_drv_cmd_comp_reset(sc, cmdptr);
5524 
5525 	cmdptr = &sc->pel_abort_cmd;
5526 	mpi3mr_drv_cmd_comp_reset(sc, cmdptr);
5527 }
5528 
5529 
5530 /**
5531  * mpi3mr_memset_buffers - memset memory for a controller
5532  * @sc: Adapter instance reference
5533  *
5534  * clear all the memory allocated for a controller, typically
5535  * called post reset to reuse the memory allocated during the
5536  * controller init.
5537  *
5538  * Return: Nothing.
5539  */
5540 static void mpi3mr_memset_buffers(struct mpi3mr_softc *sc)
5541 {
5542 	U16 i;
5543 	struct mpi3mr_throttle_group_info *tg;
5544 
5545 	memset(sc->admin_req, 0, sc->admin_req_q_sz);
5546 	memset(sc->admin_reply, 0, sc->admin_reply_q_sz);
5547 
5548 	memset(sc->init_cmds.reply, 0, sc->reply_sz);
5549 	memset(sc->ioctl_cmds.reply, 0, sc->reply_sz);
5550 	memset(sc->host_tm_cmds.reply, 0, sc->reply_sz);
5551 	memset(sc->pel_cmds.reply, 0, sc->reply_sz);
5552 	memset(sc->pel_abort_cmd.reply, 0, sc->reply_sz);
5553 	for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++)
5554 		memset(sc->dev_rmhs_cmds[i].reply, 0, sc->reply_sz);
5555 	for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++)
5556 		memset(sc->evtack_cmds[i].reply, 0, sc->reply_sz);
5557 	memset(sc->removepend_bitmap, 0, sc->dev_handle_bitmap_sz);
5558 	memset(sc->devrem_bitmap, 0, sc->devrem_bitmap_sz);
5559 	memset(sc->evtack_cmds_bitmap, 0, sc->evtack_cmds_bitmap_sz);
5560 
5561 	for (i = 0; i < sc->num_queues; i++) {
5562 		sc->op_reply_q[i].qid = 0;
5563 		sc->op_reply_q[i].ci = 0;
5564 		sc->op_reply_q[i].num_replies = 0;
5565 		sc->op_reply_q[i].ephase = 0;
5566 		mpi3mr_atomic_set(&sc->op_reply_q[i].pend_ios, 0);
5567 		memset(sc->op_reply_q[i].q_base, 0, sc->op_reply_q[i].qsz);
5568 
5569 		sc->op_req_q[i].ci = 0;
5570 		sc->op_req_q[i].pi = 0;
5571 		sc->op_req_q[i].num_reqs = 0;
5572 		sc->op_req_q[i].qid = 0;
5573 		sc->op_req_q[i].reply_qid = 0;
5574 		memset(sc->op_req_q[i].q_base, 0, sc->op_req_q[i].qsz);
5575 	}
5576 
5577 	mpi3mr_atomic_set(&sc->pend_large_data_sz, 0);
5578 	if (sc->throttle_groups) {
5579 		tg = sc->throttle_groups;
5580 		for (i = 0; i < sc->num_io_throttle_group; i++, tg++) {
5581 			tg->id = 0;
5582 			tg->fw_qd = 0;
5583 			tg->modified_qd = 0;
5584 			tg->io_divert= 0;
5585 			tg->high = 0;
5586 			tg->low = 0;
5587 			mpi3mr_atomic_set(&tg->pend_large_data_sz, 0);
5588 		}
5589  	}
5590 }
5591 
5592 /**
5593  * mpi3mr_invalidate_devhandles -Invalidate device handles
5594  * @sc: Adapter instance reference
5595  *
5596  * Invalidate the device handles in the target device structures
5597  * . Called post reset prior to reinitializing the controller.
5598  *
5599  * Return: Nothing.
5600  */
5601 static void mpi3mr_invalidate_devhandles(struct mpi3mr_softc *sc)
5602 {
5603 	struct mpi3mr_target *target = NULL;
5604 
5605 	mtx_lock_spin(&sc->target_lock);
5606 	TAILQ_FOREACH(target, &sc->cam_sc->tgt_list, tgt_next) {
5607 		if (target) {
5608 			target->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
5609 			target->io_throttle_enabled = 0;
5610 			target->io_divert = 0;
5611 			target->throttle_group = NULL;
5612 		}
5613 	}
5614 	mtx_unlock_spin(&sc->target_lock);
5615 }
5616 
5617 /**
5618  * mpi3mr_rfresh_tgtdevs - Refresh target device exposure
5619  * @sc: Adapter instance reference
5620  *
5621  * This is executed post controller reset to identify any
5622  * missing devices during reset and remove from the upper layers
5623  * or expose any newly detected device to the upper layers.
5624  *
5625  * Return: Nothing.
5626  */
5627 
5628 static void mpi3mr_rfresh_tgtdevs(struct mpi3mr_softc *sc)
5629 {
5630 	struct mpi3mr_target *target = NULL;
5631 	struct mpi3mr_target *target_temp = NULL;
5632 
5633 	TAILQ_FOREACH_SAFE(target, &sc->cam_sc->tgt_list, tgt_next, target_temp) {
5634 		if (target->dev_handle == MPI3MR_INVALID_DEV_HANDLE) {
5635 			if (target->exposed_to_os)
5636 				mpi3mr_remove_device_from_os(sc, target->dev_handle);
5637 			mpi3mr_remove_device_from_list(sc, target, true);
5638 		}
5639 	}
5640 
5641 	TAILQ_FOREACH(target, &sc->cam_sc->tgt_list, tgt_next) {
5642 		if ((target->dev_handle != MPI3MR_INVALID_DEV_HANDLE) &&
5643 		    !target->is_hidden && !target->exposed_to_os) {
5644 			mpi3mr_add_device(sc, target->per_id);
5645 		}
5646 	}
5647 
5648 }
5649 
5650 static void mpi3mr_flush_io(struct mpi3mr_softc *sc)
5651 {
5652 	int i;
5653 	struct mpi3mr_cmd *cmd = NULL;
5654 	union ccb *ccb = NULL;
5655 
5656 	for (i = 0; i < sc->max_host_ios; i++) {
5657 		cmd = sc->cmd_list[i];
5658 
5659 		if (cmd && cmd->ccb) {
5660 			if (cmd->callout_owner) {
5661 				ccb = (union ccb *)(cmd->ccb);
5662 				ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
5663 				mpi3mr_cmd_done(sc, cmd);
5664 			} else {
5665 				cmd->ccb = NULL;
5666 				mpi3mr_release_command(cmd);
5667 			}
5668 		}
5669 	}
5670 }
5671 /**
5672  * mpi3mr_clear_reset_history - Clear reset history
5673  * @sc: Adapter instance reference
5674  *
5675  * Write the reset history bit in IOC Status to clear the bit,
5676  * if it is already set.
5677  *
5678  * Return: Nothing.
5679  */
5680 static inline void mpi3mr_clear_reset_history(struct mpi3mr_softc *sc)
5681 {
5682 	U32 ioc_status;
5683 
5684 	ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);
5685 	if (ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)
5686 		mpi3mr_regwrite(sc, MPI3_SYSIF_IOC_STATUS_OFFSET, ioc_status);
5687 }
5688 
5689 /**
5690  * mpi3mr_set_diagsave - Set diag save bit for snapdump
5691  * @sc: Adapter reference
5692  *
5693  * Set diag save bit in IOC configuration register to enable
5694  * snapdump.
5695  *
5696  * Return: Nothing.
5697  */
5698 static inline void mpi3mr_set_diagsave(struct mpi3mr_softc *sc)
5699 {
5700 	U32 ioc_config;
5701 
5702 	ioc_config =
5703 	    mpi3mr_regread(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET);
5704 	ioc_config |= MPI3_SYSIF_IOC_CONFIG_DIAG_SAVE;
5705 	mpi3mr_regwrite(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET, ioc_config);
5706 }
5707 
5708 /**
5709  * mpi3mr_issue_reset - Issue reset to the controller
5710  * @sc: Adapter reference
5711  * @reset_type: Reset type
5712  * @reset_reason: Reset reason code
5713  *
5714  * Unlock the host diagnostic registers and write the specific
5715  * reset type to that, wait for reset acknowledgement from the
5716  * controller, if the reset is not successful retry for the
5717  * predefined number of times.
5718  *
5719  * Return: 0 on success, non-zero on failure.
5720  */
5721 static int mpi3mr_issue_reset(struct mpi3mr_softc *sc, U16 reset_type,
5722 	U32 reset_reason)
5723 {
5724 	int retval = -1;
5725 	U8 unlock_retry_count = 0;
5726 	U32 host_diagnostic, ioc_status, ioc_config;
5727 	U32 timeout = MPI3MR_RESET_ACK_TIMEOUT * 10;
5728 
5729 	if ((reset_type != MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET) &&
5730 	    (reset_type != MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT))
5731 		return retval;
5732 	if (sc->unrecoverable)
5733 		return retval;
5734 
5735 	if (reset_reason == MPI3MR_RESET_FROM_FIRMWARE) {
5736 		retval = 0;
5737 		return retval;
5738 	}
5739 
5740 	mpi3mr_dprint(sc, MPI3MR_INFO, "%s reset due to %s(0x%x)\n",
5741 	    mpi3mr_reset_type_name(reset_type),
5742 	    mpi3mr_reset_rc_name(reset_reason), reset_reason);
5743 
5744 	mpi3mr_clear_reset_history(sc);
5745 	do {
5746 		mpi3mr_dprint(sc, MPI3MR_INFO,
5747 		    "Write magic sequence to unlock host diag register (retry=%d)\n",
5748 		    ++unlock_retry_count);
5749 		if (unlock_retry_count >= MPI3MR_HOSTDIAG_UNLOCK_RETRY_COUNT) {
5750 			mpi3mr_dprint(sc, MPI3MR_ERROR,
5751 			    "%s reset failed! due to host diag register unlock failure"
5752 			    "host_diagnostic(0x%08x)\n", mpi3mr_reset_type_name(reset_type),
5753 			    host_diagnostic);
5754 			sc->unrecoverable = 1;
5755 			return retval;
5756 		}
5757 
5758 		mpi3mr_regwrite(sc, MPI3_SYSIF_WRITE_SEQUENCE_OFFSET,
5759 			MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_FLUSH);
5760 		mpi3mr_regwrite(sc, MPI3_SYSIF_WRITE_SEQUENCE_OFFSET,
5761 			MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_1ST);
5762 		mpi3mr_regwrite(sc, MPI3_SYSIF_WRITE_SEQUENCE_OFFSET,
5763 			MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND);
5764 		mpi3mr_regwrite(sc, MPI3_SYSIF_WRITE_SEQUENCE_OFFSET,
5765 			MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_3RD);
5766 		mpi3mr_regwrite(sc, MPI3_SYSIF_WRITE_SEQUENCE_OFFSET,
5767 			MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_4TH);
5768 		mpi3mr_regwrite(sc, MPI3_SYSIF_WRITE_SEQUENCE_OFFSET,
5769 			MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_5TH);
5770 		mpi3mr_regwrite(sc, MPI3_SYSIF_WRITE_SEQUENCE_OFFSET,
5771 			MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_6TH);
5772 
5773 		DELAY(1000); /* delay in usec */
5774 		host_diagnostic = mpi3mr_regread(sc, MPI3_SYSIF_HOST_DIAG_OFFSET);
5775 		mpi3mr_dprint(sc, MPI3MR_INFO,
5776 		    "wrote magic sequence: retry_count(%d), host_diagnostic(0x%08x)\n",
5777 		    unlock_retry_count, host_diagnostic);
5778 	} while (!(host_diagnostic & MPI3_SYSIF_HOST_DIAG_DIAG_WRITE_ENABLE));
5779 
5780 	mpi3mr_regwrite(sc, MPI3_SYSIF_SCRATCHPAD0_OFFSET, reset_reason);
5781 	mpi3mr_regwrite(sc, MPI3_SYSIF_HOST_DIAG_OFFSET, host_diagnostic | reset_type);
5782 
5783 	if (reset_type == MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET) {
5784 		do {
5785 			ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);
5786 			if (ioc_status &
5787 			    MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) {
5788 				ioc_config =
5789 				    mpi3mr_regread(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET);
5790 				if (mpi3mr_soft_reset_success(ioc_status,
5791 				    ioc_config)) {
5792 					mpi3mr_clear_reset_history(sc);
5793 					retval = 0;
5794 					break;
5795 				}
5796 			}
5797 			DELAY(100 * 1000);
5798 		} while (--timeout);
5799 	} else if (reset_type == MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT) {
5800 		do {
5801 			ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);
5802 			if (mpi3mr_diagfault_success(sc, ioc_status)) {
5803 				retval = 0;
5804 				break;
5805 			}
5806 			DELAY(100 * 1000);
5807 		} while (--timeout);
5808 	}
5809 
5810 	mpi3mr_regwrite(sc, MPI3_SYSIF_WRITE_SEQUENCE_OFFSET,
5811 		MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND);
5812 
5813 	ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);
5814 	ioc_config = mpi3mr_regread(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET);
5815 
5816 	mpi3mr_dprint(sc, MPI3MR_INFO,
5817 	    "IOC Status/Config after %s reset is (0x%x)/(0x%x)\n",
5818 	    !retval ? "successful":"failed", ioc_status,
5819 	    ioc_config);
5820 
5821 	if (retval)
5822 		sc->unrecoverable = 1;
5823 
5824 	return retval;
5825 }
5826 
5827 inline void mpi3mr_cleanup_event_taskq(struct mpi3mr_softc *sc)
5828 {
5829 	mtx_lock(&sc->fwevt_lock);
5830 	taskqueue_drain(sc->cam_sc->ev_tq, &sc->cam_sc->ev_task);
5831 	taskqueue_block(sc->cam_sc->ev_tq);
5832 	mtx_unlock(&sc->fwevt_lock);
5833 	return;
5834 }
5835 
5836 /**
5837  * mpi3mr_soft_reset_handler - Reset the controller
5838  * @sc: Adapter instance reference
5839  * @reset_reason: Reset reason code
5840  * @snapdump: snapdump enable/disbale bit
5841  *
5842  * This is an handler for recovering controller by issuing soft
5843  * reset or diag fault reset. This is a blocking function and
5844  * when one reset is executed if any other resets they will be
5845  * blocked. All IOCTLs/IO will be blocked during the reset. If
5846  * controller reset is successful then the controller will be
5847  * reinitalized, otherwise the controller will be marked as not
5848  * recoverable
5849  *
5850  * Return: 0 on success, non-zero on failure.
5851  */
int mpi3mr_soft_reset_handler(struct mpi3mr_softc *sc,
	U32 reset_reason, bool snapdump)
{
	int retval = 0, i = 0;
	enum mpi3mr_iocstate ioc_state;

	mpi3mr_dprint(sc, MPI3MR_INFO, "soft reset invoked: reason code: %s\n",
	    mpi3mr_reset_rc_name(reset_reason));

	/* IOCTL-requested resets take snapdump only when explicitly asked. */
	if ((reset_reason == MPI3MR_RESET_FROM_IOCTL) &&
	     (sc->reset.ioctl_reset_snapdump != true))
		snapdump = false;

	mpi3mr_dprint(sc, MPI3MR_INFO,
	    "soft_reset_handler: wait if diag save is in progress\n");
	/* Busy-wait (1s steps) until any in-flight diag save finishes. */
	while (sc->diagsave_timeout)
		DELAY(1000 * 1000);

	ioc_state = mpi3mr_get_iocstate(sc);
	if (ioc_state == MRIOC_STATE_UNRECOVERABLE) {
		/* Record the failed outcome in the reset bookkeeping and bail. */
		mpi3mr_dprint(sc, MPI3MR_ERROR, "controller is in unrecoverable state, exit\n");
		sc->reset.type = MPI3MR_NO_RESET;
		sc->reset.reason = MPI3MR_DEFAULT_RESET_REASON;
		sc->reset.status = -1;
		sc->reset.ioctl_reset_snapdump = false;
		return -1;
	}

	/* Only one reset may run at a time. */
	if (sc->reset_in_progress) {
		mpi3mr_dprint(sc, MPI3MR_INFO, "reset is already in progress, exit\n");
		return -1;
	}

	/* Pause IOs, drain and block the event taskqueue */
	xpt_freeze_simq(sc->cam_sc->sim, 1);

	mpi3mr_cleanup_event_taskq(sc);

	sc->reset_in_progress = 1;
	sc->block_ioctls = 1;

	/*
	 * Wait (up to PEND_IOCTLS_COMP_WAIT_TIME seconds, 1s per step) for
	 * outstanding IOCTLs to complete; give up early if the IOC faults.
	 */
	while (mpi3mr_atomic_read(&sc->pend_ioctls) && (i < PEND_IOCTLS_COMP_WAIT_TIME)) {
		ioc_state = mpi3mr_get_iocstate(sc);
		if (ioc_state == MRIOC_STATE_FAULT)
			break;
		i++;
		if (!(i % 5)) {
			mpi3mr_dprint(sc, MPI3MR_INFO,
			    "[%2ds]waiting for IOCTL to be finished from %s\n", i, __func__);
		}
		DELAY(1000 * 1000);
	}

	/*
	 * For host-initiated resets (no snapdump, not fault-driven), mask
	 * all event notifications so the firmware stops posting events
	 * while the reset proceeds.
	 */
	if ((!snapdump) && (reset_reason != MPI3MR_RESET_FROM_FAULT_WATCH) &&
	    (reset_reason != MPI3MR_RESET_FROM_FIRMWARE) &&
	    (reset_reason != MPI3MR_RESET_FROM_CIACTIV_FAULT)) {

		mpi3mr_dprint(sc, MPI3MR_INFO, "Turn off events prior to reset\n");

		for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
			sc->event_masks[i] = -1;
		mpi3mr_issue_event_notification(sc);
	}

	mpi3mr_disable_interrupts(sc);

	/* Capture a snapdump before resetting, if requested. */
	if (snapdump)
		mpi3mr_trigger_snapdump(sc, reset_reason);

	retval = mpi3mr_issue_reset(sc,
	    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET, reset_reason);
	if (retval) {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to issue soft reset to the ioc\n");
		goto out;
	}

	/* Flush driver/IO state and clear reusable buffers for reinit. */
	mpi3mr_flush_drv_cmds(sc);
	mpi3mr_flush_io(sc);
	mpi3mr_invalidate_devhandles(sc);
	mpi3mr_memset_buffers(sc);

	if (sc->prepare_for_reset) {
		sc->prepare_for_reset = 0;
		sc->prepare_for_reset_timeout_counter = 0;
	}

	/* Bring the controller back up using the pre-allocated resources. */
	retval = mpi3mr_initialize_ioc(sc, MPI3MR_INIT_TYPE_RESET);
	if (retval) {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "reinit after soft reset failed: reason %d\n",
		    reset_reason);
		goto out;
	}

	/* 10 second settle delay after reinitialization. */
	DELAY((1000 * 1000) * 10);
out:
	if (!retval) {
		/* Success: unblock IOCTLs, refresh devices, restart PEL. */
		sc->diagsave_timeout = 0;
		sc->reset_in_progress = 0;
		mpi3mr_rfresh_tgtdevs(sc);
		sc->ts_update_counter = 0;
		sc->block_ioctls = 0;
		sc->pel_abort_requested = 0;
		if (sc->pel_wait_pend) {
			sc->pel_cmds.retry_count = 0;
			mpi3mr_issue_pel_wait(sc, &sc->pel_cmds);
			mpi3mr_app_send_aen(sc);
		}
	} else {
		/*
		 * Failure: put the IOC into the diag fault state (for later
		 * analysis) and mark the controller unrecoverable.
		 */
		mpi3mr_issue_reset(sc,
		    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, reset_reason);
		sc->unrecoverable = 1;
		sc->reset_in_progress = 0;
	}

	mpi3mr_dprint(sc, MPI3MR_INFO, "Soft Reset: %s\n", ((retval == 0) ? "SUCCESS" : "FAILED"));

	/* Resume event handling and IO regardless of outcome. */
	taskqueue_unblock(sc->cam_sc->ev_tq);
	xpt_release_simq(sc->cam_sc->sim, 1);

	sc->reset.type = MPI3MR_NO_RESET;
	sc->reset.reason = MPI3MR_DEFAULT_RESET_REASON;
	sc->reset.status = retval;
	sc->reset.ioctl_reset_snapdump = false;

	return retval;
}
5978 
5979 /**
5980  * mpi3mr_issue_ioc_shutdown - shutdown controller
5981  * @sc: Adapter instance reference
5982  *
5983  * Send shutodwn notification to the controller and wait for the
5984  * shutdown_timeout for it to be completed.
5985  *
5986  * Return: Nothing.
5987  */
static void mpi3mr_issue_ioc_shutdown(struct mpi3mr_softc *sc)
{
	U32 ioc_config, ioc_status;
	U8 retval = 1, retry = 0;
	U32 timeout = MPI3MR_DEFAULT_SHUTDOWN_TIME * 10;	/* polled in 100ms steps */

	mpi3mr_dprint(sc, MPI3MR_INFO, "sending shutdown notification\n");
	if (sc->unrecoverable) {
		mpi3mr_dprint(sc, MPI3MR_ERROR,
		    "controller is unrecoverable, shutdown not issued\n");
		return;
	}
	/* Skip if the IOC is already processing a shutdown. */
	ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);
	if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
	    == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_IN_PROGRESS) {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "shutdown already in progress\n");
		return;
	}

	/* Request a normal shutdown via the IOC configuration register. */
	ioc_config = mpi3mr_regread(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET);
	ioc_config |= MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_NORMAL;
	ioc_config |= MPI3_SYSIF_IOC_CONFIG_DEVICE_SHUTDOWN_SEND_REQ;

	mpi3mr_regwrite(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET, ioc_config);

	/* Prefer the firmware-reported shutdown timeout when available. */
	if (sc->facts.shutdown_timeout)
		timeout = sc->facts.shutdown_timeout * 10;

	do {
		ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);
		if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
		    == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_COMPLETE) {
			retval = 0;
			break;
		}

		if (sc->unrecoverable)
			break;

		/*
		 * If the IOC faults mid-shutdown, soft-reset it and re-issue
		 * the shutdown request, up to MPI3MR_MAX_SHUTDOWN_RETRY_COUNT
		 * attempts.
		 */
		if ((ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) {
			mpi3mr_print_fault_info(sc);

			if (retry >= MPI3MR_MAX_SHUTDOWN_RETRY_COUNT)
				break;

			if (mpi3mr_issue_reset(sc,
			    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET,
			    MPI3MR_RESET_FROM_CTLR_CLEANUP))
				break;

			ioc_config = mpi3mr_regread(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET);
			ioc_config |= MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_NORMAL;
			ioc_config |= MPI3_SYSIF_IOC_CONFIG_DEVICE_SHUTDOWN_SEND_REQ;

			mpi3mr_regwrite(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET, ioc_config);

			/* Restart the countdown for the new attempt. */
			if (sc->facts.shutdown_timeout)
				timeout = sc->facts.shutdown_timeout * 10;

			retry++;
		}

                DELAY(100 * 1000);

	} while (--timeout);

	ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);
	ioc_config = mpi3mr_regread(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET);

	if (retval) {
		if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
		    == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_IN_PROGRESS)
			mpi3mr_dprint(sc, MPI3MR_ERROR,
			    "shutdown still in progress after timeout\n");
	}

	mpi3mr_dprint(sc, MPI3MR_INFO,
	    "ioc_status/ioc_config after %s shutdown is (0x%x)/(0x%x)\n",
	    (!retval)?"successful":"failed", ioc_status,
	    ioc_config);
}
6069 
6070 /**
6071  * mpi3mr_cleanup_ioc - Cleanup controller
6072  * @sc: Adapter instance reference
6073 
6074  * controller cleanup handler, Message unit reset or soft reset
6075  * and shutdown notification is issued to the controller.
6076  *
6077  * Return: Nothing.
6078  */
6079 void mpi3mr_cleanup_ioc(struct mpi3mr_softc *sc)
6080 {
6081 	enum mpi3mr_iocstate ioc_state;
6082 
6083 	mpi3mr_dprint(sc, MPI3MR_INFO, "cleaning up the controller\n");
6084 	mpi3mr_disable_interrupts(sc);
6085 
6086 	ioc_state = mpi3mr_get_iocstate(sc);
6087 
6088 	if ((!sc->unrecoverable) && (!sc->reset_in_progress) &&
6089 	    (ioc_state == MRIOC_STATE_READY)) {
6090 		if (mpi3mr_mur_ioc(sc,
6091 		    MPI3MR_RESET_FROM_CTLR_CLEANUP))
6092 			mpi3mr_issue_reset(sc,
6093 			    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET,
6094 			    MPI3MR_RESET_FROM_MUR_FAILURE);
6095 		mpi3mr_issue_ioc_shutdown(sc);
6096 	}
6097 
6098 	mpi3mr_dprint(sc, MPI3MR_INFO, "controller cleanup completed\n");
6099 }
6100