xref: /freebsd/sys/dev/mpi3mr/mpi3mr.c (revision 4034d7061a112b78d60cdb581c2d71f7cfa9f74e)
1 /*
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2016-2023, Broadcom Inc. All rights reserved.
5  * Support: <fbsd-storage-driver.pdl@broadcom.com>
6  *
7  * Authors: Sumit Saxena <sumit.saxena@broadcom.com>
8  *	    Chandrakanth Patil <chandrakanth.patil@broadcom.com>
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions are
12  * met:
13  *
14  * 1. Redistributions of source code must retain the above copyright notice,
15  *    this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright notice,
17  *    this list of conditions and the following disclaimer in the documentation and/or other
18  *    materials provided with the distribution.
19  * 3. Neither the name of the Broadcom Inc. nor the names of its contributors
20  *    may be used to endorse or promote products derived from this software without
21  *    specific prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
24  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
27  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
30  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
31  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
32  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
33  * POSSIBILITY OF SUCH DAMAGE.
34  *
35  * The views and conclusions contained in the software and documentation are
36  * those of the authors and should not be interpreted as representing
37  * official policies,either expressed or implied, of the FreeBSD Project.
38  *
39  * Mail to: Broadcom Inc 1320 Ridder Park Dr, San Jose, CA 95131
40  *
41  * Broadcom Inc. (Broadcom) MPI3MR Adapter FreeBSD
42  */
43 
44 #include <sys/types.h>
45 #include <sys/param.h>
46 #include <sys/systm.h>
47 #include <sys/kernel.h>
48 #include <sys/module.h>
49 #include <sys/bus.h>
50 #include <sys/conf.h>
51 #include <sys/malloc.h>
52 #include <sys/sysctl.h>
53 #include <sys/uio.h>
54 
55 #include <machine/bus.h>
56 #include <machine/resource.h>
57 #include <sys/rman.h>
58 
59 #include <dev/pci/pcireg.h>
60 #include <dev/pci/pcivar.h>
61 #include <dev/pci/pci_private.h>
62 
63 #include <cam/cam.h>
64 #include <cam/cam_ccb.h>
65 #include <cam/cam_debug.h>
66 #include <cam/cam_sim.h>
67 #include <cam/cam_xpt_sim.h>
68 #include <cam/cam_xpt_periph.h>
69 #include <cam/cam_periph.h>
70 #include <cam/scsi/scsi_all.h>
71 #include <cam/scsi/scsi_message.h>
72 #include <cam/scsi/smp_all.h>
73 #include <sys/queue.h>
74 #include <sys/kthread.h>
75 #include "mpi3mr.h"
76 #include "mpi3mr_cam.h"
77 #include "mpi3mr_app.h"
78 
79 static void mpi3mr_repost_reply_buf(struct mpi3mr_softc *sc,
80 	U64 reply_dma);
81 static int mpi3mr_complete_admin_cmd(struct mpi3mr_softc *sc);
82 static void mpi3mr_port_enable_complete(struct mpi3mr_softc *sc,
83 	struct mpi3mr_drvr_cmd *drvrcmd);
84 static void mpi3mr_flush_io(struct mpi3mr_softc *sc);
85 static int mpi3mr_issue_reset(struct mpi3mr_softc *sc, U16 reset_type,
86 	U32 reset_reason);
87 static void mpi3mr_dev_rmhs_send_tm(struct mpi3mr_softc *sc, U16 handle,
88 	struct mpi3mr_drvr_cmd *cmdparam, U8 iou_rc);
89 static void mpi3mr_dev_rmhs_complete_iou(struct mpi3mr_softc *sc,
90 	struct mpi3mr_drvr_cmd *drv_cmd);
91 static void mpi3mr_dev_rmhs_complete_tm(struct mpi3mr_softc *sc,
92 	struct mpi3mr_drvr_cmd *drv_cmd);
93 static void mpi3mr_send_evt_ack(struct mpi3mr_softc *sc, U8 event,
94 	struct mpi3mr_drvr_cmd *cmdparam, U32 event_ctx);
95 static void mpi3mr_print_fault_info(struct mpi3mr_softc *sc);
96 static inline void mpi3mr_set_diagsave(struct mpi3mr_softc *sc);
97 static const char *mpi3mr_reset_rc_name(enum mpi3mr_reset_reason reason_code);
98 
99 void
100 mpi3mr_hexdump(void *buf, int sz, int format)
101 {
102         int i;
103         U32 *buf_loc = (U32 *)buf;
104 
105         for (i = 0; i < (sz / sizeof(U32)); i++) {
106                 if ((i % format) == 0) {
107                         if (i != 0)
108                                 printf("\n");
109                         printf("%08x: ", (i * 4));
110                 }
111                 printf("%08x ", buf_loc[i]);
112         }
113         printf("\n");
114 }
115 
/*
 * Arm a completion object: 'done' is cleared here and set to 1 either by
 * complete() or by the timeout paths in the wait_for_completion_* helpers.
 */
void
init_completion(struct completion *completion)
{
	completion->done = 0;
}
121 
/*
 * Signal a completion object.
 */
void
complete(struct completion *completion)
{
	completion->done = 1;
	/*
	 * NOTE(review): the wakeup channel is the address of the complete()
	 * function itself, not the completion object.  The waiters in this
	 * file either busy-poll 'done' or sleep on &sc->tm_chan, so no one
	 * sleeps on this channel -- confirm before relying on the wakeup.
	 */
	wakeup(complete);
}
128 
129 void wait_for_completion_timeout(struct completion *completion,
130 	    U32 timeout)
131 {
132 	U32 count = timeout * 1000;
133 
134 	while ((completion->done == 0) && count) {
135                 DELAY(1000);
136 		count--;
137 	}
138 
139 	if (completion->done == 0) {
140 		printf("%s: Command is timedout\n", __func__);
141 		completion->done = 1;
142 	}
143 }
/*
 * Wait for a task-management completion, sleeping on sc->tm_chan in
 * one-second slices; the completion path is expected to set
 * completion->done and wake the channel.  On timeout the completion is
 * force-marked done so the caller does not wait forever.
 *
 * NOTE(review): count is timeout*1000 yet each msleep() lasts up to one
 * second, so unless woken early the bound is ~1000x 'timeout' -- confirm
 * whether the scale factor is intentional.
 */
void wait_for_completion_timeout_tm(struct completion *completion,
	    U32 timeout, struct mpi3mr_softc *sc)
{
	U32 count = timeout * 1000;

	while ((completion->done == 0) && count) {
		/* msleep() atomically drops mpi3mr_mtx while asleep. */
		msleep(&sc->tm_chan, &sc->mpi3mr_mtx, PRIBIO,
		       "TM command", 1 * hz);
		count--;
	}

	if (completion->done == 0) {
		printf("%s: Command is timedout\n", __func__);
		completion->done = 1;
	}
}
160 
161 
162 void
163 poll_for_command_completion(struct mpi3mr_softc *sc,
164        struct mpi3mr_drvr_cmd *cmd, U16 wait)
165 {
166 	int wait_time = wait * 1000;
167        while (wait_time) {
168                mpi3mr_complete_admin_cmd(sc);
169                if (cmd->state & MPI3MR_CMD_COMPLETE)
170                        break;
171 	       DELAY(1000);
172                wait_time--;
173        }
174 }
175 
176 /**
177  * mpi3mr_trigger_snapdump - triggers firmware snapdump
178  * @sc: Adapter instance reference
179  * @reason_code: reason code for the fault.
180  *
181  * This routine will trigger the snapdump and wait for it to
182  * complete or timeout before it returns.
 * This will be called during initialization time faults/resets/timeouts
184  * before soft reset invocation.
185  *
186  * Return:  None.
187  */
static void
mpi3mr_trigger_snapdump(struct mpi3mr_softc *sc, U32 reason_code)
{
	/* Poll budget: MPI3_SYSIF_DIAG_SAVE_TIMEOUT seconds in 100 ms steps. */
	U32 host_diagnostic, timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10;

	mpi3mr_dprint(sc, MPI3MR_INFO, "snapdump triggered: reason code: %s\n",
	    mpi3mr_reset_rc_name(reason_code));

	/* Ask the firmware to preserve diagnostic state, then fault it. */
	mpi3mr_set_diagsave(sc);
	mpi3mr_issue_reset(sc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
			   reason_code);

	/* Spin until the controller clears SAVE_IN_PROGRESS or we time out. */
	do {
		host_diagnostic = mpi3mr_regread(sc, MPI3_SYSIF_HOST_DIAG_OFFSET);
		if (!(host_diagnostic & MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS))
			break;
		DELAY(100 * 1000);
	} while (--timeout);

	return;
}
209 
210 /**
211  * mpi3mr_check_rh_fault_ioc - check reset history and fault
212  * controller
213  * @sc: Adapter instance reference
 * @reason_code: reason code for the fault.
215  *
216  * This routine will fault the controller with
217  * the given reason code if it is not already in the fault or
 * not asynchronously reset. This will be used to handle
 * initialization time faults/resets/timeouts as in those cases
220  * immediate soft reset invocation is not required.
221  *
222  * Return:  None.
223  */
224 static void mpi3mr_check_rh_fault_ioc(struct mpi3mr_softc *sc, U32 reason_code)
225 {
226 	U32 ioc_status;
227 
228 	if (sc->unrecoverable) {
229 		mpi3mr_dprint(sc, MPI3MR_ERROR, "controller is unrecoverable\n");
230 		return;
231 	}
232 
233 	ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);
234 	if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) ||
235 	    (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) {
236 		mpi3mr_print_fault_info(sc);
237 		return;
238 	}
239 
240 	mpi3mr_trigger_snapdump(sc, reason_code);
241 
242 	return;
243 }
244 
245 static void * mpi3mr_get_reply_virt_addr(struct mpi3mr_softc *sc,
246     bus_addr_t phys_addr)
247 {
248 	if (!phys_addr)
249 		return NULL;
250 	if ((phys_addr < sc->reply_buf_dma_min_address) ||
251 	    (phys_addr > sc->reply_buf_dma_max_address))
252 		return NULL;
253 
254 	return sc->reply_buf + (phys_addr - sc->reply_buf_phys);
255 }
256 
257 static void * mpi3mr_get_sensebuf_virt_addr(struct mpi3mr_softc *sc,
258     bus_addr_t phys_addr)
259 {
260 	if (!phys_addr)
261 		return NULL;
262 	return sc->sense_buf + (phys_addr - sc->sense_buf_phys);
263 }
264 
/*
 * Return a consumed reply buffer to the controller's reply free queue
 * and publish the new host index to hardware.  Serialized by the
 * reply_free_q spin lock.
 */
static void mpi3mr_repost_reply_buf(struct mpi3mr_softc *sc,
    U64 reply_dma)
{
	U32 old_idx = 0;

	mtx_lock_spin(&sc->reply_free_q_lock);
	old_idx  =  sc->reply_free_q_host_index;
	/* Advance the host index with wrap-around at queue size. */
	sc->reply_free_q_host_index = ((sc->reply_free_q_host_index ==
	    (sc->reply_free_q_sz - 1)) ? 0 :
	    (sc->reply_free_q_host_index + 1));
	sc->reply_free_q[old_idx] = reply_dma;
	mpi3mr_regwrite(sc, MPI3_SYSIF_REPLY_FREE_HOST_INDEX_OFFSET,
		sc->reply_free_q_host_index);
	mtx_unlock_spin(&sc->reply_free_q_lock);
}
280 
281 static void mpi3mr_repost_sense_buf(struct mpi3mr_softc *sc,
282     U64 sense_buf_phys)
283 {
284 	U32 old_idx = 0;
285 
286 	mtx_lock_spin(&sc->sense_buf_q_lock);
287 	old_idx  =  sc->sense_buf_q_host_index;
288 	sc->sense_buf_q_host_index = ((sc->sense_buf_q_host_index ==
289 	    (sc->sense_buf_q_sz - 1)) ? 0 :
290 	    (sc->sense_buf_q_host_index + 1));
291 	sc->sense_buf_q[old_idx] = sense_buf_phys;
292 	mpi3mr_regwrite(sc, MPI3_SYSIF_SENSE_BUF_FREE_HOST_INDEX_OFFSET,
293 		sc->sense_buf_q_host_index);
294 	mtx_unlock_spin(&sc->sense_buf_q_lock);
295 
296 }
297 
/*
 * Set the io_divert flag to 'divert_value' on every target belonging to
 * throttle group 'tg'.  Walks the CAM target list under the target spin
 * lock.
 */
void mpi3mr_set_io_divert_for_all_vd_in_tg(struct mpi3mr_softc *sc,
	struct mpi3mr_throttle_group_info *tg, U8 divert_value)
{
	struct mpi3mr_target *target;

	mtx_lock_spin(&sc->target_lock);
	TAILQ_FOREACH(target, &sc->cam_sc->tgt_list, tgt_next) {
		if (target->throttle_group == tg)
			target->io_divert = divert_value;
	}
	mtx_unlock_spin(&sc->target_lock);
}
310 
311 /**
312  * mpi3mr_submit_admin_cmd - Submit request to admin queue
 * @sc: Adapter instance reference
314  * @admin_req: MPI3 request
315  * @admin_req_sz: Request size
316  *
317  * Post the MPI3 request into admin request queue and
318  * inform the controller, if the queue is full return
319  * appropriate error.
320  *
321  * Return: 0 on success, non-zero on failure.
322  */
323 int mpi3mr_submit_admin_cmd(struct mpi3mr_softc *sc, void *admin_req,
324     U16 admin_req_sz)
325 {
326 	U16 areq_pi = 0, areq_ci = 0, max_entries = 0;
327 	int retval = 0;
328 	U8 *areq_entry;
329 
330 	mtx_lock_spin(&sc->admin_req_lock);
331 	areq_pi = sc->admin_req_pi;
332 	areq_ci = sc->admin_req_ci;
333 	max_entries = sc->num_admin_reqs;
334 
335 	if (sc->unrecoverable)
336 		return -EFAULT;
337 
338 	if ((areq_ci == (areq_pi + 1)) || ((!areq_ci) &&
339 					   (areq_pi == (max_entries - 1)))) {
340 		printf(IOCNAME "AdminReqQ full condition detected\n",
341 		    sc->name);
342 		retval = -EAGAIN;
343 		goto out;
344 	}
345 	areq_entry = (U8 *)sc->admin_req + (areq_pi *
346 						     MPI3MR_AREQ_FRAME_SZ);
347 	memset(areq_entry, 0, MPI3MR_AREQ_FRAME_SZ);
348 	memcpy(areq_entry, (U8 *)admin_req, admin_req_sz);
349 
350 	if (++areq_pi == max_entries)
351 		areq_pi = 0;
352 	sc->admin_req_pi = areq_pi;
353 
354 	mpi3mr_regwrite(sc, MPI3_SYSIF_ADMIN_REQ_Q_PI_OFFSET, sc->admin_req_pi);
355 
356 out:
357 	mtx_unlock_spin(&sc->admin_req_lock);
358 	return retval;
359 }
360 
361 /**
362  * mpi3mr_check_req_qfull - Check request queue is full or not
363  * @op_req_q: Operational reply queue info
364  *
365  * Return: true when queue full, false otherwise.
366  */
367 static inline bool
368 mpi3mr_check_req_qfull(struct mpi3mr_op_req_queue *op_req_q)
369 {
370 	U16 pi, ci, max_entries;
371 	bool is_qfull = false;
372 
373 	pi = op_req_q->pi;
374 	ci = op_req_q->ci;
375 	max_entries = op_req_q->num_reqs;
376 
377 	if ((ci == (pi + 1)) || ((!ci) && (pi == (max_entries - 1))))
378 		is_qfull = true;
379 
380 	return is_qfull;
381 }
382 
383 /**
384  * mpi3mr_submit_io - Post IO command to firmware
385  * @sc:		      Adapter instance reference
386  * @op_req_q:	      Operational Request queue reference
387  * @req:	      MPT request data
388  *
389  * This function submits IO command to firmware.
390  *
 * Return: 0 on success, non-zero on failure.
392  */
int mpi3mr_submit_io(struct mpi3mr_softc *sc,
    struct mpi3mr_op_req_queue *op_req_q, U8 *req)
{
	U16 pi, max_entries;
	int retval = 0;
	U8 *req_entry;
	U16 req_sz = sc->facts.op_req_sz;
	struct mpi3mr_irq_context *irq_ctx;

	mtx_lock_spin(&op_req_q->q_lock);

	pi = op_req_q->pi;
	max_entries = op_req_q->num_reqs;
	if (mpi3mr_check_req_qfull(op_req_q)) {
		/* Queue full: try to reclaim slots by draining this
		 * queue's replies once before giving up. */
		irq_ctx = &sc->irq_ctx[op_req_q->reply_qid - 1];
		mpi3mr_complete_io_cmd(sc, irq_ctx);

		if (mpi3mr_check_req_qfull(op_req_q)) {
			printf(IOCNAME "OpReqQ full condition detected\n",
				sc->name);
			retval = -EBUSY;
			goto out;
		}
	}

	/* Copy the request into the frame at PI, zero-padding to frame size.
	 * NOTE(review): copies MPI3MR_AREQ_FRAME_SZ bytes into a slot of
	 * req_sz bytes -- assumes op_req_sz >= MPI3MR_AREQ_FRAME_SZ; confirm. */
	req_entry = (U8 *)op_req_q->q_base + (pi * req_sz);
	memset(req_entry, 0, req_sz);
	memcpy(req_entry, req, MPI3MR_AREQ_FRAME_SZ);
	if (++pi == max_entries)
		pi = 0;
	op_req_q->pi = pi;

	mpi3mr_atomic_inc(&sc->op_reply_q[op_req_q->reply_qid - 1].pend_ios);

	/* Publish the new PI to hardware to submit the request. */
	mpi3mr_regwrite(sc, MPI3_SYSIF_OPER_REQ_Q_N_PI_OFFSET(op_req_q->qid), op_req_q->pi);
	if (sc->mpi3mr_debug & MPI3MR_TRACE) {
		device_printf(sc->mpi3mr_dev, "IO submission: QID:%d PI:0x%x\n", op_req_q->qid, op_req_q->pi);
		mpi3mr_hexdump(req_entry, MPI3MR_AREQ_FRAME_SZ, 8);
	}

out:
	mtx_unlock_spin(&op_req_q->q_lock);
	return retval;
}
437 
/*
 * Fill in a simple MPI3 SGE at 'paddr' with the given flags, length and
 * DMA address.
 */
inline void
mpi3mr_add_sg_single(void *paddr, U8 flags, U32 length,
		     bus_addr_t dma_addr)
{
	Mpi3SGESimple_t *sgel = paddr;

	sgel->Flags = flags;
	sgel->Length = (length);
	sgel->Address = (U64)dma_addr;
}
448 
/*
 * Build a zero-length simple SGE (marked end-of-list) at 'paddr', used
 * for requests that carry no data payload.
 */
void mpi3mr_build_zero_len_sge(void *paddr)
{
	U8 sgl_flags = (MPI3_SGE_FLAGS_ELEMENT_TYPE_SIMPLE |
		MPI3_SGE_FLAGS_DLAS_SYSTEM | MPI3_SGE_FLAGS_END_OF_LIST);

	/* Length 0 with address -1 denotes the degenerate SGE. */
	mpi3mr_add_sg_single(paddr, sgl_flags, 0, -1);

}
457 
/* Allow the interrupt handlers to process controller replies. */
void mpi3mr_enable_interrupts(struct mpi3mr_softc *sc)
{
	sc->intr_enabled = 1;
}
462 
/* Make the interrupt handlers skip processing of controller replies. */
void mpi3mr_disable_interrupts(struct mpi3mr_softc *sc)
{
	sc->intr_enabled = 0;
}
467 
468 void
469 mpi3mr_memaddr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
470 {
471 	bus_addr_t *addr;
472 
473 	addr = arg;
474 	*addr = segs[0].ds_addr;
475 }
476 
477 static int mpi3mr_delete_op_reply_queue(struct mpi3mr_softc *sc, U16 qid)
478 {
479 	Mpi3DeleteReplyQueueRequest_t delq_req;
480 	struct mpi3mr_op_reply_queue *op_reply_q;
481 	int retval = 0;
482 
483 
484 	op_reply_q = &sc->op_reply_q[qid - 1];
485 
486 	if (!op_reply_q->qid)
487 	{
488 		retval = -1;
489 		printf(IOCNAME "Issue DelRepQ: called with invalid Reply QID\n",
490 		    sc->name);
491 		goto out;
492 	}
493 
494 	memset(&delq_req, 0, sizeof(delq_req));
495 
496 	mtx_lock(&sc->init_cmds.completion.lock);
497 	if (sc->init_cmds.state & MPI3MR_CMD_PENDING) {
498 		retval = -1;
499 		printf(IOCNAME "Issue DelRepQ: Init command is in use\n",
500 		    sc->name);
501 		mtx_unlock(&sc->init_cmds.completion.lock);
502 		goto out;
503 	}
504 
505 	if (sc->init_cmds.state & MPI3MR_CMD_PENDING) {
506 		retval = -1;
507 		printf(IOCNAME "Issue DelRepQ: Init command is in use\n",
508 		    sc->name);
509 		goto out;
510 	}
511 	sc->init_cmds.state = MPI3MR_CMD_PENDING;
512 	sc->init_cmds.is_waiting = 1;
513 	sc->init_cmds.callback = NULL;
514 	delq_req.HostTag = MPI3MR_HOSTTAG_INITCMDS;
515 	delq_req.Function = MPI3_FUNCTION_DELETE_REPLY_QUEUE;
516 	delq_req.QueueID = qid;
517 
518 	init_completion(&sc->init_cmds.completion);
519 	retval = mpi3mr_submit_admin_cmd(sc, &delq_req, sizeof(delq_req));
520 	if (retval) {
521 		printf(IOCNAME "Issue DelRepQ: Admin Post failed\n",
522 		    sc->name);
523 		goto out_unlock;
524 	}
525 	wait_for_completion_timeout(&sc->init_cmds.completion,
526 	    (MPI3MR_INTADMCMD_TIMEOUT));
527 	if (!(sc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
528 		printf(IOCNAME "Issue DelRepQ: command timed out\n",
529 		    sc->name);
530 		mpi3mr_check_rh_fault_ioc(sc,
531 		    MPI3MR_RESET_FROM_DELREPQ_TIMEOUT);
532 		sc->unrecoverable = 1;
533 
534 		retval = -1;
535 		goto out_unlock;
536 	}
537 	if ((sc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
538 	     != MPI3_IOCSTATUS_SUCCESS ) {
539 		printf(IOCNAME "Issue DelRepQ: Failed IOCStatus(0x%04x) "
540 		    " Loginfo(0x%08x) \n" , sc->name,
541 		    (sc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
542 		    sc->init_cmds.ioc_loginfo);
543 		retval = -1;
544 		goto out_unlock;
545 	}
546 	sc->irq_ctx[qid - 1].op_reply_q = NULL;
547 
548 	if (sc->op_reply_q[qid - 1].q_base_phys != 0)
549 		bus_dmamap_unload(sc->op_reply_q[qid - 1].q_base_tag, sc->op_reply_q[qid - 1].q_base_dmamap);
550 	if (sc->op_reply_q[qid - 1].q_base != NULL)
551 		bus_dmamem_free(sc->op_reply_q[qid - 1].q_base_tag, sc->op_reply_q[qid - 1].q_base, sc->op_reply_q[qid - 1].q_base_dmamap);
552 	if (sc->op_reply_q[qid - 1].q_base_tag != NULL)
553 		bus_dma_tag_destroy(sc->op_reply_q[qid - 1].q_base_tag);
554 
555 	sc->op_reply_q[qid - 1].q_base = NULL;
556 	sc->op_reply_q[qid - 1].qid = 0;
557 out_unlock:
558 	sc->init_cmds.state = MPI3MR_CMD_NOTUSED;
559 	mtx_unlock(&sc->init_cmds.completion.lock);
560 out:
561 	return retval;
562 }
563 
564 /**
565  * mpi3mr_create_op_reply_queue - create operational reply queue
566  * @sc: Adapter instance reference
567  * @qid: operational reply queue id
568  *
 * Create operational reply queue by issuing MPI request
570  * through admin queue.
571  *
572  * Return:  0 on success, non-zero on failure.
573  */
static int mpi3mr_create_op_reply_queue(struct mpi3mr_softc *sc, U16 qid)
{
	Mpi3CreateReplyQueueRequest_t create_req;
	struct mpi3mr_op_reply_queue *op_reply_q;
	int retval = 0;
	char q_lock_name[32];

	op_reply_q = &sc->op_reply_q[qid - 1];

	if (op_reply_q->qid)
	{
		retval = -1;
		printf(IOCNAME "CreateRepQ: called for duplicate qid %d\n",
		    sc->name, op_reply_q->qid);
		return retval;
	}

	op_reply_q->ci = 0;
	/* A0 chip revision supports a shallower reply queue depth. */
	if (pci_get_revid(sc->mpi3mr_dev) == SAS4116_CHIP_REV_A0)
		op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD_A0;
	else
		op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD;

	op_reply_q->qsz = op_reply_q->num_replies * sc->op_reply_sz;
	op_reply_q->ephase = 1;

	/* Allocate queue DMA memory only once; it is reused across resets. */
	if (!op_reply_q->q_base) {
		snprintf(q_lock_name, 32, "Reply Queue Lock[%d]", qid);
		mtx_init(&op_reply_q->q_lock, q_lock_name, NULL, MTX_SPIN);

		if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,    /* parent */
					4, 0,			/* algnmnt, boundary */
					sc->dma_loaddr,		/* lowaddr */
					BUS_SPACE_MAXADDR,	/* highaddr */
					NULL, NULL,		/* filter, filterarg */
					op_reply_q->qsz,		/* maxsize */
					1,			/* nsegments */
					op_reply_q->qsz,		/* maxsegsize */
					0,			/* flags */
					NULL, NULL,		/* lockfunc, lockarg */
					&op_reply_q->q_base_tag)) {
			mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate Operational reply DMA tag\n");
			return (ENOMEM);
		}

		if (bus_dmamem_alloc(op_reply_q->q_base_tag, (void **)&op_reply_q->q_base,
		    BUS_DMA_NOWAIT, &op_reply_q->q_base_dmamap)) {
			mpi3mr_dprint(sc, MPI3MR_ERROR, "%s: Cannot allocate replies memory\n", __func__);
			return (ENOMEM);
		}
		bzero(op_reply_q->q_base, op_reply_q->qsz);
		bus_dmamap_load(op_reply_q->q_base_tag, op_reply_q->q_base_dmamap, op_reply_q->q_base, op_reply_q->qsz,
		    mpi3mr_memaddr_cb, &op_reply_q->q_base_phys, BUS_DMA_NOWAIT);
		mpi3mr_dprint(sc, MPI3MR_XINFO, "Operational Reply queue ID: %d phys addr= %#016jx virt_addr: %pa size= %d\n",
		    qid, (uintmax_t)op_reply_q->q_base_phys, op_reply_q->q_base, op_reply_q->qsz);

		if (!op_reply_q->q_base)
		{
			retval = -1;
			printf(IOCNAME "CreateRepQ: memory alloc failed for qid %d\n",
			    sc->name, qid);
			goto out;
		}
	}

	memset(&create_req, 0, sizeof(create_req));

	/* Serialize use of the shared init_cmds command slot. */
	mtx_lock(&sc->init_cmds.completion.lock);
	if (sc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		printf(IOCNAME "CreateRepQ: Init command is in use\n",
		    sc->name);
		mtx_unlock(&sc->init_cmds.completion.lock);
		goto out;
	}

	sc->init_cmds.state = MPI3MR_CMD_PENDING;
	sc->init_cmds.is_waiting = 1;
	sc->init_cmds.callback = NULL;
	create_req.HostTag = MPI3MR_HOSTTAG_INITCMDS;
	create_req.Function = MPI3_FUNCTION_CREATE_REPLY_QUEUE;
	create_req.QueueID = qid;
	create_req.Flags = MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_ENABLE;
	create_req.MSIxIndex = sc->irq_ctx[qid - 1].msix_index;
	create_req.BaseAddress = (U64)op_reply_q->q_base_phys;
	create_req.Size = op_reply_q->num_replies;

	init_completion(&sc->init_cmds.completion);
	retval = mpi3mr_submit_admin_cmd(sc, &create_req,
	    sizeof(create_req));
	if (retval) {
		printf(IOCNAME "CreateRepQ: Admin Post failed\n",
		    sc->name);
		goto out_unlock;
	}

	wait_for_completion_timeout(&sc->init_cmds.completion,
	  	MPI3MR_INTADMCMD_TIMEOUT);
	if (!(sc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		printf(IOCNAME "CreateRepQ: command timed out\n",
		    sc->name);
		/* Capture a snapdump; the controller is considered lost. */
		mpi3mr_check_rh_fault_ioc(sc,
		    MPI3MR_RESET_FROM_CREATEREPQ_TIMEOUT);
		sc->unrecoverable = 1;
		retval = -1;
		goto out_unlock;
	}

	if ((sc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	     != MPI3_IOCSTATUS_SUCCESS ) {
		printf(IOCNAME "CreateRepQ: Failed IOCStatus(0x%04x) "
		    " Loginfo(0x%08x) \n" , sc->name,
		    (sc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    sc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}
	op_reply_q->qid = qid;
	sc->irq_ctx[qid - 1].op_reply_q = op_reply_q;

out_unlock:
	sc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mtx_unlock(&sc->init_cmds.completion.lock);
out:
	/* On any failure, release the queue DMA resources. */
	if (retval) {
		if (op_reply_q->q_base_phys != 0)
			bus_dmamap_unload(op_reply_q->q_base_tag, op_reply_q->q_base_dmamap);
		if (op_reply_q->q_base != NULL)
			bus_dmamem_free(op_reply_q->q_base_tag, op_reply_q->q_base, op_reply_q->q_base_dmamap);
		if (op_reply_q->q_base_tag != NULL)
			bus_dma_tag_destroy(op_reply_q->q_base_tag);
		op_reply_q->q_base = NULL;
		op_reply_q->qid = 0;
	}

	return retval;
}
711 
712 /**
713  * mpi3mr_create_op_req_queue - create operational request queue
714  * @sc: Adapter instance reference
715  * @req_qid: operational request queue id
716  * @reply_qid: Reply queue ID
717  *
 * Create operational request queue by issuing MPI request
719  * through admin queue.
720  *
721  * Return:  0 on success, non-zero on failure.
722  */
static int mpi3mr_create_op_req_queue(struct mpi3mr_softc *sc, U16 req_qid, U8 reply_qid)
{
	Mpi3CreateRequestQueueRequest_t create_req;
	struct mpi3mr_op_req_queue *op_req_q;
	int retval = 0;
	char q_lock_name[32];

	op_req_q = &sc->op_req_q[req_qid - 1];

	if (op_req_q->qid)
	{
		retval = -1;
		printf(IOCNAME "CreateReqQ: called for duplicate qid %d\n",
		    sc->name, op_req_q->qid);
		return retval;
	}

	op_req_q->ci = 0;
	op_req_q->pi = 0;
	op_req_q->num_reqs = MPI3MR_OP_REQ_Q_QD;
	op_req_q->qsz = op_req_q->num_reqs * sc->facts.op_req_sz;
	op_req_q->reply_qid = reply_qid;

	/* Allocate queue DMA memory only once; it is reused across resets. */
	if (!op_req_q->q_base) {
		snprintf(q_lock_name, 32, "Request Queue Lock[%d]", req_qid);
		mtx_init(&op_req_q->q_lock, q_lock_name, NULL, MTX_SPIN);

		if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,    /* parent */
					4, 0,			/* algnmnt, boundary */
					sc->dma_loaddr,		/* lowaddr */
					BUS_SPACE_MAXADDR,	/* highaddr */
					NULL, NULL,		/* filter, filterarg */
					op_req_q->qsz,		/* maxsize */
					1,			/* nsegments */
					op_req_q->qsz,		/* maxsegsize */
					0,			/* flags */
					NULL, NULL,		/* lockfunc, lockarg */
					&op_req_q->q_base_tag)) {
			mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate request DMA tag\n");
			return (ENOMEM);
		}

		if (bus_dmamem_alloc(op_req_q->q_base_tag, (void **)&op_req_q->q_base,
		    BUS_DMA_NOWAIT, &op_req_q->q_base_dmamap)) {
			mpi3mr_dprint(sc, MPI3MR_ERROR, "%s: Cannot allocate replies memory\n", __func__);
			return (ENOMEM);
		}

		bzero(op_req_q->q_base, op_req_q->qsz);

		bus_dmamap_load(op_req_q->q_base_tag, op_req_q->q_base_dmamap, op_req_q->q_base, op_req_q->qsz,
		    mpi3mr_memaddr_cb, &op_req_q->q_base_phys, BUS_DMA_NOWAIT);

		mpi3mr_dprint(sc, MPI3MR_XINFO, "Operational Request QID: %d phys addr= %#016jx virt addr= %pa size= %d associated Reply QID: %d\n",
		    req_qid, (uintmax_t)op_req_q->q_base_phys, op_req_q->q_base, op_req_q->qsz, reply_qid);

		if (!op_req_q->q_base) {
			retval = -1;
			printf(IOCNAME "CreateReqQ: memory alloc failed for qid %d\n",
			    sc->name, req_qid);
			goto out;
		}
	}

	memset(&create_req, 0, sizeof(create_req));

	/* Serialize use of the shared init_cmds command slot. */
	mtx_lock(&sc->init_cmds.completion.lock);
	if (sc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		printf(IOCNAME "CreateReqQ: Init command is in use\n",
		    sc->name);
		mtx_unlock(&sc->init_cmds.completion.lock);
		goto out;
	}

	sc->init_cmds.state = MPI3MR_CMD_PENDING;
	sc->init_cmds.is_waiting = 1;
	sc->init_cmds.callback = NULL;
	create_req.HostTag = MPI3MR_HOSTTAG_INITCMDS;
	create_req.Function = MPI3_FUNCTION_CREATE_REQUEST_QUEUE;
	create_req.QueueID = req_qid;
	create_req.Flags = 0;
	create_req.ReplyQueueID = reply_qid;
	create_req.BaseAddress = (U64)op_req_q->q_base_phys;
	create_req.Size = op_req_q->num_reqs;

	init_completion(&sc->init_cmds.completion);
	retval = mpi3mr_submit_admin_cmd(sc, &create_req,
	    sizeof(create_req));
	if (retval) {
		printf(IOCNAME "CreateReqQ: Admin Post failed\n",
		    sc->name);
		goto out_unlock;
	}

	wait_for_completion_timeout(&sc->init_cmds.completion,
	    (MPI3MR_INTADMCMD_TIMEOUT));

	if (!(sc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		printf(IOCNAME "CreateReqQ: command timed out\n",
		    sc->name);
		/* Capture a snapdump; the controller is considered lost. */
		mpi3mr_check_rh_fault_ioc(sc,
			MPI3MR_RESET_FROM_CREATEREQQ_TIMEOUT);
		sc->unrecoverable = 1;
		retval = -1;
		goto out_unlock;
	}

	if ((sc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	     != MPI3_IOCSTATUS_SUCCESS ) {
		printf(IOCNAME "CreateReqQ: Failed IOCStatus(0x%04x) "
		    " Loginfo(0x%08x) \n" , sc->name,
		    (sc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    sc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}
	op_req_q->qid = req_qid;

out_unlock:
	sc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mtx_unlock(&sc->init_cmds.completion.lock);
out:
	/* On any failure, release the queue DMA resources. */
	if (retval) {
		if (op_req_q->q_base_phys != 0)
			bus_dmamap_unload(op_req_q->q_base_tag, op_req_q->q_base_dmamap);
		if (op_req_q->q_base != NULL)
			bus_dmamem_free(op_req_q->q_base_tag, op_req_q->q_base, op_req_q->q_base_dmamap);
		if (op_req_q->q_base_tag != NULL)
			bus_dma_tag_destroy(op_req_q->q_base_tag);
		op_req_q->q_base = NULL;
		op_req_q->qid = 0;
	}
	return retval;
}
858 
859 /**
860  * mpi3mr_create_op_queues - create operational queues
861  * @sc: Adapter instance reference
862  *
 * Create operational queues (request queues and reply queues).
864  * Return:  0 on success, non-zero on failure.
865  */
866 static int mpi3mr_create_op_queues(struct mpi3mr_softc *sc)
867 {
868 	int retval = 0;
869 	U16 num_queues = 0, i = 0, qid;
870 
871 	num_queues = min(sc->facts.max_op_reply_q,
872 	    sc->facts.max_op_req_q);
873 	num_queues = min(num_queues, sc->msix_count);
874 
875 	/*
876 	 * During reset set the num_queues to the number of queues
877 	 * that was set before the reset.
878 	 */
879 	if (sc->num_queues)
880 		num_queues = sc->num_queues;
881 
882 	mpi3mr_dprint(sc, MPI3MR_XINFO, "Trying to create %d Operational Q pairs\n",
883 	    num_queues);
884 
885 	if (!sc->op_req_q) {
886 		sc->op_req_q = malloc(sizeof(struct mpi3mr_op_req_queue) *
887 		    num_queues, M_MPI3MR, M_NOWAIT | M_ZERO);
888 
889 		if (!sc->op_req_q) {
890 			mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to alloc memory for Request queue info\n");
891 			retval = -1;
892 			goto out_failed;
893 		}
894 	}
895 
896 	if (!sc->op_reply_q) {
897 		sc->op_reply_q = malloc(sizeof(struct mpi3mr_op_reply_queue) * num_queues,
898 			M_MPI3MR, M_NOWAIT | M_ZERO);
899 
900 		if (!sc->op_reply_q) {
901 			mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to alloc memory for Reply queue info\n");
902 			retval = -1;
903 			goto out_failed;
904 		}
905 	}
906 
907 	sc->num_hosttag_op_req_q = (sc->max_host_ios + 1) / num_queues;
908 
909 	/*Operational Request and reply queue ID starts with 1*/
910 	for (i = 0; i < num_queues; i++) {
911 		qid = i + 1;
912 		if (mpi3mr_create_op_reply_queue(sc, qid)) {
913 			mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to create Reply queue %d\n",
914 			    qid);
915 			break;
916 		}
917 		if (mpi3mr_create_op_req_queue(sc, qid,
918 		    sc->op_reply_q[qid - 1].qid)) {
919 			mpi3mr_delete_op_reply_queue(sc, qid);
920 			mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to create Request queue %d\n",
921 			    qid);
922 			break;
923 		}
924 
925 	}
926 
927 	/* Not even one queue is created successfully*/
928         if (i == 0) {
929                 retval = -1;
930                 goto out_failed;
931         }
932 
933 	if (!sc->num_queues) {
934 		sc->num_queues = i;
935 	} else {
936 		if (num_queues != i) {
937 			mpi3mr_dprint(sc, MPI3MR_ERROR, "Number of queues (%d) post reset are not same as"
938 					"queues allocated (%d) during driver init\n", i, num_queues);
939 			goto out_failed;
940 		}
941 	}
942 
943 	mpi3mr_dprint(sc, MPI3MR_INFO, "Successfully created %d Operational Queue pairs\n",
944 	    sc->num_queues);
945 	mpi3mr_dprint(sc, MPI3MR_INFO, "Request Queue QD: %d Reply queue QD: %d\n",
946 	    sc->op_req_q[0].num_reqs, sc->op_reply_q[0].num_replies);
947 
948 	return retval;
949 out_failed:
950 	if (sc->op_req_q) {
951 		free(sc->op_req_q, M_MPI3MR);
952 		sc->op_req_q = NULL;
953 	}
954 	if (sc->op_reply_q) {
955 		free(sc->op_reply_q, M_MPI3MR);
956 		sc->op_reply_q = NULL;
957 	}
958 	return retval;
959 }
960 
961 /**
962  * mpi3mr_setup_admin_qpair - Setup admin queue pairs
963  * @sc: Adapter instance reference
964  *
965  * Allocation and setup admin queues(request queues and reply queues).
966  * Return:  0 on success, non-zero on failure.
967  */
968 static int mpi3mr_setup_admin_qpair(struct mpi3mr_softc *sc)
969 {
970 	int retval = 0;
971 	U32 num_adm_entries = 0;
972 
973 	sc->admin_req_q_sz = MPI3MR_AREQQ_SIZE;
974 	sc->num_admin_reqs = sc->admin_req_q_sz / MPI3MR_AREQ_FRAME_SZ;
975 	sc->admin_req_ci = sc->admin_req_pi = 0;
976 
977 	sc->admin_reply_q_sz = MPI3MR_AREPQ_SIZE;
978 	sc->num_admin_replies = sc->admin_reply_q_sz/ MPI3MR_AREP_FRAME_SZ;
979 	sc->admin_reply_ci = 0;
980 	sc->admin_reply_ephase = 1;
981 
982 	if (!sc->admin_req) {
983 		/*
984 		 * We need to create the tag for the admin queue to get the
985 		 * iofacts to see how many bits the controller decodes.  Solve
986 		 * this chicken and egg problem by only doing lower 4GB DMA.
987 		 */
988 		if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,    /* parent */
989 					4, 0,			/* algnmnt, boundary */
990 					BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
991 					BUS_SPACE_MAXADDR,	/* highaddr */
992 					NULL, NULL,		/* filter, filterarg */
993 					sc->admin_req_q_sz,	/* maxsize */
994 					1,			/* nsegments */
995 					sc->admin_req_q_sz,	/* maxsegsize */
996 					0,			/* flags */
997 					NULL, NULL,		/* lockfunc, lockarg */
998 					&sc->admin_req_tag)) {
999 			mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate request DMA tag\n");
1000 			return (ENOMEM);
1001 		}
1002 
1003 		if (bus_dmamem_alloc(sc->admin_req_tag, (void **)&sc->admin_req,
1004 		    BUS_DMA_NOWAIT, &sc->admin_req_dmamap)) {
1005 			mpi3mr_dprint(sc, MPI3MR_ERROR, "%s: Cannot allocate replies memory\n", __func__);
1006 			return (ENOMEM);
1007 		}
1008 		bzero(sc->admin_req, sc->admin_req_q_sz);
1009 		bus_dmamap_load(sc->admin_req_tag, sc->admin_req_dmamap, sc->admin_req, sc->admin_req_q_sz,
1010 		    mpi3mr_memaddr_cb, &sc->admin_req_phys, BUS_DMA_NOWAIT);
1011 		mpi3mr_dprint(sc, MPI3MR_XINFO, "Admin Req queue phys addr= %#016jx size= %d\n",
1012 		    (uintmax_t)sc->admin_req_phys, sc->admin_req_q_sz);
1013 
1014 		if (!sc->admin_req)
1015 		{
1016 			retval = -1;
1017 			printf(IOCNAME "Memory alloc for AdminReqQ: failed\n",
1018 			    sc->name);
1019 			goto out_failed;
1020 		}
1021 	}
1022 
1023 	if (!sc->admin_reply) {
1024 		mtx_init(&sc->admin_reply_lock, "Admin Reply Queue Lock", NULL, MTX_SPIN);
1025 
1026 		if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,    /* parent */
1027 					4, 0,			/* algnmnt, boundary */
1028 					BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
1029 					BUS_SPACE_MAXADDR,	/* highaddr */
1030 					NULL, NULL,		/* filter, filterarg */
1031 					sc->admin_reply_q_sz,	/* maxsize */
1032 					1,			/* nsegments */
1033 					sc->admin_reply_q_sz,	/* maxsegsize */
1034 					0,			/* flags */
1035 					NULL, NULL,		/* lockfunc, lockarg */
1036 					&sc->admin_reply_tag)) {
1037 			mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate reply DMA tag\n");
1038 			return (ENOMEM);
1039 		}
1040 
1041 		if (bus_dmamem_alloc(sc->admin_reply_tag, (void **)&sc->admin_reply,
1042 		    BUS_DMA_NOWAIT, &sc->admin_reply_dmamap)) {
1043 			mpi3mr_dprint(sc, MPI3MR_ERROR, "%s: Cannot allocate replies memory\n", __func__);
1044 			return (ENOMEM);
1045 		}
1046 		bzero(sc->admin_reply, sc->admin_reply_q_sz);
1047 		bus_dmamap_load(sc->admin_reply_tag, sc->admin_reply_dmamap, sc->admin_reply, sc->admin_reply_q_sz,
1048 		    mpi3mr_memaddr_cb, &sc->admin_reply_phys, BUS_DMA_NOWAIT);
1049 		mpi3mr_dprint(sc, MPI3MR_XINFO, "Admin Reply queue phys addr= %#016jx size= %d\n",
1050 		    (uintmax_t)sc->admin_reply_phys, sc->admin_req_q_sz);
1051 
1052 
1053 		if (!sc->admin_reply)
1054 		{
1055 			retval = -1;
1056 			printf(IOCNAME "Memory alloc for AdminRepQ: failed\n",
1057 			    sc->name);
1058 			goto out_failed;
1059 		}
1060 	}
1061 
1062 	num_adm_entries = (sc->num_admin_replies << 16) |
1063 				(sc->num_admin_reqs);
1064 	mpi3mr_regwrite(sc, MPI3_SYSIF_ADMIN_Q_NUM_ENTRIES_OFFSET, num_adm_entries);
1065 	mpi3mr_regwrite64(sc, MPI3_SYSIF_ADMIN_REQ_Q_ADDR_LOW_OFFSET, sc->admin_req_phys);
1066 	mpi3mr_regwrite64(sc, MPI3_SYSIF_ADMIN_REPLY_Q_ADDR_LOW_OFFSET, sc->admin_reply_phys);
1067 	mpi3mr_regwrite(sc, MPI3_SYSIF_ADMIN_REQ_Q_PI_OFFSET, sc->admin_req_pi);
1068 	mpi3mr_regwrite(sc, MPI3_SYSIF_ADMIN_REPLY_Q_CI_OFFSET, sc->admin_reply_ci);
1069 
1070 	return retval;
1071 
1072 out_failed:
1073 	/* Free Admin reply*/
1074 	if (sc->admin_reply_phys)
1075 		bus_dmamap_unload(sc->admin_reply_tag, sc->admin_reply_dmamap);
1076 
1077 	if (sc->admin_reply != NULL)
1078 		bus_dmamem_free(sc->admin_reply_tag, sc->admin_reply,
1079 		    sc->admin_reply_dmamap);
1080 
1081 	if (sc->admin_reply_tag != NULL)
1082 		bus_dma_tag_destroy(sc->admin_reply_tag);
1083 
1084 	/* Free Admin request*/
1085 	if (sc->admin_req_phys)
1086 		bus_dmamap_unload(sc->admin_req_tag, sc->admin_req_dmamap);
1087 
1088 	if (sc->admin_req != NULL)
1089 		bus_dmamem_free(sc->admin_req_tag, sc->admin_req,
1090 		    sc->admin_req_dmamap);
1091 
1092 	if (sc->admin_req_tag != NULL)
1093 		bus_dma_tag_destroy(sc->admin_req_tag);
1094 
1095 	return retval;
1096 }
1097 
1098 /**
1099  * mpi3mr_print_fault_info - Display fault information
1100  * @sc: Adapter instance reference
1101  *
1102  * Display the controller fault information if there is a
1103  * controller fault.
1104  *
1105  * Return: Nothing.
1106  */
1107 static void mpi3mr_print_fault_info(struct mpi3mr_softc *sc)
1108 {
1109 	U32 ioc_status, code, code1, code2, code3;
1110 
1111 	ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);
1112 
1113 	if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) {
1114 		code = mpi3mr_regread(sc, MPI3_SYSIF_FAULT_OFFSET) &
1115 			MPI3_SYSIF_FAULT_CODE_MASK;
1116 		code1 = mpi3mr_regread(sc, MPI3_SYSIF_FAULT_INFO0_OFFSET);
1117 		code2 = mpi3mr_regread(sc, MPI3_SYSIF_FAULT_INFO1_OFFSET);
1118 		code3 = mpi3mr_regread(sc, MPI3_SYSIF_FAULT_INFO2_OFFSET);
1119 		printf(IOCNAME "fault codes 0x%04x:0x%04x:0x%04x:0x%04x\n",
1120 		    sc->name, code, code1, code2, code3);
1121 	}
1122 }
1123 
1124 enum mpi3mr_iocstate mpi3mr_get_iocstate(struct mpi3mr_softc *sc)
1125 {
1126 	U32 ioc_status, ioc_control;
1127 	U8 ready, enabled;
1128 
1129 	ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);
1130 	ioc_control = mpi3mr_regread(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET);
1131 
1132 	if(sc->unrecoverable)
1133 		return MRIOC_STATE_UNRECOVERABLE;
1134 	if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)
1135 		return MRIOC_STATE_FAULT;
1136 
1137 	ready = (ioc_status & MPI3_SYSIF_IOC_STATUS_READY);
1138 	enabled = (ioc_control & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC);
1139 
1140 	if (ready && enabled)
1141 		return MRIOC_STATE_READY;
1142 	if ((!ready) && (!enabled))
1143 		return MRIOC_STATE_RESET;
1144 	if ((!ready) && (enabled))
1145 		return MRIOC_STATE_BECOMING_READY;
1146 
1147 	return MRIOC_STATE_RESET_REQUESTED;
1148 }
1149 
1150 static inline void mpi3mr_clear_resethistory(struct mpi3mr_softc *sc)
1151 {
1152         U32 ioc_status;
1153 
1154 	ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);
1155         if (ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)
1156 		mpi3mr_regwrite(sc, MPI3_SYSIF_IOC_STATUS_OFFSET, ioc_status);
1157 
1158 }
1159 
1160 /**
1161  * mpi3mr_mur_ioc - Message unit Reset handler
1162  * @sc: Adapter instance reference
1163  * @reset_reason: Reset reason code
1164  *
1165  * Issue Message unit Reset to the controller and wait for it to
1166  * be complete.
1167  *
1168  * Return: 0 on success, -1 on failure.
1169  */
1170 static int mpi3mr_mur_ioc(struct mpi3mr_softc *sc, U32 reset_reason)
1171 {
1172         U32 ioc_config, timeout, ioc_status;
1173         int retval = -1;
1174 
1175         mpi3mr_dprint(sc, MPI3MR_INFO, "Issuing Message Unit Reset(MUR)\n");
1176         if (sc->unrecoverable) {
1177                 mpi3mr_dprint(sc, MPI3MR_ERROR, "IOC is unrecoverable MUR not issued\n");
1178                 return retval;
1179         }
1180         mpi3mr_clear_resethistory(sc);
1181 	mpi3mr_regwrite(sc, MPI3_SYSIF_SCRATCHPAD0_OFFSET, reset_reason);
1182 	ioc_config = mpi3mr_regread(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET);
1183         ioc_config &= ~MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC;
1184 	mpi3mr_regwrite(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET, ioc_config);
1185 
1186         timeout = MPI3MR_MUR_TIMEOUT * 10;
1187         do {
1188 		ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);
1189                 if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)) {
1190                         mpi3mr_clear_resethistory(sc);
1191 			ioc_config =
1192 				mpi3mr_regread(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET);
1193                         if (!((ioc_status & MPI3_SYSIF_IOC_STATUS_READY) ||
1194                             (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) ||
1195                             (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC))) {
1196                                 retval = 0;
1197                                 break;
1198                         }
1199                 }
1200                 DELAY(100 * 1000);
1201         } while (--timeout);
1202 
1203 	ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);
1204 	ioc_config = mpi3mr_regread(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET);
1205 
1206         mpi3mr_dprint(sc, MPI3MR_INFO, "IOC Status/Config after %s MUR is (0x%x)/(0x%x)\n",
1207                 !retval ? "successful":"failed", ioc_status, ioc_config);
1208         return retval;
1209 }
1210 
1211 /**
1212  * mpi3mr_bring_ioc_ready - Bring controller to ready state
1213  * @sc: Adapter instance reference
1214  *
1215  * Set Enable IOC bit in IOC configuration register and wait for
1216  * the controller to become ready.
1217  *
1218  * Return: 0 on success, appropriate error on failure.
1219  */
1220 static int mpi3mr_bring_ioc_ready(struct mpi3mr_softc *sc)
1221 {
1222         U32 ioc_config, timeout;
1223         enum mpi3mr_iocstate current_state;
1224 
1225 	ioc_config = mpi3mr_regread(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET);
1226         ioc_config |= MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC;
1227 	mpi3mr_regwrite(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET, ioc_config);
1228 
1229         timeout = sc->ready_timeout * 10;
1230         do {
1231                 current_state = mpi3mr_get_iocstate(sc);
1232                 if (current_state == MRIOC_STATE_READY)
1233                         return 0;
1234                 DELAY(100 * 1000);
1235         } while (--timeout);
1236 
1237         return -1;
1238 }
1239 
/* IOC state to human-readable name mapper table; the terminating
 * MRIOC_STATE_COUNT entry is the iteration bound for lookups. */
static const struct {
	enum mpi3mr_iocstate value;	/* state enumerator */
	char *name;			/* printable state name */
} mrioc_states[] = {
	{ MRIOC_STATE_READY, "ready" },
	{ MRIOC_STATE_FAULT, "fault" },
	{ MRIOC_STATE_RESET, "reset" },
	{ MRIOC_STATE_BECOMING_READY, "becoming ready" },
	{ MRIOC_STATE_RESET_REQUESTED, "reset requested" },
	{ MRIOC_STATE_COUNT, "Count" },
};
1251 
1252 static const char *mpi3mr_iocstate_name(enum mpi3mr_iocstate mrioc_state)
1253 {
1254 	int i;
1255 	char *name = NULL;
1256 
1257 	for (i = 0; i < MRIOC_STATE_COUNT; i++) {
1258 		if (mrioc_states[i].value == mrioc_state){
1259 			name = mrioc_states[i].name;
1260 			break;
1261 		}
1262 	}
1263 	return name;
1264 }
1265 
1266 /* Reset reason to name mapper structure*/
1267 static const struct {
1268 	enum mpi3mr_reset_reason value;
1269 	char *name;
1270 } mpi3mr_reset_reason_codes[] = {
1271 	{ MPI3MR_RESET_FROM_BRINGUP, "timeout in bringup" },
1272 	{ MPI3MR_RESET_FROM_FAULT_WATCH, "fault" },
1273 	{ MPI3MR_RESET_FROM_IOCTL, "application" },
1274 	{ MPI3MR_RESET_FROM_EH_HOS, "error handling" },
1275 	{ MPI3MR_RESET_FROM_TM_TIMEOUT, "TM timeout" },
1276 	{ MPI3MR_RESET_FROM_IOCTL_TIMEOUT, "IOCTL timeout" },
1277 	{ MPI3MR_RESET_FROM_SCSIIO_TIMEOUT, "SCSIIO timeout" },
1278 	{ MPI3MR_RESET_FROM_MUR_FAILURE, "MUR failure" },
1279 	{ MPI3MR_RESET_FROM_CTLR_CLEANUP, "timeout in controller cleanup" },
1280 	{ MPI3MR_RESET_FROM_CIACTIV_FAULT, "component image activation fault" },
1281 	{ MPI3MR_RESET_FROM_PE_TIMEOUT, "port enable timeout" },
1282 	{ MPI3MR_RESET_FROM_TSU_TIMEOUT, "time stamp update timeout" },
1283 	{ MPI3MR_RESET_FROM_DELREQQ_TIMEOUT, "delete request queue timeout" },
1284 	{ MPI3MR_RESET_FROM_DELREPQ_TIMEOUT, "delete reply queue timeout" },
1285 	{
1286 		MPI3MR_RESET_FROM_CREATEREPQ_TIMEOUT,
1287 		"create request queue timeout"
1288 	},
1289 	{
1290 		MPI3MR_RESET_FROM_CREATEREQQ_TIMEOUT,
1291 		"create reply queue timeout"
1292 	},
1293 	{ MPI3MR_RESET_FROM_IOCFACTS_TIMEOUT, "IOC facts timeout" },
1294 	{ MPI3MR_RESET_FROM_IOCINIT_TIMEOUT, "IOC init timeout" },
1295 	{ MPI3MR_RESET_FROM_EVTNOTIFY_TIMEOUT, "event notify timeout" },
1296 	{ MPI3MR_RESET_FROM_EVTACK_TIMEOUT, "event acknowledgment timeout" },
1297 	{
1298 		MPI3MR_RESET_FROM_CIACTVRST_TIMER,
1299 		"component image activation timeout"
1300 	},
1301 	{
1302 		MPI3MR_RESET_FROM_GETPKGVER_TIMEOUT,
1303 		"get package version timeout"
1304 	},
1305 	{
1306 		MPI3MR_RESET_FROM_PELABORT_TIMEOUT,
1307 		"persistent event log abort timeout"
1308 	},
1309 	{ MPI3MR_RESET_FROM_SYSFS, "sysfs invocation" },
1310 	{ MPI3MR_RESET_FROM_SYSFS_TIMEOUT, "sysfs TM timeout" },
1311 	{
1312 		MPI3MR_RESET_FROM_DIAG_BUFFER_POST_TIMEOUT,
1313 		"diagnostic buffer post timeout"
1314 	},
1315 	{ MPI3MR_RESET_FROM_FIRMWARE, "firmware asynchronus reset" },
1316 	{ MPI3MR_RESET_REASON_COUNT, "Reset reason count" },
1317 };
1318 
1319 /**
1320  * mpi3mr_reset_rc_name - get reset reason code name
1321  * @reason_code: reset reason code value
1322  *
1323  * Map reset reason to an NULL terminated ASCII string
1324  *
1325  * Return: Name corresponding to reset reason value or NULL.
1326  */
1327 static const char *mpi3mr_reset_rc_name(enum mpi3mr_reset_reason reason_code)
1328 {
1329 	int i;
1330 	char *name = NULL;
1331 
1332 	for (i = 0; i < MPI3MR_RESET_REASON_COUNT; i++) {
1333 		if (mpi3mr_reset_reason_codes[i].value == reason_code) {
1334 			name = mpi3mr_reset_reason_codes[i].name;
1335 			break;
1336 		}
1337 	}
1338 	return name;
1339 }
1340 
/* Number of entries in mpi3mr_reset_types (including the terminator) */
#define MAX_RESET_TYPE 3
/* Reset type to name mapper structure*/
static const struct {
	U16 reset_type;		/* MPI3_SYSIF_HOST_DIAG_RESET_ACTION_* value */
	char *name;		/* printable reset type name */
} mpi3mr_reset_types[] = {
	{ MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET, "soft" },
	{ MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, "diag fault" },
	{ MAX_RESET_TYPE, "count"}
};
1351 
1352 /**
1353  * mpi3mr_reset_type_name - get reset type name
1354  * @reset_type: reset type value
1355  *
1356  * Map reset type to an NULL terminated ASCII string
1357  *
1358  * Return: Name corresponding to reset type value or NULL.
1359  */
1360 static const char *mpi3mr_reset_type_name(U16 reset_type)
1361 {
1362 	int i;
1363 	char *name = NULL;
1364 
1365 	for (i = 0; i < MAX_RESET_TYPE; i++) {
1366 		if (mpi3mr_reset_types[i].reset_type == reset_type) {
1367 			name = mpi3mr_reset_types[i].name;
1368 			break;
1369 		}
1370 	}
1371 	return name;
1372 }
1373 
1374 /**
1375  * mpi3mr_soft_reset_success - Check softreset is success or not
1376  * @ioc_status: IOC status register value
1377  * @ioc_config: IOC config register value
1378  *
1379  * Check whether the soft reset is successful or not based on
1380  * IOC status and IOC config register values.
1381  *
1382  * Return: True when the soft reset is success, false otherwise.
1383  */
1384 static inline bool
1385 mpi3mr_soft_reset_success(U32 ioc_status, U32 ioc_config)
1386 {
1387 	if (!((ioc_status & MPI3_SYSIF_IOC_STATUS_READY) ||
1388 	    (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) ||
1389 	    (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC)))
1390 		return true;
1391 	return false;
1392 }
1393 
1394 /**
1395  * mpi3mr_diagfault_success - Check diag fault is success or not
1396  * @sc: Adapter reference
1397  * @ioc_status: IOC status register value
1398  *
1399  * Check whether the controller hit diag reset fault code.
1400  *
1401  * Return: True when there is diag fault, false otherwise.
1402  */
1403 static inline bool mpi3mr_diagfault_success(struct mpi3mr_softc *sc,
1404 	U32 ioc_status)
1405 {
1406 	if (!(ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT))
1407 		return false;
1408 	mpi3mr_print_fault_info(sc);
1409 	return true;
1410 }
1411 
1412 /**
1413  * mpi3mr_issue_iocfacts - Send IOC Facts
1414  * @sc: Adapter instance reference
1415  * @facts_data: Cached IOC facts data
1416  *
1417  * Issue IOC Facts MPI request through admin queue and wait for
1418  * the completion of it or time out.
1419  *
1420  * Return: 0 on success, non-zero on failures.
1421  */
1422 static int mpi3mr_issue_iocfacts(struct mpi3mr_softc *sc,
1423     Mpi3IOCFactsData_t *facts_data)
1424 {
1425 	Mpi3IOCFactsRequest_t iocfacts_req;
1426 	bus_dma_tag_t data_tag = NULL;
1427 	bus_dmamap_t data_map = NULL;
1428 	bus_addr_t data_phys = 0;
1429 	void *data = NULL;
1430 	U32 data_len = sizeof(*facts_data);
1431 	int retval = 0;
1432 
1433 	U8 sgl_flags = (MPI3_SGE_FLAGS_ELEMENT_TYPE_SIMPLE |
1434                 	MPI3_SGE_FLAGS_DLAS_SYSTEM |
1435 			MPI3_SGE_FLAGS_END_OF_LIST);
1436 
1437 
1438 	/*
1439 	 * We can't use sc->dma_loaddr here.  We set those only after we get the
1440 	 * iocfacts.  So allocate in the lower 4GB.  The amount of data is tiny
1441 	 * and we don't do this that often, so any bouncing we might have to do
1442 	 * isn't a cause for concern.
1443 	 */
1444         if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,    /* parent */
1445 				4, 0,			/* algnmnt, boundary */
1446 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
1447 				BUS_SPACE_MAXADDR,	/* highaddr */
1448 				NULL, NULL,		/* filter, filterarg */
1449                                 data_len,		/* maxsize */
1450                                 1,			/* nsegments */
1451                                 data_len,		/* maxsegsize */
1452                                 0,			/* flags */
1453                                 NULL, NULL,		/* lockfunc, lockarg */
1454                                 &data_tag)) {
1455 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate request DMA tag\n");
1456 		return (ENOMEM);
1457         }
1458 
1459         if (bus_dmamem_alloc(data_tag, (void **)&data,
1460 	    BUS_DMA_NOWAIT, &data_map)) {
1461 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Func: %s line: %d Data  DMA mem alloc failed\n",
1462 			__func__, __LINE__);
1463 		return (ENOMEM);
1464         }
1465 
1466         bzero(data, data_len);
1467         bus_dmamap_load(data_tag, data_map, data, data_len,
1468 	    mpi3mr_memaddr_cb, &data_phys, BUS_DMA_NOWAIT);
1469 	mpi3mr_dprint(sc, MPI3MR_XINFO, "Func: %s line: %d IOCfacts data phys addr= %#016jx size= %d\n",
1470 	    __func__, __LINE__, (uintmax_t)data_phys, data_len);
1471 
1472 	if (!data)
1473 	{
1474 		retval = -1;
1475 		printf(IOCNAME "Memory alloc for IOCFactsData: failed\n",
1476 		    sc->name);
1477 		goto out;
1478 	}
1479 
1480 	mtx_lock(&sc->init_cmds.completion.lock);
1481 	memset(&iocfacts_req, 0, sizeof(iocfacts_req));
1482 
1483 	if (sc->init_cmds.state & MPI3MR_CMD_PENDING) {
1484 		retval = -1;
1485 		printf(IOCNAME "Issue IOCFacts: Init command is in use\n",
1486 		    sc->name);
1487 		mtx_unlock(&sc->init_cmds.completion.lock);
1488 		goto out;
1489 	}
1490 
1491 	sc->init_cmds.state = MPI3MR_CMD_PENDING;
1492 	sc->init_cmds.is_waiting = 1;
1493 	sc->init_cmds.callback = NULL;
1494 	iocfacts_req.HostTag = (MPI3MR_HOSTTAG_INITCMDS);
1495 	iocfacts_req.Function = MPI3_FUNCTION_IOC_FACTS;
1496 
1497 	mpi3mr_add_sg_single(&iocfacts_req.SGL, sgl_flags, data_len,
1498 	    data_phys);
1499 
1500 	init_completion(&sc->init_cmds.completion);
1501 
1502 	retval = mpi3mr_submit_admin_cmd(sc, &iocfacts_req,
1503 	    sizeof(iocfacts_req));
1504 
1505 	if (retval) {
1506 		printf(IOCNAME "Issue IOCFacts: Admin Post failed\n",
1507 		    sc->name);
1508 		goto out_unlock;
1509 	}
1510 
1511 	wait_for_completion_timeout(&sc->init_cmds.completion,
1512 	    (MPI3MR_INTADMCMD_TIMEOUT));
1513 	if (!(sc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
1514 		printf(IOCNAME "Issue IOCFacts: command timed out\n",
1515 		    sc->name);
1516 		mpi3mr_check_rh_fault_ioc(sc,
1517 		    MPI3MR_RESET_FROM_IOCFACTS_TIMEOUT);
1518 		sc->unrecoverable = 1;
1519 		retval = -1;
1520 		goto out_unlock;
1521 	}
1522 
1523 	if ((sc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
1524 	     != MPI3_IOCSTATUS_SUCCESS ) {
1525 		printf(IOCNAME "Issue IOCFacts: Failed IOCStatus(0x%04x) "
1526 		    " Loginfo(0x%08x) \n" , sc->name,
1527 		    (sc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
1528 		    sc->init_cmds.ioc_loginfo);
1529 		retval = -1;
1530 		goto out_unlock;
1531 	}
1532 
1533 	memcpy(facts_data, (U8 *)data, data_len);
1534 out_unlock:
1535 	sc->init_cmds.state = MPI3MR_CMD_NOTUSED;
1536 	mtx_unlock(&sc->init_cmds.completion.lock);
1537 
1538 out:
1539 	if (data_phys != 0)
1540 		bus_dmamap_unload(data_tag, data_map);
1541 	if (data != NULL)
1542 		bus_dmamem_free(data_tag, data, data_map);
1543 	if (data_tag != NULL)
1544 		bus_dma_tag_destroy(data_tag);
1545 	return retval;
1546 }
1547 
1548 /**
1549  * mpi3mr_process_factsdata - Process IOC facts data
1550  * @sc: Adapter instance reference
1551  * @facts_data: Cached IOC facts data
1552  *
1553  * Convert IOC facts data into cpu endianness and cache it in
1554  * the driver .
1555  *
1556  * Return: Nothing.
1557  */
1558 static int mpi3mr_process_factsdata(struct mpi3mr_softc *sc,
1559     Mpi3IOCFactsData_t *facts_data)
1560 {
1561 	int retval = 0;
1562 	U32 ioc_config, req_sz, facts_flags;
1563         struct mpi3mr_compimg_ver *fwver;
1564 
1565 	if (le16toh(facts_data->IOCFactsDataLength) !=
1566 	    (sizeof(*facts_data) / 4)) {
1567 		mpi3mr_dprint(sc, MPI3MR_INFO, "IOCFacts data length mismatch "
1568 		    " driver_sz(%ld) firmware_sz(%d) \n",
1569 		    sizeof(*facts_data),
1570 		    facts_data->IOCFactsDataLength);
1571 	}
1572 
1573 	ioc_config = mpi3mr_regread(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET);
1574         req_sz = 1 << ((ioc_config & MPI3_SYSIF_IOC_CONFIG_OPER_REQ_ENT_SZ) >>
1575                   MPI3_SYSIF_IOC_CONFIG_OPER_REQ_ENT_SZ_SHIFT);
1576 
1577 	if (facts_data->IOCRequestFrameSize != (req_sz/4)) {
1578 		 mpi3mr_dprint(sc, MPI3MR_INFO, "IOCFacts data reqFrameSize mismatch "
1579 		    " hw_size(%d) firmware_sz(%d) \n" , req_sz/4,
1580 		    facts_data->IOCRequestFrameSize);
1581 	}
1582 
1583 	memset(&sc->facts, 0, sizeof(sc->facts));
1584 
1585 	facts_flags = le32toh(facts_data->Flags);
1586 	sc->facts.op_req_sz = req_sz;
1587 	sc->op_reply_sz = 1 << ((ioc_config &
1588                                   MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ) >>
1589                                   MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ_SHIFT);
1590 
1591 	sc->facts.ioc_num = facts_data->IOCNumber;
1592         sc->facts.who_init = facts_data->WhoInit;
1593         sc->facts.max_msix_vectors = facts_data->MaxMSIxVectors;
1594 	sc->facts.personality = (facts_flags &
1595 	    MPI3_IOCFACTS_FLAGS_PERSONALITY_MASK);
1596 	sc->facts.dma_mask = (facts_flags &
1597 	    MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_MASK) >>
1598 	    MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_SHIFT;
1599         sc->facts.protocol_flags = facts_data->ProtocolFlags;
1600         sc->facts.mpi_version = (facts_data->MPIVersion.Word);
1601         sc->facts.max_reqs = (facts_data->MaxOutstandingRequests);
1602         sc->facts.product_id = (facts_data->ProductID);
1603 	sc->facts.reply_sz = (facts_data->ReplyFrameSize) * 4;
1604         sc->facts.exceptions = (facts_data->IOCExceptions);
1605         sc->facts.max_perids = (facts_data->MaxPersistentID);
1606         sc->facts.max_vds = (facts_data->MaxVDs);
1607         sc->facts.max_hpds = (facts_data->MaxHostPDs);
1608         sc->facts.max_advhpds = (facts_data->MaxAdvHostPDs);
1609         sc->facts.max_raidpds = (facts_data->MaxRAIDPDs);
1610         sc->facts.max_nvme = (facts_data->MaxNVMe);
1611         sc->facts.max_pcieswitches =
1612                 (facts_data->MaxPCIeSwitches);
1613         sc->facts.max_sasexpanders =
1614                 (facts_data->MaxSASExpanders);
1615         sc->facts.max_sasinitiators =
1616                 (facts_data->MaxSASInitiators);
1617         sc->facts.max_enclosures = (facts_data->MaxEnclosures);
1618         sc->facts.min_devhandle = (facts_data->MinDevHandle);
1619         sc->facts.max_devhandle = (facts_data->MaxDevHandle);
1620 	sc->facts.max_op_req_q =
1621                 (facts_data->MaxOperationalRequestQueues);
1622 	sc->facts.max_op_reply_q =
1623                 (facts_data->MaxOperationalReplyQueues);
1624         sc->facts.ioc_capabilities =
1625                 (facts_data->IOCCapabilities);
1626         sc->facts.fw_ver.build_num =
1627                 (facts_data->FWVersion.BuildNum);
1628         sc->facts.fw_ver.cust_id =
1629                 (facts_data->FWVersion.CustomerID);
1630         sc->facts.fw_ver.ph_minor = facts_data->FWVersion.PhaseMinor;
1631         sc->facts.fw_ver.ph_major = facts_data->FWVersion.PhaseMajor;
1632         sc->facts.fw_ver.gen_minor = facts_data->FWVersion.GenMinor;
1633         sc->facts.fw_ver.gen_major = facts_data->FWVersion.GenMajor;
1634         sc->max_msix_vectors = min(sc->max_msix_vectors,
1635             sc->facts.max_msix_vectors);
1636         sc->facts.sge_mod_mask = facts_data->SGEModifierMask;
1637         sc->facts.sge_mod_value = facts_data->SGEModifierValue;
1638         sc->facts.sge_mod_shift = facts_data->SGEModifierShift;
1639         sc->facts.shutdown_timeout =
1640                 (facts_data->ShutdownTimeout);
1641 	sc->facts.max_dev_per_tg = facts_data->MaxDevicesPerThrottleGroup;
1642 	sc->facts.io_throttle_data_length =
1643 	    facts_data->IOThrottleDataLength;
1644 	sc->facts.max_io_throttle_group =
1645 	    facts_data->MaxIOThrottleGroup;
1646 	sc->facts.io_throttle_low = facts_data->IOThrottleLow;
1647 	sc->facts.io_throttle_high = facts_data->IOThrottleHigh;
1648 
1649 	/*Store in 512b block count*/
1650 	if (sc->facts.io_throttle_data_length)
1651 		sc->io_throttle_data_length =
1652 		    (sc->facts.io_throttle_data_length * 2 * 4);
1653 	else
1654 		/* set the length to 1MB + 1K to disable throttle*/
1655 		sc->io_throttle_data_length = MPI3MR_MAX_SECTORS + 2;
1656 
1657 	sc->io_throttle_high = (sc->facts.io_throttle_high * 2 * 1024);
1658 	sc->io_throttle_low = (sc->facts.io_throttle_low * 2 * 1024);
1659 
1660 	fwver = &sc->facts.fw_ver;
1661 	snprintf(sc->fw_version, sizeof(sc->fw_version),
1662 	    "%d.%d.%d.%d.%05d-%05d",
1663 	    fwver->gen_major, fwver->gen_minor, fwver->ph_major,
1664 	    fwver->ph_minor, fwver->cust_id, fwver->build_num);
1665 
1666 	mpi3mr_dprint(sc, MPI3MR_INFO, "ioc_num(%d), maxopQ(%d), maxopRepQ(%d), maxdh(%d),"
1667             "maxreqs(%d), mindh(%d) maxPDs(%d) maxvectors(%d) maxperids(%d)\n",
1668 	    sc->facts.ioc_num, sc->facts.max_op_req_q,
1669 	    sc->facts.max_op_reply_q, sc->facts.max_devhandle,
1670             sc->facts.max_reqs, sc->facts.min_devhandle,
1671             sc->facts.max_pds, sc->facts.max_msix_vectors,
1672             sc->facts.max_perids);
1673         mpi3mr_dprint(sc, MPI3MR_INFO, "SGEModMask 0x%x SGEModVal 0x%x SGEModShift 0x%x\n",
1674             sc->facts.sge_mod_mask, sc->facts.sge_mod_value,
1675             sc->facts.sge_mod_shift);
1676 	mpi3mr_dprint(sc, MPI3MR_INFO,
1677 	    "max_dev_per_throttle_group(%d), max_throttle_groups(%d), io_throttle_data_len(%dKiB), io_throttle_high(%dMiB), io_throttle_low(%dMiB)\n",
1678 	    sc->facts.max_dev_per_tg, sc->facts.max_io_throttle_group,
1679 	    sc->facts.io_throttle_data_length * 4,
1680 	    sc->facts.io_throttle_high, sc->facts.io_throttle_low);
1681 
1682 	sc->max_host_ios = sc->facts.max_reqs -
1683 	    (MPI3MR_INTERNALCMDS_RESVD + 1);
1684 
1685 	/*
1686 	 * Set the DMA mask for the card.  dma_mask is the number of bits that
1687 	 * can have bits set in them.  Translate this into bus_dma loaddr args.
1688 	 * Add sanity for more bits than address space or other overflow
1689 	 * situations.
1690 	 */
1691 	if (sc->facts.dma_mask == 0 ||
1692 	    (sc->facts.dma_mask >= sizeof(bus_addr_t) * 8))
1693 		sc->dma_loaddr = BUS_SPACE_MAXADDR;
1694 	else
1695 		sc->dma_loaddr = ~((1ull << sc->facts.dma_mask) - 1);
1696 	mpi3mr_dprint(sc, MPI3MR_INFO,
1697 	    "dma_mask bits: %d loaddr 0x%jx\n",
1698 	    sc->facts.dma_mask, sc->dma_loaddr);
1699 
1700 	return retval;
1701 }
1702 
1703 static inline void mpi3mr_setup_reply_free_queues(struct mpi3mr_softc *sc)
1704 {
1705 	int i;
1706 	bus_addr_t phys_addr;
1707 
1708 	/* initialize Reply buffer Queue */
1709 	for (i = 0, phys_addr = sc->reply_buf_phys;
1710 	    i < sc->num_reply_bufs; i++, phys_addr += sc->reply_sz)
1711 		sc->reply_free_q[i] = phys_addr;
1712 	sc->reply_free_q[i] = (0);
1713 
1714 	/* initialize Sense Buffer Queue */
1715 	for (i = 0, phys_addr = sc->sense_buf_phys;
1716 	    i < sc->num_sense_bufs; i++, phys_addr += MPI3MR_SENSEBUF_SZ)
1717 		sc->sense_buf_q[i] = phys_addr;
1718 	sc->sense_buf_q[i] = (0);
1719 
1720 }
1721 
1722 static int mpi3mr_reply_dma_alloc(struct mpi3mr_softc *sc)
1723 {
1724 	U32 sz;
1725 
1726 	sc->num_reply_bufs = sc->facts.max_reqs + MPI3MR_NUM_EVTREPLIES;
1727 	sc->reply_free_q_sz = sc->num_reply_bufs + 1;
1728 	sc->num_sense_bufs = sc->facts.max_reqs / MPI3MR_SENSEBUF_FACTOR;
1729 	sc->sense_buf_q_sz = sc->num_sense_bufs + 1;
1730 
1731 	sz = sc->num_reply_bufs * sc->reply_sz;
1732 
1733 	if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,  /* parent */
1734 				16, 0,			/* algnmnt, boundary */
1735 				sc->dma_loaddr,		/* lowaddr */
1736 				BUS_SPACE_MAXADDR,	/* highaddr */
1737 				NULL, NULL,		/* filter, filterarg */
1738                                 sz,			/* maxsize */
1739                                 1,			/* nsegments */
1740                                 sz,			/* maxsegsize */
1741                                 0,			/* flags */
1742                                 NULL, NULL,		/* lockfunc, lockarg */
1743                                 &sc->reply_buf_tag)) {
1744 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate request DMA tag\n");
1745 		return (ENOMEM);
1746         }
1747 
1748 	if (bus_dmamem_alloc(sc->reply_buf_tag, (void **)&sc->reply_buf,
1749 	    BUS_DMA_NOWAIT, &sc->reply_buf_dmamap)) {
1750 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Func: %s line: %d  DMA mem alloc failed\n",
1751 			__func__, __LINE__);
1752 		return (ENOMEM);
1753         }
1754 
1755 	bzero(sc->reply_buf, sz);
1756         bus_dmamap_load(sc->reply_buf_tag, sc->reply_buf_dmamap, sc->reply_buf, sz,
1757 	    mpi3mr_memaddr_cb, &sc->reply_buf_phys, BUS_DMA_NOWAIT);
1758 
1759 	sc->reply_buf_dma_min_address = sc->reply_buf_phys;
1760 	sc->reply_buf_dma_max_address = sc->reply_buf_phys + sz;
1761 	mpi3mr_dprint(sc, MPI3MR_XINFO, "reply buf (0x%p): depth(%d), frame_size(%d), "
1762 	    "pool_size(%d kB), reply_buf_dma(0x%llx)\n",
1763 	    sc->reply_buf, sc->num_reply_bufs, sc->reply_sz,
1764 	    (sz / 1024), (unsigned long long)sc->reply_buf_phys);
1765 
1766 	/* reply free queue, 8 byte align */
1767 	sz = sc->reply_free_q_sz * 8;
1768 
1769         if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,    /* parent */
1770 				8, 0,			/* algnmnt, boundary */
1771 				sc->dma_loaddr,		/* lowaddr */
1772 				BUS_SPACE_MAXADDR,	/* highaddr */
1773 				NULL, NULL,		/* filter, filterarg */
1774                                 sz,			/* maxsize */
1775                                 1,			/* nsegments */
1776                                 sz,			/* maxsegsize */
1777                                 0,			/* flags */
1778                                 NULL, NULL,		/* lockfunc, lockarg */
1779                                 &sc->reply_free_q_tag)) {
1780 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate reply free queue DMA tag\n");
1781 		return (ENOMEM);
1782         }
1783 
1784         if (bus_dmamem_alloc(sc->reply_free_q_tag, (void **)&sc->reply_free_q,
1785 	    BUS_DMA_NOWAIT, &sc->reply_free_q_dmamap)) {
1786 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Func: %s line: %d  DMA mem alloc failed\n",
1787 			__func__, __LINE__);
1788 		return (ENOMEM);
1789         }
1790 
1791 	bzero(sc->reply_free_q, sz);
1792         bus_dmamap_load(sc->reply_free_q_tag, sc->reply_free_q_dmamap, sc->reply_free_q, sz,
1793 	    mpi3mr_memaddr_cb, &sc->reply_free_q_phys, BUS_DMA_NOWAIT);
1794 
1795 	mpi3mr_dprint(sc, MPI3MR_XINFO, "reply_free_q (0x%p): depth(%d), frame_size(%d), "
1796 	    "pool_size(%d kB), reply_free_q_dma(0x%llx)\n",
1797 	    sc->reply_free_q, sc->reply_free_q_sz, 8, (sz / 1024),
1798 	    (unsigned long long)sc->reply_free_q_phys);
1799 
1800 	/* sense buffer pool,  4 byte align */
1801 	sz = sc->num_sense_bufs * MPI3MR_SENSEBUF_SZ;
1802 
1803         if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,    /* parent */
1804 				4, 0,			/* algnmnt, boundary */
1805 				sc->dma_loaddr,		/* lowaddr */
1806 				BUS_SPACE_MAXADDR,	/* highaddr */
1807 				NULL, NULL,		/* filter, filterarg */
1808                                 sz,			/* maxsize */
1809                                 1,			/* nsegments */
1810                                 sz,			/* maxsegsize */
1811                                 0,			/* flags */
1812                                 NULL, NULL,		/* lockfunc, lockarg */
1813                                 &sc->sense_buf_tag)) {
1814 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate Sense buffer DMA tag\n");
1815 		return (ENOMEM);
1816         }
1817 
1818 	if (bus_dmamem_alloc(sc->sense_buf_tag, (void **)&sc->sense_buf,
1819 	    BUS_DMA_NOWAIT, &sc->sense_buf_dmamap)) {
1820 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Func: %s line: %d  DMA mem alloc failed\n",
1821 			__func__, __LINE__);
1822 		return (ENOMEM);
1823         }
1824 
1825 	bzero(sc->sense_buf, sz);
1826         bus_dmamap_load(sc->sense_buf_tag, sc->sense_buf_dmamap, sc->sense_buf, sz,
1827 	    mpi3mr_memaddr_cb, &sc->sense_buf_phys, BUS_DMA_NOWAIT);
1828 
1829 	mpi3mr_dprint(sc, MPI3MR_XINFO, "sense_buf (0x%p): depth(%d), frame_size(%d), "
1830 	    "pool_size(%d kB), sense_dma(0x%llx)\n",
1831 	    sc->sense_buf, sc->num_sense_bufs, MPI3MR_SENSEBUF_SZ,
1832 	    (sz / 1024), (unsigned long long)sc->sense_buf_phys);
1833 
1834 	/* sense buffer queue, 8 byte align */
1835 	sz = sc->sense_buf_q_sz * 8;
1836 
1837         if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,    /* parent */
1838 				8, 0,			/* algnmnt, boundary */
1839 				sc->dma_loaddr,		/* lowaddr */
1840 				BUS_SPACE_MAXADDR,	/* highaddr */
1841 				NULL, NULL,		/* filter, filterarg */
1842                                 sz,			/* maxsize */
1843                                 1,			/* nsegments */
1844                                 sz,			/* maxsegsize */
1845                                 0,			/* flags */
1846                                 NULL, NULL,		/* lockfunc, lockarg */
1847                                 &sc->sense_buf_q_tag)) {
1848 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate Sense buffer Queue DMA tag\n");
1849 		return (ENOMEM);
1850         }
1851 
1852 	if (bus_dmamem_alloc(sc->sense_buf_q_tag, (void **)&sc->sense_buf_q,
1853 	    BUS_DMA_NOWAIT, &sc->sense_buf_q_dmamap)) {
1854 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Func: %s line: %d  DMA mem alloc failed\n",
1855 			__func__, __LINE__);
1856 		return (ENOMEM);
1857         }
1858 
1859 	bzero(sc->sense_buf_q, sz);
1860         bus_dmamap_load(sc->sense_buf_q_tag, sc->sense_buf_q_dmamap, sc->sense_buf_q, sz,
1861 	    mpi3mr_memaddr_cb, &sc->sense_buf_q_phys, BUS_DMA_NOWAIT);
1862 
1863 	mpi3mr_dprint(sc, MPI3MR_XINFO, "sense_buf_q (0x%p): depth(%d), frame_size(%d), "
1864 	    "pool_size(%d kB), sense_dma(0x%llx)\n",
1865 	    sc->sense_buf_q, sc->sense_buf_q_sz, 8, (sz / 1024),
1866 	    (unsigned long long)sc->sense_buf_q_phys);
1867 
1868 	return 0;
1869 }
1870 
/**
 * mpi3mr_reply_alloc - Allocate reply buffers, bitmaps and reply DMA pools
 * @sc: Adapter instance reference
 *
 * Allocate the per-command reply buffers (init, ioctl, task management,
 * device-removal handshake and event-ack commands), the device-handle /
 * device-removal / event-ack tracking bitmaps, and the controller reply
 * DMA pools, then post the reply and sense buffers to the controller.
 *
 * If the init command reply is already allocated (re-initialization,
 * e.g. after a controller reset), allocation is skipped and the buffers
 * are only re-posted to the controller.
 *
 * Return: 0 on success, -1 on failure (interrupts are torn down and all
 * driver memory is freed on the failure path).
 */
static int mpi3mr_reply_alloc(struct mpi3mr_softc *sc)
{
	int retval = 0;
	U32 i;

	/* Non-NULL init reply implies a prior successful allocation pass */
	if (sc->init_cmds.reply)
		goto post_reply_sbuf;

	sc->init_cmds.reply = malloc(sc->reply_sz,
		M_MPI3MR, M_NOWAIT | M_ZERO);

	if (!sc->init_cmds.reply) {
		printf(IOCNAME "Cannot allocate memory for init_cmds.reply\n",
		    sc->name);
		goto out_failed;
	}

	sc->ioctl_cmds.reply = malloc(sc->reply_sz, M_MPI3MR, M_NOWAIT | M_ZERO);
	if (!sc->ioctl_cmds.reply) {
		printf(IOCNAME "Cannot allocate memory for ioctl_cmds.reply\n",
		    sc->name);
		goto out_failed;
	}

	sc->host_tm_cmds.reply = malloc(sc->reply_sz, M_MPI3MR, M_NOWAIT | M_ZERO);
	if (!sc->host_tm_cmds.reply) {
		printf(IOCNAME "Cannot allocate memory for host_tm.reply\n",
		    sc->name);
		goto out_failed;
	}
	/* One reply buffer per concurrent device-removal handshake command */
	for (i=0; i<MPI3MR_NUM_DEVRMCMD; i++) {
		sc->dev_rmhs_cmds[i].reply = malloc(sc->reply_sz,
		    M_MPI3MR, M_NOWAIT | M_ZERO);
		if (!sc->dev_rmhs_cmds[i].reply) {
			printf(IOCNAME "Cannot allocate memory for"
			    " dev_rmhs_cmd[%d].reply\n",
			    sc->name, i);
			goto out_failed;
		}
	}

	/* One reply buffer per concurrent event acknowledgment command */
	for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) {
		sc->evtack_cmds[i].reply = malloc(sc->reply_sz,
			M_MPI3MR, M_NOWAIT | M_ZERO);
		if (!sc->evtack_cmds[i].reply)
			goto out_failed;
	}

	/* One bit per possible firmware device handle */
	sc->dev_handle_bitmap_sz = MPI3MR_DIV_ROUND_UP(sc->facts.max_devhandle, 8);

	sc->removepend_bitmap = malloc(sc->dev_handle_bitmap_sz,
	    M_MPI3MR, M_NOWAIT | M_ZERO);
	if (!sc->removepend_bitmap) {
		printf(IOCNAME "Cannot alloc memory for remove pend bitmap\n",
		    sc->name);
		goto out_failed;
	}

	/* One bit per device-removal handshake command slot */
	sc->devrem_bitmap_sz = MPI3MR_DIV_ROUND_UP(MPI3MR_NUM_DEVRMCMD, 8);
	sc->devrem_bitmap = malloc(sc->devrem_bitmap_sz,
	    M_MPI3MR, M_NOWAIT | M_ZERO);
	if (!sc->devrem_bitmap) {
		printf(IOCNAME "Cannot alloc memory for dev remove bitmap\n",
		    sc->name);
		goto out_failed;
	}

	/* One bit per event-ack command slot */
	sc->evtack_cmds_bitmap_sz = MPI3MR_DIV_ROUND_UP(MPI3MR_NUM_EVTACKCMD, 8);

	sc->evtack_cmds_bitmap = malloc(sc->evtack_cmds_bitmap_sz,
		M_MPI3MR, M_NOWAIT | M_ZERO);
	if (!sc->evtack_cmds_bitmap)
		goto out_failed;

	/* Controller-visible reply, reply-free-queue and sense DMA pools */
	if (mpi3mr_reply_dma_alloc(sc)) {
		printf(IOCNAME "func:%s line:%d DMA memory allocation failed\n",
		    sc->name, __func__, __LINE__);
		goto out_failed;
	}

post_reply_sbuf:
	/* Hand the reply and sense buffers to the controller */
	mpi3mr_setup_reply_free_queues(sc);
	return retval;
out_failed:
	mpi3mr_cleanup_interrupts(sc);
	mpi3mr_free_mem(sc);
	retval = -1;
	return retval;
}
1960 
1961 static void
1962 mpi3mr_print_fw_pkg_ver(struct mpi3mr_softc *sc)
1963 {
1964 	int retval = 0;
1965 	void *fw_pkg_ver = NULL;
1966 	bus_dma_tag_t fw_pkg_ver_tag;
1967 	bus_dmamap_t fw_pkg_ver_map;
1968 	bus_addr_t fw_pkg_ver_dma;
1969 	Mpi3CIUploadRequest_t ci_upload;
1970 	Mpi3ComponentImageHeader_t *ci_header;
1971 	U32 fw_pkg_ver_len = sizeof(*ci_header);
1972 	U8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
1973 
1974 	if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,  /* parent */
1975 				4, 0,			/* algnmnt, boundary */
1976 				sc->dma_loaddr,		/* lowaddr */
1977 				BUS_SPACE_MAXADDR,	/* highaddr */
1978 				NULL, NULL,		/* filter, filterarg */
1979 				fw_pkg_ver_len,		/* maxsize */
1980 				1,			/* nsegments */
1981 				fw_pkg_ver_len,		/* maxsegsize */
1982 				0,			/* flags */
1983 				NULL, NULL,		/* lockfunc, lockarg */
1984 				&fw_pkg_ver_tag)) {
1985 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate fw package version request DMA tag\n");
1986 		return;
1987 	}
1988 
1989 	if (bus_dmamem_alloc(fw_pkg_ver_tag, (void **)&fw_pkg_ver, BUS_DMA_NOWAIT, &fw_pkg_ver_map)) {
1990 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Func: %s line: %d fw package version DMA mem alloc failed\n",
1991 			      __func__, __LINE__);
1992 		return;
1993 	}
1994 
1995 	bzero(fw_pkg_ver, fw_pkg_ver_len);
1996 
1997 	bus_dmamap_load(fw_pkg_ver_tag, fw_pkg_ver_map, fw_pkg_ver, fw_pkg_ver_len,
1998 	    mpi3mr_memaddr_cb, &fw_pkg_ver_dma, BUS_DMA_NOWAIT);
1999 
2000 	mpi3mr_dprint(sc, MPI3MR_XINFO, "Func: %s line: %d fw package version phys addr= %#016jx size= %d\n",
2001 		      __func__, __LINE__, (uintmax_t)fw_pkg_ver_dma, fw_pkg_ver_len);
2002 
2003 	if (!fw_pkg_ver) {
2004 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Memory alloc for fw package version failed\n");
2005 		goto out;
2006 	}
2007 
2008 	memset(&ci_upload, 0, sizeof(ci_upload));
2009 	mtx_lock(&sc->init_cmds.completion.lock);
2010 	if (sc->init_cmds.state & MPI3MR_CMD_PENDING) {
2011 		mpi3mr_dprint(sc, MPI3MR_INFO,"Issue CI Header Upload: command is in use\n");
2012 		mtx_unlock(&sc->init_cmds.completion.lock);
2013 		goto out;
2014 	}
2015 	sc->init_cmds.state = MPI3MR_CMD_PENDING;
2016 	sc->init_cmds.is_waiting = 1;
2017 	sc->init_cmds.callback = NULL;
2018 	ci_upload.HostTag = htole16(MPI3MR_HOSTTAG_INITCMDS);
2019 	ci_upload.Function = MPI3_FUNCTION_CI_UPLOAD;
2020 	ci_upload.MsgFlags = MPI3_CI_UPLOAD_MSGFLAGS_LOCATION_PRIMARY;
2021 	ci_upload.ImageOffset = MPI3_IMAGE_HEADER_SIGNATURE0_OFFSET;
2022 	ci_upload.SegmentSize = MPI3_IMAGE_HEADER_SIZE;
2023 
2024 	mpi3mr_add_sg_single(&ci_upload.SGL, sgl_flags, fw_pkg_ver_len,
2025 	    fw_pkg_ver_dma);
2026 
2027 	init_completion(&sc->init_cmds.completion);
2028 	if ((retval = mpi3mr_submit_admin_cmd(sc, &ci_upload, sizeof(ci_upload)))) {
2029 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Issue CI Header Upload: Admin Post failed\n");
2030 		goto out_unlock;
2031 	}
2032 	wait_for_completion_timeout(&sc->init_cmds.completion,
2033 		(MPI3MR_INTADMCMD_TIMEOUT));
2034 	if (!(sc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
2035 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Issue CI Header Upload: command timed out\n");
2036 		sc->init_cmds.is_waiting = 0;
2037 		if (!(sc->init_cmds.state & MPI3MR_CMD_RESET))
2038 			mpi3mr_check_rh_fault_ioc(sc,
2039 				MPI3MR_RESET_FROM_GETPKGVER_TIMEOUT);
2040 		goto out_unlock;
2041 	}
2042 	if ((GET_IOC_STATUS(sc->init_cmds.ioc_status)) != MPI3_IOCSTATUS_SUCCESS) {
2043 		mpi3mr_dprint(sc, MPI3MR_ERROR,
2044 			      "Issue CI Header Upload: Failed IOCStatus(0x%04x) Loginfo(0x%08x)\n",
2045 			      GET_IOC_STATUS(sc->init_cmds.ioc_status), sc->init_cmds.ioc_loginfo);
2046 		goto out_unlock;
2047 	}
2048 
2049 	ci_header = (Mpi3ComponentImageHeader_t *) fw_pkg_ver;
2050 	mpi3mr_dprint(sc, MPI3MR_XINFO,
2051 		      "Issue CI Header Upload:EnvVariableOffset(0x%x) \
2052 		      HeaderSize(0x%x) Signature1(0x%x)\n",
2053 		      ci_header->EnvironmentVariableOffset,
2054 		      ci_header->HeaderSize,
2055 		      ci_header->Signature1);
2056 	mpi3mr_dprint(sc, MPI3MR_INFO, "FW Package Version: %02d.%02d.%02d.%02d\n",
2057 		      ci_header->ComponentImageVersion.GenMajor,
2058 		      ci_header->ComponentImageVersion.GenMinor,
2059 		      ci_header->ComponentImageVersion.PhaseMajor,
2060 		      ci_header->ComponentImageVersion.PhaseMinor);
2061 out_unlock:
2062 	sc->init_cmds.state = MPI3MR_CMD_NOTUSED;
2063 	mtx_unlock(&sc->init_cmds.completion.lock);
2064 
2065 out:
2066 	if (fw_pkg_ver_dma != 0)
2067 		bus_dmamap_unload(fw_pkg_ver_tag, fw_pkg_ver_map);
2068 	if (fw_pkg_ver)
2069 		bus_dmamem_free(fw_pkg_ver_tag, fw_pkg_ver, fw_pkg_ver_map);
2070 	if (fw_pkg_ver_tag)
2071 		bus_dma_tag_destroy(fw_pkg_ver_tag);
2072 
2073 }
2074 
2075 /**
2076  * mpi3mr_issue_iocinit - Send IOC Init
2077  * @sc: Adapter instance reference
2078  *
2079  * Issue IOC Init MPI request through admin queue and wait for
2080  * the completion of it or time out.
2081  *
2082  * Return: 0 on success, non-zero on failures.
2083  */
2084 static int mpi3mr_issue_iocinit(struct mpi3mr_softc *sc)
2085 {
2086 	Mpi3IOCInitRequest_t iocinit_req;
2087 	Mpi3DriverInfoLayout_t *drvr_info = NULL;
2088 	bus_dma_tag_t drvr_info_tag;
2089 	bus_dmamap_t drvr_info_map;
2090 	bus_addr_t drvr_info_phys;
2091 	U32 drvr_info_len = sizeof(*drvr_info);
2092 	int retval = 0;
2093 	struct timeval now;
2094 	uint64_t time_in_msec;
2095 
2096 	if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,  /* parent */
2097 				4, 0,			/* algnmnt, boundary */
2098 				sc->dma_loaddr,		/* lowaddr */
2099 				BUS_SPACE_MAXADDR,	/* highaddr */
2100 				NULL, NULL,		/* filter, filterarg */
2101                                 drvr_info_len,		/* maxsize */
2102                                 1,			/* nsegments */
2103                                 drvr_info_len,		/* maxsegsize */
2104                                 0,			/* flags */
2105                                 NULL, NULL,		/* lockfunc, lockarg */
2106                                 &drvr_info_tag)) {
2107 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate request DMA tag\n");
2108 		return (ENOMEM);
2109         }
2110 
2111 	if (bus_dmamem_alloc(drvr_info_tag, (void **)&drvr_info,
2112 	    BUS_DMA_NOWAIT, &drvr_info_map)) {
2113 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Func: %s line: %d Data  DMA mem alloc failed\n",
2114 			__func__, __LINE__);
2115 		return (ENOMEM);
2116         }
2117 
2118 	bzero(drvr_info, drvr_info_len);
2119         bus_dmamap_load(drvr_info_tag, drvr_info_map, drvr_info, drvr_info_len,
2120 	    mpi3mr_memaddr_cb, &drvr_info_phys, BUS_DMA_NOWAIT);
2121 	mpi3mr_dprint(sc, MPI3MR_XINFO, "Func: %s line: %d IOCfacts drvr_info phys addr= %#016jx size= %d\n",
2122 	    __func__, __LINE__, (uintmax_t)drvr_info_phys, drvr_info_len);
2123 
2124 	if (!drvr_info)
2125 	{
2126 		retval = -1;
2127 		printf(IOCNAME "Memory alloc for Driver Info failed\n",
2128 		    sc->name);
2129 		goto out;
2130 	}
2131 	drvr_info->InformationLength = (drvr_info_len);
2132 	strcpy(drvr_info->DriverSignature, "Broadcom");
2133 	strcpy(drvr_info->OsName, "FreeBSD");
2134 	strcpy(drvr_info->OsVersion, fmt_os_ver);
2135 	strcpy(drvr_info->DriverName, MPI3MR_DRIVER_NAME);
2136 	strcpy(drvr_info->DriverVersion, MPI3MR_DRIVER_VERSION);
2137 	strcpy(drvr_info->DriverReleaseDate, MPI3MR_DRIVER_RELDATE);
2138 	drvr_info->DriverCapabilities = 0;
2139 	memcpy((U8 *)&sc->driver_info, (U8 *)drvr_info, sizeof(sc->driver_info));
2140 
2141 	memset(&iocinit_req, 0, sizeof(iocinit_req));
2142 	mtx_lock(&sc->init_cmds.completion.lock);
2143 	if (sc->init_cmds.state & MPI3MR_CMD_PENDING) {
2144 		retval = -1;
2145 		printf(IOCNAME "Issue IOCInit: Init command is in use\n",
2146 		    sc->name);
2147 		mtx_unlock(&sc->init_cmds.completion.lock);
2148 		goto out;
2149 	}
2150 	sc->init_cmds.state = MPI3MR_CMD_PENDING;
2151 	sc->init_cmds.is_waiting = 1;
2152 	sc->init_cmds.callback = NULL;
2153         iocinit_req.HostTag = MPI3MR_HOSTTAG_INITCMDS;
2154         iocinit_req.Function = MPI3_FUNCTION_IOC_INIT;
2155         iocinit_req.MPIVersion.Struct.Dev = MPI3_VERSION_DEV;
2156         iocinit_req.MPIVersion.Struct.Unit = MPI3_VERSION_UNIT;
2157         iocinit_req.MPIVersion.Struct.Major = MPI3_VERSION_MAJOR;
2158         iocinit_req.MPIVersion.Struct.Minor = MPI3_VERSION_MINOR;
2159         iocinit_req.WhoInit = MPI3_WHOINIT_HOST_DRIVER;
2160         iocinit_req.ReplyFreeQueueDepth = sc->reply_free_q_sz;
2161         iocinit_req.ReplyFreeQueueAddress =
2162                 sc->reply_free_q_phys;
2163         iocinit_req.SenseBufferLength = MPI3MR_SENSEBUF_SZ;
2164         iocinit_req.SenseBufferFreeQueueDepth =
2165                 sc->sense_buf_q_sz;
2166         iocinit_req.SenseBufferFreeQueueAddress =
2167                 sc->sense_buf_q_phys;
2168         iocinit_req.DriverInformationAddress = drvr_info_phys;
2169 
2170 	getmicrotime(&now);
2171 	time_in_msec = (now.tv_sec * 1000 + now.tv_usec/1000);
2172 	iocinit_req.TimeStamp = htole64(time_in_msec);
2173 
2174 	init_completion(&sc->init_cmds.completion);
2175 	retval = mpi3mr_submit_admin_cmd(sc, &iocinit_req,
2176 	    sizeof(iocinit_req));
2177 
2178 	if (retval) {
2179 		printf(IOCNAME "Issue IOCInit: Admin Post failed\n",
2180 		    sc->name);
2181 		goto out_unlock;
2182 	}
2183 
2184 	wait_for_completion_timeout(&sc->init_cmds.completion,
2185 	    (MPI3MR_INTADMCMD_TIMEOUT));
2186 	if (!(sc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
2187 		printf(IOCNAME "Issue IOCInit: command timed out\n",
2188 		    sc->name);
2189 		mpi3mr_check_rh_fault_ioc(sc,
2190 		    MPI3MR_RESET_FROM_IOCINIT_TIMEOUT);
2191 		sc->unrecoverable = 1;
2192 		retval = -1;
2193 		goto out_unlock;
2194 	}
2195 
2196 	if ((sc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
2197 	     != MPI3_IOCSTATUS_SUCCESS ) {
2198 		printf(IOCNAME "Issue IOCInit: Failed IOCStatus(0x%04x) "
2199 		    " Loginfo(0x%08x) \n" , sc->name,
2200 		    (sc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
2201 		    sc->init_cmds.ioc_loginfo);
2202 		retval = -1;
2203 		goto out_unlock;
2204 	}
2205 
2206 out_unlock:
2207 	sc->init_cmds.state = MPI3MR_CMD_NOTUSED;
2208 	mtx_unlock(&sc->init_cmds.completion.lock);
2209 
2210 out:
2211 	if (drvr_info_phys != 0)
2212 		bus_dmamap_unload(drvr_info_tag, drvr_info_map);
2213 	if (drvr_info != NULL)
2214 		bus_dmamem_free(drvr_info_tag, drvr_info, drvr_info_map);
2215 	if (drvr_info_tag != NULL)
2216 		bus_dma_tag_destroy(drvr_info_tag);
2217 	return retval;
2218 }
2219 
2220 static void
2221 mpi3mr_display_ioc_info(struct mpi3mr_softc *sc)
2222 {
2223         int i = 0;
2224         char personality[16];
2225 
2226         switch (sc->facts.personality) {
2227         case MPI3_IOCFACTS_FLAGS_PERSONALITY_EHBA:
2228                 strcpy(personality, "Enhanced HBA");
2229                 break;
2230         case MPI3_IOCFACTS_FLAGS_PERSONALITY_RAID_DDR:
2231                 strcpy(personality, "RAID");
2232                 break;
2233         default:
2234                 strcpy(personality, "Unknown");
2235                 break;
2236         }
2237 
2238 	mpi3mr_dprint(sc, MPI3MR_INFO, "Current Personality: %s\n", personality);
2239 
2240 	mpi3mr_dprint(sc, MPI3MR_INFO, "%s\n", sc->fw_version);
2241 
2242         mpi3mr_dprint(sc, MPI3MR_INFO, "Protocol=(");
2243 
2244         if (sc->facts.protocol_flags &
2245             MPI3_IOCFACTS_PROTOCOL_SCSI_INITIATOR) {
2246                 printf("Initiator");
2247                 i++;
2248         }
2249 
2250         if (sc->facts.protocol_flags &
2251             MPI3_IOCFACTS_PROTOCOL_SCSI_TARGET) {
2252                 printf("%sTarget", i ? "," : "");
2253                 i++;
2254         }
2255 
2256         if (sc->facts.protocol_flags &
2257             MPI3_IOCFACTS_PROTOCOL_NVME) {
2258                 printf("%sNVMe attachment", i ? "," : "");
2259                 i++;
2260         }
2261         i = 0;
2262         printf("), ");
2263         printf("Capabilities=(");
2264 
2265         if (sc->facts.ioc_capabilities &
2266             MPI3_IOCFACTS_CAPABILITY_RAID_CAPABLE) {
2267                 printf("RAID");
2268                 i++;
2269         }
2270 
2271         printf(")\n");
2272 }
2273 
2274 /**
2275  * mpi3mr_unmask_events - Unmask events in event mask bitmap
2276  * @sc: Adapter instance reference
2277  * @event: MPI event ID
2278  *
2279  * Un mask the specific event by resetting the event_mask
2280  * bitmap.
2281  *
2282  * Return: None.
2283  */
2284 static void mpi3mr_unmask_events(struct mpi3mr_softc *sc, U16 event)
2285 {
2286 	U32 desired_event;
2287 
2288 	if (event >= 128)
2289 		return;
2290 
2291 	desired_event = (1 << (event % 32));
2292 
2293 	if (event < 32)
2294 		sc->event_masks[0] &= ~desired_event;
2295 	else if (event < 64)
2296 		sc->event_masks[1] &= ~desired_event;
2297 	else if (event < 96)
2298 		sc->event_masks[2] &= ~desired_event;
2299 	else if (event < 128)
2300 		sc->event_masks[3] &= ~desired_event;
2301 }
2302 
2303 static void mpi3mr_set_events_mask(struct mpi3mr_softc *sc)
2304 {
2305 	int i;
2306 	for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
2307 		sc->event_masks[i] = -1;
2308 
2309         mpi3mr_unmask_events(sc, MPI3_EVENT_DEVICE_ADDED);
2310         mpi3mr_unmask_events(sc, MPI3_EVENT_DEVICE_INFO_CHANGED);
2311         mpi3mr_unmask_events(sc, MPI3_EVENT_DEVICE_STATUS_CHANGE);
2312 
2313         mpi3mr_unmask_events(sc, MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE);
2314 
2315         mpi3mr_unmask_events(sc, MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
2316         mpi3mr_unmask_events(sc, MPI3_EVENT_SAS_DISCOVERY);
2317         mpi3mr_unmask_events(sc, MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR);
2318         mpi3mr_unmask_events(sc, MPI3_EVENT_SAS_BROADCAST_PRIMITIVE);
2319 
2320         mpi3mr_unmask_events(sc, MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST);
2321         mpi3mr_unmask_events(sc, MPI3_EVENT_PCIE_ENUMERATION);
2322 
2323         mpi3mr_unmask_events(sc, MPI3_EVENT_PREPARE_FOR_RESET);
2324         mpi3mr_unmask_events(sc, MPI3_EVENT_CABLE_MGMT);
2325         mpi3mr_unmask_events(sc, MPI3_EVENT_ENERGY_PACK_CHANGE);
2326 }
2327 
2328 /**
2329  * mpi3mr_issue_event_notification - Send event notification
2330  * @sc: Adapter instance reference
2331  *
2332  * Issue event notification MPI request through admin queue and
2333  * wait for the completion of it or time out.
2334  *
2335  * Return: 0 on success, non-zero on failures.
2336  */
int mpi3mr_issue_event_notification(struct mpi3mr_softc *sc)
{
	Mpi3EventNotificationRequest_t evtnotify_req;
	int retval = 0;
	U8 i;

	memset(&evtnotify_req, 0, sizeof(evtnotify_req));
	/* init_cmds is a single shared slot; serialize via its completion lock */
	mtx_lock(&sc->init_cmds.completion.lock);
	if (sc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		printf(IOCNAME "Issue EvtNotify: Init command is in use\n",
		    sc->name);
		mtx_unlock(&sc->init_cmds.completion.lock);
		goto out;
	}
	sc->init_cmds.state = MPI3MR_CMD_PENDING;
	sc->init_cmds.is_waiting = 1;
	sc->init_cmds.callback = NULL;
	evtnotify_req.HostTag = (MPI3MR_HOSTTAG_INITCMDS);
	evtnotify_req.Function = MPI3_FUNCTION_EVENT_NOTIFICATION;
	/* Copy the driver's current event mask words into the request */
	for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
		evtnotify_req.EventMasks[i] =
		    (sc->event_masks[i]);
	init_completion(&sc->init_cmds.completion);
	retval = mpi3mr_submit_admin_cmd(sc, &evtnotify_req,
	    sizeof(evtnotify_req));
	if (retval) {
		printf(IOCNAME "Issue EvtNotify: Admin Post failed\n",
		    sc->name);
		goto out_unlock;
	}

	/*
	 * Polled (not interrupt-driven) completion: this runs during
	 * initialization, possibly before interrupts are usable.
	 */
	poll_for_command_completion(sc,
				    &sc->init_cmds,
				    (MPI3MR_INTADMCMD_TIMEOUT));
	if (!(sc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		printf(IOCNAME "Issue EvtNotify: command timed out\n",
		    sc->name);
		mpi3mr_check_rh_fault_ioc(sc,
		    MPI3MR_RESET_FROM_EVTNOTIFY_TIMEOUT);
		retval = -1;
		goto out_unlock;
	}

	if ((sc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	     != MPI3_IOCSTATUS_SUCCESS ) {
		printf(IOCNAME "Issue EvtNotify: Failed IOCStatus(0x%04x) "
		    " Loginfo(0x%08x) \n" , sc->name,
		    (sc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    sc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}

out_unlock:
	/* Release the shared command slot in all cases */
	sc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mtx_unlock(&sc->init_cmds.completion.lock);

out:
	return retval;
}
2398 
2399 int
2400 mpi3mr_register_events(struct mpi3mr_softc *sc)
2401 {
2402 	int error;
2403 
2404 	mpi3mr_set_events_mask(sc);
2405 
2406 	error = mpi3mr_issue_event_notification(sc);
2407 
2408 	if (error) {
2409 		printf(IOCNAME "Failed to issue event notification %d\n",
2410 		    sc->name, error);
2411 	}
2412 
2413 	return error;
2414 }
2415 
2416 /**
2417  * mpi3mr_process_event_ack - Process event acknowledgment
2418  * @sc: Adapter instance reference
2419  * @event: MPI3 event ID
2420  * @event_ctx: Event context
2421  *
2422  * Send event acknowledgement through admin queue and wait for
2423  * it to complete.
2424  *
2425  * Return: 0 on success, non-zero on failures.
2426  */
int mpi3mr_process_event_ack(struct mpi3mr_softc *sc, U8 event,
	U32 event_ctx)
{
	Mpi3EventAckRequest_t evtack_req;
	int retval = 0;

	memset(&evtack_req, 0, sizeof(evtack_req));
	/* init_cmds is a single shared slot; serialize via its completion lock */
	mtx_lock(&sc->init_cmds.completion.lock);
	if (sc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		printf(IOCNAME "Issue EvtAck: Init command is in use\n",
		    sc->name);
		mtx_unlock(&sc->init_cmds.completion.lock);
		goto out;
	}
	sc->init_cmds.state = MPI3MR_CMD_PENDING;
	sc->init_cmds.is_waiting = 1;
	sc->init_cmds.callback = NULL;
	evtack_req.HostTag = htole16(MPI3MR_HOSTTAG_INITCMDS);
	evtack_req.Function = MPI3_FUNCTION_EVENT_ACK;
	/* Echo back the event ID and context being acknowledged */
	evtack_req.Event = event;
	evtack_req.EventContext = htole32(event_ctx);

	init_completion(&sc->init_cmds.completion);
	retval = mpi3mr_submit_admin_cmd(sc, &evtack_req,
	    sizeof(evtack_req));
	if (retval) {
		printf(IOCNAME "Issue EvtAck: Admin Post failed\n",
		    sc->name);
		goto out_unlock;
	}

	/* Block until the firmware completes the ack or the timeout expires */
	wait_for_completion_timeout(&sc->init_cmds.completion,
	    (MPI3MR_INTADMCMD_TIMEOUT));
	if (!(sc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		printf(IOCNAME "Issue EvtAck: command timed out\n",
		    sc->name);
		retval = -1;
		goto out_unlock;
	}

	if ((sc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	     != MPI3_IOCSTATUS_SUCCESS ) {
		printf(IOCNAME "Issue EvtAck: Failed IOCStatus(0x%04x) "
		    " Loginfo(0x%08x) \n" , sc->name,
		    (sc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    sc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}

out_unlock:
	/* Release the shared command slot in all cases */
	sc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mtx_unlock(&sc->init_cmds.completion.lock);

out:
	return retval;
}
2485 
2486 
2487 static int mpi3mr_alloc_chain_bufs(struct mpi3mr_softc *sc)
2488 {
2489 	int retval = 0;
2490 	U32 sz, i;
2491 	U16 num_chains;
2492 
2493 	num_chains = sc->max_host_ios;
2494 
2495 	sc->chain_buf_count = num_chains;
2496 	sz = sizeof(struct mpi3mr_chain) * num_chains;
2497 
2498 	sc->chain_sgl_list = malloc(sz, M_MPI3MR, M_NOWAIT | M_ZERO);
2499 
2500 	if (!sc->chain_sgl_list) {
2501 		printf(IOCNAME "Cannot allocate memory for chain SGL list\n",
2502 		    sc->name);
2503 		retval = -1;
2504 		goto out_failed;
2505 	}
2506 
2507 	sz = MPI3MR_CHAINSGE_SIZE;
2508 
2509         if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,  /* parent */
2510 				4096, 0,		/* algnmnt, boundary */
2511 				sc->dma_loaddr,		/* lowaddr */
2512 				BUS_SPACE_MAXADDR,	/* highaddr */
2513 				NULL, NULL,		/* filter, filterarg */
2514                                 sz,			/* maxsize */
2515                                 1,			/* nsegments */
2516                                 sz,			/* maxsegsize */
2517                                 0,			/* flags */
2518                                 NULL, NULL,		/* lockfunc, lockarg */
2519                                 &sc->chain_sgl_list_tag)) {
2520 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate Chain buffer DMA tag\n");
2521 		return (ENOMEM);
2522         }
2523 
2524 	for (i = 0; i < num_chains; i++) {
2525 		if (bus_dmamem_alloc(sc->chain_sgl_list_tag, (void **)&sc->chain_sgl_list[i].buf,
2526 		    BUS_DMA_NOWAIT, &sc->chain_sgl_list[i].buf_dmamap)) {
2527 			mpi3mr_dprint(sc, MPI3MR_ERROR, "Func: %s line: %d  DMA mem alloc failed\n",
2528 				__func__, __LINE__);
2529 			return (ENOMEM);
2530 		}
2531 
2532 		bzero(sc->chain_sgl_list[i].buf, sz);
2533 		bus_dmamap_load(sc->chain_sgl_list_tag, sc->chain_sgl_list[i].buf_dmamap, sc->chain_sgl_list[i].buf, sz,
2534 		    mpi3mr_memaddr_cb, &sc->chain_sgl_list[i].buf_phys, BUS_DMA_NOWAIT);
2535 		mpi3mr_dprint(sc, MPI3MR_XINFO, "Func: %s line: %d phys addr= %#016jx size= %d\n",
2536 		    __func__, __LINE__, (uintmax_t)sc->chain_sgl_list[i].buf_phys, sz);
2537 	}
2538 
2539 	sc->chain_bitmap_sz = MPI3MR_DIV_ROUND_UP(num_chains, 8);
2540 
2541 	sc->chain_bitmap = malloc(sc->chain_bitmap_sz, M_MPI3MR, M_NOWAIT | M_ZERO);
2542 	if (!sc->chain_bitmap) {
2543 		mpi3mr_dprint(sc, MPI3MR_INFO, "Cannot alloc memory for chain bitmap\n");
2544 		retval = -1;
2545 		goto out_failed;
2546 	}
2547 	return retval;
2548 
2549 out_failed:
2550 	for (i = 0; i < num_chains; i++) {
2551 		if (sc->chain_sgl_list[i].buf_phys != 0)
2552 			bus_dmamap_unload(sc->chain_sgl_list_tag, sc->chain_sgl_list[i].buf_dmamap);
2553 		if (sc->chain_sgl_list[i].buf != NULL)
2554 			bus_dmamem_free(sc->chain_sgl_list_tag, sc->chain_sgl_list[i].buf, sc->chain_sgl_list[i].buf_dmamap);
2555 	}
2556 	if (sc->chain_sgl_list_tag != NULL)
2557 		bus_dma_tag_destroy(sc->chain_sgl_list_tag);
2558 	return retval;
2559 }
2560 
2561 static int mpi3mr_pel_alloc(struct mpi3mr_softc *sc)
2562 {
2563 	int retval = 0;
2564 
2565 	if (!sc->pel_cmds.reply) {
2566 		sc->pel_cmds.reply = malloc(sc->reply_sz, M_MPI3MR, M_NOWAIT | M_ZERO);
2567 		if (!sc->pel_cmds.reply) {
2568 			printf(IOCNAME "Cannot allocate memory for pel_cmds.reply\n",
2569 			    sc->name);
2570 			goto out_failed;
2571 		}
2572 	}
2573 
2574 	if (!sc->pel_abort_cmd.reply) {
2575 		sc->pel_abort_cmd.reply = malloc(sc->reply_sz, M_MPI3MR, M_NOWAIT | M_ZERO);
2576 		if (!sc->pel_abort_cmd.reply) {
2577 			printf(IOCNAME "Cannot allocate memory for pel_abort_cmd.reply\n",
2578 			    sc->name);
2579 			goto out_failed;
2580 		}
2581 	}
2582 
2583 	if (!sc->pel_seq_number) {
2584 		sc->pel_seq_number_sz = sizeof(Mpi3PELSeq_t);
2585 		if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,   /* parent */
2586 				 4, 0,                           /* alignment, boundary */
2587 				 sc->dma_loaddr,	         /* lowaddr */
2588 				 BUS_SPACE_MAXADDR,		 /* highaddr */
2589 				 NULL, NULL,                     /* filter, filterarg */
2590 				 sc->pel_seq_number_sz,		 /* maxsize */
2591 				 1,                              /* nsegments */
2592 				 sc->pel_seq_number_sz,          /* maxsegsize */
2593 				 0,                              /* flags */
2594 				 NULL, NULL,                     /* lockfunc, lockarg */
2595 				 &sc->pel_seq_num_dmatag)) {
2596 			 mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot create PEL seq number dma memory tag\n");
2597 			 retval = -ENOMEM;
2598 			 goto out_failed;
2599 		}
2600 
2601 		if (bus_dmamem_alloc(sc->pel_seq_num_dmatag, (void **)&sc->pel_seq_number,
2602 		    BUS_DMA_NOWAIT, &sc->pel_seq_num_dmamap)) {
2603 			mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate PEL seq number kernel buffer dma memory\n");
2604 			retval = -ENOMEM;
2605 			goto out_failed;
2606 		}
2607 
2608 		bzero(sc->pel_seq_number, sc->pel_seq_number_sz);
2609 
2610 		bus_dmamap_load(sc->pel_seq_num_dmatag, sc->pel_seq_num_dmamap, sc->pel_seq_number,
2611 		    sc->pel_seq_number_sz, mpi3mr_memaddr_cb, &sc->pel_seq_number_dma, BUS_DMA_NOWAIT);
2612 
2613 		if (!sc->pel_seq_number) {
2614 			printf(IOCNAME "%s:%d Cannot load PEL seq number dma memory for size: %d\n", sc->name,
2615 				__func__, __LINE__, sc->pel_seq_number_sz);
2616 			retval = -ENOMEM;
2617 			goto out_failed;
2618 		}
2619 	}
2620 
2621 out_failed:
2622 	return retval;
2623 }
2624 
2625 /**
2626  * mpi3mr_validate_fw_update - validate IOCFacts post adapter reset
2627  * @sc: Adapter instance reference
2628  *
2629  * Return zero if the new IOCFacts is compatible with previous values
2630  * else return appropriate error
2631  */
2632 static int
2633 mpi3mr_validate_fw_update(struct mpi3mr_softc *sc)
2634 {
2635 	U16 dev_handle_bitmap_sz;
2636 	U8 *removepend_bitmap;
2637 
2638 	if (sc->facts.reply_sz > sc->reply_sz) {
2639 		mpi3mr_dprint(sc, MPI3MR_ERROR,
2640 		    "Cannot increase reply size from %d to %d\n",
2641 		    sc->reply_sz, sc->reply_sz);
2642 		return -EPERM;
2643 	}
2644 
2645 	if (sc->num_io_throttle_group != sc->facts.max_io_throttle_group) {
2646 		mpi3mr_dprint(sc, MPI3MR_ERROR,
2647 		    "max io throttle group doesn't match old(%d), new(%d)\n",
2648 		    sc->num_io_throttle_group,
2649 		    sc->facts.max_io_throttle_group);
2650 		return -EPERM;
2651 	}
2652 
2653 	if (sc->facts.max_op_reply_q < sc->num_queues) {
2654 		mpi3mr_dprint(sc, MPI3MR_ERROR,
2655 		    "Cannot reduce number of operational reply queues from %d to %d\n",
2656 		    sc->num_queues,
2657 		    sc->facts.max_op_reply_q);
2658 		return -EPERM;
2659 	}
2660 
2661 	if (sc->facts.max_op_req_q < sc->num_queues) {
2662 		mpi3mr_dprint(sc, MPI3MR_ERROR,
2663 		    "Cannot reduce number of operational request queues from %d to %d\n",
2664 		    sc->num_queues, sc->facts.max_op_req_q);
2665 		return -EPERM;
2666 	}
2667 
2668 	dev_handle_bitmap_sz = MPI3MR_DIV_ROUND_UP(sc->facts.max_devhandle, 8);
2669 
2670 	if (dev_handle_bitmap_sz > sc->dev_handle_bitmap_sz) {
2671 		removepend_bitmap = realloc(sc->removepend_bitmap,
2672 		    dev_handle_bitmap_sz, M_MPI3MR, M_NOWAIT);
2673 
2674 		if (!removepend_bitmap) {
2675 			mpi3mr_dprint(sc, MPI3MR_ERROR,
2676 			    "failed to increase removepend_bitmap sz from: %d to %d\n",
2677 			    sc->dev_handle_bitmap_sz, dev_handle_bitmap_sz);
2678 			return -ENOMEM;
2679 		}
2680 
2681 		memset(removepend_bitmap + sc->dev_handle_bitmap_sz, 0,
2682 		    dev_handle_bitmap_sz - sc->dev_handle_bitmap_sz);
2683 		sc->removepend_bitmap = removepend_bitmap;
2684 		mpi3mr_dprint(sc, MPI3MR_INFO,
2685 		    "increased dev_handle_bitmap_sz from %d to %d\n",
2686 		    sc->dev_handle_bitmap_sz, dev_handle_bitmap_sz);
2687 		sc->dev_handle_bitmap_sz = dev_handle_bitmap_sz;
2688 	}
2689 
2690 	return 0;
2691 }
2692 
2693 /*
2694  * mpi3mr_initialize_ioc - Controller initialization
2695  * @dev: pointer to device struct
2696  *
2697  * This function allocates the controller wide resources and brings
2698  * the controller to operational state
2699  *
2700  * Return: 0 on success and proper error codes on failure
2701  */
2702 int mpi3mr_initialize_ioc(struct mpi3mr_softc *sc, U8 init_type)
2703 {
2704 	int retval = 0;
2705 	enum mpi3mr_iocstate ioc_state;
2706 	U64 ioc_info;
2707 	U32 ioc_status, ioc_control, i, timeout;
2708 	Mpi3IOCFactsData_t facts_data;
2709 	char str[32];
2710 	U32 size;
2711 
2712 	sc->cpu_count = mp_ncpus;
2713 
2714 	ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);
2715 	ioc_control = mpi3mr_regread(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET);
2716 	ioc_info = mpi3mr_regread64(sc, MPI3_SYSIF_IOC_INFO_LOW_OFFSET);
2717 
2718 	mpi3mr_dprint(sc, MPI3MR_INFO, "SOD ioc_status: 0x%x ioc_control: 0x%x "
2719 	    "ioc_info: 0x%lx\n", ioc_status, ioc_control, ioc_info);
2720 
2721         /*The timeout value is in 2sec unit, changing it to seconds*/
2722 	sc->ready_timeout =
2723                 ((ioc_info & MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_MASK) >>
2724                     MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_SHIFT) * 2;
2725 
2726 	ioc_state = mpi3mr_get_iocstate(sc);
2727 
2728 	mpi3mr_dprint(sc, MPI3MR_INFO, "IOC state: %s   IOC ready timeout: %d\n",
2729 	    mpi3mr_iocstate_name(ioc_state), sc->ready_timeout);
2730 
2731 	if (ioc_state == MRIOC_STATE_BECOMING_READY ||
2732 	    ioc_state == MRIOC_STATE_RESET_REQUESTED) {
2733 		timeout = sc->ready_timeout * 10;
2734 		do {
2735 			DELAY(1000 * 100);
2736 		} while (--timeout);
2737 
2738 		ioc_state = mpi3mr_get_iocstate(sc);
2739 		mpi3mr_dprint(sc, MPI3MR_INFO,
2740 			"IOC in %s state after waiting for reset time\n",
2741 			mpi3mr_iocstate_name(ioc_state));
2742 	}
2743 
2744 	if (ioc_state == MRIOC_STATE_READY) {
2745                 retval = mpi3mr_mur_ioc(sc, MPI3MR_RESET_FROM_BRINGUP);
2746                 if (retval) {
2747                         mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to MU reset IOC, error 0x%x\n",
2748                                 retval);
2749                 }
2750                 ioc_state = mpi3mr_get_iocstate(sc);
2751         }
2752 
2753         if (ioc_state != MRIOC_STATE_RESET) {
2754                 mpi3mr_print_fault_info(sc);
2755 		 mpi3mr_dprint(sc, MPI3MR_ERROR, "issuing soft reset to bring to reset state\n");
2756                  retval = mpi3mr_issue_reset(sc,
2757                      MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET,
2758                      MPI3MR_RESET_FROM_BRINGUP);
2759                 if (retval) {
2760                         mpi3mr_dprint(sc, MPI3MR_ERROR,
2761                             "%s :Failed to soft reset IOC, error 0x%d\n",
2762                             __func__, retval);
2763                         goto out_failed;
2764                 }
2765         }
2766 
2767 	ioc_state = mpi3mr_get_iocstate(sc);
2768 
2769         if (ioc_state != MRIOC_STATE_RESET) {
2770 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot bring IOC to reset state\n");
2771 		goto out_failed;
2772         }
2773 
2774 	retval = mpi3mr_setup_admin_qpair(sc);
2775 	if (retval) {
2776 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to setup Admin queues, error 0x%x\n",
2777 		    retval);
2778 		goto out_failed;
2779 	}
2780 
2781 	retval = mpi3mr_bring_ioc_ready(sc);
2782 	if (retval) {
2783 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to bring IOC ready, error 0x%x\n",
2784 		    retval);
2785 		goto out_failed;
2786 	}
2787 
2788 	if (init_type == MPI3MR_INIT_TYPE_INIT) {
2789 		retval = mpi3mr_alloc_interrupts(sc, 1);
2790 		if (retval) {
2791 			mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to allocate interrupts, error 0x%x\n",
2792 			    retval);
2793 			goto out_failed;
2794 		}
2795 
2796 		retval = mpi3mr_setup_irqs(sc);
2797 		if (retval) {
2798 			mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to setup ISR, error 0x%x\n",
2799 			    retval);
2800 			goto out_failed;
2801 		}
2802 	}
2803 
2804 	mpi3mr_enable_interrupts(sc);
2805 
2806 	if (init_type == MPI3MR_INIT_TYPE_INIT) {
2807 		mtx_init(&sc->mpi3mr_mtx, "SIM lock", NULL, MTX_DEF);
2808 		mtx_init(&sc->io_lock, "IO lock", NULL, MTX_DEF);
2809 		mtx_init(&sc->admin_req_lock, "Admin Request Queue lock", NULL, MTX_SPIN);
2810 		mtx_init(&sc->reply_free_q_lock, "Reply free Queue lock", NULL, MTX_SPIN);
2811 		mtx_init(&sc->sense_buf_q_lock, "Sense buffer Queue lock", NULL, MTX_SPIN);
2812 		mtx_init(&sc->chain_buf_lock, "Chain buffer lock", NULL, MTX_SPIN);
2813 		mtx_init(&sc->cmd_pool_lock, "Command pool lock", NULL, MTX_DEF);
2814 		mtx_init(&sc->fwevt_lock, "Firmware Event lock", NULL, MTX_DEF);
2815 		mtx_init(&sc->target_lock, "Target lock", NULL, MTX_SPIN);
2816 		mtx_init(&sc->reset_mutex, "Reset lock", NULL, MTX_DEF);
2817 
2818 		mtx_init(&sc->init_cmds.completion.lock, "Init commands lock", NULL, MTX_DEF);
2819 		sc->init_cmds.reply = NULL;
2820 		sc->init_cmds.state = MPI3MR_CMD_NOTUSED;
2821 		sc->init_cmds.dev_handle = MPI3MR_INVALID_DEV_HANDLE;
2822 		sc->init_cmds.host_tag = MPI3MR_HOSTTAG_INITCMDS;
2823 
2824 		mtx_init(&sc->ioctl_cmds.completion.lock, "IOCTL commands lock", NULL, MTX_DEF);
2825 		sc->ioctl_cmds.reply = NULL;
2826 		sc->ioctl_cmds.state = MPI3MR_CMD_NOTUSED;
2827 		sc->ioctl_cmds.dev_handle = MPI3MR_INVALID_DEV_HANDLE;
2828 		sc->ioctl_cmds.host_tag = MPI3MR_HOSTTAG_IOCTLCMDS;
2829 
2830 		mtx_init(&sc->pel_abort_cmd.completion.lock, "PEL Abort command lock", NULL, MTX_DEF);
2831 		sc->pel_abort_cmd.reply = NULL;
2832 		sc->pel_abort_cmd.state = MPI3MR_CMD_NOTUSED;
2833 		sc->pel_abort_cmd.dev_handle = MPI3MR_INVALID_DEV_HANDLE;
2834 		sc->pel_abort_cmd.host_tag = MPI3MR_HOSTTAG_PELABORT;
2835 
2836 		mtx_init(&sc->host_tm_cmds.completion.lock, "TM commands lock", NULL, MTX_DEF);
2837 		sc->host_tm_cmds.reply = NULL;
2838 		sc->host_tm_cmds.state = MPI3MR_CMD_NOTUSED;
2839 		sc->host_tm_cmds.dev_handle = MPI3MR_INVALID_DEV_HANDLE;
2840 		sc->host_tm_cmds.host_tag = MPI3MR_HOSTTAG_TMS;
2841 
2842 		TAILQ_INIT(&sc->cmd_list_head);
2843 		TAILQ_INIT(&sc->event_list);
2844 		TAILQ_INIT(&sc->delayed_rmhs_list);
2845 		TAILQ_INIT(&sc->delayed_evtack_cmds_list);
2846 
2847 		for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
2848 			snprintf(str, 32, "Dev REMHS commands lock[%d]", i);
2849 			mtx_init(&sc->dev_rmhs_cmds[i].completion.lock, str, NULL, MTX_DEF);
2850 			sc->dev_rmhs_cmds[i].reply = NULL;
2851 			sc->dev_rmhs_cmds[i].state = MPI3MR_CMD_NOTUSED;
2852 			sc->dev_rmhs_cmds[i].dev_handle = MPI3MR_INVALID_DEV_HANDLE;
2853 			sc->dev_rmhs_cmds[i].host_tag = MPI3MR_HOSTTAG_DEVRMCMD_MIN
2854 							    + i;
2855 		}
2856 	}
2857 
2858 	retval = mpi3mr_issue_iocfacts(sc, &facts_data);
2859 	if (retval) {
2860 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to Issue IOC Facts, retval: 0x%x\n",
2861 		    retval);
2862 		goto out_failed;
2863 	}
2864 
2865 	retval = mpi3mr_process_factsdata(sc, &facts_data);
2866 	if (retval) {
2867 		mpi3mr_dprint(sc, MPI3MR_ERROR, "IOC Facts data processing failedi, retval: 0x%x\n",
2868 		    retval);
2869 		goto out_failed;
2870 	}
2871 
2872 	sc->num_io_throttle_group = sc->facts.max_io_throttle_group;
2873 	mpi3mr_atomic_set(&sc->pend_large_data_sz, 0);
2874 
2875 	if (init_type == MPI3MR_INIT_TYPE_RESET) {
2876 		retval = mpi3mr_validate_fw_update(sc);
2877 		if (retval)
2878 			goto out_failed;
2879 	} else {
2880 		sc->reply_sz = sc->facts.reply_sz;
2881 	}
2882 
2883 	mpi3mr_display_ioc_info(sc);
2884 
2885 	retval = mpi3mr_reply_alloc(sc);
2886 	if (retval) {
2887 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to allocated reply and sense buffers, retval: 0x%x\n",
2888 		    retval);
2889 		goto out_failed;
2890 	}
2891 
2892 	if (init_type == MPI3MR_INIT_TYPE_INIT) {
2893 		retval = mpi3mr_alloc_chain_bufs(sc);
2894 		if (retval) {
2895 			mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to allocated chain buffers, retval: 0x%x\n",
2896 			    retval);
2897 			goto out_failed;
2898 		}
2899 	}
2900 
2901 	retval = mpi3mr_issue_iocinit(sc);
2902 	if (retval) {
2903 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to Issue IOC Init, retval: 0x%x\n",
2904 		    retval);
2905 		goto out_failed;
2906 	}
2907 
2908 	mpi3mr_print_fw_pkg_ver(sc);
2909 
2910 	sc->reply_free_q_host_index = sc->num_reply_bufs;
2911 	mpi3mr_regwrite(sc, MPI3_SYSIF_REPLY_FREE_HOST_INDEX_OFFSET,
2912 		sc->reply_free_q_host_index);
2913 
2914 	sc->sense_buf_q_host_index = sc->num_sense_bufs;
2915 
2916 	mpi3mr_regwrite(sc, MPI3_SYSIF_SENSE_BUF_FREE_HOST_INDEX_OFFSET,
2917 		sc->sense_buf_q_host_index);
2918 
2919 	if (init_type == MPI3MR_INIT_TYPE_INIT) {
2920 		retval = mpi3mr_alloc_interrupts(sc, 0);
2921 		if (retval) {
2922 			mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to allocate interrupts, retval: 0x%x\n",
2923 			    retval);
2924 			goto out_failed;
2925 		}
2926 
2927 		retval = mpi3mr_setup_irqs(sc);
2928 		if (retval) {
2929 			printf(IOCNAME "Failed to setup ISR, error: 0x%x\n",
2930 			    sc->name, retval);
2931 			goto out_failed;
2932 		}
2933 
2934 		mpi3mr_enable_interrupts(sc);
2935 
2936 	} else
2937 		mpi3mr_enable_interrupts(sc);
2938 
2939 	retval = mpi3mr_create_op_queues(sc);
2940 
2941 	if (retval) {
2942 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to create operational queues, error: %d\n",
2943 		    retval);
2944 		goto out_failed;
2945 	}
2946 
2947 	if (!sc->throttle_groups && sc->num_io_throttle_group) {
2948 		mpi3mr_dprint(sc, MPI3MR_ERROR, "allocating memory for throttle groups\n");
2949 		size = sizeof(struct mpi3mr_throttle_group_info);
2950 		sc->throttle_groups = (struct mpi3mr_throttle_group_info *)
2951 					  malloc(sc->num_io_throttle_group *
2952 					      size, M_MPI3MR, M_NOWAIT | M_ZERO);
2953 		if (!sc->throttle_groups)
2954 			goto out_failed;
2955 	}
2956 
2957 	if (init_type == MPI3MR_INIT_TYPE_RESET) {
2958 		mpi3mr_dprint(sc, MPI3MR_INFO, "Re-register events\n");
2959 		retval = mpi3mr_register_events(sc);
2960 		if (retval) {
2961 			mpi3mr_dprint(sc, MPI3MR_INFO, "Failed to re-register events, retval: 0x%x\n",
2962 			    retval);
2963 			goto out_failed;
2964 		}
2965 
2966 		mpi3mr_dprint(sc, MPI3MR_INFO, "Issuing Port Enable\n");
2967 		retval = mpi3mr_issue_port_enable(sc, 0);
2968 		if (retval) {
2969 			mpi3mr_dprint(sc, MPI3MR_INFO, "Failed to issue port enable, retval: 0x%x\n",
2970 			    retval);
2971 			goto out_failed;
2972 		}
2973 	}
2974 	retval = mpi3mr_pel_alloc(sc);
2975 	if (retval) {
2976 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to allocate memory for PEL, retval: 0x%x\n",
2977 		    retval);
2978 		goto out_failed;
2979 	}
2980 
2981 	return retval;
2982 
2983 out_failed:
2984 	retval = -1;
2985 	return retval;
2986 }
2987 
2988 static void mpi3mr_port_enable_complete(struct mpi3mr_softc *sc,
2989     struct mpi3mr_drvr_cmd *drvrcmd)
2990 {
2991 	drvrcmd->state = MPI3MR_CMD_NOTUSED;
2992 	drvrcmd->callback = NULL;
2993 	printf(IOCNAME "Completing Port Enable Request\n", sc->name);
2994 	sc->mpi3mr_flags |= MPI3MR_FLAGS_PORT_ENABLE_DONE;
2995 	mpi3mr_startup_decrement(sc->cam_sc);
2996 }
2997 
/*
 * mpi3mr_issue_port_enable - Issue a Port Enable request to the IOC
 * @sc: Adapter instance reference
 * @async: non-zero to complete asynchronously via
 *         mpi3mr_port_enable_complete; zero to block until the
 *         request completes or times out
 *
 * Uses the shared init_cmds tracker (serialized by its completion
 * lock). Returns 0 on success, -1 if the tracker is busy, the admin
 * post fails, or a synchronous request times out.
 */
int mpi3mr_issue_port_enable(struct mpi3mr_softc *sc, U8 async)
{
	Mpi3PortEnableRequest_t pe_req;
	int retval = 0;

	memset(&pe_req, 0, sizeof(pe_req));
	mtx_lock(&sc->init_cmds.completion.lock);
	/* Only one init-command may be outstanding at a time. */
	if (sc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		printf(IOCNAME "Issue PortEnable: Init command is in use\n", sc->name);
		mtx_unlock(&sc->init_cmds.completion.lock);
		goto out;
	}

	sc->init_cmds.state = MPI3MR_CMD_PENDING;

	/* Async: fire-and-forget with a callback. Sync: wait on completion. */
	if (async) {
		sc->init_cmds.is_waiting = 0;
		sc->init_cmds.callback = mpi3mr_port_enable_complete;
	} else {
		sc->init_cmds.is_waiting = 1;
		sc->init_cmds.callback = NULL;
		init_completion(&sc->init_cmds.completion);
	}
	pe_req.HostTag = MPI3MR_HOSTTAG_INITCMDS;
	pe_req.Function = MPI3_FUNCTION_PORT_ENABLE;

	printf(IOCNAME "Sending Port Enable Request\n", sc->name);
	retval = mpi3mr_submit_admin_cmd(sc, &pe_req, sizeof(pe_req));
	if (retval) {
		printf(IOCNAME "Issue PortEnable: Admin Post failed\n",
		    sc->name);
		goto out_unlock;
	}

	if (!async) {
		wait_for_completion_timeout(&sc->init_cmds.completion,
		    MPI3MR_PORTENABLE_TIMEOUT);
		if (!(sc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
			printf(IOCNAME "Issue PortEnable: command timed out\n",
			    sc->name);
			retval = -1;
			/* Timeout may indicate a faulted IOC; check and escalate. */
			mpi3mr_check_rh_fault_ioc(sc, MPI3MR_RESET_FROM_PE_TIMEOUT);
			goto out_unlock;
		}
		/* Synchronous path completes the tracker inline. */
		mpi3mr_port_enable_complete(sc, &sc->init_cmds);
	}
out_unlock:
	mtx_unlock(&sc->init_cmds.completion.lock);

out:
	return retval;
}
3051 
/*
 * mpi3mr_watchdog_thread - Periodic controller health monitor
 * @arg: Adapter instance reference (struct mpi3mr_softc *)
 *
 * Kernel thread that wakes roughly once per second to poll the IOC
 * status/fault registers and drive soft resets when the firmware
 * faults, requests a reset, or a prepare-for-reset times out. The
 * reset_mutex is held while evaluating the exit condition and while
 * sleeping, and dropped while touching hardware. Exits on shutdown
 * or when the controller is marked unrecoverable.
 */
void
mpi3mr_watchdog_thread(void *arg)
{
	struct mpi3mr_softc *sc;
	enum mpi3mr_iocstate ioc_state;
	U32 fault, host_diagnostic, ioc_status;

	sc = (struct mpi3mr_softc *)arg;

	mpi3mr_dprint(sc, MPI3MR_XINFO, "%s\n", __func__);

	sc->watchdog_thread_active = 1;
	mtx_lock(&sc->reset_mutex);
	for (;;) {
		/* Exit check runs with reset_mutex held. */
		if (sc->mpi3mr_flags & MPI3MR_FLAGS_SHUTDOWN ||
		    (sc->unrecoverable == 1)) {
			mpi3mr_dprint(sc, MPI3MR_INFO,
			    "Exit due to %s from %s\n",
			   sc->mpi3mr_flags & MPI3MR_FLAGS_SHUTDOWN ? "Shutdown" :
			    "Hardware critical error", __func__);
			break;
		}
		mtx_unlock(&sc->reset_mutex);

		/* Firmware asked us to prepare for reset but never followed up. */
		if ((sc->prepare_for_reset) &&
		    ((sc->prepare_for_reset_timeout_counter++) >=
		     MPI3MR_PREPARE_FOR_RESET_TIMEOUT)) {
			mpi3mr_soft_reset_handler(sc,
			    MPI3MR_RESET_FROM_CIACTVRST_TIMER, 1);
			goto sleep;
		}

		ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);

		/* Reset history set: firmware reset behind our back. */
		if (ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) {
			mpi3mr_soft_reset_handler(sc, MPI3MR_RESET_FROM_FIRMWARE, 0);
			goto sleep;
		}

		ioc_state = mpi3mr_get_iocstate(sc);
		if (ioc_state == MRIOC_STATE_FAULT) {
			fault = mpi3mr_regread(sc, MPI3_SYSIF_FAULT_OFFSET) &
			    MPI3_SYSIF_FAULT_CODE_MASK;

			/* Give an in-progress firmware diag save time to finish. */
			host_diagnostic = mpi3mr_regread(sc, MPI3_SYSIF_HOST_DIAG_OFFSET);
			if (host_diagnostic & MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS) {
				if (!sc->diagsave_timeout) {
					mpi3mr_print_fault_info(sc);
					mpi3mr_dprint(sc, MPI3MR_INFO,
						"diag save in progress\n");
				}
				if ((sc->diagsave_timeout++) <= MPI3_SYSIF_DIAG_SAVE_TIMEOUT)
					goto sleep;
			}
			mpi3mr_print_fault_info(sc);
			sc->diagsave_timeout = 0;

			/*
			 * NOTE(review): the breaks below leave the loop without
			 * reset_mutex held, yet the post-loop mtx_unlock still
			 * runs — looks like an unlock-of-unlocked mutex; verify
			 * against mutex(9)/WITNESS behavior.
			 */
			if ((fault == MPI3_SYSIF_FAULT_CODE_POWER_CYCLE_REQUIRED) ||
			    (fault == MPI3_SYSIF_FAULT_CODE_COMPLETE_RESET_NEEDED)) {
				mpi3mr_dprint(sc, MPI3MR_INFO,
				    "Controller requires system power cycle or complete reset is needed,"
				    "fault code: 0x%x. marking controller as unrecoverable\n", fault);
				sc->unrecoverable = 1;
				break;
			}
			if ((fault == MPI3_SYSIF_FAULT_CODE_DIAG_FAULT_RESET)
			    || (fault == MPI3_SYSIF_FAULT_CODE_SOFT_RESET_IN_PROGRESS)
			    || (sc->reset_in_progress))
				break;
			if (fault == MPI3_SYSIF_FAULT_CODE_CI_ACTIVATION_RESET)
				mpi3mr_soft_reset_handler(sc,
				    MPI3MR_RESET_FROM_CIACTIV_FAULT, 0);
			else
				mpi3mr_soft_reset_handler(sc,
				    MPI3MR_RESET_FROM_FAULT_WATCH, 0);

		}

		/* Externally requested (e.g. diagnostic trigger) soft reset. */
		if (sc->reset.type == MPI3MR_TRIGGER_SOFT_RESET) {
			mpi3mr_print_fault_info(sc);
			mpi3mr_soft_reset_handler(sc, sc->reset.reason, 1);
		}
sleep:
		mtx_lock(&sc->reset_mutex);
		/*
		 * Sleep for 1 second if we're not exiting, then loop to top
		 * to poll exit status and hardware health.
		 */
		if ((sc->mpi3mr_flags & MPI3MR_FLAGS_SHUTDOWN) == 0 &&
		    !sc->unrecoverable) {
			msleep(&sc->watchdog_chan, &sc->reset_mutex, PRIBIO,
			    "mpi3mr_watchdog", 1 * hz);
		}
	}
	mtx_unlock(&sc->reset_mutex);
	sc->watchdog_thread_active = 0;
	mpi3mr_kproc_exit(0);
}
3150 
3151 static void mpi3mr_display_event_data(struct mpi3mr_softc *sc,
3152 	Mpi3EventNotificationReply_t *event_rep)
3153 {
3154 	char *desc = NULL;
3155 	U16 event;
3156 
3157 	event = event_rep->Event;
3158 
3159 	switch (event) {
3160 	case MPI3_EVENT_LOG_DATA:
3161 		desc = "Log Data";
3162 		break;
3163 	case MPI3_EVENT_CHANGE:
3164 		desc = "Event Change";
3165 		break;
3166 	case MPI3_EVENT_GPIO_INTERRUPT:
3167 		desc = "GPIO Interrupt";
3168 		break;
3169 	case MPI3_EVENT_CABLE_MGMT:
3170 		desc = "Cable Management";
3171 		break;
3172 	case MPI3_EVENT_ENERGY_PACK_CHANGE:
3173 		desc = "Energy Pack Change";
3174 		break;
3175 	case MPI3_EVENT_DEVICE_ADDED:
3176 	{
3177 		Mpi3DevicePage0_t *event_data =
3178 		    (Mpi3DevicePage0_t *)event_rep->EventData;
3179 		mpi3mr_dprint(sc, MPI3MR_EVENT, "Device Added: Dev=0x%04x Form=0x%x Perst id: 0x%x\n",
3180 			event_data->DevHandle, event_data->DeviceForm, event_data->PersistentID);
3181 		return;
3182 	}
3183 	case MPI3_EVENT_DEVICE_INFO_CHANGED:
3184 	{
3185 		Mpi3DevicePage0_t *event_data =
3186 		    (Mpi3DevicePage0_t *)event_rep->EventData;
3187 		mpi3mr_dprint(sc, MPI3MR_EVENT, "Device Info Changed: Dev=0x%04x Form=0x%x\n",
3188 			event_data->DevHandle, event_data->DeviceForm);
3189 		return;
3190 	}
3191 	case MPI3_EVENT_DEVICE_STATUS_CHANGE:
3192 	{
3193 		Mpi3EventDataDeviceStatusChange_t *event_data =
3194 		    (Mpi3EventDataDeviceStatusChange_t *)event_rep->EventData;
3195 		mpi3mr_dprint(sc, MPI3MR_EVENT, "Device Status Change: Dev=0x%04x RC=0x%x\n",
3196 			event_data->DevHandle, event_data->ReasonCode);
3197 		return;
3198 	}
3199 	case MPI3_EVENT_SAS_DISCOVERY:
3200 	{
3201 		Mpi3EventDataSasDiscovery_t *event_data =
3202 		    (Mpi3EventDataSasDiscovery_t *)event_rep->EventData;
3203 		mpi3mr_dprint(sc, MPI3MR_EVENT, "SAS Discovery: (%s)",
3204 			(event_data->ReasonCode == MPI3_EVENT_SAS_DISC_RC_STARTED) ?
3205 		    "start" : "stop");
3206 		if (event_data->DiscoveryStatus &&
3207 		    (sc->mpi3mr_debug & MPI3MR_EVENT)) {
3208 			printf("discovery_status(0x%08x)",
3209 			    event_data->DiscoveryStatus);
3210 
3211 		}
3212 
3213 		if (sc->mpi3mr_debug & MPI3MR_EVENT)
3214 			printf("\n");
3215 		return;
3216 	}
3217 	case MPI3_EVENT_SAS_BROADCAST_PRIMITIVE:
3218 		desc = "SAS Broadcast Primitive";
3219 		break;
3220 	case MPI3_EVENT_SAS_NOTIFY_PRIMITIVE:
3221 		desc = "SAS Notify Primitive";
3222 		break;
3223 	case MPI3_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE:
3224 		desc = "SAS Init Device Status Change";
3225 		break;
3226 	case MPI3_EVENT_SAS_INIT_TABLE_OVERFLOW:
3227 		desc = "SAS Init Table Overflow";
3228 		break;
3229 	case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
3230 		desc = "SAS Topology Change List";
3231 		break;
3232 	case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE:
3233 		desc = "Enclosure Device Status Change";
3234 		break;
3235 	case MPI3_EVENT_HARD_RESET_RECEIVED:
3236 		desc = "Hard Reset Received";
3237 		break;
3238 	case MPI3_EVENT_SAS_PHY_COUNTER:
3239 		desc = "SAS PHY Counter";
3240 		break;
3241 	case MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
3242 		desc = "SAS Device Discovery Error";
3243 		break;
3244 	case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
3245 		desc = "PCIE Topology Change List";
3246 		break;
3247 	case MPI3_EVENT_PCIE_ENUMERATION:
3248 	{
3249 		Mpi3EventDataPcieEnumeration_t *event_data =
3250 			(Mpi3EventDataPcieEnumeration_t *)event_rep->EventData;
3251 		mpi3mr_dprint(sc, MPI3MR_EVENT, "PCIE Enumeration: (%s)",
3252 			(event_data->ReasonCode ==
3253 			    MPI3_EVENT_PCIE_ENUM_RC_STARTED) ? "start" :
3254 			    "stop");
3255 		if (event_data->EnumerationStatus)
3256 			mpi3mr_dprint(sc, MPI3MR_EVENT, "enumeration_status(0x%08x)",
3257 			   event_data->EnumerationStatus);
3258 		if (sc->mpi3mr_debug & MPI3MR_EVENT)
3259 			printf("\n");
3260 		return;
3261 	}
3262 	case MPI3_EVENT_PREPARE_FOR_RESET:
3263 		desc = "Prepare For Reset";
3264 		break;
3265 	}
3266 
3267 	if (!desc)
3268 		return;
3269 
3270 	mpi3mr_dprint(sc, MPI3MR_EVENT, "%s\n", desc);
3271 }
3272 
3273 struct mpi3mr_target *
3274 mpi3mr_find_target_by_per_id(struct mpi3mr_cam_softc *cam_sc,
3275     uint16_t per_id)
3276 {
3277 	struct mpi3mr_target *target = NULL;
3278 
3279 	mtx_lock_spin(&cam_sc->sc->target_lock);
3280 	TAILQ_FOREACH(target, &cam_sc->tgt_list, tgt_next) {
3281 		if (target->per_id == per_id)
3282 			break;
3283 	}
3284 
3285 	mtx_unlock_spin(&cam_sc->sc->target_lock);
3286 	return target;
3287 }
3288 
3289 struct mpi3mr_target *
3290 mpi3mr_find_target_by_dev_handle(struct mpi3mr_cam_softc *cam_sc,
3291     uint16_t handle)
3292 {
3293 	struct mpi3mr_target *target = NULL;
3294 
3295 	mtx_lock_spin(&cam_sc->sc->target_lock);
3296 	TAILQ_FOREACH(target, &cam_sc->tgt_list, tgt_next) {
3297 		if (target->dev_handle == handle)
3298 			break;
3299 
3300 	}
3301 	mtx_unlock_spin(&cam_sc->sc->target_lock);
3302 	return target;
3303 }
3304 
/*
 * mpi3mr_update_device - Refresh driver target state from Device Page 0
 * @sc: Adapter instance reference
 * @tgtdev: Driver-side target to update
 * @dev_pg0: Device Page 0 reported by the firmware
 * @is_added: true when called for a newly added device (enables
 *            first-time-only fields like io_throttle_enabled and
 *            the throttle group's fw_qd)
 *
 * Copies the generic identity fields, then decodes the form-specific
 * (SAS/SATA, PCIe, virtual disk) section. Devices that are not
 * exposable end devices are marked hidden.
 */
void mpi3mr_update_device(struct mpi3mr_softc *sc,
    struct mpi3mr_target *tgtdev, Mpi3DevicePage0_t *dev_pg0,
    bool is_added)
{
	U16 flags = 0;

	/* Generic identity fields common to all device forms. */
	tgtdev->per_id = (dev_pg0->PersistentID);
	tgtdev->dev_handle = (dev_pg0->DevHandle);
	tgtdev->dev_type = dev_pg0->DeviceForm;
	tgtdev->encl_handle = (dev_pg0->EnclosureHandle);
	tgtdev->parent_handle = (dev_pg0->ParentDevHandle);
	tgtdev->slot = (dev_pg0->Slot);
	tgtdev->qdepth = (dev_pg0->QueueDepth);
	tgtdev->wwid = (dev_pg0->WWID);

	flags = (dev_pg0->Flags);
	tgtdev->is_hidden = (flags & MPI3_DEVICE0_FLAGS_HIDDEN);
	/* Throttling capability is latched only when the device is added. */
	if (is_added == true)
		tgtdev->io_throttle_enabled =
		    (flags & MPI3_DEVICE0_FLAGS_IO_THROTTLING_REQUIRED) ? 1 : 0;

	/* Any access status other than these usable ones hides the device. */
	switch (dev_pg0->AccessStatus) {
	case MPI3_DEVICE0_ASTATUS_NO_ERRORS:
	case MPI3_DEVICE0_ASTATUS_PREPARE:
	case MPI3_DEVICE0_ASTATUS_NEEDS_INITIALIZATION:
	case MPI3_DEVICE0_ASTATUS_DEVICE_MISSING_DELAY:
		break;
	default:
		tgtdev->is_hidden = 1;
		break;
	}

	/* Decode the form-specific portion of the page. */
	switch (tgtdev->dev_type) {
	case MPI3_DEVICE_DEVFORM_SAS_SATA:
	{
		Mpi3Device0SasSataFormat_t *sasinf =
		    &dev_pg0->DeviceSpecific.SasSataFormat;
		U16 dev_info = (sasinf->DeviceInfo);
		tgtdev->dev_spec.sassata_inf.dev_info = dev_info;
		tgtdev->dev_spec.sassata_inf.sas_address =
		    (sasinf->SASAddress);
		/* Hide anything that is not an SSP/STP-SATA end device. */
		if ((dev_info & MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_MASK) !=
		    MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_END_DEVICE)
			tgtdev->is_hidden = 1;
		else if (!(dev_info & (MPI3_SAS_DEVICE_INFO_STP_SATA_TARGET |
			    MPI3_SAS_DEVICE_INFO_SSP_TARGET)))
			tgtdev->is_hidden = 1;
		break;
	}
	case MPI3_DEVICE_DEVFORM_PCIE:
	{
		Mpi3Device0PcieFormat_t *pcieinf =
		    &dev_pg0->DeviceSpecific.PcieFormat;
		U16 dev_info = (pcieinf->DeviceInfo);

		tgtdev->q_depth = dev_pg0->QueueDepth;
		tgtdev->dev_spec.pcie_inf.dev_info = dev_info;
		tgtdev->dev_spec.pcie_inf.capb =
		    (pcieinf->Capabilities);
		tgtdev->dev_spec.pcie_inf.mdts = MPI3MR_DEFAULT_MDTS;
		/* Detailed PCIe fields are valid only with NO_ERRORS status. */
		if (dev_pg0->AccessStatus == MPI3_DEVICE0_ASTATUS_NO_ERRORS) {
			tgtdev->dev_spec.pcie_inf.mdts =
			    (pcieinf->MaximumDataTransferSize);
			tgtdev->dev_spec.pcie_inf.pgsz = pcieinf->PageSize;
			tgtdev->dev_spec.pcie_inf.reset_to =
				pcieinf->ControllerResetTO;
			tgtdev->dev_spec.pcie_inf.abort_to =
				pcieinf->NVMeAbortTO;
		}
		/* Clamp maximum data transfer size to 1MB. */
		if (tgtdev->dev_spec.pcie_inf.mdts > (1024 * 1024))
			tgtdev->dev_spec.pcie_inf.mdts = (1024 * 1024);

		/* Only NVMe and PCIe-SCSI device types are exposed. */
		if (((dev_info & MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) !=
		    MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) &&
		    ((dev_info & MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) !=
		    MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_SCSI_DEVICE))
			tgtdev->is_hidden = 1;

		break;
	}
	case MPI3_DEVICE_DEVFORM_VD:
	{
		Mpi3Device0VdFormat_t *vdinf =
		    &dev_pg0->DeviceSpecific.VdFormat;
		struct mpi3mr_throttle_group_info *tg = NULL;

		tgtdev->dev_spec.vol_inf.state = vdinf->VdState;
		if (vdinf->VdState == MPI3_DEVICE0_VD_STATE_OFFLINE)
			tgtdev->is_hidden = 1;
		tgtdev->dev_spec.vol_inf.tg_id = vdinf->IOThrottleGroup;
		/* Throttle thresholds are reported in 2048-sector units. */
		tgtdev->dev_spec.vol_inf.tg_high =
			vdinf->IOThrottleGroupHigh * 2048;
		tgtdev->dev_spec.vol_inf.tg_low =
			vdinf->IOThrottleGroupLow * 2048;
		if (vdinf->IOThrottleGroup < sc->num_io_throttle_group) {
			tg = sc->throttle_groups + vdinf->IOThrottleGroup;
			tg->id = vdinf->IOThrottleGroup;
			tg->high = tgtdev->dev_spec.vol_inf.tg_high;
			tg->low = tgtdev->dev_spec.vol_inf.tg_low;
			/* Firmware queue depth is captured only on add. */
			if (is_added == true)
				tg->fw_qd = tgtdev->q_depth;
			tg->modified_qd = tgtdev->q_depth;
		}
		tgtdev->dev_spec.vol_inf.tg = tg;
		tgtdev->throttle_group = tg;
		break;
	}
	default:
		goto out;
	}

out:
	return;
}
3419 
3420 int mpi3mr_create_device(struct mpi3mr_softc *sc,
3421     Mpi3DevicePage0_t *dev_pg0)
3422 {
3423 	int retval = 0;
3424 	struct mpi3mr_target *target = NULL;
3425 	U16 per_id = 0;
3426 
3427 	per_id = dev_pg0->PersistentID;
3428 
3429 	mtx_lock_spin(&sc->target_lock);
3430 	TAILQ_FOREACH(target, &sc->cam_sc->tgt_list, tgt_next) {
3431 		if (target->per_id == per_id) {
3432 			target->state = MPI3MR_DEV_CREATED;
3433 			break;
3434 		}
3435 	}
3436 	mtx_unlock_spin(&sc->target_lock);
3437 
3438 	if (target) {
3439 			mpi3mr_update_device(sc, target, dev_pg0, true);
3440 	} else {
3441 			target = malloc(sizeof(*target), M_MPI3MR,
3442 				 M_NOWAIT | M_ZERO);
3443 
3444 			if (target == NULL) {
3445 				retval = -1;
3446 				goto out;
3447 			}
3448 
3449 			target->exposed_to_os = 0;
3450 			mpi3mr_update_device(sc, target, dev_pg0, true);
3451 			mtx_lock_spin(&sc->target_lock);
3452 			TAILQ_INSERT_TAIL(&sc->cam_sc->tgt_list, target, tgt_next);
3453 			target->state = MPI3MR_DEV_CREATED;
3454 			mtx_unlock_spin(&sc->target_lock);
3455 	}
3456 out:
3457 	return retval;
3458 }
3459 
3460 /**
3461  * mpi3mr_dev_rmhs_complete_iou - Device removal IOUC completion
3462  * @sc: Adapter instance reference
3463  * @drv_cmd: Internal command tracker
3464  *
3465  * Issues a target reset TM to the firmware from the device
3466  * removal TM pend list or retry the removal handshake sequence
3467  * based on the IOU control request IOC status.
3468  *
3469  * Return: Nothing
3470  */
3471 static void mpi3mr_dev_rmhs_complete_iou(struct mpi3mr_softc *sc,
3472 	struct mpi3mr_drvr_cmd *drv_cmd)
3473 {
3474 	U16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
3475 	struct delayed_dev_rmhs_node *delayed_dev_rmhs = NULL;
3476 	struct mpi3mr_target *tgtdev = NULL;
3477 
3478 	mpi3mr_dprint(sc, MPI3MR_EVENT,
3479 	    "%s :dev_rmhs_iouctrl_complete:handle(0x%04x), ioc_status(0x%04x), loginfo(0x%08x)\n",
3480 	    __func__, drv_cmd->dev_handle, drv_cmd->ioc_status,
3481 	    drv_cmd->ioc_loginfo);
3482 	if (drv_cmd->ioc_status != MPI3_IOCSTATUS_SUCCESS) {
3483 		if (drv_cmd->retry_count < MPI3MR_DEVRMHS_RETRYCOUNT) {
3484 			drv_cmd->retry_count++;
3485 			mpi3mr_dprint(sc, MPI3MR_EVENT,
3486 			    "%s :dev_rmhs_iouctrl_complete: handle(0x%04x)retrying handshake retry=%d\n",
3487 			    __func__, drv_cmd->dev_handle,
3488 			    drv_cmd->retry_count);
3489 			mpi3mr_dev_rmhs_send_tm(sc, drv_cmd->dev_handle,
3490 			    drv_cmd, drv_cmd->iou_rc);
3491 			return;
3492 		}
3493 		mpi3mr_dprint(sc, MPI3MR_ERROR,
3494 		    "%s :dev removal handshake failed after all retries: handle(0x%04x)\n",
3495 		    __func__, drv_cmd->dev_handle);
3496 	} else {
3497 		mtx_lock_spin(&sc->target_lock);
3498 		TAILQ_FOREACH(tgtdev, &sc->cam_sc->tgt_list, tgt_next) {
3499 		       if (tgtdev->dev_handle == drv_cmd->dev_handle)
3500 			       tgtdev->state = MPI3MR_DEV_REMOVE_HS_COMPLETED;
3501 		}
3502 		mtx_unlock_spin(&sc->target_lock);
3503 
3504 		mpi3mr_dprint(sc, MPI3MR_INFO,
3505 		    "%s :dev removal handshake completed successfully: handle(0x%04x)\n",
3506 		    __func__, drv_cmd->dev_handle);
3507 		mpi3mr_clear_bit(drv_cmd->dev_handle, sc->removepend_bitmap);
3508 	}
3509 
3510 	if (!TAILQ_EMPTY(&sc->delayed_rmhs_list)) {
3511 		delayed_dev_rmhs = TAILQ_FIRST(&sc->delayed_rmhs_list);
3512 		drv_cmd->dev_handle = delayed_dev_rmhs->handle;
3513 		drv_cmd->retry_count = 0;
3514 		drv_cmd->iou_rc = delayed_dev_rmhs->iou_rc;
3515 		mpi3mr_dprint(sc, MPI3MR_EVENT,
3516 		    "%s :dev_rmhs_iouctrl_complete: processing delayed TM: handle(0x%04x)\n",
3517 		    __func__, drv_cmd->dev_handle);
3518 		mpi3mr_dev_rmhs_send_tm(sc, drv_cmd->dev_handle, drv_cmd,
3519 		    drv_cmd->iou_rc);
3520 		TAILQ_REMOVE(&sc->delayed_rmhs_list, delayed_dev_rmhs, list);
3521 		free(delayed_dev_rmhs, M_MPI3MR);
3522 		return;
3523 	}
3524 	drv_cmd->state = MPI3MR_CMD_NOTUSED;
3525 	drv_cmd->callback = NULL;
3526 	drv_cmd->retry_count = 0;
3527 	drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
3528 	mpi3mr_clear_bit(cmd_idx, sc->devrem_bitmap);
3529 }
3530 
3531 /**
3532  * mpi3mr_dev_rmhs_complete_tm - Device removal TM completion
3533  * @sc: Adapter instance reference
3534  * @drv_cmd: Internal command tracker
3535  *
3536  * Issues a target reset TM to the firmware from the device
3537  * removal TM pend list or issue IO Unit control request as
3538  * part of device removal or hidden acknowledgment handshake.
3539  *
3540  * Return: Nothing
3541  */
3542 static void mpi3mr_dev_rmhs_complete_tm(struct mpi3mr_softc *sc,
3543 	struct mpi3mr_drvr_cmd *drv_cmd)
3544 {
3545 	Mpi3IoUnitControlRequest_t iou_ctrl;
3546 	U16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
3547 	Mpi3SCSITaskMgmtReply_t *tm_reply = NULL;
3548 	int retval;
3549 
3550 	if (drv_cmd->state & MPI3MR_CMD_REPLYVALID)
3551 		tm_reply = (Mpi3SCSITaskMgmtReply_t *)drv_cmd->reply;
3552 
3553 	if (tm_reply)
3554 		printf(IOCNAME
3555 		    "dev_rmhs_tr_complete:handle(0x%04x), ioc_status(0x%04x), loginfo(0x%08x), term_count(%d)\n",
3556 		    sc->name, drv_cmd->dev_handle, drv_cmd->ioc_status,
3557 		    drv_cmd->ioc_loginfo,
3558 		    le32toh(tm_reply->TerminationCount));
3559 
3560 	printf(IOCNAME "Issuing IOU CTL: handle(0x%04x) dev_rmhs idx(%d)\n",
3561 	    sc->name, drv_cmd->dev_handle, cmd_idx);
3562 
3563 	memset(&iou_ctrl, 0, sizeof(iou_ctrl));
3564 
3565 	drv_cmd->state = MPI3MR_CMD_PENDING;
3566 	drv_cmd->is_waiting = 0;
3567 	drv_cmd->callback = mpi3mr_dev_rmhs_complete_iou;
3568 	iou_ctrl.Operation = drv_cmd->iou_rc;
3569 	iou_ctrl.Param16[0] = htole16(drv_cmd->dev_handle);
3570 	iou_ctrl.HostTag = htole16(drv_cmd->host_tag);
3571 	iou_ctrl.Function = MPI3_FUNCTION_IO_UNIT_CONTROL;
3572 
3573 	retval = mpi3mr_submit_admin_cmd(sc, &iou_ctrl, sizeof(iou_ctrl));
3574 	if (retval) {
3575 		printf(IOCNAME "Issue DevRmHsTMIOUCTL: Admin post failed\n",
3576 		    sc->name);
3577 		goto out_failed;
3578 	}
3579 
3580 	return;
3581 out_failed:
3582 	drv_cmd->state = MPI3MR_CMD_NOTUSED;
3583 	drv_cmd->callback = NULL;
3584 	drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
3585 	drv_cmd->retry_count = 0;
3586 	mpi3mr_clear_bit(cmd_idx, sc->devrem_bitmap);
3587 }
3588 
3589 /**
3590  * mpi3mr_dev_rmhs_send_tm - Issue TM for device removal
3591  * @sc: Adapter instance reference
3592  * @handle: Device handle
3593  * @cmdparam: Internal command tracker
3594  * @iou_rc: IO Unit reason code
3595  *
3596  * Issues a target reset TM to the firmware or add it to a pend
3597  * list as part of device removal or hidden acknowledgment
3598  * handshake.
3599  *
3600  * Return: Nothing
3601  */
3602 static void mpi3mr_dev_rmhs_send_tm(struct mpi3mr_softc *sc, U16 handle,
3603 	struct mpi3mr_drvr_cmd *cmdparam, U8 iou_rc)
3604 {
3605 	Mpi3SCSITaskMgmtRequest_t tm_req;
3606 	int retval = 0;
3607 	U16 cmd_idx = MPI3MR_NUM_DEVRMCMD;
3608 	U8 retrycount = 5;
3609 	struct mpi3mr_drvr_cmd *drv_cmd = cmdparam;
3610 	struct delayed_dev_rmhs_node *delayed_dev_rmhs = NULL;
3611 
3612 	if (drv_cmd)
3613 		goto issue_cmd;
3614 	do {
3615 		cmd_idx = mpi3mr_find_first_zero_bit(sc->devrem_bitmap,
3616 		    MPI3MR_NUM_DEVRMCMD);
3617 		if (cmd_idx < MPI3MR_NUM_DEVRMCMD) {
3618 			if (!mpi3mr_test_and_set_bit(cmd_idx, sc->devrem_bitmap))
3619 				break;
3620 			cmd_idx = MPI3MR_NUM_DEVRMCMD;
3621 		}
3622 	} while (retrycount--);
3623 
3624 	if (cmd_idx >= MPI3MR_NUM_DEVRMCMD) {
3625 		delayed_dev_rmhs = malloc(sizeof(*delayed_dev_rmhs),M_MPI3MR,
3626 		     M_ZERO|M_NOWAIT);
3627 
3628 		if (!delayed_dev_rmhs)
3629 			return;
3630 		delayed_dev_rmhs->handle = handle;
3631 		delayed_dev_rmhs->iou_rc = iou_rc;
3632 		TAILQ_INSERT_TAIL(&(sc->delayed_rmhs_list), delayed_dev_rmhs, list);
3633 		mpi3mr_dprint(sc, MPI3MR_EVENT, "%s :DevRmHs: tr:handle(0x%04x) is postponed\n",
3634 		    __func__, handle);
3635 
3636 
3637 		return;
3638 	}
3639 	drv_cmd = &sc->dev_rmhs_cmds[cmd_idx];
3640 
3641 issue_cmd:
3642 	cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
3643 	mpi3mr_dprint(sc, MPI3MR_EVENT,
3644 	    "%s :Issuing TR TM: for devhandle 0x%04x with dev_rmhs %d\n",
3645 	    __func__, handle, cmd_idx);
3646 
3647 	memset(&tm_req, 0, sizeof(tm_req));
3648 	if (drv_cmd->state & MPI3MR_CMD_PENDING) {
3649 		mpi3mr_dprint(sc, MPI3MR_EVENT, "%s :Issue TM: Command is in use\n", __func__);
3650 		goto out;
3651 	}
3652 	drv_cmd->state = MPI3MR_CMD_PENDING;
3653 	drv_cmd->is_waiting = 0;
3654 	drv_cmd->callback = mpi3mr_dev_rmhs_complete_tm;
3655 	drv_cmd->dev_handle = handle;
3656 	drv_cmd->iou_rc = iou_rc;
3657 	tm_req.DevHandle = htole16(handle);
3658 	tm_req.TaskType = MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
3659 	tm_req.HostTag = htole16(drv_cmd->host_tag);
3660 	tm_req.TaskHostTag = htole16(MPI3MR_HOSTTAG_INVALID);
3661 	tm_req.Function = MPI3_FUNCTION_SCSI_TASK_MGMT;
3662 
3663 	mpi3mr_set_bit(handle, sc->removepend_bitmap);
3664 	retval = mpi3mr_submit_admin_cmd(sc, &tm_req, sizeof(tm_req));
3665 	if (retval) {
3666 		mpi3mr_dprint(sc, MPI3MR_ERROR, "%s :Issue DevRmHsTM: Admin Post failed\n",
3667 		    __func__);
3668 		goto out_failed;
3669 	}
3670 out:
3671 	return;
3672 out_failed:
3673 	drv_cmd->state = MPI3MR_CMD_NOTUSED;
3674 	drv_cmd->callback = NULL;
3675 	drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
3676 	drv_cmd->retry_count = 0;
3677 	mpi3mr_clear_bit(cmd_idx, sc->devrem_bitmap);
3678 }
3679 
3680 /**
3681  * mpi3mr_complete_evt_ack - Event ack request completion
3682  * @sc: Adapter instance reference
3683  * @drv_cmd: Internal command tracker
3684  *
3685  * This is the completion handler for non blocking event
3686  * acknowledgment sent to the firmware and this will issue any
3687  * pending event acknowledgment request.
3688  *
3689  * Return: Nothing
3690  */
3691 static void mpi3mr_complete_evt_ack(struct mpi3mr_softc *sc,
3692 	struct mpi3mr_drvr_cmd *drv_cmd)
3693 {
3694 	U16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN;
3695 	struct delayed_evtack_node *delayed_evtack = NULL;
3696 
3697 	if (drv_cmd->ioc_status != MPI3_IOCSTATUS_SUCCESS) {
3698 		mpi3mr_dprint(sc, MPI3MR_EVENT,
3699 		    "%s: Failed IOCStatus(0x%04x) Loginfo(0x%08x)\n", __func__,
3700 		    (drv_cmd->ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
3701 		    drv_cmd->ioc_loginfo);
3702 	}
3703 
3704 	if (!TAILQ_EMPTY(&sc->delayed_evtack_cmds_list)) {
3705 		delayed_evtack = TAILQ_FIRST(&sc->delayed_evtack_cmds_list);
3706 		mpi3mr_dprint(sc, MPI3MR_EVENT,
3707 		    "%s: processing delayed event ack for event %d\n",
3708 		    __func__, delayed_evtack->event);
3709 		mpi3mr_send_evt_ack(sc, delayed_evtack->event, drv_cmd,
3710 		    delayed_evtack->event_ctx);
3711 		TAILQ_REMOVE(&sc->delayed_evtack_cmds_list, delayed_evtack, list);
3712 		free(delayed_evtack, M_MPI3MR);
3713 		return;
3714 	}
3715 	drv_cmd->state = MPI3MR_CMD_NOTUSED;
3716 	drv_cmd->callback = NULL;
3717 	mpi3mr_clear_bit(cmd_idx, sc->evtack_cmds_bitmap);
3718 }
3719 
3720 /**
3721  * mpi3mr_send_evt_ack - Issue event acknwoledgment request
3722  * @sc: Adapter instance reference
3723  * @event: MPI3 event id
3724  * @cmdparam: Internal command tracker
3725  * @event_ctx: Event context
3726  *
3727  * Issues event acknowledgment request to the firmware if there
3728  * is a free command to send the event ack else it to a pend
3729  * list so that it will be processed on a completion of a prior
3730  * event acknowledgment .
3731  *
3732  * Return: Nothing
3733  */
3734 static void mpi3mr_send_evt_ack(struct mpi3mr_softc *sc, U8 event,
3735 	struct mpi3mr_drvr_cmd *cmdparam, U32 event_ctx)
3736 {
3737 	Mpi3EventAckRequest_t evtack_req;
3738 	int retval = 0;
3739 	U8 retrycount = 5;
3740 	U16 cmd_idx = MPI3MR_NUM_EVTACKCMD;
3741 	struct mpi3mr_drvr_cmd *drv_cmd = cmdparam;
3742 	struct delayed_evtack_node *delayed_evtack = NULL;
3743 
3744 	if (drv_cmd)
3745 		goto issue_cmd;
3746 	do {
3747 		cmd_idx = mpi3mr_find_first_zero_bit(sc->evtack_cmds_bitmap,
3748 		    MPI3MR_NUM_EVTACKCMD);
3749 		if (cmd_idx < MPI3MR_NUM_EVTACKCMD) {
3750 			if (!mpi3mr_test_and_set_bit(cmd_idx,
3751 			    sc->evtack_cmds_bitmap))
3752 				break;
3753 			cmd_idx = MPI3MR_NUM_EVTACKCMD;
3754 		}
3755 	} while (retrycount--);
3756 
3757 	if (cmd_idx >= MPI3MR_NUM_EVTACKCMD) {
3758 		delayed_evtack = malloc(sizeof(*delayed_evtack),M_MPI3MR,
3759 		     M_ZERO | M_NOWAIT);
3760 		if (!delayed_evtack)
3761 			return;
3762 		delayed_evtack->event = event;
3763 		delayed_evtack->event_ctx = event_ctx;
3764 		TAILQ_INSERT_TAIL(&(sc->delayed_evtack_cmds_list), delayed_evtack, list);
3765 		mpi3mr_dprint(sc, MPI3MR_EVENT, "%s : Event ack for event:%d is postponed\n",
3766 		    __func__, event);
3767 		return;
3768 	}
3769 	drv_cmd = &sc->evtack_cmds[cmd_idx];
3770 
3771 issue_cmd:
3772 	cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN;
3773 
3774 	memset(&evtack_req, 0, sizeof(evtack_req));
3775 	if (drv_cmd->state & MPI3MR_CMD_PENDING) {
3776 		mpi3mr_dprint(sc, MPI3MR_EVENT, "%s: Command is in use\n", __func__);
3777 		goto out;
3778 	}
3779 	drv_cmd->state = MPI3MR_CMD_PENDING;
3780 	drv_cmd->is_waiting = 0;
3781 	drv_cmd->callback = mpi3mr_complete_evt_ack;
3782 	evtack_req.HostTag = htole16(drv_cmd->host_tag);
3783 	evtack_req.Function = MPI3_FUNCTION_EVENT_ACK;
3784 	evtack_req.Event = event;
3785 	evtack_req.EventContext = htole32(event_ctx);
3786 	retval = mpi3mr_submit_admin_cmd(sc, &evtack_req,
3787 	    sizeof(evtack_req));
3788 
3789 	if (retval) {
3790 		mpi3mr_dprint(sc, MPI3MR_ERROR, "%s: Admin Post failed\n", __func__);
3791 		goto out_failed;
3792 	}
3793 out:
3794 	return;
3795 out_failed:
3796 	drv_cmd->state = MPI3MR_CMD_NOTUSED;
3797 	drv_cmd->callback = NULL;
3798 	mpi3mr_clear_bit(cmd_idx, sc->evtack_cmds_bitmap);
3799 }
3800 
3801 /*
3802  * mpi3mr_pcietopochg_evt_th - PCIETopologyChange evt tophalf
3803  * @sc: Adapter instance reference
3804  * @event_reply: Event data
3805  *
3806  * Checks for the reason code and based on that either block I/O
3807  * to device, or unblock I/O to the device, or start the device
3808  * removal handshake with reason as remove with the firmware for
3809  * PCIe devices.
3810  *
3811  * Return: Nothing
3812  */
3813 static void mpi3mr_pcietopochg_evt_th(struct mpi3mr_softc *sc,
3814 	Mpi3EventNotificationReply_t *event_reply)
3815 {
3816 	Mpi3EventDataPcieTopologyChangeList_t *topo_evt =
3817 	    (Mpi3EventDataPcieTopologyChangeList_t *) event_reply->EventData;
3818 	int i;
3819 	U16 handle;
3820 	U8 reason_code;
3821 	struct mpi3mr_target *tgtdev = NULL;
3822 
3823 	for (i = 0; i < topo_evt->NumEntries; i++) {
3824 		handle = le16toh(topo_evt->PortEntry[i].AttachedDevHandle);
3825 		if (!handle)
3826 			continue;
3827 		reason_code = topo_evt->PortEntry[i].PortStatus;
3828 		tgtdev = mpi3mr_find_target_by_dev_handle(sc->cam_sc, handle);
3829 		switch (reason_code) {
3830 		case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
3831 			if (tgtdev) {
3832 				tgtdev->dev_removed = 1;
3833 				tgtdev->dev_removedelay = 0;
3834 				mpi3mr_atomic_set(&tgtdev->block_io, 0);
3835 			}
3836 			mpi3mr_dev_rmhs_send_tm(sc, handle, NULL,
3837 			    MPI3_CTRL_OP_REMOVE_DEVICE);
3838 			break;
3839 		case MPI3_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING:
3840 			if (tgtdev) {
3841 				tgtdev->dev_removedelay = 1;
3842 				mpi3mr_atomic_inc(&tgtdev->block_io);
3843 			}
3844 			break;
3845 		case MPI3_EVENT_PCIE_TOPO_PS_RESPONDING:
3846 			if (tgtdev &&
3847 			    tgtdev->dev_removedelay) {
3848 				tgtdev->dev_removedelay = 0;
3849 				if (mpi3mr_atomic_read(&tgtdev->block_io) > 0)
3850 					mpi3mr_atomic_dec(&tgtdev->block_io);
3851 			}
3852 			break;
3853 		case MPI3_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
3854 		default:
3855 			break;
3856 		}
3857 	}
3858 }
3859 
3860 /**
3861  * mpi3mr_sastopochg_evt_th - SASTopologyChange evt tophalf
3862  * @sc: Adapter instance reference
3863  * @event_reply: Event data
3864  *
3865  * Checks for the reason code and based on that either block I/O
3866  * to device, or unblock I/O to the device, or start the device
3867  * removal handshake with reason as remove with the firmware for
3868  * SAS/SATA devices.
3869  *
3870  * Return: Nothing
3871  */
3872 static void mpi3mr_sastopochg_evt_th(struct mpi3mr_softc *sc,
3873 	Mpi3EventNotificationReply_t *event_reply)
3874 {
3875 	Mpi3EventDataSasTopologyChangeList_t *topo_evt =
3876 	    (Mpi3EventDataSasTopologyChangeList_t *)event_reply->EventData;
3877 	int i;
3878 	U16 handle;
3879 	U8 reason_code;
3880 	struct mpi3mr_target *tgtdev = NULL;
3881 
3882 	for (i = 0; i < topo_evt->NumEntries; i++) {
3883 		handle = le16toh(topo_evt->PhyEntry[i].AttachedDevHandle);
3884 		if (!handle)
3885 			continue;
3886 		reason_code = topo_evt->PhyEntry[i].Status &
3887 		    MPI3_EVENT_SAS_TOPO_PHY_RC_MASK;
3888 		tgtdev = mpi3mr_find_target_by_dev_handle(sc->cam_sc, handle);
3889 		switch (reason_code) {
3890 		case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING:
3891 			if (tgtdev) {
3892 				tgtdev->dev_removed = 1;
3893 				tgtdev->dev_removedelay = 0;
3894 				mpi3mr_atomic_set(&tgtdev->block_io, 0);
3895 			}
3896 			mpi3mr_dev_rmhs_send_tm(sc, handle, NULL,
3897 			    MPI3_CTRL_OP_REMOVE_DEVICE);
3898 			break;
3899 		case MPI3_EVENT_SAS_TOPO_PHY_RC_DELAY_NOT_RESPONDING:
3900 			if (tgtdev) {
3901 				tgtdev->dev_removedelay = 1;
3902 				mpi3mr_atomic_inc(&tgtdev->block_io);
3903 			}
3904 			break;
3905 		case MPI3_EVENT_SAS_TOPO_PHY_RC_RESPONDING:
3906 			if (tgtdev &&
3907 			    tgtdev->dev_removedelay) {
3908 				tgtdev->dev_removedelay = 0;
3909 				if (mpi3mr_atomic_read(&tgtdev->block_io) > 0)
3910 					mpi3mr_atomic_dec(&tgtdev->block_io);
3911 			}
3912 		case MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED:
3913 		default:
3914 			break;
3915 		}
3916 	}
3917 
3918 }
3919 /**
3920  * mpi3mr_devstatuschg_evt_th - DeviceStatusChange evt tophalf
3921  * @sc: Adapter instance reference
3922  * @event_reply: Event data
3923  *
3924  * Checks for the reason code and based on that either block I/O
3925  * to device, or unblock I/O to the device, or start the device
3926  * removal handshake with reason as remove/hide acknowledgment
3927  * with the firmware.
3928  *
3929  * Return: Nothing
3930  */
3931 static void mpi3mr_devstatuschg_evt_th(struct mpi3mr_softc *sc,
3932 	Mpi3EventNotificationReply_t *event_reply)
3933 {
3934 	U16 dev_handle = 0;
3935 	U8 ublock = 0, block = 0, hide = 0, uhide = 0, delete = 0, remove = 0;
3936 	struct mpi3mr_target *tgtdev = NULL;
3937 	Mpi3EventDataDeviceStatusChange_t *evtdata =
3938 	    (Mpi3EventDataDeviceStatusChange_t *) event_reply->EventData;
3939 
3940 	dev_handle = le16toh(evtdata->DevHandle);
3941 
3942 	switch (evtdata->ReasonCode) {
3943 	case MPI3_EVENT_DEV_STAT_RC_INT_DEVICE_RESET_STRT:
3944 	case MPI3_EVENT_DEV_STAT_RC_INT_IT_NEXUS_RESET_STRT:
3945 		block = 1;
3946 		break;
3947 	case MPI3_EVENT_DEV_STAT_RC_HIDDEN:
3948 		delete = 1;
3949 		hide = 1;
3950 		break;
3951 	case MPI3_EVENT_DEV_STAT_RC_NOT_HIDDEN:
3952 		uhide = 1;
3953 		break;
3954 	case MPI3_EVENT_DEV_STAT_RC_VD_NOT_RESPONDING:
3955 		delete = 1;
3956 		remove = 1;
3957 		break;
3958 	case MPI3_EVENT_DEV_STAT_RC_INT_DEVICE_RESET_CMP:
3959 	case MPI3_EVENT_DEV_STAT_RC_INT_IT_NEXUS_RESET_CMP:
3960 		ublock = 1;
3961 		break;
3962 	default:
3963 		break;
3964 	}
3965 
3966 	tgtdev = mpi3mr_find_target_by_dev_handle(sc->cam_sc, dev_handle);
3967 
3968 	if (!tgtdev) {
3969 		mpi3mr_dprint(sc, MPI3MR_ERROR, "%s :target with dev_handle:0x%x not found\n",
3970 		    __func__, dev_handle);
3971 		return;
3972 	}
3973 
3974 	if (block)
3975 		mpi3mr_atomic_inc(&tgtdev->block_io);
3976 
3977 	if (hide)
3978 		tgtdev->is_hidden = hide;
3979 
3980 	if (uhide) {
3981 		tgtdev->is_hidden = 0;
3982 		tgtdev->dev_removed = 0;
3983 	}
3984 
3985 	if (delete)
3986 		tgtdev->dev_removed = 1;
3987 
3988 	if (ublock) {
3989 		if (mpi3mr_atomic_read(&tgtdev->block_io) > 0)
3990 			mpi3mr_atomic_dec(&tgtdev->block_io);
3991 	}
3992 
3993 	if (remove) {
3994 		mpi3mr_dev_rmhs_send_tm(sc, dev_handle, NULL,
3995 					MPI3_CTRL_OP_REMOVE_DEVICE);
3996 	}
3997 	if (hide)
3998 		mpi3mr_dev_rmhs_send_tm(sc, dev_handle, NULL,
3999 					MPI3_CTRL_OP_HIDDEN_ACK);
4000 }
4001 
4002 /**
4003  * mpi3mr_preparereset_evt_th - Prepareforreset evt tophalf
4004  * @sc: Adapter instance reference
4005  * @event_reply: Event data
4006  *
4007  * Blocks and unblocks host level I/O based on the reason code
4008  *
4009  * Return: Nothing
4010  */
4011 static void mpi3mr_preparereset_evt_th(struct mpi3mr_softc *sc,
4012 	Mpi3EventNotificationReply_t *event_reply)
4013 {
4014 	Mpi3EventDataPrepareForReset_t *evtdata =
4015 	    (Mpi3EventDataPrepareForReset_t *)event_reply->EventData;
4016 
4017 	if (evtdata->ReasonCode == MPI3_EVENT_PREPARE_RESET_RC_START) {
4018 		mpi3mr_dprint(sc, MPI3MR_EVENT, "%s :Recieved PrepForReset Event with RC=START\n",
4019 		    __func__);
4020 		if (sc->prepare_for_reset)
4021 			return;
4022 		sc->prepare_for_reset = 1;
4023 		sc->prepare_for_reset_timeout_counter = 0;
4024 	} else if (evtdata->ReasonCode == MPI3_EVENT_PREPARE_RESET_RC_ABORT) {
4025 		mpi3mr_dprint(sc, MPI3MR_EVENT, "%s :Recieved PrepForReset Event with RC=ABORT\n",
4026 		    __func__);
4027 		sc->prepare_for_reset = 0;
4028 		sc->prepare_for_reset_timeout_counter = 0;
4029 	}
4030 	if ((event_reply->MsgFlags & MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_MASK)
4031 	    == MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_REQUIRED)
4032 		mpi3mr_send_evt_ack(sc, event_reply->Event, NULL,
4033 		    le32toh(event_reply->EventContext));
4034 }
4035 
4036 /**
4037  * mpi3mr_energypackchg_evt_th - Energypackchange evt tophalf
4038  * @sc: Adapter instance reference
4039  * @event_reply: Event data
4040  *
4041  * Identifies the new shutdown timeout value and update.
4042  *
4043  * Return: Nothing
4044  */
4045 static void mpi3mr_energypackchg_evt_th(struct mpi3mr_softc *sc,
4046 	Mpi3EventNotificationReply_t *event_reply)
4047 {
4048 	Mpi3EventDataEnergyPackChange_t *evtdata =
4049 	    (Mpi3EventDataEnergyPackChange_t *)event_reply->EventData;
4050 	U16 shutdown_timeout = le16toh(evtdata->ShutdownTimeout);
4051 
4052 	if (shutdown_timeout <= 0) {
4053 		mpi3mr_dprint(sc, MPI3MR_ERROR,
4054 		    "%s :Invalid Shutdown Timeout received = %d\n",
4055 		    __func__, shutdown_timeout);
4056 		return;
4057 	}
4058 
4059 	mpi3mr_dprint(sc, MPI3MR_EVENT,
4060 	    "%s :Previous Shutdown Timeout Value = %d New Shutdown Timeout Value = %d\n",
4061 	    __func__, sc->facts.shutdown_timeout, shutdown_timeout);
4062 	sc->facts.shutdown_timeout = shutdown_timeout;
4063 }
4064 
4065 /**
4066  * mpi3mr_cablemgmt_evt_th - Cable mgmt evt tophalf
4067  * @sc: Adapter instance reference
4068  * @event_reply: Event data
4069  *
4070  * Displays Cable manegemt event details.
4071  *
4072  * Return: Nothing
4073  */
4074 static void mpi3mr_cablemgmt_evt_th(struct mpi3mr_softc *sc,
4075 	Mpi3EventNotificationReply_t *event_reply)
4076 {
4077 	Mpi3EventDataCableManagement_t *evtdata =
4078 	    (Mpi3EventDataCableManagement_t *)event_reply->EventData;
4079 
4080 	switch (evtdata->Status) {
4081 	case MPI3_EVENT_CABLE_MGMT_STATUS_INSUFFICIENT_POWER:
4082 	{
4083 		mpi3mr_dprint(sc, MPI3MR_INFO, "An active cable with ReceptacleID %d cannot be powered.\n"
4084 		    "Devices connected to this cable are not detected.\n"
4085 		    "This cable requires %d mW of power.\n",
4086 		    evtdata->ReceptacleID,
4087 		    le32toh(evtdata->ActiveCablePowerRequirement));
4088 		break;
4089 	}
4090 	case MPI3_EVENT_CABLE_MGMT_STATUS_DEGRADED:
4091 	{
4092 		mpi3mr_dprint(sc, MPI3MR_INFO, "A cable with ReceptacleID %d is not running at optimal speed\n",
4093 		    evtdata->ReceptacleID);
4094 		break;
4095 	}
4096 	default:
4097 		break;
4098 	}
4099 }
4100 
4101 /**
4102  * mpi3mr_process_events - Event's toph-half handler
4103  * @sc: Adapter instance reference
4104  * @event_reply: Event data
4105  *
4106  * Top half of event processing.
4107  *
4108  * Return: Nothing
4109  */
4110 static void mpi3mr_process_events(struct mpi3mr_softc *sc,
4111     uintptr_t data, Mpi3EventNotificationReply_t *event_reply)
4112 {
4113 	U16 evt_type;
4114 	bool ack_req = 0, process_evt_bh = 0;
4115 	struct mpi3mr_fw_event_work *fw_event;
4116 	U16 sz;
4117 
4118 	if (sc->mpi3mr_flags & MPI3MR_FLAGS_SHUTDOWN)
4119 		goto out;
4120 
4121 	if ((event_reply->MsgFlags & MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_MASK)
4122 	    == MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_REQUIRED)
4123 		ack_req = 1;
4124 
4125 	evt_type = event_reply->Event;
4126 
4127 	switch (evt_type) {
4128 	case MPI3_EVENT_DEVICE_ADDED:
4129 	{
4130 		Mpi3DevicePage0_t *dev_pg0 =
4131 			(Mpi3DevicePage0_t *) event_reply->EventData;
4132 		if (mpi3mr_create_device(sc, dev_pg0))
4133 			mpi3mr_dprint(sc, MPI3MR_ERROR,
4134 			"%s :Failed to add device in the device add event\n",
4135 			__func__);
4136 		else
4137 			process_evt_bh = 1;
4138 		break;
4139 	}
4140 
4141 	case MPI3_EVENT_DEVICE_STATUS_CHANGE:
4142 	{
4143 		process_evt_bh = 1;
4144 		mpi3mr_devstatuschg_evt_th(sc, event_reply);
4145 		break;
4146 	}
4147 	case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
4148 	{
4149 		process_evt_bh = 1;
4150 		mpi3mr_sastopochg_evt_th(sc, event_reply);
4151 		break;
4152 	}
4153 	case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
4154 	{
4155 		process_evt_bh = 1;
4156 		mpi3mr_pcietopochg_evt_th(sc, event_reply);
4157 		break;
4158 	}
4159 	case MPI3_EVENT_PREPARE_FOR_RESET:
4160 	{
4161 		mpi3mr_preparereset_evt_th(sc, event_reply);
4162 		ack_req = 0;
4163 		break;
4164 	}
4165 	case MPI3_EVENT_DEVICE_INFO_CHANGED:
4166 	case MPI3_EVENT_LOG_DATA:
4167 	{
4168 		process_evt_bh = 1;
4169 		break;
4170 	}
4171 	case MPI3_EVENT_ENERGY_PACK_CHANGE:
4172 	{
4173 		mpi3mr_energypackchg_evt_th(sc, event_reply);
4174 		break;
4175 	}
4176 	case MPI3_EVENT_CABLE_MGMT:
4177 	{
4178 		mpi3mr_cablemgmt_evt_th(sc, event_reply);
4179 		break;
4180 	}
4181 
4182 	case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE:
4183 	case MPI3_EVENT_SAS_DISCOVERY:
4184 	case MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
4185 	case MPI3_EVENT_SAS_BROADCAST_PRIMITIVE:
4186 	case MPI3_EVENT_PCIE_ENUMERATION:
4187 		break;
4188 	default:
4189 		mpi3mr_dprint(sc, MPI3MR_INFO, "%s :Event 0x%02x is not handled by driver\n",
4190 		    __func__, evt_type);
4191 		break;
4192 	}
4193 
4194 	if (process_evt_bh || ack_req) {
4195 		fw_event = malloc(sizeof(struct mpi3mr_fw_event_work), M_MPI3MR,
4196 		     M_ZERO|M_NOWAIT);
4197 
4198 		if (!fw_event) {
4199 			printf("%s: allocate failed for fw_event\n", __func__);
4200 			return;
4201 		}
4202 
4203 		sz = le16toh(event_reply->EventDataLength) * 4;
4204 		fw_event->event_data = malloc(sz, M_MPI3MR, M_ZERO|M_NOWAIT);
4205 
4206 		if (!fw_event->event_data) {
4207 			printf("%s: allocate failed for event_data\n", __func__);
4208 			free(fw_event, M_MPI3MR);
4209 			return;
4210 		}
4211 
4212 		bcopy(event_reply->EventData, fw_event->event_data, sz);
4213 		fw_event->event = event_reply->Event;
4214 		if ((event_reply->Event == MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST ||
4215 		    event_reply->Event == MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST ||
4216 		    event_reply->Event == MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE ) &&
4217 		    sc->track_mapping_events)
4218 			sc->pending_map_events++;
4219 
4220 		/*
4221 		 * Events should be processed after Port enable is completed.
4222 		 */
4223 		if ((event_reply->Event == MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST ||
4224 		    event_reply->Event == MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST ) &&
4225 		    !(sc->mpi3mr_flags & MPI3MR_FLAGS_PORT_ENABLE_DONE))
4226 			mpi3mr_startup_increment(sc->cam_sc);
4227 
4228 		fw_event->send_ack = ack_req;
4229 		fw_event->event_context = le32toh(event_reply->EventContext);
4230 		fw_event->event_data_size = sz;
4231 		fw_event->process_event = process_evt_bh;
4232 
4233 		mtx_lock(&sc->fwevt_lock);
4234 		TAILQ_INSERT_TAIL(&sc->cam_sc->ev_queue, fw_event, ev_link);
4235 		taskqueue_enqueue(sc->cam_sc->ev_tq, &sc->cam_sc->ev_task);
4236 		mtx_unlock(&sc->fwevt_lock);
4237 
4238 	}
4239 out:
4240 	return;
4241 }
4242 
4243 static void mpi3mr_handle_events(struct mpi3mr_softc *sc, uintptr_t data,
4244     Mpi3DefaultReply_t *def_reply)
4245 {
4246 	Mpi3EventNotificationReply_t *event_reply =
4247 		(Mpi3EventNotificationReply_t *)def_reply;
4248 
4249 	sc->change_count = event_reply->IOCChangeCount;
4250 	mpi3mr_display_event_data(sc, event_reply);
4251 
4252 	mpi3mr_process_events(sc, data, event_reply);
4253 }
4254 
/*
 * mpi3mr_process_admin_reply_desc - Process one admin reply descriptor
 * @sc: Adapter instance reference
 * @reply_desc: Reply descriptor to decode
 * @reply_dma: Out: DMA address of the reply frame, or 0 if none
 *
 * Decodes the descriptor (status, address reply, or success),
 * routes async event notifications, and completes the matching
 * internal command tracker.
 */
static void mpi3mr_process_admin_reply_desc(struct mpi3mr_softc *sc,
    Mpi3DefaultReplyDescriptor_t *reply_desc, U64 *reply_dma)
{
	U16 reply_desc_type, host_tag = 0, idx;
	U16 ioc_status = MPI3_IOCSTATUS_SUCCESS;
	U32 ioc_loginfo = 0;
	Mpi3StatusReplyDescriptor_t *status_desc;
	Mpi3AddressReplyDescriptor_t *addr_desc;
	Mpi3SuccessReplyDescriptor_t *success_desc;
	Mpi3DefaultReply_t *def_reply = NULL;
	struct mpi3mr_drvr_cmd *cmdptr = NULL;
	Mpi3SCSIIOReply_t *scsi_reply;
	U8 *sense_buf = NULL;

	*reply_dma = 0;
	/* Extract host tag, IOC status and log info per descriptor type. */
	reply_desc_type = reply_desc->ReplyFlags &
			    MPI3_REPLY_DESCRIPT_FLAGS_TYPE_MASK;
	switch (reply_desc_type) {
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_STATUS:
		status_desc = (Mpi3StatusReplyDescriptor_t *)reply_desc;
		host_tag = status_desc->HostTag;
		ioc_status = status_desc->IOCStatus;
		if (ioc_status &
		    MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
			ioc_loginfo = status_desc->IOCLogInfo;
		ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
		break;
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_ADDRESS_REPLY:
		/* Descriptor points at a full reply frame in host memory. */
		addr_desc = (Mpi3AddressReplyDescriptor_t *)reply_desc;
		*reply_dma = addr_desc->ReplyFrameAddress;
		def_reply = mpi3mr_get_reply_virt_addr(sc, *reply_dma);
		if (def_reply == NULL)
			goto out;
		host_tag = def_reply->HostTag;
		ioc_status = def_reply->IOCStatus;
		if (ioc_status &
		    MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
			ioc_loginfo = def_reply->IOCLogInfo;
		ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
		if (def_reply->Function == MPI3_FUNCTION_SCSI_IO) {
			scsi_reply = (Mpi3SCSIIOReply_t *)def_reply;
			sense_buf = mpi3mr_get_sensebuf_virt_addr(sc,
			    scsi_reply->SenseDataBufferAddress);
		}
		break;
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SUCCESS:
		success_desc = (Mpi3SuccessReplyDescriptor_t *)reply_desc;
		host_tag = success_desc->HostTag;
		break;
	default:
		break;
	}
	/* Map the host tag back to the internal command tracker. */
	switch (host_tag) {
	case MPI3MR_HOSTTAG_INITCMDS:
		cmdptr = &sc->init_cmds;
		break;
	case MPI3MR_HOSTTAG_IOCTLCMDS:
		cmdptr = &sc->ioctl_cmds;
		break;
	case MPI3MR_HOSTTAG_TMS:
		cmdptr = &sc->host_tm_cmds;
		/* Wake any thread sleeping on the TM channel. */
		wakeup((void *)&sc->tm_chan);
		break;
	case MPI3MR_HOSTTAG_PELABORT:
		cmdptr = &sc->pel_abort_cmd;
		break;
	case MPI3MR_HOSTTAG_PELWAIT:
		cmdptr = &sc->pel_cmds;
		break;
	case MPI3MR_HOSTTAG_INVALID:
		/* Tag-less replies carry async event notifications. */
		if (def_reply && def_reply->Function ==
		    MPI3_FUNCTION_EVENT_NOTIFICATION)
			mpi3mr_handle_events(sc, *reply_dma ,def_reply);
		/* FALLTHROUGH */
	default:
		break;
	}

	/* Device-removal and event-ack trackers occupy tag ranges. */
	if (host_tag >= MPI3MR_HOSTTAG_DEVRMCMD_MIN &&
	    host_tag <= MPI3MR_HOSTTAG_DEVRMCMD_MAX ) {
		idx = host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
		cmdptr = &sc->dev_rmhs_cmds[idx];
	}

	if (host_tag >= MPI3MR_HOSTTAG_EVTACKCMD_MIN &&
	    host_tag <= MPI3MR_HOSTTAG_EVTACKCMD_MAX) {
		idx = host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN;
		cmdptr = &sc->evtack_cmds[idx];
	}

	if (cmdptr) {
		if (cmdptr->state & MPI3MR_CMD_PENDING) {
			/* Record status and copy reply/sense data into the
			 * tracker before signalling completion. */
			cmdptr->state |= MPI3MR_CMD_COMPLETE;
			cmdptr->ioc_loginfo = ioc_loginfo;
			cmdptr->ioc_status = ioc_status;
			cmdptr->state &= ~MPI3MR_CMD_PENDING;
			if (def_reply) {
				cmdptr->state |= MPI3MR_CMD_REPLYVALID;
				memcpy((U8 *)cmdptr->reply, (U8 *)def_reply,
				    sc->reply_sz);
			}
			if (sense_buf && cmdptr->sensebuf) {
				cmdptr->is_senseprst = 1;
				memcpy(cmdptr->sensebuf, sense_buf,
				    MPI3MR_SENSEBUF_SZ);
			}
			/* Wake a synchronous waiter, or run the async
			 * completion callback. */
			if (cmdptr->is_waiting) {
				complete(&cmdptr->completion);
				cmdptr->is_waiting = 0;
			} else if (cmdptr->callback)
				cmdptr->callback(sc, cmdptr);
		}
	}
out:
	/* sense_buf is only non-NULL in the SCSI_IO address-reply path
	 * above, where scsi_reply was set as well. */
	if (sense_buf != NULL)
		mpi3mr_repost_sense_buf(sc,
		    scsi_reply->SenseDataBufferAddress);
	return;
}
4373 
4374 /*
4375  * mpi3mr_complete_admin_cmd:	ISR routine for admin commands
4376  * @sc:				Adapter's soft instance
4377  *
4378  * This function processes admin command completions.
4379  */
4380 static int mpi3mr_complete_admin_cmd(struct mpi3mr_softc *sc)
4381 {
4382 	U32 exp_phase = sc->admin_reply_ephase;
4383 	U32 adm_reply_ci = sc->admin_reply_ci;
4384 	U32 num_adm_reply = 0;
4385 	U64 reply_dma = 0;
4386 	Mpi3DefaultReplyDescriptor_t *reply_desc;
4387 	U16 threshold_comps = 0;
4388 
4389 	mtx_lock_spin(&sc->admin_reply_lock);
4390 	if (sc->admin_in_use == false) {
4391 		sc->admin_in_use = true;
4392 		mtx_unlock_spin(&sc->admin_reply_lock);
4393 	} else {
4394 		mtx_unlock_spin(&sc->admin_reply_lock);
4395 		return 0;
4396 	}
4397 
4398 	reply_desc = (Mpi3DefaultReplyDescriptor_t *)sc->admin_reply +
4399 		adm_reply_ci;
4400 
4401 	if ((reply_desc->ReplyFlags &
4402 	     MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase) {
4403 		mtx_lock_spin(&sc->admin_reply_lock);
4404 		sc->admin_in_use = false;
4405 		mtx_unlock_spin(&sc->admin_reply_lock);
4406 		return 0;
4407 	}
4408 
4409 	do {
4410 		sc->admin_req_ci = reply_desc->RequestQueueCI;
4411 		mpi3mr_process_admin_reply_desc(sc, reply_desc, &reply_dma);
4412 		if (reply_dma)
4413 			mpi3mr_repost_reply_buf(sc, reply_dma);
4414 		num_adm_reply++;
4415 		if (++adm_reply_ci == sc->num_admin_replies) {
4416 			adm_reply_ci = 0;
4417 			exp_phase ^= 1;
4418 		}
4419 		reply_desc =
4420 			(Mpi3DefaultReplyDescriptor_t *)sc->admin_reply +
4421 			    adm_reply_ci;
4422 		if ((reply_desc->ReplyFlags &
4423 		     MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
4424 			break;
4425 
4426 		if (++threshold_comps == MPI3MR_THRESHOLD_REPLY_COUNT) {
4427 			mpi3mr_regwrite(sc, MPI3_SYSIF_ADMIN_REPLY_Q_CI_OFFSET, adm_reply_ci);
4428 			threshold_comps = 0;
4429 		}
4430 	} while (1);
4431 
4432 	mpi3mr_regwrite(sc, MPI3_SYSIF_ADMIN_REPLY_Q_CI_OFFSET, adm_reply_ci);
4433 	sc->admin_reply_ci = adm_reply_ci;
4434 	sc->admin_reply_ephase = exp_phase;
4435 	mtx_lock_spin(&sc->admin_reply_lock);
4436 	sc->admin_in_use = false;
4437 	mtx_unlock_spin(&sc->admin_reply_lock);
4438 	return num_adm_reply;
4439 }
4440 
/*
 * mpi3mr_cmd_done - Finish a completed I/O command
 * @sc: Adapter's soft instance
 * @cmd: Driver command tracker for the completed I/O
 *
 * Unmaps the request's DMA, cancels the per-command timeout
 * callout, returns the CCB to CAM and releases the command
 * tracker back to the free pool.
 */
static void
mpi3mr_cmd_done(struct mpi3mr_softc *sc, struct mpi3mr_cmd *cmd)
{
	mpi3mr_unmap_request(sc, cmd);

	mtx_lock(&sc->mpi3mr_mtx);
	/* Stop the timeout callout while holding the softc lock. */
	if (cmd->callout_owner) {
		callout_stop(&cmd->callout);
		cmd->callout_owner = false;
	}

	/* If the controller is unrecoverable, report the device gone. */
	if (sc->unrecoverable)
		mpi3mr_set_ccbstatus(cmd->ccb, CAM_DEV_NOT_THERE);

	xpt_done(cmd->ccb);
	cmd->ccb = NULL;
	mtx_unlock(&sc->mpi3mr_mtx);
	mpi3mr_release_command(cmd);
}
4460 
/*
 * mpi3mr_process_op_reply_desc:	Process one operational reply descriptor
 * @sc:		Adapter's soft instance
 * @reply_desc:	Descriptor pulled off an operational reply queue
 * @reply_dma:	Out parameter: set to the DMA address of the reply frame
 *		for address-type descriptors (left 0 otherwise) so the
 *		caller knows to repost the frame
 *
 * Decodes the descriptor (status / address-reply / success), maps it back
 * to the owning driver command via the host tag, performs IO-throttling
 * bookkeeping, translates the IOC/SCSI status into a CAM ccb status and
 * completes the ccb via mpi3mr_cmd_done().
 */
void mpi3mr_process_op_reply_desc(struct mpi3mr_softc *sc,
    Mpi3DefaultReplyDescriptor_t *reply_desc, U64 *reply_dma)
{
	U16 reply_desc_type, host_tag = 0;
	U16 ioc_status = MPI3_IOCSTATUS_SUCCESS;
	U32 ioc_loginfo = 0;
	Mpi3StatusReplyDescriptor_t *status_desc = NULL;
	Mpi3AddressReplyDescriptor_t *addr_desc = NULL;
	Mpi3SuccessReplyDescriptor_t *success_desc = NULL;
	Mpi3SCSIIOReply_t *scsi_reply = NULL;
	U8 *sense_buf = NULL;
	U8 scsi_state = 0, scsi_status = 0, sense_state = 0;
	U32 xfer_count = 0, sense_count =0, resp_data = 0;
	struct mpi3mr_cmd *cm = NULL;
	union ccb *ccb;
	struct ccb_scsiio *csio;
	struct mpi3mr_cam_softc *cam_sc;
	U32 target_id;
	U8 *scsi_cdb;
	struct mpi3mr_target *target = NULL;
	U32 ioc_pend_data_len = 0, tg_pend_data_len = 0, data_len_blks = 0;
	struct mpi3mr_throttle_group_info *tg = NULL;
	U8 throttle_enabled_dev = 0;
	/* Rate limiter for the throttle debug prints; persists across calls. */
	static int ratelimit;

	*reply_dma = 0;
	reply_desc_type = reply_desc->ReplyFlags &
			    MPI3_REPLY_DESCRIPT_FLAGS_TYPE_MASK;
	switch (reply_desc_type) {
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_STATUS:
		/* Inline status descriptor: no separate reply frame to fetch. */
		status_desc = (Mpi3StatusReplyDescriptor_t *)reply_desc;
		host_tag = status_desc->HostTag;
		ioc_status = status_desc->IOCStatus;
		if (ioc_status &
		    MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
			ioc_loginfo = status_desc->IOCLogInfo;
		ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
		break;
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_ADDRESS_REPLY:
		/* Full SCSI IO reply frame lives in host memory at *reply_dma;
		 * the caller reposts it once we return. */
		addr_desc = (Mpi3AddressReplyDescriptor_t *)reply_desc;
		*reply_dma = addr_desc->ReplyFrameAddress;
		scsi_reply = mpi3mr_get_reply_virt_addr(sc,
		    *reply_dma);
		if (scsi_reply == NULL) {
			mpi3mr_dprint(sc, MPI3MR_ERROR, "scsi_reply is NULL, "
			    "this shouldn't happen, reply_desc: %p\n",
			    reply_desc);
			goto out;
		}

		host_tag = scsi_reply->HostTag;
		ioc_status = scsi_reply->IOCStatus;
		scsi_status = scsi_reply->SCSIStatus;
		scsi_state = scsi_reply->SCSIState;
		sense_state = (scsi_state & MPI3_SCSI_STATE_SENSE_MASK);
		xfer_count = scsi_reply->TransferCount;
		sense_count = scsi_reply->SenseCount;
		resp_data = scsi_reply->ResponseData;
		sense_buf = mpi3mr_get_sensebuf_virt_addr(sc,
		    scsi_reply->SenseDataBufferAddress);
		if (ioc_status &
		    MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
			ioc_loginfo = scsi_reply->IOCLogInfo;
		ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
		if (sense_state == MPI3_SCSI_STATE_SENSE_BUFF_Q_EMPTY)
			mpi3mr_dprint(sc, MPI3MR_ERROR, "Ran out of sense buffers\n");

		break;
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SUCCESS:
		success_desc = (Mpi3SuccessReplyDescriptor_t *)reply_desc;
		host_tag = success_desc->HostTag;
		/* NOTE(review): no break here; control falls through into the
		 * (empty) default case.  Harmless today but fragile if the
		 * default ever grows code. */

	default:
		break;
	}

	/* NOTE(review): host_tag is not range-checked and cm is not
	 * NULL-checked before the dereference below; this relies on the
	 * firmware always returning a valid in-range tag -- TODO confirm. */
	cm = sc->cmd_list[host_tag];

	if (cm->state == MPI3MR_CMD_STATE_FREE)
		goto out;

	cam_sc = sc->cam_sc;
	ccb = cm->ccb;
	csio = &ccb->csio;
	target_id = csio->ccb_h.target_id;

	scsi_cdb = scsiio_cdb_ptr(csio);

	target = mpi3mr_find_target_by_per_id(cam_sc, target_id);
	/*
	 * IO throttling accounting: subtract this IO from the global and
	 * per-throttle-group pending large-IO counters and clear the divert
	 * flag(s) once pending amounts drop below the low watermarks.
	 */
	if (sc->iot_enable) {
		data_len_blks = csio->dxfer_len >> 9;	/* bytes -> 512B blocks */

		if (target) {
			tg = target->throttle_group;
			throttle_enabled_dev =
				target->io_throttle_enabled;
		}

		if ((data_len_blks >= sc->io_throttle_data_length) &&
		     throttle_enabled_dev) {
			mpi3mr_atomic_sub(&sc->pend_large_data_sz, data_len_blks);
			ioc_pend_data_len = mpi3mr_atomic_read(
			    &sc->pend_large_data_sz);
			if (tg) {
				mpi3mr_atomic_sub(&tg->pend_large_data_sz,
					data_len_blks);
				tg_pend_data_len = mpi3mr_atomic_read(&tg->pend_large_data_sz);
				/* NOTE(review): ratelimit starts at 0 and is only
				 * incremented inside this branch, so "ratelimit % 1000"
				 * is never non-zero and these debug prints look
				 * unreachable; presumably "% 1000 == 0" was intended --
				 * verify intent before changing. */
				if (ratelimit % 1000) {
					mpi3mr_dprint(sc, MPI3MR_IOT,
						"large vd_io completion persist_id(%d), handle(0x%04x), data_len(%d),"
						"ioc_pending(%d), tg_pending(%d), ioc_low(%d), tg_low(%d)\n",
						    target->per_id,
						    target->dev_handle,
						    data_len_blks, ioc_pend_data_len,
						    tg_pend_data_len,
						    sc->io_throttle_low,
						    tg->low);
					ratelimit++;
				}
				if (tg->io_divert  && ((ioc_pend_data_len <=
				    sc->io_throttle_low) &&
				    (tg_pend_data_len <= tg->low))) {
					tg->io_divert = 0;
					mpi3mr_dprint(sc, MPI3MR_IOT,
						"VD: Coming out of divert perst_id(%d) tg_id(%d)\n",
						target->per_id, tg->id);
					mpi3mr_set_io_divert_for_all_vd_in_tg(
					    sc, tg, 0);
				}
			} else {
				if (ratelimit % 1000) {
					mpi3mr_dprint(sc, MPI3MR_IOT,
					    "large pd_io completion persist_id(%d), handle(0x%04x), data_len(%d), ioc_pending(%d), ioc_low(%d)\n",
					    target->per_id,
					    target->dev_handle,
					    data_len_blks, ioc_pend_data_len,
					    sc->io_throttle_low);
					ratelimit++;
				}

				if (ioc_pend_data_len <= sc->io_throttle_low) {
					target->io_divert = 0;
					mpi3mr_dprint(sc, MPI3MR_IOT,
						"PD: Coming out of divert perst_id(%d)\n",
						target->per_id);
				}
			}

			/* NOTE(review): target is only NULL-checked above for the
			 * tg/throttle_enabled_dev assignment; if the lookup failed,
			 * this dereference is a NULL-pointer access -- TODO confirm
			 * a completion can never arrive for an unknown target.
			 * (The brace indentation here is also misleading; this is
			 * the else-branch of the large-IO check above.) */
			} else if (target->io_divert) {
			ioc_pend_data_len = mpi3mr_atomic_read(&sc->pend_large_data_sz);
			if (!tg) {
				if (ratelimit % 1000) {
					mpi3mr_dprint(sc, MPI3MR_IOT,
					    "pd_io completion persist_id(%d), handle(0x%04x), data_len(%d), ioc_pending(%d), ioc_low(%d)\n",
					    target->per_id,
					    target->dev_handle,
					    data_len_blks, ioc_pend_data_len,
					    sc->io_throttle_low);
					ratelimit++;
				}

				if ( ioc_pend_data_len <= sc->io_throttle_low) {
					mpi3mr_dprint(sc, MPI3MR_IOT,
						"PD: Coming out of divert perst_id(%d)\n",
						target->per_id);
					target->io_divert = 0;
				}

			} else if (ioc_pend_data_len <= sc->io_throttle_low) {
				tg_pend_data_len = mpi3mr_atomic_read(&tg->pend_large_data_sz);
				if (ratelimit % 1000) {
					mpi3mr_dprint(sc, MPI3MR_IOT,
						"vd_io completion persist_id(%d), handle(0x%04x), data_len(%d),"
						"ioc_pending(%d), tg_pending(%d), ioc_low(%d), tg_low(%d)\n",
						    target->per_id,
						    target->dev_handle,
						    data_len_blks, ioc_pend_data_len,
						    tg_pend_data_len,
						    sc->io_throttle_low,
						    tg->low);
					ratelimit++;
				}
				if (tg->io_divert  && (tg_pend_data_len <= tg->low)) {
					tg->io_divert = 0;
					mpi3mr_dprint(sc, MPI3MR_IOT,
						"VD: Coming out of divert perst_id(%d) tg_id(%d)\n",
						target->per_id, tg->id);
					mpi3mr_set_io_divert_for_all_vd_in_tg(
					    sc, tg, 0);
				}

			}
		}
	}

	/* Success descriptor: the IO completed cleanly, skip status decode. */
	if (success_desc) {
		mpi3mr_set_ccbstatus(ccb, CAM_REQ_CMP);
		goto out_success;
	}

	/* A zero-byte underrun with a busy-class SCSI status is treated as
	 * success so the SCSI status itself drives the ccb status below. */
	if (ioc_status == MPI3_IOCSTATUS_SCSI_DATA_UNDERRUN
	    && xfer_count == 0 && (scsi_status == MPI3_SCSI_STATUS_BUSY ||
	    scsi_status == MPI3_SCSI_STATUS_RESERVATION_CONFLICT ||
	    scsi_status == MPI3_SCSI_STATUS_TASK_SET_FULL))
		ioc_status = MPI3_IOCSTATUS_SUCCESS;

	/* Copy autosense data out of the firmware sense buffer. */
	if ((sense_state == MPI3_SCSI_STATE_SENSE_VALID) && sense_count
	    && sense_buf) {
		int sense_len, returned_sense_len;

		returned_sense_len = min(le32toh(sense_count),
		    sizeof(struct scsi_sense_data));
		if (returned_sense_len < csio->sense_len)
			csio->sense_resid = csio->sense_len -
			    returned_sense_len;
		else
			csio->sense_resid = 0;

		sense_len = min(returned_sense_len,
		    csio->sense_len - csio->sense_resid);
		bzero(&csio->sense_data, sizeof(csio->sense_data));
		bcopy(sense_buf, &csio->sense_data, sense_len);
		ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
	}

	/* Translate the IOC status into a CAM ccb status.
	 * NOTE(review): several cases below dereference scsi_reply, which is
	 * only non-NULL for address-reply descriptors; status-type descriptors
	 * reporting these IOC statuses would crash -- TODO confirm the firmware
	 * only delivers them via address replies. */
	switch (ioc_status) {
	case MPI3_IOCSTATUS_BUSY:
	case MPI3_IOCSTATUS_INSUFFICIENT_RESOURCES:
		mpi3mr_set_ccbstatus(ccb, CAM_REQUEUE_REQ);
		break;
	case MPI3_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		/*
		 * If devinfo is 0 this will be a volume.  In that case don't
		 * tell CAM that the volume is not there.  We want volumes to
		 * be enumerated until they are deleted/removed, not just
		 * failed.
		 */
		if (cm->targ->devinfo == 0)
			mpi3mr_set_ccbstatus(ccb, CAM_REQ_CMP);
		else
			mpi3mr_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		break;
	case MPI3_IOCSTATUS_SCSI_TASK_TERMINATED:
	case MPI3_IOCSTATUS_SCSI_IOC_TERMINATED:
	case MPI3_IOCSTATUS_SCSI_EXT_TERMINATED:
		mpi3mr_set_ccbstatus(ccb, CAM_SCSI_BUSY);
		mpi3mr_dprint(sc, MPI3MR_TRACE,
		    "func: %s line:%d tgt %u Hosttag %u loginfo %x\n",
		    __func__, __LINE__,
		    target_id, cm->hosttag,
		    le32toh(scsi_reply->IOCLogInfo));
		mpi3mr_dprint(sc, MPI3MR_TRACE,
		    "SCSIStatus %x SCSIState %x xfercount %u\n",
		    scsi_reply->SCSIStatus, scsi_reply->SCSIState,
		    le32toh(xfer_count));
		break;
	case MPI3_IOCSTATUS_SCSI_DATA_OVERRUN:
		/* resid is ignored for this condition */
		csio->resid = 0;
		mpi3mr_set_ccbstatus(ccb, CAM_DATA_RUN_ERR);
		break;
	case MPI3_IOCSTATUS_SCSI_DATA_UNDERRUN:
		csio->resid = cm->length - le32toh(xfer_count);
		/* FALLTHROUGH: after recording resid, an underrun is handled
		 * like a normal completion using the SCSI status below. */
	case MPI3_IOCSTATUS_SCSI_RECOVERED_ERROR:
	case MPI3_IOCSTATUS_SUCCESS:
		if ((scsi_reply->IOCStatus & MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK) ==
		    MPI3_IOCSTATUS_SCSI_RECOVERED_ERROR)
			mpi3mr_dprint(sc, MPI3MR_XINFO, "func: %s line: %d recovered error\n",  __func__, __LINE__);

		/* Completion failed at the transport level. */
		if (scsi_reply->SCSIState & (MPI3_SCSI_STATE_NO_SCSI_STATUS |
		    MPI3_SCSI_STATE_TERMINATED)) {
			mpi3mr_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
			break;
		}

		/* In a modern packetized environment, an autosense failure
		 * implies that there's not much else that can be done to
		 * recover the command.
		 *
		 * NOTE(review): this tests SENSE_VALID, which makes the
		 * sense-copy branch further down unreachable; comparable
		 * drivers test a sense-FAILED state here -- verify which
		 * state bit was intended.
		 */
		if (scsi_reply->SCSIState & MPI3_SCSI_STATE_SENSE_VALID) {
			mpi3mr_set_ccbstatus(ccb, CAM_AUTOSENSE_FAIL);
			break;
		}

		/*
		 * Intentionally override the normal SCSI status reporting
		 * for these two cases.  These are likely to happen in a
		 * multi-initiator environment, and we want to make sure that
		 * CAM retries these commands rather than fail them.
		 */
		if ((scsi_reply->SCSIStatus == MPI3_SCSI_STATUS_COMMAND_TERMINATED) ||
		    (scsi_reply->SCSIStatus == MPI3_SCSI_STATUS_TASK_ABORTED)) {
			mpi3mr_set_ccbstatus(ccb, CAM_REQ_ABORTED);
			break;
		}

		/* Handle normal status and sense */
		csio->scsi_status = scsi_reply->SCSIStatus;
		if (scsi_reply->SCSIStatus == MPI3_SCSI_STATUS_GOOD)
			mpi3mr_set_ccbstatus(ccb, CAM_REQ_CMP);
		else
			mpi3mr_set_ccbstatus(ccb, CAM_SCSI_STATUS_ERROR);

		if (scsi_reply->SCSIState & MPI3_SCSI_STATE_SENSE_VALID) {
			int sense_len, returned_sense_len;

			returned_sense_len = min(le32toh(scsi_reply->SenseCount),
			    sizeof(struct scsi_sense_data));
			if (returned_sense_len < csio->sense_len)
				csio->sense_resid = csio->sense_len -
				    returned_sense_len;
			else
				csio->sense_resid = 0;

			sense_len = min(returned_sense_len,
			    csio->sense_len - csio->sense_resid);
			bzero(&csio->sense_data, sizeof(csio->sense_data));
			bcopy(cm->sense, &csio->sense_data, sense_len);
			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
		}

		break;
	case MPI3_IOCSTATUS_INVALID_SGL:
		mpi3mr_set_ccbstatus(ccb, CAM_UNREC_HBA_ERROR);
		break;
	case MPI3_IOCSTATUS_EEDP_GUARD_ERROR:
	case MPI3_IOCSTATUS_EEDP_REF_TAG_ERROR:
	case MPI3_IOCSTATUS_EEDP_APP_TAG_ERROR:
	case MPI3_IOCSTATUS_SCSI_PROTOCOL_ERROR:
	case MPI3_IOCSTATUS_INVALID_FUNCTION:
	case MPI3_IOCSTATUS_INTERNAL_ERROR:
	case MPI3_IOCSTATUS_INVALID_FIELD:
	case MPI3_IOCSTATUS_INVALID_STATE:
	case MPI3_IOCSTATUS_SCSI_IO_DATA_ERROR:
	case MPI3_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
	case MPI3_IOCSTATUS_INSUFFICIENT_POWER:
	case MPI3_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
	default:
		/* Unrecoverable: report nothing transferred. */
		csio->resid = cm->length;
		mpi3mr_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		break;
	}

out_success:
	/* CAM convention: freeze the device queue on any non-success status
	 * so the peripheral driver controls the release/retry. */
	if (mpi3mr_get_ccbstatus(ccb) != CAM_REQ_CMP) {
		ccb->ccb_h.status |= CAM_DEV_QFRZN;
		xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
	}

	mpi3mr_atomic_dec(&cm->targ->outstanding);
	mpi3mr_cmd_done(sc, cm);
	mpi3mr_dprint(sc, MPI3MR_TRACE, "Completion IO path :"
		" cdb[0]: %x targetid: 0x%x SMID: %x ioc_status: 0x%x ioc_loginfo: 0x%x scsi_status: 0x%x "
		"scsi_state: 0x%x response_data: 0x%x\n", scsi_cdb[0], target_id, host_tag,
		ioc_status, ioc_loginfo, scsi_status, scsi_state, resp_data);
	mpi3mr_atomic_dec(&sc->fw_outstanding);
out:

	/* sense_buf is only set in the address-reply path, so scsi_reply is
	 * guaranteed non-NULL whenever this repost runs. */
	if (sense_buf)
		mpi3mr_repost_sense_buf(sc,
		    scsi_reply->SenseDataBufferAddress);
	return;
}
4825 
4826 /*
4827  * mpi3mr_complete_io_cmd:	ISR routine for IO commands
4828  * @sc:				Adapter's soft instance
4829  * @irq_ctx:			Driver's internal per IRQ structure
4830  *
4831  * This function processes IO command completions.
4832  */
4833 int mpi3mr_complete_io_cmd(struct mpi3mr_softc *sc,
4834     struct mpi3mr_irq_context *irq_ctx)
4835 {
4836 	struct mpi3mr_op_reply_queue *op_reply_q = irq_ctx->op_reply_q;
4837 	U32 exp_phase = op_reply_q->ephase;
4838 	U32 reply_ci = op_reply_q->ci;
4839 	U32 num_op_replies = 0;
4840 	U64 reply_dma = 0;
4841 	Mpi3DefaultReplyDescriptor_t *reply_desc;
4842 	U16 req_qid = 0, threshold_comps = 0;
4843 
4844 	mtx_lock_spin(&op_reply_q->q_lock);
4845 	if (op_reply_q->in_use == false) {
4846 		op_reply_q->in_use = true;
4847 		mtx_unlock_spin(&op_reply_q->q_lock);
4848 	} else {
4849 		mtx_unlock_spin(&op_reply_q->q_lock);
4850 		return 0;
4851 	}
4852 
4853 	reply_desc = (Mpi3DefaultReplyDescriptor_t *)op_reply_q->q_base + reply_ci;
4854 	mpi3mr_dprint(sc, MPI3MR_TRACE, "[QID:%d]:reply_desc: (%pa) reply_ci: %x"
4855 		" reply_desc->ReplyFlags: 0x%x\n"
4856 		"reply_q_base_phys: %#016jx reply_q_base: (%pa) exp_phase: %x\n",
4857 		op_reply_q->qid, reply_desc, reply_ci, reply_desc->ReplyFlags, op_reply_q->q_base_phys,
4858 		op_reply_q->q_base, exp_phase);
4859 
4860 	if (((reply_desc->ReplyFlags &
4861 	     MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase) || !op_reply_q->qid) {
4862 		mtx_lock_spin(&op_reply_q->q_lock);
4863 		op_reply_q->in_use = false;
4864 		mtx_unlock_spin(&op_reply_q->q_lock);
4865 		return 0;
4866 	}
4867 
4868 	do {
4869 		req_qid = reply_desc->RequestQueueID;
4870 		sc->op_req_q[req_qid - 1].ci =
4871 		    reply_desc->RequestQueueCI;
4872 
4873 		mpi3mr_process_op_reply_desc(sc, reply_desc, &reply_dma);
4874 		mpi3mr_atomic_dec(&op_reply_q->pend_ios);
4875 		if (reply_dma)
4876 			mpi3mr_repost_reply_buf(sc, reply_dma);
4877 		num_op_replies++;
4878 		if (++reply_ci == op_reply_q->num_replies) {
4879 			reply_ci = 0;
4880 			exp_phase ^= 1;
4881 		}
4882 		reply_desc =
4883 		    (Mpi3DefaultReplyDescriptor_t *)op_reply_q->q_base + reply_ci;
4884 		if ((reply_desc->ReplyFlags &
4885 		     MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
4886 			break;
4887 
4888 		if (++threshold_comps == MPI3MR_THRESHOLD_REPLY_COUNT) {
4889 			mpi3mr_regwrite(sc, MPI3_SYSIF_OPER_REPLY_Q_N_CI_OFFSET(op_reply_q->qid), reply_ci);
4890 			threshold_comps = 0;
4891 		}
4892 
4893 	} while (1);
4894 
4895 
4896 	mpi3mr_regwrite(sc, MPI3_SYSIF_OPER_REPLY_Q_N_CI_OFFSET(op_reply_q->qid), reply_ci);
4897 	op_reply_q->ci = reply_ci;
4898 	op_reply_q->ephase = exp_phase;
4899 	mtx_lock_spin(&op_reply_q->q_lock);
4900 	op_reply_q->in_use = false;
4901 	mtx_unlock_spin(&op_reply_q->q_lock);
4902 	return num_op_replies;
4903 }
4904 
4905 /*
4906  * mpi3mr_isr:			Primary ISR function
4907  * privdata:			Driver's internal per IRQ structure
4908  *
4909  * This is driver's primary ISR function which is being called whenever any admin/IO
4910  * command completion.
4911  */
4912 void mpi3mr_isr(void *privdata)
4913 {
4914 	struct mpi3mr_irq_context *irq_ctx = (struct mpi3mr_irq_context *)privdata;
4915 	struct mpi3mr_softc *sc = irq_ctx->sc;
4916 	U16 msi_idx;
4917 
4918 	if (!irq_ctx)
4919 		return;
4920 
4921 	msi_idx = irq_ctx->msix_index;
4922 
4923 	if (!sc->intr_enabled)
4924 		return;
4925 
4926 	if (!msi_idx)
4927 		mpi3mr_complete_admin_cmd(sc);
4928 
4929 	if (irq_ctx->op_reply_q && irq_ctx->op_reply_q->qid) {
4930 		mpi3mr_complete_io_cmd(sc, irq_ctx);
4931 	}
4932 }
4933 
4934 /*
4935  * mpi3mr_alloc_requests - Allocates host commands
4936  * @sc: Adapter reference
4937  *
4938  * This function allocates controller supported host commands
4939  *
4940  * Return: 0 on success and proper error codes on failure
4941  */
4942 int
4943 mpi3mr_alloc_requests(struct mpi3mr_softc *sc)
4944 {
4945 	struct mpi3mr_cmd *cmd;
4946 	int i, j, nsegs, ret;
4947 
4948 	nsegs = MPI3MR_SG_DEPTH;
4949 	ret = bus_dma_tag_create( sc->mpi3mr_parent_dmat,    /* parent */
4950 				1, 0,			/* algnmnt, boundary */
4951 				sc->dma_loaddr,		/* lowaddr */
4952 				BUS_SPACE_MAXADDR,	/* highaddr */
4953 				NULL, NULL,		/* filter, filterarg */
4954 				BUS_SPACE_MAXSIZE,	/* maxsize */
4955                                 nsegs,			/* nsegments */
4956 				BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
4957                                 BUS_DMA_ALLOCNOW,	/* flags */
4958                                 busdma_lock_mutex,	/* lockfunc */
4959 				&sc->io_lock,	/* lockarg */
4960 				&sc->buffer_dmat);
4961 	if (ret) {
4962 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate buffer DMA tag ret: %d\n", ret);
4963 		return (ENOMEM);
4964         }
4965 
4966 	/*
4967 	 * sc->cmd_list is an array of struct mpi3mr_cmd pointers.
4968 	 * Allocate the dynamic array first and then allocate individual
4969 	 * commands.
4970 	 */
4971 	sc->cmd_list = malloc(sizeof(struct mpi3mr_cmd *) * sc->max_host_ios,
4972 	    M_MPI3MR, M_NOWAIT | M_ZERO);
4973 
4974 	if (!sc->cmd_list) {
4975 		device_printf(sc->mpi3mr_dev, "Cannot alloc memory for mpt_cmd_list.\n");
4976 		return (ENOMEM);
4977 	}
4978 
4979 	for (i = 0; i < sc->max_host_ios; i++) {
4980 		sc->cmd_list[i] = malloc(sizeof(struct mpi3mr_cmd),
4981 		    M_MPI3MR, M_NOWAIT | M_ZERO);
4982 		if (!sc->cmd_list[i]) {
4983 			for (j = 0; j < i; j++)
4984 				free(sc->cmd_list[j], M_MPI3MR);
4985 			free(sc->cmd_list, M_MPI3MR);
4986 			sc->cmd_list = NULL;
4987 			return (ENOMEM);
4988 		}
4989 	}
4990 
4991 	for (i = 1; i < sc->max_host_ios; i++) {
4992 		cmd = sc->cmd_list[i];
4993 		cmd->hosttag = i;
4994 		cmd->sc = sc;
4995 		cmd->state = MPI3MR_CMD_STATE_BUSY;
4996 		callout_init_mtx(&cmd->callout, &sc->mpi3mr_mtx, 0);
4997 		cmd->ccb = NULL;
4998 		TAILQ_INSERT_TAIL(&(sc->cmd_list_head), cmd, next);
4999 		if (bus_dmamap_create(sc->buffer_dmat, 0, &cmd->dmamap))
5000 			return ENOMEM;
5001 	}
5002 	return (0);
5003 }
5004 
5005 /*
5006  * mpi3mr_get_command:		Get a coomand structure from free command pool
5007  * @sc:				Adapter soft instance
5008  * Return:			MPT command reference
5009  *
5010  * This function returns an MPT command to the caller.
5011  */
5012 struct mpi3mr_cmd *
5013 mpi3mr_get_command(struct mpi3mr_softc *sc)
5014 {
5015 	struct mpi3mr_cmd *cmd = NULL;
5016 
5017 	mtx_lock(&sc->cmd_pool_lock);
5018 	if (!TAILQ_EMPTY(&sc->cmd_list_head)) {
5019 		cmd = TAILQ_FIRST(&sc->cmd_list_head);
5020 		TAILQ_REMOVE(&sc->cmd_list_head, cmd, next);
5021 	} else {
5022 		goto out;
5023 	}
5024 
5025 	mpi3mr_dprint(sc, MPI3MR_TRACE, "Get command SMID: 0x%x\n", cmd->hosttag);
5026 
5027 	memset((uint8_t *)&cmd->io_request, 0, MPI3MR_AREQ_FRAME_SZ);
5028 	cmd->data_dir = 0;
5029 	cmd->ccb = NULL;
5030 	cmd->targ = NULL;
5031 	cmd->state = MPI3MR_CMD_STATE_BUSY;
5032 	cmd->data = NULL;
5033 	cmd->length = 0;
5034 out:
5035 	mtx_unlock(&sc->cmd_pool_lock);
5036 	return cmd;
5037 }
5038 
5039 /*
5040  * mpi3mr_release_command:	Return a cmd to free command pool
5041  * input:			Command packet for return to free command pool
5042  *
5043  * This function returns an MPT command to the free command list.
5044  */
5045 void
5046 mpi3mr_release_command(struct mpi3mr_cmd *cmd)
5047 {
5048 	struct mpi3mr_softc *sc = cmd->sc;
5049 
5050 	mtx_lock(&sc->cmd_pool_lock);
5051 	TAILQ_INSERT_HEAD(&(sc->cmd_list_head), cmd, next);
5052 	cmd->state = MPI3MR_CMD_STATE_FREE;
5053 	cmd->req_qidx = 0;
5054 	mpi3mr_dprint(sc, MPI3MR_TRACE, "Release command SMID: 0x%x\n", cmd->hosttag);
5055 	mtx_unlock(&sc->cmd_pool_lock);
5056 
5057 	return;
5058 }
5059 
5060  /**
5061  * mpi3mr_free_ioctl_dma_memory - free memory for ioctl dma
5062  * @sc: Adapter instance reference
5063  *
5064  * Free the DMA memory allocated for IOCTL handling purpose.
5065  *
5066  * Return: None
5067  */
5068 static void mpi3mr_free_ioctl_dma_memory(struct mpi3mr_softc *sc)
5069 {
5070 	U16 i;
5071 	struct dma_memory_desc *mem_desc;
5072 
5073 	for (i=0; i<MPI3MR_NUM_IOCTL_SGE; i++) {
5074 		mem_desc = &sc->ioctl_sge[i];
5075 		if (mem_desc->addr && mem_desc->dma_addr) {
5076 			bus_dmamap_unload(mem_desc->tag, mem_desc->dmamap);
5077 			bus_dmamem_free(mem_desc->tag, mem_desc->addr, mem_desc->dmamap);
5078 			mem_desc->addr = NULL;
5079 			if (mem_desc->tag != NULL)
5080 				bus_dma_tag_destroy(mem_desc->tag);
5081 		}
5082 	}
5083 
5084 	mem_desc = &sc->ioctl_chain_sge;
5085 	if (mem_desc->addr && mem_desc->dma_addr) {
5086 		bus_dmamap_unload(mem_desc->tag, mem_desc->dmamap);
5087 		bus_dmamem_free(mem_desc->tag, mem_desc->addr, mem_desc->dmamap);
5088 		mem_desc->addr = NULL;
5089 		if (mem_desc->tag != NULL)
5090 			bus_dma_tag_destroy(mem_desc->tag);
5091 	}
5092 
5093 	mem_desc = &sc->ioctl_resp_sge;
5094 	if (mem_desc->addr && mem_desc->dma_addr) {
5095 		bus_dmamap_unload(mem_desc->tag, mem_desc->dmamap);
5096 		bus_dmamem_free(mem_desc->tag, mem_desc->addr, mem_desc->dmamap);
5097 		mem_desc->addr = NULL;
5098 		if (mem_desc->tag != NULL)
5099 			bus_dma_tag_destroy(mem_desc->tag);
5100 	}
5101 
5102 	sc->ioctl_sges_allocated = false;
5103 }
5104 
5105 /**
5106  * mpi3mr_alloc_ioctl_dma_memory - Alloc memory for ioctl dma
5107  * @sc: Adapter instance reference
5108  *
5109  * This function allocates dmaable memory required to handle the
5110  * application issued MPI3 IOCTL requests.
5111  *
5112  * Return: None
5113  */
5114 void mpi3mr_alloc_ioctl_dma_memory(struct mpi3mr_softc *sc)
5115 {
5116 	struct dma_memory_desc *mem_desc;
5117 	U16 i;
5118 
5119 	for (i=0; i<MPI3MR_NUM_IOCTL_SGE; i++) {
5120 		mem_desc = &sc->ioctl_sge[i];
5121 		mem_desc->size = MPI3MR_IOCTL_SGE_SIZE;
5122 
5123 		if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,    /* parent */
5124 					4, 0,			/* algnmnt, boundary */
5125 					sc->dma_loaddr,		/* lowaddr */
5126 					BUS_SPACE_MAXADDR,	/* highaddr */
5127 					NULL, NULL,		/* filter, filterarg */
5128 					mem_desc->size,		/* maxsize */
5129 					1,			/* nsegments */
5130 					mem_desc->size,		/* maxsegsize */
5131 					0,			/* flags */
5132 					NULL, NULL,		/* lockfunc, lockarg */
5133 					&mem_desc->tag)) {
5134 			mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate request DMA tag\n");
5135 			goto out_failed;
5136 		}
5137 
5138 		if (bus_dmamem_alloc(mem_desc->tag, (void **)&mem_desc->addr,
5139 		    BUS_DMA_NOWAIT, &mem_desc->dmamap)) {
5140 			mpi3mr_dprint(sc, MPI3MR_ERROR, "%s: Cannot allocate replies memory\n", __func__);
5141 			goto out_failed;
5142 		}
5143 		bzero(mem_desc->addr, mem_desc->size);
5144 		bus_dmamap_load(mem_desc->tag, mem_desc->dmamap, mem_desc->addr, mem_desc->size,
5145 		    mpi3mr_memaddr_cb, &mem_desc->dma_addr, BUS_DMA_NOWAIT);
5146 
5147 		if (!mem_desc->addr)
5148 			goto out_failed;
5149 	}
5150 
5151 	mem_desc = &sc->ioctl_chain_sge;
5152 	mem_desc->size = MPI3MR_4K_PGSZ;
5153 	if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,    /* parent */
5154 				4, 0,			/* algnmnt, boundary */
5155 				sc->dma_loaddr,		/* lowaddr */
5156 				BUS_SPACE_MAXADDR,	/* highaddr */
5157 				NULL, NULL,		/* filter, filterarg */
5158 				mem_desc->size,		/* maxsize */
5159 				1,			/* nsegments */
5160 				mem_desc->size,		/* maxsegsize */
5161 				0,			/* flags */
5162 				NULL, NULL,		/* lockfunc, lockarg */
5163 				&mem_desc->tag)) {
5164 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate request DMA tag\n");
5165 		goto out_failed;
5166 	}
5167 
5168 	if (bus_dmamem_alloc(mem_desc->tag, (void **)&mem_desc->addr,
5169 	    BUS_DMA_NOWAIT, &mem_desc->dmamap)) {
5170 		mpi3mr_dprint(sc, MPI3MR_ERROR, "%s: Cannot allocate replies memory\n", __func__);
5171 		goto out_failed;
5172 	}
5173 	bzero(mem_desc->addr, mem_desc->size);
5174 	bus_dmamap_load(mem_desc->tag, mem_desc->dmamap, mem_desc->addr, mem_desc->size,
5175 	    mpi3mr_memaddr_cb, &mem_desc->dma_addr, BUS_DMA_NOWAIT);
5176 
5177 	if (!mem_desc->addr)
5178 		goto out_failed;
5179 
5180 	mem_desc = &sc->ioctl_resp_sge;
5181 	mem_desc->size = MPI3MR_4K_PGSZ;
5182 	if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,    /* parent */
5183 				4, 0,			/* algnmnt, boundary */
5184 				sc->dma_loaddr,		/* lowaddr */
5185 				BUS_SPACE_MAXADDR,	/* highaddr */
5186 				NULL, NULL,		/* filter, filterarg */
5187 				mem_desc->size,		/* maxsize */
5188 				1,			/* nsegments */
5189 				mem_desc->size,		/* maxsegsize */
5190 				0,			/* flags */
5191 				NULL, NULL,		/* lockfunc, lockarg */
5192 				&mem_desc->tag)) {
5193 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate request DMA tag\n");
5194 		goto out_failed;
5195 	}
5196 
5197 	if (bus_dmamem_alloc(mem_desc->tag, (void **)&mem_desc->addr,
5198 	    BUS_DMA_NOWAIT, &mem_desc->dmamap)) {
5199 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate replies memory\n");
5200 		goto out_failed;
5201 	}
5202 	bzero(mem_desc->addr, mem_desc->size);
5203 	bus_dmamap_load(mem_desc->tag, mem_desc->dmamap, mem_desc->addr, mem_desc->size,
5204 	    mpi3mr_memaddr_cb, &mem_desc->dma_addr, BUS_DMA_NOWAIT);
5205 
5206 	if (!mem_desc->addr)
5207 		goto out_failed;
5208 
5209 	sc->ioctl_sges_allocated = true;
5210 
5211 	return;
5212 out_failed:
5213 	printf("cannot allocate DMA memory for the mpt commands"
5214 	    "  from the applications, application interface for MPT command is disabled\n");
5215 	mpi3mr_free_ioctl_dma_memory(sc);
5216 }
5217 
5218 void
5219 mpi3mr_destory_mtx(struct mpi3mr_softc *sc)
5220 {
5221 	int i;
5222 	struct mpi3mr_op_req_queue *op_req_q;
5223 	struct mpi3mr_op_reply_queue *op_reply_q;
5224 
5225 	if (sc->admin_reply) {
5226 		if (mtx_initialized(&sc->admin_reply_lock))
5227 			mtx_destroy(&sc->admin_reply_lock);
5228 	}
5229 
5230 	if (sc->op_reply_q) {
5231 		for(i = 0; i < sc->num_queues; i++) {
5232 			op_reply_q = sc->op_reply_q + i;
5233 			if (mtx_initialized(&op_reply_q->q_lock))
5234 				mtx_destroy(&op_reply_q->q_lock);
5235 		}
5236 	}
5237 
5238 	if (sc->op_req_q) {
5239 		for(i = 0; i < sc->num_queues; i++) {
5240 			op_req_q = sc->op_req_q + i;
5241 			if (mtx_initialized(&op_req_q->q_lock))
5242 				mtx_destroy(&op_req_q->q_lock);
5243 		}
5244 	}
5245 
5246 	if (mtx_initialized(&sc->init_cmds.completion.lock))
5247 		mtx_destroy(&sc->init_cmds.completion.lock);
5248 
5249 	if (mtx_initialized(&sc->ioctl_cmds.completion.lock))
5250 		mtx_destroy(&sc->ioctl_cmds.completion.lock);
5251 
5252 	if (mtx_initialized(&sc->host_tm_cmds.completion.lock))
5253 		mtx_destroy(&sc->host_tm_cmds.completion.lock);
5254 
5255 	for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
5256 		if (mtx_initialized(&sc->dev_rmhs_cmds[i].completion.lock))
5257 			mtx_destroy(&sc->dev_rmhs_cmds[i].completion.lock);
5258 	}
5259 
5260 	if (mtx_initialized(&sc->reset_mutex))
5261 		mtx_destroy(&sc->reset_mutex);
5262 
5263 	if (mtx_initialized(&sc->target_lock))
5264 		mtx_destroy(&sc->target_lock);
5265 
5266 	if (mtx_initialized(&sc->fwevt_lock))
5267 		mtx_destroy(&sc->fwevt_lock);
5268 
5269 	if (mtx_initialized(&sc->cmd_pool_lock))
5270 		mtx_destroy(&sc->cmd_pool_lock);
5271 
5272 	if (mtx_initialized(&sc->reply_free_q_lock))
5273 		mtx_destroy(&sc->reply_free_q_lock);
5274 
5275 	if (mtx_initialized(&sc->sense_buf_q_lock))
5276 		mtx_destroy(&sc->sense_buf_q_lock);
5277 
5278 	if (mtx_initialized(&sc->chain_buf_lock))
5279 		mtx_destroy(&sc->chain_buf_lock);
5280 
5281 	if (mtx_initialized(&sc->admin_req_lock))
5282 		mtx_destroy(&sc->admin_req_lock);
5283 
5284 	if (mtx_initialized(&sc->mpi3mr_mtx))
5285 		mtx_destroy(&sc->mpi3mr_mtx);
5286 }
5287 
5288 /**
5289  * mpi3mr_free_mem - Freeup adapter level data structures
5290  * @sc: Adapter reference
5291  *
5292  * Return: Nothing.
5293  */
5294 void
5295 mpi3mr_free_mem(struct mpi3mr_softc *sc)
5296 {
5297 	int i;
5298 	struct mpi3mr_op_req_queue *op_req_q;
5299 	struct mpi3mr_op_reply_queue *op_reply_q;
5300 	struct mpi3mr_irq_context *irq_ctx;
5301 
5302 	if (sc->cmd_list) {
5303 		for (i = 0; i < sc->max_host_ios; i++) {
5304 			free(sc->cmd_list[i], M_MPI3MR);
5305 		}
5306 		free(sc->cmd_list, M_MPI3MR);
5307 		sc->cmd_list = NULL;
5308 	}
5309 
5310 	if (sc->pel_seq_number && sc->pel_seq_number_dma) {
5311 		bus_dmamap_unload(sc->pel_seq_num_dmatag, sc->pel_seq_num_dmamap);
5312 		bus_dmamem_free(sc->pel_seq_num_dmatag, sc->pel_seq_number, sc->pel_seq_num_dmamap);
5313 		sc->pel_seq_number = NULL;
5314 		if (sc->pel_seq_num_dmatag != NULL)
5315 			bus_dma_tag_destroy(sc->pel_seq_num_dmatag);
5316 	}
5317 
5318 	if (sc->throttle_groups) {
5319 		free(sc->throttle_groups, M_MPI3MR);
5320 		sc->throttle_groups = NULL;
5321 	}
5322 
5323 	/* Free up operational queues*/
5324 	if (sc->op_req_q) {
5325 		for (i = 0; i < sc->num_queues; i++) {
5326 			op_req_q = sc->op_req_q + i;
5327 			if (op_req_q->q_base && op_req_q->q_base_phys) {
5328 				bus_dmamap_unload(op_req_q->q_base_tag, op_req_q->q_base_dmamap);
5329 				bus_dmamem_free(op_req_q->q_base_tag, op_req_q->q_base, op_req_q->q_base_dmamap);
5330 				op_req_q->q_base = NULL;
5331 				if (op_req_q->q_base_tag != NULL)
5332 					bus_dma_tag_destroy(op_req_q->q_base_tag);
5333 			}
5334 		}
5335 		free(sc->op_req_q, M_MPI3MR);
5336 		sc->op_req_q = NULL;
5337 	}
5338 
5339 	if (sc->op_reply_q) {
5340 		for (i = 0; i < sc->num_queues; i++) {
5341 			op_reply_q = sc->op_reply_q + i;
5342 			if (op_reply_q->q_base && op_reply_q->q_base_phys) {
5343 				bus_dmamap_unload(op_reply_q->q_base_tag, op_reply_q->q_base_dmamap);
5344 				bus_dmamem_free(op_reply_q->q_base_tag, op_reply_q->q_base, op_reply_q->q_base_dmamap);
5345 				op_reply_q->q_base = NULL;
5346 				if (op_reply_q->q_base_tag != NULL)
5347 					bus_dma_tag_destroy(op_reply_q->q_base_tag);
5348 			}
5349 		}
5350 		free(sc->op_reply_q, M_MPI3MR);
5351 		sc->op_reply_q = NULL;
5352 	}
5353 
5354 	/* Free up chain buffers*/
5355 	if (sc->chain_sgl_list) {
5356 		for (i = 0; i < sc->chain_buf_count; i++) {
5357 			if (sc->chain_sgl_list[i].buf && sc->chain_sgl_list[i].buf_phys) {
5358 				bus_dmamap_unload(sc->chain_sgl_list_tag, sc->chain_sgl_list[i].buf_dmamap);
5359 				bus_dmamem_free(sc->chain_sgl_list_tag, sc->chain_sgl_list[i].buf,
5360 						sc->chain_sgl_list[i].buf_dmamap);
5361 				sc->chain_sgl_list[i].buf = NULL;
5362 			}
5363 		}
5364 		if (sc->chain_sgl_list_tag != NULL)
5365 			bus_dma_tag_destroy(sc->chain_sgl_list_tag);
5366 		free(sc->chain_sgl_list, M_MPI3MR);
5367 		sc->chain_sgl_list = NULL;
5368 	}
5369 
5370 	if (sc->chain_bitmap) {
5371 		free(sc->chain_bitmap, M_MPI3MR);
5372 		sc->chain_bitmap = NULL;
5373 	}
5374 
5375 	for (i = 0; i < sc->msix_count; i++) {
5376 		irq_ctx = sc->irq_ctx + i;
5377 		if (irq_ctx)
5378 			irq_ctx->op_reply_q = NULL;
5379 	}
5380 
5381 	/* Free reply_buf_tag */
5382 	if (sc->reply_buf && sc->reply_buf_phys) {
5383 		bus_dmamap_unload(sc->reply_buf_tag, sc->reply_buf_dmamap);
5384 		bus_dmamem_free(sc->reply_buf_tag, sc->reply_buf,
5385 				sc->reply_buf_dmamap);
5386 		sc->reply_buf = NULL;
5387 		if (sc->reply_buf_tag != NULL)
5388 			bus_dma_tag_destroy(sc->reply_buf_tag);
5389 	}
5390 
5391 	/* Free reply_free_q_tag */
5392 	if (sc->reply_free_q && sc->reply_free_q_phys) {
5393 		bus_dmamap_unload(sc->reply_free_q_tag, sc->reply_free_q_dmamap);
5394 		bus_dmamem_free(sc->reply_free_q_tag, sc->reply_free_q,
5395 				sc->reply_free_q_dmamap);
5396 		sc->reply_free_q = NULL;
5397 		if (sc->reply_free_q_tag != NULL)
5398 			bus_dma_tag_destroy(sc->reply_free_q_tag);
5399 	}
5400 
5401 	/* Free sense_buf_tag */
5402 	if (sc->sense_buf && sc->sense_buf_phys) {
5403 		bus_dmamap_unload(sc->sense_buf_tag, sc->sense_buf_dmamap);
5404 		bus_dmamem_free(sc->sense_buf_tag, sc->sense_buf,
5405 				sc->sense_buf_dmamap);
5406 		sc->sense_buf = NULL;
5407 		if (sc->sense_buf_tag != NULL)
5408 			bus_dma_tag_destroy(sc->sense_buf_tag);
5409 	}
5410 
5411 	/* Free sense_buf_q_tag */
5412 	if (sc->sense_buf_q && sc->sense_buf_q_phys) {
5413 		bus_dmamap_unload(sc->sense_buf_q_tag, sc->sense_buf_q_dmamap);
5414 		bus_dmamem_free(sc->sense_buf_q_tag, sc->sense_buf_q,
5415 				sc->sense_buf_q_dmamap);
5416 		sc->sense_buf_q = NULL;
5417 		if (sc->sense_buf_q_tag != NULL)
5418 			bus_dma_tag_destroy(sc->sense_buf_q_tag);
5419 	}
5420 
5421 	/* Free up internal(non-IO) commands*/
5422 	if (sc->init_cmds.reply) {
5423 		free(sc->init_cmds.reply, M_MPI3MR);
5424 		sc->init_cmds.reply = NULL;
5425 	}
5426 
5427 	if (sc->ioctl_cmds.reply) {
5428 		free(sc->ioctl_cmds.reply, M_MPI3MR);
5429 		sc->ioctl_cmds.reply = NULL;
5430 	}
5431 
5432 	if (sc->pel_cmds.reply) {
5433 		free(sc->pel_cmds.reply, M_MPI3MR);
5434 		sc->pel_cmds.reply = NULL;
5435 	}
5436 
5437 	if (sc->pel_abort_cmd.reply) {
5438 		free(sc->pel_abort_cmd.reply, M_MPI3MR);
5439 		sc->pel_abort_cmd.reply = NULL;
5440 	}
5441 
5442 	if (sc->host_tm_cmds.reply) {
5443 		free(sc->host_tm_cmds.reply, M_MPI3MR);
5444 		sc->host_tm_cmds.reply = NULL;
5445 	}
5446 
5447 	if (sc->log_data_buffer) {
5448 		free(sc->log_data_buffer, M_MPI3MR);
5449 		sc->log_data_buffer = NULL;
5450 	}
5451 
5452 	for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
5453 		if (sc->dev_rmhs_cmds[i].reply) {
5454 			free(sc->dev_rmhs_cmds[i].reply, M_MPI3MR);
5455 			sc->dev_rmhs_cmds[i].reply = NULL;
5456 		}
5457 	}
5458 
5459 	for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) {
5460 		if (sc->evtack_cmds[i].reply) {
5461 			free(sc->evtack_cmds[i].reply, M_MPI3MR);
5462 			sc->evtack_cmds[i].reply = NULL;
5463 		}
5464 	}
5465 
5466 	if (sc->removepend_bitmap) {
5467 		free(sc->removepend_bitmap, M_MPI3MR);
5468 		sc->removepend_bitmap = NULL;
5469 	}
5470 
5471 	if (sc->devrem_bitmap) {
5472 		free(sc->devrem_bitmap, M_MPI3MR);
5473 		sc->devrem_bitmap = NULL;
5474 	}
5475 
5476 	if (sc->evtack_cmds_bitmap) {
5477 		free(sc->evtack_cmds_bitmap, M_MPI3MR);
5478 		sc->evtack_cmds_bitmap = NULL;
5479 	}
5480 
5481 	/* Free Admin reply*/
5482 	if (sc->admin_reply && sc->admin_reply_phys) {
5483 		bus_dmamap_unload(sc->admin_reply_tag, sc->admin_reply_dmamap);
5484 		bus_dmamem_free(sc->admin_reply_tag, sc->admin_reply,
5485 				sc->admin_reply_dmamap);
5486 		sc->admin_reply = NULL;
5487 		if (sc->admin_reply_tag != NULL)
5488 			bus_dma_tag_destroy(sc->admin_reply_tag);
5489 	}
5490 
5491 	/* Free Admin request*/
5492 	if (sc->admin_req && sc->admin_req_phys) {
5493 		bus_dmamap_unload(sc->admin_req_tag, sc->admin_req_dmamap);
5494 		bus_dmamem_free(sc->admin_req_tag, sc->admin_req,
5495 				sc->admin_req_dmamap);
5496 		sc->admin_req = NULL;
5497 		if (sc->admin_req_tag != NULL)
5498 			bus_dma_tag_destroy(sc->admin_req_tag);
5499 	}
5500 	mpi3mr_free_ioctl_dma_memory(sc);
5501 
5502 }
5503 
5504 /**
 * mpi3mr_drv_cmd_comp_reset - Flush an internal driver command
 * @sc: Adapter instance reference
 * @cmdptr: Internal command tracker
 *
 * Complete an internal driver command with state indicating it
 * is completed due to reset.
5511  *
5512  * Return: Nothing.
5513  */
5514 static inline void mpi3mr_drv_cmd_comp_reset(struct mpi3mr_softc *sc,
5515 	struct mpi3mr_drvr_cmd *cmdptr)
5516 {
5517 	if (cmdptr->state & MPI3MR_CMD_PENDING) {
5518 		cmdptr->state |= MPI3MR_CMD_RESET;
5519 		cmdptr->state &= ~MPI3MR_CMD_PENDING;
5520 		if (cmdptr->is_waiting) {
5521 			complete(&cmdptr->completion);
5522 			cmdptr->is_waiting = 0;
5523 		} else if (cmdptr->callback)
5524 			cmdptr->callback(sc, cmdptr);
5525 	}
5526 }
5527 
5528 /**
5529  * mpi3mr_flush_drv_cmds - Flush internal driver commands
5530  * @sc: Adapter instance reference
5531  *
5532  * Flush all internal driver commands post reset
5533  *
5534  * Return: Nothing.
5535  */
5536 static void mpi3mr_flush_drv_cmds(struct mpi3mr_softc *sc)
5537 {
5538 	int i = 0;
5539 	struct mpi3mr_drvr_cmd *cmdptr;
5540 
5541 	cmdptr = &sc->init_cmds;
5542 	mpi3mr_drv_cmd_comp_reset(sc, cmdptr);
5543 
5544 	cmdptr = &sc->ioctl_cmds;
5545 	mpi3mr_drv_cmd_comp_reset(sc, cmdptr);
5546 
5547 	cmdptr = &sc->host_tm_cmds;
5548 	mpi3mr_drv_cmd_comp_reset(sc, cmdptr);
5549 
5550 	for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
5551 		cmdptr = &sc->dev_rmhs_cmds[i];
5552 		mpi3mr_drv_cmd_comp_reset(sc, cmdptr);
5553 	}
5554 
5555 	for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) {
5556 		cmdptr = &sc->evtack_cmds[i];
5557 		mpi3mr_drv_cmd_comp_reset(sc, cmdptr);
5558 	}
5559 
5560 	cmdptr = &sc->pel_cmds;
5561 	mpi3mr_drv_cmd_comp_reset(sc, cmdptr);
5562 
5563 	cmdptr = &sc->pel_abort_cmd;
5564 	mpi3mr_drv_cmd_comp_reset(sc, cmdptr);
5565 }
5566 
5567 
5568 /**
5569  * mpi3mr_memset_buffers - memset memory for a controller
5570  * @sc: Adapter instance reference
5571  *
5572  * clear all the memory allocated for a controller, typically
5573  * called post reset to reuse the memory allocated during the
5574  * controller init.
5575  *
5576  * Return: Nothing.
5577  */
static void mpi3mr_memset_buffers(struct mpi3mr_softc *sc)
{
	U16 i;
	struct mpi3mr_throttle_group_info *tg;

	/* Wipe the admin request/reply queues; the same DMA memory is
	 * reused when the controller is reinitialized after reset. */
	memset(sc->admin_req, 0, sc->admin_req_q_sz);
	memset(sc->admin_reply, 0, sc->admin_reply_q_sz);

	/* Clear the reply buffers of every internal (non-IO) command. */
	memset(sc->init_cmds.reply, 0, sc->reply_sz);
	memset(sc->ioctl_cmds.reply, 0, sc->reply_sz);
	memset(sc->host_tm_cmds.reply, 0, sc->reply_sz);
	memset(sc->pel_cmds.reply, 0, sc->reply_sz);
	memset(sc->pel_abort_cmd.reply, 0, sc->reply_sz);
	for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++)
		memset(sc->dev_rmhs_cmds[i].reply, 0, sc->reply_sz);
	for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++)
		memset(sc->evtack_cmds[i].reply, 0, sc->reply_sz);
	/* Clear the command-tracking bitmaps. */
	memset(sc->removepend_bitmap, 0, sc->dev_handle_bitmap_sz);
	memset(sc->devrem_bitmap, 0, sc->devrem_bitmap_sz);
	memset(sc->evtack_cmds_bitmap, 0, sc->evtack_cmds_bitmap_sz);

	/* Reset per-queue bookkeeping and zero the operational reply and
	 * request queue memory. qid 0 marks the queue as unconfigured. */
	for (i = 0; i < sc->num_queues; i++) {
		sc->op_reply_q[i].qid = 0;
		sc->op_reply_q[i].ci = 0;
		sc->op_reply_q[i].num_replies = 0;
		sc->op_reply_q[i].ephase = 0;
		mpi3mr_atomic_set(&sc->op_reply_q[i].pend_ios, 0);
		memset(sc->op_reply_q[i].q_base, 0, sc->op_reply_q[i].qsz);

		sc->op_req_q[i].ci = 0;
		sc->op_req_q[i].pi = 0;
		sc->op_req_q[i].num_reqs = 0;
		sc->op_req_q[i].qid = 0;
		sc->op_req_q[i].reply_qid = 0;
		memset(sc->op_req_q[i].q_base, 0, sc->op_req_q[i].qsz);
	}

	/* Reset the IO-throttling accounting, per-adapter and per-group. */
	mpi3mr_atomic_set(&sc->pend_large_data_sz, 0);
	if (sc->throttle_groups) {
		tg = sc->throttle_groups;
		for (i = 0; i < sc->num_io_throttle_group; i++, tg++) {
			tg->id = 0;
			tg->fw_qd = 0;
			tg->modified_qd = 0;
			tg->io_divert= 0;
			tg->high = 0;
			tg->low = 0;
			mpi3mr_atomic_set(&tg->pend_large_data_sz, 0);
		}
 	}
}
5629 
5630 /**
5631  * mpi3mr_invalidate_devhandles -Invalidate device handles
5632  * @sc: Adapter instance reference
5633  *
 * Invalidate the device handles in the target device structures.
 * Called post reset prior to reinitializing the controller.
5636  *
5637  * Return: Nothing.
5638  */
5639 static void mpi3mr_invalidate_devhandles(struct mpi3mr_softc *sc)
5640 {
5641 	struct mpi3mr_target *target = NULL;
5642 
5643 	mtx_lock_spin(&sc->target_lock);
5644 	TAILQ_FOREACH(target, &sc->cam_sc->tgt_list, tgt_next) {
5645 		if (target) {
5646 			target->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
5647 			target->io_throttle_enabled = 0;
5648 			target->io_divert = 0;
5649 			target->throttle_group = NULL;
5650 		}
5651 	}
5652 	mtx_unlock_spin(&sc->target_lock);
5653 }
5654 
5655 /**
5656  * mpi3mr_rfresh_tgtdevs - Refresh target device exposure
5657  * @sc: Adapter instance reference
5658  *
5659  * This is executed post controller reset to identify any
5660  * missing devices during reset and remove from the upper layers
5661  * or expose any newly detected device to the upper layers.
5662  *
5663  * Return: Nothing.
5664  */
5665 
5666 static void mpi3mr_rfresh_tgtdevs(struct mpi3mr_softc *sc)
5667 {
5668 	struct mpi3mr_target *target = NULL;
5669 	struct mpi3mr_target *target_temp = NULL;
5670 
5671 	TAILQ_FOREACH_SAFE(target, &sc->cam_sc->tgt_list, tgt_next, target_temp) {
5672 		if (target->dev_handle == MPI3MR_INVALID_DEV_HANDLE) {
5673 			if (target->exposed_to_os)
5674 				mpi3mr_remove_device_from_os(sc, target->dev_handle);
5675 			mpi3mr_remove_device_from_list(sc, target, true);
5676 		} else if (target->is_hidden && target->exposed_to_os) {
5677 				mpi3mr_remove_device_from_os(sc, target->dev_handle);
5678 		}
5679 	}
5680 
5681 	TAILQ_FOREACH(target, &sc->cam_sc->tgt_list, tgt_next) {
5682 		if ((target->dev_handle != MPI3MR_INVALID_DEV_HANDLE) &&
5683 		    !target->is_hidden && !target->exposed_to_os) {
5684 			mpi3mr_add_device(sc, target->per_id);
5685 		}
5686 	}
5687 
5688 }
5689 
5690 static void mpi3mr_flush_io(struct mpi3mr_softc *sc)
5691 {
5692 	int i;
5693 	struct mpi3mr_cmd *cmd = NULL;
5694 	union ccb *ccb = NULL;
5695 
5696 	for (i = 0; i < sc->max_host_ios; i++) {
5697 		cmd = sc->cmd_list[i];
5698 
5699 		if (cmd && cmd->ccb) {
5700 			if (cmd->callout_owner) {
5701 				ccb = (union ccb *)(cmd->ccb);
5702 				ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
5703 				mpi3mr_atomic_dec(&sc->fw_outstanding);
5704 				mpi3mr_atomic_dec(&cmd->targ->outstanding);
5705 				mpi3mr_cmd_done(sc, cmd);
5706 			} else {
5707 				cmd->ccb = NULL;
5708 				mpi3mr_release_command(cmd);
5709 			}
5710 		}
5711 	}
5712 }
5713 /**
5714  * mpi3mr_clear_reset_history - Clear reset history
5715  * @sc: Adapter instance reference
5716  *
5717  * Write the reset history bit in IOC Status to clear the bit,
5718  * if it is already set.
5719  *
5720  * Return: Nothing.
5721  */
5722 static inline void mpi3mr_clear_reset_history(struct mpi3mr_softc *sc)
5723 {
5724 	U32 ioc_status;
5725 
5726 	ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);
5727 	if (ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)
5728 		mpi3mr_regwrite(sc, MPI3_SYSIF_IOC_STATUS_OFFSET, ioc_status);
5729 }
5730 
5731 /**
5732  * mpi3mr_set_diagsave - Set diag save bit for snapdump
5733  * @sc: Adapter reference
5734  *
5735  * Set diag save bit in IOC configuration register to enable
5736  * snapdump.
5737  *
5738  * Return: Nothing.
5739  */
5740 static inline void mpi3mr_set_diagsave(struct mpi3mr_softc *sc)
5741 {
5742 	U32 ioc_config;
5743 
5744 	ioc_config =
5745 	    mpi3mr_regread(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET);
5746 	ioc_config |= MPI3_SYSIF_IOC_CONFIG_DIAG_SAVE;
5747 	mpi3mr_regwrite(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET, ioc_config);
5748 }
5749 
5750 /**
5751  * mpi3mr_issue_reset - Issue reset to the controller
5752  * @sc: Adapter reference
5753  * @reset_type: Reset type
5754  * @reset_reason: Reset reason code
5755  *
5756  * Unlock the host diagnostic registers and write the specific
5757  * reset type to that, wait for reset acknowledgement from the
5758  * controller, if the reset is not successful retry for the
5759  * predefined number of times.
5760  *
5761  * Return: 0 on success, non-zero on failure.
5762  */
5763 static int mpi3mr_issue_reset(struct mpi3mr_softc *sc, U16 reset_type,
5764 	U32 reset_reason)
5765 {
5766 	int retval = -1;
5767 	U8 unlock_retry_count = 0;
5768 	U32 host_diagnostic, ioc_status, ioc_config;
5769 	U32 timeout = MPI3MR_RESET_ACK_TIMEOUT * 10;
5770 
5771 	if ((reset_type != MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET) &&
5772 	    (reset_type != MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT))
5773 		return retval;
5774 	if (sc->unrecoverable)
5775 		return retval;
5776 
5777 	if (reset_reason == MPI3MR_RESET_FROM_FIRMWARE) {
5778 		retval = 0;
5779 		return retval;
5780 	}
5781 
5782 	mpi3mr_dprint(sc, MPI3MR_INFO, "%s reset due to %s(0x%x)\n",
5783 	    mpi3mr_reset_type_name(reset_type),
5784 	    mpi3mr_reset_rc_name(reset_reason), reset_reason);
5785 
5786 	mpi3mr_clear_reset_history(sc);
5787 	do {
5788 		mpi3mr_dprint(sc, MPI3MR_INFO,
5789 		    "Write magic sequence to unlock host diag register (retry=%d)\n",
5790 		    ++unlock_retry_count);
5791 		if (unlock_retry_count >= MPI3MR_HOSTDIAG_UNLOCK_RETRY_COUNT) {
5792 			mpi3mr_dprint(sc, MPI3MR_ERROR,
5793 			    "%s reset failed! due to host diag register unlock failure"
5794 			    "host_diagnostic(0x%08x)\n", mpi3mr_reset_type_name(reset_type),
5795 			    host_diagnostic);
5796 			sc->unrecoverable = 1;
5797 			return retval;
5798 		}
5799 
5800 		mpi3mr_regwrite(sc, MPI3_SYSIF_WRITE_SEQUENCE_OFFSET,
5801 			MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_FLUSH);
5802 		mpi3mr_regwrite(sc, MPI3_SYSIF_WRITE_SEQUENCE_OFFSET,
5803 			MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_1ST);
5804 		mpi3mr_regwrite(sc, MPI3_SYSIF_WRITE_SEQUENCE_OFFSET,
5805 			MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND);
5806 		mpi3mr_regwrite(sc, MPI3_SYSIF_WRITE_SEQUENCE_OFFSET,
5807 			MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_3RD);
5808 		mpi3mr_regwrite(sc, MPI3_SYSIF_WRITE_SEQUENCE_OFFSET,
5809 			MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_4TH);
5810 		mpi3mr_regwrite(sc, MPI3_SYSIF_WRITE_SEQUENCE_OFFSET,
5811 			MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_5TH);
5812 		mpi3mr_regwrite(sc, MPI3_SYSIF_WRITE_SEQUENCE_OFFSET,
5813 			MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_6TH);
5814 
5815 		DELAY(1000); /* delay in usec */
5816 		host_diagnostic = mpi3mr_regread(sc, MPI3_SYSIF_HOST_DIAG_OFFSET);
5817 		mpi3mr_dprint(sc, MPI3MR_INFO,
5818 		    "wrote magic sequence: retry_count(%d), host_diagnostic(0x%08x)\n",
5819 		    unlock_retry_count, host_diagnostic);
5820 	} while (!(host_diagnostic & MPI3_SYSIF_HOST_DIAG_DIAG_WRITE_ENABLE));
5821 
5822 	mpi3mr_regwrite(sc, MPI3_SYSIF_SCRATCHPAD0_OFFSET, reset_reason);
5823 	mpi3mr_regwrite(sc, MPI3_SYSIF_HOST_DIAG_OFFSET, host_diagnostic | reset_type);
5824 
5825 	if (reset_type == MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET) {
5826 		do {
5827 			ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);
5828 			if (ioc_status &
5829 			    MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) {
5830 				ioc_config =
5831 				    mpi3mr_regread(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET);
5832 				if (mpi3mr_soft_reset_success(ioc_status,
5833 				    ioc_config)) {
5834 					mpi3mr_clear_reset_history(sc);
5835 					retval = 0;
5836 					break;
5837 				}
5838 			}
5839 			DELAY(100 * 1000);
5840 		} while (--timeout);
5841 	} else if (reset_type == MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT) {
5842 		do {
5843 			ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);
5844 			if (mpi3mr_diagfault_success(sc, ioc_status)) {
5845 				retval = 0;
5846 				break;
5847 			}
5848 			DELAY(100 * 1000);
5849 		} while (--timeout);
5850 	}
5851 
5852 	mpi3mr_regwrite(sc, MPI3_SYSIF_WRITE_SEQUENCE_OFFSET,
5853 		MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND);
5854 
5855 	ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);
5856 	ioc_config = mpi3mr_regread(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET);
5857 
5858 	mpi3mr_dprint(sc, MPI3MR_INFO,
5859 	    "IOC Status/Config after %s reset is (0x%x)/(0x%x)\n",
5860 	    !retval ? "successful":"failed", ioc_status,
5861 	    ioc_config);
5862 
5863 	if (retval)
5864 		sc->unrecoverable = 1;
5865 
5866 	return retval;
5867 }
5868 
5869 inline void mpi3mr_cleanup_event_taskq(struct mpi3mr_softc *sc)
5870 {
5871 	/*
5872 	 * Block the taskqueue before draining.  This means any new tasks won't
5873 	 * be queued to the taskqueue worker thread.  But it doesn't stop the
5874 	 * current workers that are running.  taskqueue_drain waits for those
5875 	 * correctly in the case of thread backed taskqueues.  The while loop
5876 	 * ensures that all taskqueue threads have finished their current tasks.
5877 	 */
5878 	taskqueue_block(sc->cam_sc->ev_tq);
5879 	while (taskqueue_cancel(sc->cam_sc->ev_tq, &sc->cam_sc->ev_task, NULL) != 0) {
5880 		taskqueue_drain(sc->cam_sc->ev_tq, &sc->cam_sc->ev_task);
5881 	}
5882 }
5883 
5884 /**
5885  * mpi3mr_soft_reset_handler - Reset the controller
5886  * @sc: Adapter instance reference
5887  * @reset_reason: Reset reason code
 * @snapdump: snapdump enable/disable bit
 *
 * This is a handler for recovering the controller by issuing a soft
 * reset or a diag fault reset. This is a blocking function; while one
 * reset is executing, any other reset attempts are blocked. All
 * IOCTLs/IO will be blocked during the reset. If the controller reset
 * is successful the controller is reinitialized, otherwise the
 * controller is marked as not recoverable.
5897  *
5898  * Return: 0 on success, non-zero on failure.
5899  */
int mpi3mr_soft_reset_handler(struct mpi3mr_softc *sc,
	U32 reset_reason, bool snapdump)
{
	int retval = 0, i = 0;
	enum mpi3mr_iocstate ioc_state;

	mpi3mr_dprint(sc, MPI3MR_INFO, "soft reset invoked: reason code: %s\n",
	    mpi3mr_reset_rc_name(reset_reason));

	/* IOCTL-initiated resets take a snapdump only when explicitly asked. */
	if ((reset_reason == MPI3MR_RESET_FROM_IOCTL) &&
	     (sc->reset.ioctl_reset_snapdump != true))
		snapdump = false;

	mpi3mr_dprint(sc, MPI3MR_INFO,
	    "soft_reset_handler: wait if diag save is in progress\n");
	while (sc->diagsave_timeout)
		DELAY(1000 * 1000);

	/* An unrecoverable controller cannot be reset; record and bail. */
	ioc_state = mpi3mr_get_iocstate(sc);
	if (ioc_state == MRIOC_STATE_UNRECOVERABLE) {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "controller is in unrecoverable state, exit\n");
		sc->reset.type = MPI3MR_NO_RESET;
		sc->reset.reason = MPI3MR_DEFAULT_RESET_REASON;
		sc->reset.status = -1;
		sc->reset.ioctl_reset_snapdump = false;
		return -1;
	}

	/* Only one reset may run at a time; this function is blocking. */
	if (sc->reset_in_progress) {
		mpi3mr_dprint(sc, MPI3MR_INFO, "reset is already in progress, exit\n");
		return -1;
	}

	/* Pause IOs, drain and block the event taskqueue */
	xpt_freeze_simq(sc->cam_sc->sim, 1);

	mpi3mr_cleanup_event_taskq(sc);

	sc->reset_in_progress = 1;
	sc->block_ioctls = 1;

	/* Wait (bounded, 1s steps) for pending IOCTLs to finish; give up
	 * early if the controller has entered the fault state. */
	while (mpi3mr_atomic_read(&sc->pend_ioctls) && (i < PEND_IOCTLS_COMP_WAIT_TIME)) {
		ioc_state = mpi3mr_get_iocstate(sc);
		if (ioc_state == MRIOC_STATE_FAULT)
			break;
		i++;
		if (!(i % 5)) {
			mpi3mr_dprint(sc, MPI3MR_INFO,
			    "[%2ds]waiting for IOCTL to be finished from %s\n", i, __func__);
		}
		DELAY(1000 * 1000);
	}

	/* Mask all events before resetting, except for the reasons where the
	 * firmware/fault path makes that pointless or unsafe. */
	if ((!snapdump) && (reset_reason != MPI3MR_RESET_FROM_FAULT_WATCH) &&
	    (reset_reason != MPI3MR_RESET_FROM_FIRMWARE) &&
	    (reset_reason != MPI3MR_RESET_FROM_CIACTIV_FAULT)) {

		mpi3mr_dprint(sc, MPI3MR_INFO, "Turn off events prior to reset\n");

		for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
			sc->event_masks[i] = -1;
		mpi3mr_issue_event_notification(sc);
	}

	mpi3mr_disable_interrupts(sc);

	/* Capture a snapdump before the reset wipes controller state. */
	if (snapdump)
		mpi3mr_trigger_snapdump(sc, reset_reason);

	retval = mpi3mr_issue_reset(sc,
	    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET, reset_reason);
	if (retval) {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to issue soft reset to the ioc\n");
		goto out;
	}

	/* Flush driver/IO state and scrub buffers for reinitialization. */
	mpi3mr_flush_drv_cmds(sc);
	mpi3mr_flush_io(sc);
	mpi3mr_invalidate_devhandles(sc);
	mpi3mr_memset_buffers(sc);

	if (sc->prepare_for_reset) {
		sc->prepare_for_reset = 0;
		sc->prepare_for_reset_timeout_counter = 0;
	}

	retval = mpi3mr_initialize_ioc(sc, MPI3MR_INIT_TYPE_RESET);
	if (retval) {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "reinit after soft reset failed: reason %d\n",
		    reset_reason);
		goto out;
	}

	/* 10 second settle delay after reinitialization. */
	DELAY((1000 * 1000) * 10);
out:
	if (!retval) {
		/* Success: clear reset bookkeeping, refresh device exposure,
		 * unblock IOCTLs, and restart PEL wait if one was pending. */
		sc->diagsave_timeout = 0;
		sc->reset_in_progress = 0;
		mpi3mr_rfresh_tgtdevs(sc);
		sc->ts_update_counter = 0;
		sc->block_ioctls = 0;
		sc->pel_abort_requested = 0;
		if (sc->pel_wait_pend) {
			sc->pel_cmds.retry_count = 0;
			mpi3mr_issue_pel_wait(sc, &sc->pel_cmds);
			mpi3mr_app_send_aen(sc);
		}
	} else {
		/* Failure: force a diag fault (for debug data) and mark the
		 * controller unrecoverable. */
		mpi3mr_issue_reset(sc,
		    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, reset_reason);
		sc->unrecoverable = 1;
		sc->reset_in_progress = 0;
	}

	mpi3mr_dprint(sc, MPI3MR_INFO, "Soft Reset: %s\n", ((retval == 0) ? "SUCCESS" : "FAILED"));

	/* Resume event processing and release the frozen SIM queue. */
	taskqueue_unblock(sc->cam_sc->ev_tq);
	xpt_release_simq(sc->cam_sc->sim, 1);

	sc->reset.type = MPI3MR_NO_RESET;
	sc->reset.reason = MPI3MR_DEFAULT_RESET_REASON;
	sc->reset.status = retval;
	sc->reset.ioctl_reset_snapdump = false;

	return retval;
}
6026 
6027 /**
6028  * mpi3mr_issue_ioc_shutdown - shutdown controller
6029  * @sc: Adapter instance reference
6030  *
 * Send shutdown notification to the controller and wait for the
6032  * shutdown_timeout for it to be completed.
6033  *
6034  * Return: Nothing.
6035  */
static void mpi3mr_issue_ioc_shutdown(struct mpi3mr_softc *sc)
{
	U32 ioc_config, ioc_status;
	U8 retval = 1, retry = 0;
	U32 timeout = MPI3MR_DEFAULT_SHUTDOWN_TIME * 10;

	mpi3mr_dprint(sc, MPI3MR_INFO, "sending shutdown notification\n");
	if (sc->unrecoverable) {
		mpi3mr_dprint(sc, MPI3MR_ERROR,
		    "controller is unrecoverable, shutdown not issued\n");
		return;
	}
	/* Do not issue a second shutdown if one is already running. */
	ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);
	if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
	    == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_IN_PROGRESS) {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "shutdown already in progress\n");
		return;
	}

	/* Request a normal device shutdown via the IOC config register. */
	ioc_config = mpi3mr_regread(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET);
	ioc_config |= MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_NORMAL;
	ioc_config |= MPI3_SYSIF_IOC_CONFIG_DEVICE_SHUTDOWN_SEND_REQ;

	mpi3mr_regwrite(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET, ioc_config);

	/* Prefer the firmware-advertised shutdown timeout (in 100ms units). */
	if (sc->facts.shutdown_timeout)
		timeout = sc->facts.shutdown_timeout * 10;

	/* Poll (100ms steps) for shutdown completion. */
	do {
		ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);
		if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
		    == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_COMPLETE) {
			retval = 0;
			break;
		}

		if (sc->unrecoverable)
			break;

		/* On controller fault: soft-reset it and re-issue the
		 * shutdown request, up to MPI3MR_MAX_SHUTDOWN_RETRY_COUNT. */
		if ((ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) {
			mpi3mr_print_fault_info(sc);

			if (retry >= MPI3MR_MAX_SHUTDOWN_RETRY_COUNT)
				break;

			if (mpi3mr_issue_reset(sc,
			    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET,
			    MPI3MR_RESET_FROM_CTLR_CLEANUP))
				break;

			ioc_config = mpi3mr_regread(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET);
			ioc_config |= MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_NORMAL;
			ioc_config |= MPI3_SYSIF_IOC_CONFIG_DEVICE_SHUTDOWN_SEND_REQ;

			mpi3mr_regwrite(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET, ioc_config);

			if (sc->facts.shutdown_timeout)
				timeout = sc->facts.shutdown_timeout * 10;

			retry++;
		}

                DELAY(100 * 1000);

	} while (--timeout);

	ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);
	ioc_config = mpi3mr_regread(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET);

	if (retval) {
		if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
		    == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_IN_PROGRESS)
			mpi3mr_dprint(sc, MPI3MR_ERROR,
			    "shutdown still in progress after timeout\n");
	}

	mpi3mr_dprint(sc, MPI3MR_INFO,
	    "ioc_status/ioc_config after %s shutdown is (0x%x)/(0x%x)\n",
	    (!retval)?"successful":"failed", ioc_status,
	    ioc_config);
}
6117 
6118 /**
6119  * mpi3mr_cleanup_ioc - Cleanup controller
6120  * @sc: Adapter instance reference
 *
6122  * controller cleanup handler, Message unit reset or soft reset
6123  * and shutdown notification is issued to the controller.
6124  *
6125  * Return: Nothing.
6126  */
6127 void mpi3mr_cleanup_ioc(struct mpi3mr_softc *sc)
6128 {
6129 	enum mpi3mr_iocstate ioc_state;
6130 
6131 	mpi3mr_dprint(sc, MPI3MR_INFO, "cleaning up the controller\n");
6132 	mpi3mr_disable_interrupts(sc);
6133 
6134 	ioc_state = mpi3mr_get_iocstate(sc);
6135 
6136 	if ((!sc->unrecoverable) && (!sc->reset_in_progress) &&
6137 	    (ioc_state == MRIOC_STATE_READY)) {
6138 		if (mpi3mr_mur_ioc(sc,
6139 		    MPI3MR_RESET_FROM_CTLR_CLEANUP))
6140 			mpi3mr_issue_reset(sc,
6141 			    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET,
6142 			    MPI3MR_RESET_FROM_MUR_FAILURE);
6143 		mpi3mr_issue_ioc_shutdown(sc);
6144 	}
6145 
6146 	mpi3mr_dprint(sc, MPI3MR_INFO, "controller cleanup completed\n");
6147 }
6148