xref: /freebsd/sys/dev/mpi3mr/mpi3mr.c (revision 3dfb4e52587ae692745993a3fee975ad1d3865be)
1 /*
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2016-2025, Broadcom Inc. All rights reserved.
5  * Support: <fbsd-storage-driver.pdl@broadcom.com>
6  *
7  * Authors: Sumit Saxena <sumit.saxena@broadcom.com>
8  *	    Chandrakanth Patil <chandrakanth.patil@broadcom.com>
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions are
12  * met:
13  *
14  * 1. Redistributions of source code must retain the above copyright notice,
15  *    this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright notice,
17  *    this list of conditions and the following disclaimer in the documentation and/or other
18  *    materials provided with the distribution.
19  * 3. Neither the name of the Broadcom Inc. nor the names of its contributors
20  *    may be used to endorse or promote products derived from this software without
21  *    specific prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
24  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
27  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
30  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
31  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
32  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
33  * POSSIBILITY OF SUCH DAMAGE.
34  *
35  * The views and conclusions contained in the software and documentation are
36  * those of the authors and should not be interpreted as representing
37  * official policies,either expressed or implied, of the FreeBSD Project.
38  *
39  * Mail to: Broadcom Inc 1320 Ridder Park Dr, San Jose, CA 95131
40  *
41  * Broadcom Inc. (Broadcom) MPI3MR Adapter FreeBSD
42  */
43 
44 #include <sys/types.h>
45 #include <sys/param.h>
46 #include <sys/systm.h>
47 #include <sys/kernel.h>
48 #include <sys/module.h>
49 #include <sys/bus.h>
50 #include <sys/conf.h>
51 #include <sys/malloc.h>
52 #include <sys/sysctl.h>
53 #include <sys/uio.h>
54 
55 #include <machine/bus.h>
56 #include <machine/resource.h>
57 #include <sys/rman.h>
58 
59 #include <dev/pci/pcireg.h>
60 #include <dev/pci/pcivar.h>
61 #include <dev/pci/pci_private.h>
62 
63 #include <cam/cam.h>
64 #include <cam/cam_ccb.h>
65 #include <cam/cam_debug.h>
66 #include <cam/cam_sim.h>
67 #include <cam/cam_xpt_sim.h>
68 #include <cam/cam_xpt_periph.h>
69 #include <cam/cam_periph.h>
70 #include <cam/scsi/scsi_all.h>
71 #include <cam/scsi/scsi_message.h>
72 #include <cam/scsi/smp_all.h>
73 #include <sys/queue.h>
74 #include <sys/kthread.h>
75 #include "mpi3mr.h"
76 #include "mpi3mr_cam.h"
77 #include "mpi3mr_app.h"
78 
79 static void mpi3mr_repost_reply_buf(struct mpi3mr_softc *sc,
80 	U64 reply_dma);
81 static int mpi3mr_complete_admin_cmd(struct mpi3mr_softc *sc);
82 static void mpi3mr_port_enable_complete(struct mpi3mr_softc *sc,
83 	struct mpi3mr_drvr_cmd *drvrcmd);
84 static void mpi3mr_flush_io(struct mpi3mr_softc *sc);
85 static int mpi3mr_issue_reset(struct mpi3mr_softc *sc, U16 reset_type,
86 	U16 reset_reason);
87 static void mpi3mr_dev_rmhs_send_tm(struct mpi3mr_softc *sc, U16 handle,
88 	struct mpi3mr_drvr_cmd *cmdparam, U8 iou_rc);
89 static void mpi3mr_dev_rmhs_complete_iou(struct mpi3mr_softc *sc,
90 	struct mpi3mr_drvr_cmd *drv_cmd);
91 static void mpi3mr_dev_rmhs_complete_tm(struct mpi3mr_softc *sc,
92 	struct mpi3mr_drvr_cmd *drv_cmd);
93 static void mpi3mr_send_evt_ack(struct mpi3mr_softc *sc, U8 event,
94 	struct mpi3mr_drvr_cmd *cmdparam, U32 event_ctx);
95 static void mpi3mr_print_fault_info(struct mpi3mr_softc *sc);
96 static inline void mpi3mr_set_diagsave(struct mpi3mr_softc *sc);
97 static const char *mpi3mr_reset_rc_name(enum mpi3mr_reset_reason reason_code);
98 
99 void
mpi3mr_hexdump(void * buf,int sz,int format)100 mpi3mr_hexdump(void *buf, int sz, int format)
101 {
102         int i;
103         U32 *buf_loc = (U32 *)buf;
104 
105         for (i = 0; i < (sz / sizeof(U32)); i++) {
106                 if ((i % format) == 0) {
107                         if (i != 0)
108                                 printf("\n");
109                         printf("%08x: ", (i * 4));
110                 }
111                 printf("%08x ", buf_loc[i]);
112         }
113         printf("\n");
114 }
115 
/*
 * init_completion - arm a completion object for reuse
 * @completion: completion to (re)initialize
 *
 * Clears the done flag so a subsequent wait_for_completion_*() call
 * blocks/polls until complete() marks it done.
 */
void
init_completion(struct completion *completion)
{
	completion->done = 0;
}
121 
/*
 * complete - signal a completion object
 * @completion: completion to mark done
 *
 * Sets the done flag observed by the wait_for_completion_*() pollers,
 * then issues a wakeup.
 */
void
complete(struct completion *completion)
{
	completion->done = 1;
	/*
	 * NOTE(review): the wakeup channel here is the address of the
	 * complete() function itself, not the completion object that was
	 * just marked done.  The waiters in this file either spin on
	 * ->done or msleep on sc->tm_chan, so this wakeup appears to be
	 * a no-op for them; confirm no caller sleeps on this channel
	 * before changing it.
	 */
	wakeup(complete);
}
128 
/*
 * wait_for_completion_timeout - busy-wait for a completion
 * @completion: completion object to poll
 * @timeout: maximum wait in seconds (polled at 1ms granularity)
 *
 * On expiry the completion is force-marked done (with a console
 * message) so stale waiters cannot block forever.
 */
void wait_for_completion_timeout(struct completion *completion,
	    U32 timeout)
{
	U32 msec_left;

	for (msec_left = timeout * 1000; msec_left != 0; msec_left--) {
		if (completion->done != 0)
			break;
		DELAY(1000);
	}

	if (completion->done == 0) {
		printf("%s: Command is timedout\n", __func__);
		completion->done = 1;
	}
}
/*
 * wait_for_completion_timeout_tm - wait for a task-management completion
 * @completion: completion object to poll
 * @timeout: nominal timeout in seconds
 * @sc: adapter instance (supplies the sleep channel and mutex)
 *
 * Sleeps on sc->tm_chan under sc->mpi3mr_mtx between polls of the done
 * flag.  On expiry the completion is force-marked done so the caller
 * can proceed.
 */
void wait_for_completion_timeout_tm(struct completion *completion,
	    U32 timeout, struct mpi3mr_softc *sc)
{
	U32 count = timeout * 1000;

	while ((completion->done == 0) && count) {
		/*
		 * NOTE(review): each msleep() may block for up to one
		 * second (1 * hz) yet 'count' starts at timeout * 1000,
		 * so if the channel is never woken the effective timeout
		 * can be far longer than 'timeout' seconds.  Presumably
		 * the *1000 assumes frequent wakeups on tm_chan — confirm
		 * intended.
		 */
		msleep(&sc->tm_chan, &sc->mpi3mr_mtx, PRIBIO,
		       "TM command", 1 * hz);
		count--;
	}

	if (completion->done == 0) {
		printf("%s: Command is timedout\n", __func__);
		completion->done = 1;
	}
}
160 
161 
162 void
poll_for_command_completion(struct mpi3mr_softc * sc,struct mpi3mr_drvr_cmd * cmd,U16 wait)163 poll_for_command_completion(struct mpi3mr_softc *sc,
164        struct mpi3mr_drvr_cmd *cmd, U16 wait)
165 {
166 	int wait_time = wait * 1000;
167        while (wait_time) {
168                mpi3mr_complete_admin_cmd(sc);
169                if (cmd->state & MPI3MR_CMD_COMPLETE)
170                        break;
171 	       DELAY(1000);
172                wait_time--;
173        }
174 }
175 
176 /**
177  * mpi3mr_trigger_snapdump - triggers firmware snapdump
178  * @sc: Adapter instance reference
179  * @reason_code: reason code for the fault.
180  *
181  * This routine will trigger the snapdump and wait for it to
182  * complete or timeout before it returns.
 * This will be called during initialization time faults/resets/timeouts
184  * before soft reset invocation.
185  *
186  * Return:  None.
187  */
188 static void
mpi3mr_trigger_snapdump(struct mpi3mr_softc * sc,U16 reason_code)189 mpi3mr_trigger_snapdump(struct mpi3mr_softc *sc, U16 reason_code)
190 {
191 	U32 host_diagnostic, timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10;
192 
193 	mpi3mr_dprint(sc, MPI3MR_INFO, "snapdump triggered: reason code: %s\n",
194 	    mpi3mr_reset_rc_name(reason_code));
195 
196 	mpi3mr_set_diagsave(sc);
197 	mpi3mr_issue_reset(sc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
198 			   reason_code);
199 
200 	do {
201 		host_diagnostic = mpi3mr_regread(sc, MPI3_SYSIF_HOST_DIAG_OFFSET);
202 		if (!(host_diagnostic & MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS))
203 			break;
204                 DELAY(100 * 1000);
205 	} while (--timeout);
206 
207 	return;
208 }
209 
210 /**
211  * mpi3mr_check_rh_fault_ioc - check reset history and fault
212  * controller
213  * @sc: Adapter instance reference
 * @reason_code: reason code for the fault.
215  *
216  * This routine will fault the controller with
217  * the given reason code if it is not already in the fault or
 * not asynchronously reset. This will be used to handle
 * initialization time faults/resets/timeout as in those cases
220  * immediate soft reset invocation is not required.
221  *
222  * Return:  None.
223  */
static void mpi3mr_check_rh_fault_ioc(struct mpi3mr_softc *sc, U16 reason_code)
{
	U32 ioc_status;

	/* Nothing to do for a dead controller. */
	if (sc->unrecoverable) {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "controller is unrecoverable\n");
		return;
	}

	/* Already faulted or reset asynchronously: just report it. */
	ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);
	if ((ioc_status & (MPI3_SYSIF_IOC_STATUS_RESET_HISTORY |
	    MPI3_SYSIF_IOC_STATUS_FAULT)) != 0) {
		mpi3mr_print_fault_info(sc);
		return;
	}

	/* Otherwise fault the controller ourselves to capture a snapdump. */
	mpi3mr_trigger_snapdump(sc, reason_code);
}
244 
/*
 * Translate a reply frame bus address back to its host virtual address.
 * Returns NULL for a zero address or one outside the reply buffer pool.
 */
static void * mpi3mr_get_reply_virt_addr(struct mpi3mr_softc *sc,
    bus_addr_t phys_addr)
{
	bus_addr_t offset;

	if (phys_addr == 0)
		return NULL;
	if (phys_addr < sc->reply_buf_dma_min_address ||
	    phys_addr > sc->reply_buf_dma_max_address)
		return NULL;

	offset = phys_addr - sc->reply_buf_phys;
	return sc->reply_buf + offset;
}
256 
/*
 * Translate a sense buffer bus address back to its host virtual address.
 * Returns NULL for a zero address.
 *
 * NOTE(review): unlike mpi3mr_get_reply_virt_addr(), no pool range check
 * is performed — presumably firmware only hands back addresses from the
 * sense pool; confirm.
 */
static void * mpi3mr_get_sensebuf_virt_addr(struct mpi3mr_softc *sc,
    bus_addr_t phys_addr)
{
	if (phys_addr == 0)
		return NULL;
	return sc->sense_buf + (phys_addr - sc->sense_buf_phys);
}
264 
/*
 * Return a consumed reply frame to the reply free queue and ring the
 * host index register so the firmware can reuse the frame.
 */
static void mpi3mr_repost_reply_buf(struct mpi3mr_softc *sc,
    U64 reply_dma)
{
	U32 idx;

	mtx_lock_spin(&sc->reply_free_q_lock);
	idx = sc->reply_free_q_host_index;
	/* Advance the host index with wrap-around. */
	if (idx == sc->reply_free_q_sz - 1)
		sc->reply_free_q_host_index = 0;
	else
		sc->reply_free_q_host_index = idx + 1;
	sc->reply_free_q[idx] = reply_dma;
	mpi3mr_regwrite(sc, MPI3_SYSIF_REPLY_FREE_HOST_INDEX_OFFSET,
		sc->reply_free_q_host_index);
	mtx_unlock_spin(&sc->reply_free_q_lock);
}
280 
/*
 * Return a consumed sense buffer to the sense buffer free queue and
 * ring the host index register so the firmware can reuse the buffer.
 */
static void mpi3mr_repost_sense_buf(struct mpi3mr_softc *sc,
    U64 sense_buf_phys)
{
	U32 idx;

	mtx_lock_spin(&sc->sense_buf_q_lock);
	idx = sc->sense_buf_q_host_index;
	/* Advance the host index with wrap-around. */
	if (idx == sc->sense_buf_q_sz - 1)
		sc->sense_buf_q_host_index = 0;
	else
		sc->sense_buf_q_host_index = idx + 1;
	sc->sense_buf_q[idx] = sense_buf_phys;
	mpi3mr_regwrite(sc, MPI3_SYSIF_SENSE_BUF_FREE_HOST_INDEX_OFFSET,
		sc->sense_buf_q_host_index);
	mtx_unlock_spin(&sc->sense_buf_q_lock);
}
297 
/*
 * Set the I/O divert flag on every target belonging to the given
 * throttle group.  The target list is walked under the target lock.
 */
void mpi3mr_set_io_divert_for_all_vd_in_tg(struct mpi3mr_softc *sc,
	struct mpi3mr_throttle_group_info *tg, U8 divert_value)
{
	struct mpi3mr_target *tgt;

	mtx_lock_spin(&sc->target_lock);
	TAILQ_FOREACH(tgt, &sc->cam_sc->tgt_list, tgt_next) {
		if (tgt->throttle_group != tg)
			continue;
		tgt->io_divert = divert_value;
	}
	mtx_unlock_spin(&sc->target_lock);
}
310 
311 /**
312  * mpi3mr_submit_admin_cmd - Submit request to admin queue
 * @sc: Adapter instance reference
314  * @admin_req: MPI3 request
315  * @admin_req_sz: Request size
316  *
317  * Post the MPI3 request into admin request queue and
318  * inform the controller, if the queue is full return
319  * appropriate error.
320  *
321  * Return: 0 on success, non-zero on failure.
322  */
int mpi3mr_submit_admin_cmd(struct mpi3mr_softc *sc, void *admin_req,
    U16 admin_req_sz)
{
	U16 areq_pi = 0, areq_ci = 0, max_entries = 0;
	int retval = 0;
	U8 *areq_entry;

	mtx_lock_spin(&sc->admin_req_lock);
	areq_pi = sc->admin_req_pi;
	areq_ci = sc->admin_req_ci;
	max_entries = sc->num_admin_reqs;

	/*
	 * Bug fix: bail out through the unlock path if the controller is
	 * dead.  The previous code returned -EFAULT directly from here
	 * with the spin lock still held, deadlocking the next admin
	 * submission.
	 */
	if (sc->unrecoverable) {
		retval = -EFAULT;
		goto out;
	}

	/* Queue is full when PI+1 (with wrap) would collide with CI. */
	if ((areq_ci == (areq_pi + 1)) || ((!areq_ci) &&
					   (areq_pi == (max_entries - 1)))) {
		printf(IOCNAME "AdminReqQ full condition detected\n",
		    sc->name);
		retval = -EAGAIN;
		goto out;
	}

	/* Copy the request into the next free fixed-size admin frame,
	 * zero-filling the remainder of the slot. */
	areq_entry = (U8 *)sc->admin_req + (areq_pi *
						     MPI3MR_AREQ_FRAME_SZ);
	memset(areq_entry, 0, MPI3MR_AREQ_FRAME_SZ);
	memcpy(areq_entry, (U8 *)admin_req, admin_req_sz);

	if (++areq_pi == max_entries)
		areq_pi = 0;
	sc->admin_req_pi = areq_pi;

	/* Ring the doorbell so the IOC sees the new producer index. */
	mpi3mr_regwrite(sc, MPI3_SYSIF_ADMIN_REQ_Q_PI_OFFSET, sc->admin_req_pi);

out:
	mtx_unlock_spin(&sc->admin_req_lock);
	return retval;
}
360 
361 /**
362  * mpi3mr_check_req_qfull - Check request queue is full or not
 * @op_req_q: Operational request queue info
364  *
365  * Return: true when queue full, false otherwise.
366  */
367 static inline bool
mpi3mr_check_req_qfull(struct mpi3mr_op_req_queue * op_req_q)368 mpi3mr_check_req_qfull(struct mpi3mr_op_req_queue *op_req_q)
369 {
370 	U16 pi, ci, max_entries;
371 	bool is_qfull = false;
372 
373 	pi = op_req_q->pi;
374 	ci = op_req_q->ci;
375 	max_entries = op_req_q->num_reqs;
376 
377 	if ((ci == (pi + 1)) || ((!ci) && (pi == (max_entries - 1))))
378 		is_qfull = true;
379 
380 	return is_qfull;
381 }
382 
383 /**
384  * mpi3mr_submit_io - Post IO command to firmware
385  * @sc:		      Adapter instance reference
386  * @op_req_q:	      Operational Request queue reference
387  * @req:	      MPT request data
388  *
389  * This function submits IO command to firmware.
390  *
 * Return: 0 on success, non-zero on failure.
392  */
int mpi3mr_submit_io(struct mpi3mr_softc *sc,
    struct mpi3mr_op_req_queue *op_req_q, U8 *req)
{
	U16 pi, max_entries;
	int retval = 0;
	U8 *req_entry;
	U16 req_sz = sc->facts.op_req_sz;
	struct mpi3mr_irq_context *irq_ctx;

	mtx_lock_spin(&op_req_q->q_lock);

	pi = op_req_q->pi;
	max_entries = op_req_q->num_reqs;
	if (mpi3mr_check_req_qfull(op_req_q)) {
		/* Queue full: drain completions for the paired reply queue
		 * once and re-check before giving up with -EBUSY. */
		irq_ctx = &sc->irq_ctx[op_req_q->reply_qid - 1];
		mpi3mr_complete_io_cmd(sc, irq_ctx);

		if (mpi3mr_check_req_qfull(op_req_q)) {
			printf(IOCNAME "OpReqQ full condition detected\n",
				sc->name);
			retval = -EBUSY;
			goto out;
		}
	}

	/* Each slot is op_req_sz bytes; only MPI3MR_AREQ_FRAME_SZ bytes are
	 * copied from the caller's frame, the rest of the slot is zeroed.
	 * NOTE(review): assumes 'req' always holds at least
	 * MPI3MR_AREQ_FRAME_SZ valid bytes — confirm against callers. */
	req_entry = (U8 *)op_req_q->q_base + (pi * req_sz);
	memset(req_entry, 0, req_sz);
	memcpy(req_entry, req, MPI3MR_AREQ_FRAME_SZ);
	if (++pi == max_entries)
		pi = 0;
	op_req_q->pi = pi;

	/* Account the in-flight I/O against the paired reply queue. */
	mpi3mr_atomic_inc(&sc->op_reply_q[op_req_q->reply_qid - 1].pend_ios);

	/* Ring the per-queue doorbell with the new producer index. */
	mpi3mr_regwrite(sc, MPI3_SYSIF_OPER_REQ_Q_N_PI_OFFSET(op_req_q->qid), op_req_q->pi);
	if (sc->mpi3mr_debug & MPI3MR_TRACE) {
		device_printf(sc->mpi3mr_dev, "IO submission: QID:%d PI:0x%x\n", op_req_q->qid, op_req_q->pi);
		mpi3mr_hexdump(req_entry, MPI3MR_AREQ_FRAME_SZ, 8);
	}

out:
	mtx_unlock_spin(&op_req_q->q_lock);
	return retval;
}
437 
438 inline void
mpi3mr_add_sg_single(void * paddr,U8 flags,U32 length,bus_addr_t dma_addr)439 mpi3mr_add_sg_single(void *paddr, U8 flags, U32 length,
440 		     bus_addr_t dma_addr)
441 {
442 	Mpi3SGESimple_t *sgel = paddr;
443 
444 	sgel->Flags = flags;
445 	sgel->Length = (length);
446 	sgel->Address = (U64)dma_addr;
447 }
448 
mpi3mr_build_zero_len_sge(void * paddr)449 void mpi3mr_build_zero_len_sge(void *paddr)
450 {
451 	U8 sgl_flags = (MPI3_SGE_FLAGS_ELEMENT_TYPE_SIMPLE |
452 		MPI3_SGE_FLAGS_DLAS_SYSTEM | MPI3_SGE_FLAGS_END_OF_LIST);
453 
454 	mpi3mr_add_sg_single(paddr, sgl_flags, 0, -1);
455 
456 }
457 
/* Software interrupt gate: allow reply processing.  Presumably consulted
 * by the interrupt paths — no hardware registers are touched here. */
void mpi3mr_enable_interrupts(struct mpi3mr_softc *sc)
{
	sc->intr_enabled = 1;
}
462 
/* Software interrupt gate: stop reply processing.  Presumably consulted
 * by the interrupt paths — no hardware registers are touched here. */
void mpi3mr_disable_interrupts(struct mpi3mr_softc *sc)
{
	sc->intr_enabled = 0;
}
467 
468 void
mpi3mr_memaddr_cb(void * arg,bus_dma_segment_t * segs,int nsegs,int error)469 mpi3mr_memaddr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
470 {
471 	bus_addr_t *addr;
472 
473 	addr = arg;
474 	*addr = segs[0].ds_addr;
475 }
476 
/*
 * mpi3mr_delete_op_reply_queue - delete an operational reply queue
 * @sc: Adapter instance reference
 * @qid: operational reply queue id (1-based)
 *
 * Issue a DeleteReplyQueue admin request through the shared init_cmds
 * context, wait for it to complete and, on success, release the
 * queue's DMA resources.
 *
 * Return: 0 on success, non-zero on failure.
 */
static int mpi3mr_delete_op_reply_queue(struct mpi3mr_softc *sc, U16 qid)
{
	Mpi3DeleteReplyQueueRequest_t delq_req;
	struct mpi3mr_op_reply_queue *op_reply_q;
	int retval = 0;


	op_reply_q = &sc->op_reply_q[qid - 1];

	if (!op_reply_q->qid)
	{
		retval = -1;
		printf(IOCNAME "Issue DelRepQ: called with invalid Reply QID\n",
		    sc->name);
		goto out;
	}

	memset(&delq_req, 0, sizeof(delq_req));

	mtx_lock(&sc->init_cmds.completion.lock);
	if (sc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		printf(IOCNAME "Issue DelRepQ: Init command is in use\n",
		    sc->name);
		mtx_unlock(&sc->init_cmds.completion.lock);
		goto out;
	}

	/*
	 * Bug fix: a second, identical MPI3MR_CMD_PENDING check was removed
	 * here.  It was unreachable (the state cannot change while the
	 * completion lock is held) and its error path jumped to 'out'
	 * without releasing the lock.
	 */
	sc->init_cmds.state = MPI3MR_CMD_PENDING;
	sc->init_cmds.is_waiting = 1;
	sc->init_cmds.callback = NULL;
	delq_req.HostTag = MPI3MR_HOSTTAG_INITCMDS;
	delq_req.Function = MPI3_FUNCTION_DELETE_REPLY_QUEUE;
	delq_req.QueueID = qid;

	init_completion(&sc->init_cmds.completion);
	retval = mpi3mr_submit_admin_cmd(sc, &delq_req, sizeof(delq_req));
	if (retval) {
		printf(IOCNAME "Issue DelRepQ: Admin Post failed\n",
		    sc->name);
		goto out_unlock;
	}
	wait_for_completion_timeout(&sc->init_cmds.completion,
	    (MPI3MR_INTADMCMD_TIMEOUT));
	if (!(sc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		printf(IOCNAME "Issue DelRepQ: command timed out\n",
		    sc->name);
		/* Capture a snapdump and give up on the controller. */
		mpi3mr_check_rh_fault_ioc(sc,
		    MPI3MR_RESET_FROM_DELREPQ_TIMEOUT);
		sc->unrecoverable = 1;

		retval = -1;
		goto out_unlock;
	}
	if ((sc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	     != MPI3_IOCSTATUS_SUCCESS ) {
		printf(IOCNAME "Issue DelRepQ: Failed IOCStatus(0x%04x) "
		    " Loginfo(0x%08x) \n" , sc->name,
		    (sc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    sc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}
	sc->irq_ctx[qid - 1].op_reply_q = NULL;

	/* Queue deleted on the IOC side: release its DMA map, memory and tag. */
	if (sc->op_reply_q[qid - 1].q_base_phys != 0)
		bus_dmamap_unload(sc->op_reply_q[qid - 1].q_base_tag, sc->op_reply_q[qid - 1].q_base_dmamap);
	if (sc->op_reply_q[qid - 1].q_base != NULL)
		bus_dmamem_free(sc->op_reply_q[qid - 1].q_base_tag, sc->op_reply_q[qid - 1].q_base, sc->op_reply_q[qid - 1].q_base_dmamap);
	if (sc->op_reply_q[qid - 1].q_base_tag != NULL)
		bus_dma_tag_destroy(sc->op_reply_q[qid - 1].q_base_tag);

	sc->op_reply_q[qid - 1].q_base = NULL;
	sc->op_reply_q[qid - 1].qid = 0;
out_unlock:
	sc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mtx_unlock(&sc->init_cmds.completion.lock);
out:
	return retval;
}
563 
564 /**
565  * mpi3mr_create_op_reply_queue - create operational reply queue
566  * @sc: Adapter instance reference
567  * @qid: operational reply queue id
568  *
 * Create operational reply queue by issuing MPI request
570  * through admin queue.
571  *
572  * Return:  0 on success, non-zero on failure.
573  */
static int mpi3mr_create_op_reply_queue(struct mpi3mr_softc *sc, U16 qid)
{
	Mpi3CreateReplyQueueRequest_t create_req;
	struct mpi3mr_op_reply_queue *op_reply_q;
	int retval = 0;
	char q_lock_name[32];

	op_reply_q = &sc->op_reply_q[qid - 1];

	if (op_reply_q->qid)
	{
		retval = -1;
		printf(IOCNAME "CreateRepQ: called for duplicate qid %d\n",
		    sc->name, op_reply_q->qid);
		return retval;
	}

	/* A0 silicon supports a smaller reply queue depth. */
	op_reply_q->ci = 0;
	if (pci_get_revid(sc->mpi3mr_dev) == SAS4116_CHIP_REV_A0)
		op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD_A0;
	else
		op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD;

	op_reply_q->qsz = op_reply_q->num_replies * sc->op_reply_sz;
	op_reply_q->ephase = 1;

	/* Allocate the queue lock and DMA memory only on first creation;
	 * both are reused across controller resets. */
        if (!op_reply_q->q_base) {
		snprintf(q_lock_name, 32, "Reply Queue Lock[%d]", qid);
		mtx_init(&op_reply_q->q_lock, q_lock_name, NULL, MTX_SPIN);

		if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,    /* parent */
					4, 0,			/* algnmnt, boundary */
					sc->dma_loaddr,		/* lowaddr */
					BUS_SPACE_MAXADDR,	/* highaddr */
					NULL, NULL,		/* filter, filterarg */
					op_reply_q->qsz,		/* maxsize */
					1,			/* nsegments */
					op_reply_q->qsz,		/* maxsegsize */
					0,			/* flags */
					NULL, NULL,		/* lockfunc, lockarg */
					&op_reply_q->q_base_tag)) {
			mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate Operational reply DMA tag\n");
			return (ENOMEM);
		}

		if (bus_dmamem_alloc(op_reply_q->q_base_tag, (void **)&op_reply_q->q_base,
		    BUS_DMA_NOWAIT, &op_reply_q->q_base_dmamap)) {
			mpi3mr_dprint(sc, MPI3MR_ERROR, "%s: Cannot allocate replies memory\n", __func__);
			return (ENOMEM);
		}
		bzero(op_reply_q->q_base, op_reply_q->qsz);
		/* q_base_phys is filled in by mpi3mr_memaddr_cb. */
		bus_dmamap_load(op_reply_q->q_base_tag, op_reply_q->q_base_dmamap, op_reply_q->q_base, op_reply_q->qsz,
		    mpi3mr_memaddr_cb, &op_reply_q->q_base_phys, BUS_DMA_NOWAIT);
		mpi3mr_dprint(sc, MPI3MR_XINFO, "Operational Reply queue ID: %d phys addr= %#016jx virt_addr: %pa size= %d\n",
		    qid, (uintmax_t)op_reply_q->q_base_phys, op_reply_q->q_base, op_reply_q->qsz);

		if (!op_reply_q->q_base)
		{
			retval = -1;
			printf(IOCNAME "CreateRepQ: memory alloc failed for qid %d\n",
			    sc->name, qid);
			goto out;
		}
	}

	memset(&create_req, 0, sizeof(create_req));

	/* The shared init_cmds context allows only one init-time admin
	 * command outstanding at a time; serialize under its lock. */
	mtx_lock(&sc->init_cmds.completion.lock);
	if (sc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		printf(IOCNAME "CreateRepQ: Init command is in use\n",
		    sc->name);
		mtx_unlock(&sc->init_cmds.completion.lock);
		goto out;
	}

	sc->init_cmds.state = MPI3MR_CMD_PENDING;
	sc->init_cmds.is_waiting = 1;
	sc->init_cmds.callback = NULL;
	create_req.HostTag = MPI3MR_HOSTTAG_INITCMDS;
	create_req.Function = MPI3_FUNCTION_CREATE_REPLY_QUEUE;
	create_req.QueueID = qid;
	create_req.Flags = MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_ENABLE;
	create_req.MSIxIndex = sc->irq_ctx[qid - 1].msix_index;
	create_req.BaseAddress = (U64)op_reply_q->q_base_phys;
	create_req.Size = op_reply_q->num_replies;

	init_completion(&sc->init_cmds.completion);
	retval = mpi3mr_submit_admin_cmd(sc, &create_req,
	    sizeof(create_req));
	if (retval) {
		printf(IOCNAME "CreateRepQ: Admin Post failed\n",
		    sc->name);
		goto out_unlock;
	}

	wait_for_completion_timeout(&sc->init_cmds.completion,
	  	MPI3MR_INTADMCMD_TIMEOUT);
	if (!(sc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		printf(IOCNAME "CreateRepQ: command timed out\n",
		    sc->name);
		/* Timed out: capture a snapdump and mark the adapter
		 * unusable; no further recovery is attempted here. */
		mpi3mr_check_rh_fault_ioc(sc,
		    MPI3MR_RESET_FROM_CREATEREPQ_TIMEOUT);
		sc->unrecoverable = 1;
		retval = -1;
		goto out_unlock;
	}

	if ((sc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	     != MPI3_IOCSTATUS_SUCCESS ) {
		printf(IOCNAME "CreateRepQ: Failed IOCStatus(0x%04x) "
		    " Loginfo(0x%08x) \n" , sc->name,
		    (sc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    sc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}
	/* Success: publish the queue and hook it to its interrupt context. */
	op_reply_q->qid = qid;
	sc->irq_ctx[qid - 1].op_reply_q = op_reply_q;

out_unlock:
	sc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mtx_unlock(&sc->init_cmds.completion.lock);
out:
	/* On any failure, release the queue DMA resources so a retry
	 * starts from a clean slate. */
	if (retval) {
		if (op_reply_q->q_base_phys != 0)
			bus_dmamap_unload(op_reply_q->q_base_tag, op_reply_q->q_base_dmamap);
		if (op_reply_q->q_base != NULL)
			bus_dmamem_free(op_reply_q->q_base_tag, op_reply_q->q_base, op_reply_q->q_base_dmamap);
		if (op_reply_q->q_base_tag != NULL)
			bus_dma_tag_destroy(op_reply_q->q_base_tag);
		op_reply_q->q_base = NULL;
		op_reply_q->qid = 0;
	}

	return retval;
}
711 
712 /**
713  * mpi3mr_create_op_req_queue - create operational request queue
714  * @sc: Adapter instance reference
715  * @req_qid: operational request queue id
716  * @reply_qid: Reply queue ID
717  *
 * Create operational request queue by issuing MPI request
719  * through admin queue.
720  *
721  * Return:  0 on success, non-zero on failure.
722  */
static int mpi3mr_create_op_req_queue(struct mpi3mr_softc *sc, U16 req_qid, U8 reply_qid)
{
	Mpi3CreateRequestQueueRequest_t create_req;
	struct mpi3mr_op_req_queue *op_req_q;
	int retval = 0;
	char q_lock_name[32];

	op_req_q = &sc->op_req_q[req_qid - 1];

	if (op_req_q->qid)
	{
		retval = -1;
		printf(IOCNAME "CreateReqQ: called for duplicate qid %d\n",
		    sc->name, op_req_q->qid);
		return retval;
	}

	op_req_q->ci = 0;
	op_req_q->pi = 0;
	op_req_q->num_reqs = MPI3MR_OP_REQ_Q_QD;
	op_req_q->qsz = op_req_q->num_reqs * sc->facts.op_req_sz;
	op_req_q->reply_qid = reply_qid;

	/* Allocate the queue lock and DMA memory only on first creation;
	 * both are reused across controller resets. */
	if (!op_req_q->q_base) {
		snprintf(q_lock_name, 32, "Request Queue Lock[%d]", req_qid);
		mtx_init(&op_req_q->q_lock, q_lock_name, NULL, MTX_SPIN);

		if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,    /* parent */
					4, 0,			/* algnmnt, boundary */
					sc->dma_loaddr,		/* lowaddr */
					BUS_SPACE_MAXADDR,	/* highaddr */
					NULL, NULL,		/* filter, filterarg */
					op_req_q->qsz,		/* maxsize */
					1,			/* nsegments */
					op_req_q->qsz,		/* maxsegsize */
					0,			/* flags */
					NULL, NULL,		/* lockfunc, lockarg */
					&op_req_q->q_base_tag)) {
			mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate request DMA tag\n");
			return (ENOMEM);
		}

		if (bus_dmamem_alloc(op_req_q->q_base_tag, (void **)&op_req_q->q_base,
		    BUS_DMA_NOWAIT, &op_req_q->q_base_dmamap)) {
			mpi3mr_dprint(sc, MPI3MR_ERROR, "%s: Cannot allocate replies memory\n", __func__);
			return (ENOMEM);
		}

		bzero(op_req_q->q_base, op_req_q->qsz);

		/* q_base_phys is filled in by mpi3mr_memaddr_cb. */
		bus_dmamap_load(op_req_q->q_base_tag, op_req_q->q_base_dmamap, op_req_q->q_base, op_req_q->qsz,
		    mpi3mr_memaddr_cb, &op_req_q->q_base_phys, BUS_DMA_NOWAIT);

		mpi3mr_dprint(sc, MPI3MR_XINFO, "Operational Request QID: %d phys addr= %#016jx virt addr= %pa size= %d associated Reply QID: %d\n",
		    req_qid, (uintmax_t)op_req_q->q_base_phys, op_req_q->q_base, op_req_q->qsz, reply_qid);

		if (!op_req_q->q_base) {
			retval = -1;
			printf(IOCNAME "CreateReqQ: memory alloc failed for qid %d\n",
			    sc->name, req_qid);
			goto out;
		}
	}

	memset(&create_req, 0, sizeof(create_req));

	/* The shared init_cmds context allows only one init-time admin
	 * command outstanding at a time; serialize under its lock. */
	mtx_lock(&sc->init_cmds.completion.lock);
	if (sc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		printf(IOCNAME "CreateReqQ: Init command is in use\n",
		    sc->name);
		mtx_unlock(&sc->init_cmds.completion.lock);
		goto out;
	}

	sc->init_cmds.state = MPI3MR_CMD_PENDING;
	sc->init_cmds.is_waiting = 1;
	sc->init_cmds.callback = NULL;
	create_req.HostTag = MPI3MR_HOSTTAG_INITCMDS;
	create_req.Function = MPI3_FUNCTION_CREATE_REQUEST_QUEUE;
	create_req.QueueID = req_qid;
	create_req.Flags = 0;
	create_req.ReplyQueueID = reply_qid;
	create_req.BaseAddress = (U64)op_req_q->q_base_phys;
	create_req.Size = op_req_q->num_reqs;

	init_completion(&sc->init_cmds.completion);
	retval = mpi3mr_submit_admin_cmd(sc, &create_req,
	    sizeof(create_req));
	if (retval) {
		printf(IOCNAME "CreateReqQ: Admin Post failed\n",
		    sc->name);
		goto out_unlock;
	}

	wait_for_completion_timeout(&sc->init_cmds.completion,
	    (MPI3MR_INTADMCMD_TIMEOUT));

	if (!(sc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		printf(IOCNAME "CreateReqQ: command timed out\n",
		    sc->name);
		/* Timed out: capture a snapdump and mark the adapter
		 * unusable; no further recovery is attempted here. */
		mpi3mr_check_rh_fault_ioc(sc,
			MPI3MR_RESET_FROM_CREATEREQQ_TIMEOUT);
		sc->unrecoverable = 1;
		retval = -1;
		goto out_unlock;
	}

	if ((sc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	     != MPI3_IOCSTATUS_SUCCESS ) {
		printf(IOCNAME "CreateReqQ: Failed IOCStatus(0x%04x) "
		    " Loginfo(0x%08x) \n" , sc->name,
		    (sc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    sc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}
	/* Success: publish the queue id. */
	op_req_q->qid = req_qid;

out_unlock:
	sc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mtx_unlock(&sc->init_cmds.completion.lock);
out:
	/* On any failure, release the queue DMA resources so a retry
	 * starts from a clean slate. */
	if (retval) {
		if (op_req_q->q_base_phys != 0)
			bus_dmamap_unload(op_req_q->q_base_tag, op_req_q->q_base_dmamap);
		if (op_req_q->q_base != NULL)
			bus_dmamem_free(op_req_q->q_base_tag, op_req_q->q_base, op_req_q->q_base_dmamap);
		if (op_req_q->q_base_tag != NULL)
			bus_dma_tag_destroy(op_req_q->q_base_tag);
		op_req_q->q_base = NULL;
		op_req_q->qid = 0;
	}
	return retval;
}
858 
859 /**
860  * mpi3mr_create_op_queues - create operational queues
861  * @sc: Adapter instance reference
862  *
 * Create operational queues(request queues and reply queues).
864  * Return:  0 on success, non-zero on failure.
865  */
mpi3mr_create_op_queues(struct mpi3mr_softc * sc)866 static int mpi3mr_create_op_queues(struct mpi3mr_softc *sc)
867 {
868 	int retval = 0;
869 	U16 num_queues = 0, i = 0, qid;
870 
871 	num_queues = min(sc->facts.max_op_reply_q,
872 	    sc->facts.max_op_req_q);
873 	num_queues = min(num_queues, sc->msix_count);
874 
875 	/*
876 	 * During reset set the num_queues to the number of queues
877 	 * that was set before the reset.
878 	 */
879 	if (sc->num_queues)
880 		num_queues = sc->num_queues;
881 
882 	mpi3mr_dprint(sc, MPI3MR_XINFO, "Trying to create %d Operational Q pairs\n",
883 	    num_queues);
884 
885 	if (!sc->op_req_q) {
886 		sc->op_req_q = malloc(sizeof(struct mpi3mr_op_req_queue) *
887 		    num_queues, M_MPI3MR, M_NOWAIT | M_ZERO);
888 
889 		if (!sc->op_req_q) {
890 			mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to alloc memory for Request queue info\n");
891 			retval = -1;
892 			goto out_failed;
893 		}
894 	}
895 
896 	if (!sc->op_reply_q) {
897 		sc->op_reply_q = malloc(sizeof(struct mpi3mr_op_reply_queue) * num_queues,
898 			M_MPI3MR, M_NOWAIT | M_ZERO);
899 
900 		if (!sc->op_reply_q) {
901 			mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to alloc memory for Reply queue info\n");
902 			retval = -1;
903 			goto out_failed;
904 		}
905 	}
906 
907 	sc->num_hosttag_op_req_q = (sc->max_host_ios + 1) / num_queues;
908 
909 	/*Operational Request and reply queue ID starts with 1*/
910 	for (i = 0; i < num_queues; i++) {
911 		qid = i + 1;
912 		if (mpi3mr_create_op_reply_queue(sc, qid)) {
913 			mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to create Reply queue %d\n",
914 			    qid);
915 			break;
916 		}
917 		if (mpi3mr_create_op_req_queue(sc, qid,
918 		    sc->op_reply_q[qid - 1].qid)) {
919 			mpi3mr_delete_op_reply_queue(sc, qid);
920 			mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to create Request queue %d\n",
921 			    qid);
922 			break;
923 		}
924 
925 	}
926 
927 	/* Not even one queue is created successfully*/
928         if (i == 0) {
929                 retval = -1;
930                 goto out_failed;
931         }
932 
933 	if (!sc->num_queues) {
934 		sc->num_queues = i;
935 	} else {
936 		if (num_queues != i) {
937 			mpi3mr_dprint(sc, MPI3MR_ERROR, "Number of queues (%d) post reset are not same as"
938 					"queues allocated (%d) during driver init\n", i, num_queues);
939 			goto out_failed;
940 		}
941 	}
942 
943 	mpi3mr_dprint(sc, MPI3MR_INFO, "Successfully created %d Operational Queue pairs\n",
944 	    sc->num_queues);
945 	mpi3mr_dprint(sc, MPI3MR_INFO, "Request Queue QD: %d Reply queue QD: %d\n",
946 	    sc->op_req_q[0].num_reqs, sc->op_reply_q[0].num_replies);
947 
948 	return retval;
949 out_failed:
950 	if (sc->op_req_q) {
951 		free(sc->op_req_q, M_MPI3MR);
952 		sc->op_req_q = NULL;
953 	}
954 	if (sc->op_reply_q) {
955 		free(sc->op_reply_q, M_MPI3MR);
956 		sc->op_reply_q = NULL;
957 	}
958 	return retval;
959 }
960 
961 /**
962  * mpi3mr_setup_admin_qpair - Setup admin queue pairs
963  * @sc: Adapter instance reference
964  *
965  * Allocation and setup admin queues(request queues and reply queues).
966  * Return:  0 on success, non-zero on failure.
967  */
mpi3mr_setup_admin_qpair(struct mpi3mr_softc * sc)968 static int mpi3mr_setup_admin_qpair(struct mpi3mr_softc *sc)
969 {
970 	int retval = 0;
971 	U32 num_adm_entries = 0;
972 
973 	sc->admin_req_q_sz = MPI3MR_AREQQ_SIZE;
974 	sc->num_admin_reqs = sc->admin_req_q_sz / MPI3MR_AREQ_FRAME_SZ;
975 	sc->admin_req_ci = sc->admin_req_pi = 0;
976 
977 	sc->admin_reply_q_sz = MPI3MR_AREPQ_SIZE;
978 	sc->num_admin_replies = sc->admin_reply_q_sz/ MPI3MR_AREP_FRAME_SZ;
979 	sc->admin_reply_ci = 0;
980 	sc->admin_reply_ephase = 1;
981 
982 	if (!sc->admin_req) {
983 		/*
984 		 * We need to create the tag for the admin queue to get the
985 		 * iofacts to see how many bits the controller decodes.  Solve
986 		 * this chicken and egg problem by only doing lower 4GB DMA.
987 		 */
988 		if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,    /* parent */
989 					4, 0,			/* algnmnt, boundary */
990 					BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
991 					BUS_SPACE_MAXADDR,	/* highaddr */
992 					NULL, NULL,		/* filter, filterarg */
993 					sc->admin_req_q_sz,	/* maxsize */
994 					1,			/* nsegments */
995 					sc->admin_req_q_sz,	/* maxsegsize */
996 					0,			/* flags */
997 					NULL, NULL,		/* lockfunc, lockarg */
998 					&sc->admin_req_tag)) {
999 			mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate request DMA tag\n");
1000 			return (ENOMEM);
1001 		}
1002 
1003 		if (bus_dmamem_alloc(sc->admin_req_tag, (void **)&sc->admin_req,
1004 		    BUS_DMA_NOWAIT, &sc->admin_req_dmamap)) {
1005 			mpi3mr_dprint(sc, MPI3MR_ERROR, "%s: Cannot allocate replies memory\n", __func__);
1006 			return (ENOMEM);
1007 		}
1008 		bzero(sc->admin_req, sc->admin_req_q_sz);
1009 		bus_dmamap_load(sc->admin_req_tag, sc->admin_req_dmamap, sc->admin_req, sc->admin_req_q_sz,
1010 		    mpi3mr_memaddr_cb, &sc->admin_req_phys, BUS_DMA_NOWAIT);
1011 		mpi3mr_dprint(sc, MPI3MR_XINFO, "Admin Req queue phys addr= %#016jx size= %d\n",
1012 		    (uintmax_t)sc->admin_req_phys, sc->admin_req_q_sz);
1013 
1014 		if (!sc->admin_req)
1015 		{
1016 			retval = -1;
1017 			printf(IOCNAME "Memory alloc for AdminReqQ: failed\n",
1018 			    sc->name);
1019 			goto out_failed;
1020 		}
1021 	}
1022 
1023 	if (!sc->admin_reply) {
1024 		mtx_init(&sc->admin_reply_lock, "Admin Reply Queue Lock", NULL, MTX_SPIN);
1025 
1026 		if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,    /* parent */
1027 					4, 0,			/* algnmnt, boundary */
1028 					BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
1029 					BUS_SPACE_MAXADDR,	/* highaddr */
1030 					NULL, NULL,		/* filter, filterarg */
1031 					sc->admin_reply_q_sz,	/* maxsize */
1032 					1,			/* nsegments */
1033 					sc->admin_reply_q_sz,	/* maxsegsize */
1034 					0,			/* flags */
1035 					NULL, NULL,		/* lockfunc, lockarg */
1036 					&sc->admin_reply_tag)) {
1037 			mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate reply DMA tag\n");
1038 			return (ENOMEM);
1039 		}
1040 
1041 		if (bus_dmamem_alloc(sc->admin_reply_tag, (void **)&sc->admin_reply,
1042 		    BUS_DMA_NOWAIT, &sc->admin_reply_dmamap)) {
1043 			mpi3mr_dprint(sc, MPI3MR_ERROR, "%s: Cannot allocate replies memory\n", __func__);
1044 			return (ENOMEM);
1045 		}
1046 		bzero(sc->admin_reply, sc->admin_reply_q_sz);
1047 		bus_dmamap_load(sc->admin_reply_tag, sc->admin_reply_dmamap, sc->admin_reply, sc->admin_reply_q_sz,
1048 		    mpi3mr_memaddr_cb, &sc->admin_reply_phys, BUS_DMA_NOWAIT);
1049 		mpi3mr_dprint(sc, MPI3MR_XINFO, "Admin Reply queue phys addr= %#016jx size= %d\n",
1050 		    (uintmax_t)sc->admin_reply_phys, sc->admin_req_q_sz);
1051 
1052 
1053 		if (!sc->admin_reply)
1054 		{
1055 			retval = -1;
1056 			printf(IOCNAME "Memory alloc for AdminRepQ: failed\n",
1057 			    sc->name);
1058 			goto out_failed;
1059 		}
1060 	}
1061 
1062 	num_adm_entries = (sc->num_admin_replies << 16) |
1063 				(sc->num_admin_reqs);
1064 	mpi3mr_regwrite(sc, MPI3_SYSIF_ADMIN_Q_NUM_ENTRIES_OFFSET, num_adm_entries);
1065 	mpi3mr_regwrite64(sc, MPI3_SYSIF_ADMIN_REQ_Q_ADDR_LOW_OFFSET, sc->admin_req_phys);
1066 	mpi3mr_regwrite64(sc, MPI3_SYSIF_ADMIN_REPLY_Q_ADDR_LOW_OFFSET, sc->admin_reply_phys);
1067 	mpi3mr_regwrite(sc, MPI3_SYSIF_ADMIN_REQ_Q_PI_OFFSET, sc->admin_req_pi);
1068 	mpi3mr_regwrite(sc, MPI3_SYSIF_ADMIN_REPLY_Q_CI_OFFSET, sc->admin_reply_ci);
1069 
1070 	return retval;
1071 
1072 out_failed:
1073 	/* Free Admin reply*/
1074 	if (sc->admin_reply_phys)
1075 		bus_dmamap_unload(sc->admin_reply_tag, sc->admin_reply_dmamap);
1076 
1077 	if (sc->admin_reply != NULL)
1078 		bus_dmamem_free(sc->admin_reply_tag, sc->admin_reply,
1079 		    sc->admin_reply_dmamap);
1080 
1081 	if (sc->admin_reply_tag != NULL)
1082 		bus_dma_tag_destroy(sc->admin_reply_tag);
1083 
1084 	/* Free Admin request*/
1085 	if (sc->admin_req_phys)
1086 		bus_dmamap_unload(sc->admin_req_tag, sc->admin_req_dmamap);
1087 
1088 	if (sc->admin_req != NULL)
1089 		bus_dmamem_free(sc->admin_req_tag, sc->admin_req,
1090 		    sc->admin_req_dmamap);
1091 
1092 	if (sc->admin_req_tag != NULL)
1093 		bus_dma_tag_destroy(sc->admin_req_tag);
1094 
1095 	return retval;
1096 }
1097 
1098 /**
1099  * mpi3mr_print_fault_info - Display fault information
1100  * @sc: Adapter instance reference
1101  *
1102  * Display the controller fault information if there is a
1103  * controller fault.
1104  *
1105  * Return: Nothing.
1106  */
mpi3mr_print_fault_info(struct mpi3mr_softc * sc)1107 static void mpi3mr_print_fault_info(struct mpi3mr_softc *sc)
1108 {
1109 	U32 ioc_status, code, code1, code2, code3;
1110 
1111 	ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);
1112 
1113 	if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) {
1114 		code = mpi3mr_regread(sc, MPI3_SYSIF_FAULT_OFFSET) &
1115 			MPI3_SYSIF_FAULT_CODE_MASK;
1116 		code1 = mpi3mr_regread(sc, MPI3_SYSIF_FAULT_INFO0_OFFSET);
1117 		code2 = mpi3mr_regread(sc, MPI3_SYSIF_FAULT_INFO1_OFFSET);
1118 		code3 = mpi3mr_regread(sc, MPI3_SYSIF_FAULT_INFO2_OFFSET);
1119 		printf(IOCNAME "fault codes 0x%04x:0x%04x:0x%04x:0x%04x\n",
1120 		    sc->name, code, code1, code2, code3);
1121 	}
1122 }
1123 
mpi3mr_get_iocstate(struct mpi3mr_softc * sc)1124 enum mpi3mr_iocstate mpi3mr_get_iocstate(struct mpi3mr_softc *sc)
1125 {
1126 	U32 ioc_status, ioc_control;
1127 	U8 ready, enabled;
1128 
1129 	ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);
1130 	ioc_control = mpi3mr_regread(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET);
1131 
1132 	if(sc->unrecoverable)
1133 		return MRIOC_STATE_UNRECOVERABLE;
1134 	if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)
1135 		return MRIOC_STATE_FAULT;
1136 
1137 	ready = (ioc_status & MPI3_SYSIF_IOC_STATUS_READY);
1138 	enabled = (ioc_control & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC);
1139 
1140 	if (ready && enabled)
1141 		return MRIOC_STATE_READY;
1142 	if ((!ready) && (!enabled))
1143 		return MRIOC_STATE_RESET;
1144 	if ((!ready) && (enabled))
1145 		return MRIOC_STATE_BECOMING_READY;
1146 
1147 	return MRIOC_STATE_RESET_REQUESTED;
1148 }
1149 
mpi3mr_clear_reset_history(struct mpi3mr_softc * sc)1150 static inline void mpi3mr_clear_reset_history(struct mpi3mr_softc *sc)
1151 {
1152         U32 ioc_status;
1153 
1154 	ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);
1155         if (ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)
1156 		mpi3mr_regwrite(sc, MPI3_SYSIF_IOC_STATUS_OFFSET, ioc_status);
1157 
1158 }
1159 
1160 /**
1161  * mpi3mr_mur_ioc - Message unit Reset handler
1162  * @sc: Adapter instance reference
1163  * @reset_reason: Reset reason code
1164  *
1165  * Issue Message unit Reset to the controller and wait for it to
1166  * be complete.
1167  *
1168  * Return: 0 on success, -1 on failure.
1169  */
mpi3mr_mur_ioc(struct mpi3mr_softc * sc,U16 reset_reason)1170 static int mpi3mr_mur_ioc(struct mpi3mr_softc *sc, U16 reset_reason)
1171 {
1172 	U32 ioc_config, timeout, ioc_status, scratch_pad0;
1173         int retval = -1;
1174 
1175         mpi3mr_dprint(sc, MPI3MR_INFO, "Issuing Message Unit Reset(MUR)\n");
1176         if (sc->unrecoverable) {
1177                 mpi3mr_dprint(sc, MPI3MR_ERROR, "IOC is unrecoverable MUR not issued\n");
1178                 return retval;
1179         }
1180 	mpi3mr_clear_reset_history(sc);
1181 
1182 	scratch_pad0 = ((MPI3MR_RESET_REASON_OSTYPE_FREEBSD <<
1183 			MPI3MR_RESET_REASON_OSTYPE_SHIFT) |
1184 			(sc->facts.ioc_num <<
1185 			MPI3MR_RESET_REASON_IOCNUM_SHIFT) | reset_reason);
1186 	mpi3mr_regwrite(sc, MPI3_SYSIF_SCRATCHPAD0_OFFSET, scratch_pad0);
1187 	ioc_config = mpi3mr_regread(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET);
1188         ioc_config &= ~MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC;
1189 	mpi3mr_regwrite(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET, ioc_config);
1190 
1191         timeout = MPI3MR_MUR_TIMEOUT * 10;
1192         do {
1193 		ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);
1194                 if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)) {
1195 			mpi3mr_clear_reset_history(sc);
1196 			ioc_config =
1197 				mpi3mr_regread(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET);
1198                         if (!((ioc_status & MPI3_SYSIF_IOC_STATUS_READY) ||
1199                             (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) ||
1200                             (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC))) {
1201                                 retval = 0;
1202                                 break;
1203                         }
1204                 }
1205                 DELAY(100 * 1000);
1206         } while (--timeout);
1207 
1208 	ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);
1209 	ioc_config = mpi3mr_regread(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET);
1210 
1211         mpi3mr_dprint(sc, MPI3MR_INFO, "IOC Status/Config after %s MUR is (0x%x)/(0x%x)\n",
1212                 !retval ? "successful":"failed", ioc_status, ioc_config);
1213         return retval;
1214 }
1215 
1216 /**
1217  * mpi3mr_bring_ioc_ready - Bring controller to ready state
1218  * @sc: Adapter instance reference
1219  *
1220  * Set Enable IOC bit in IOC configuration register and wait for
1221  * the controller to become ready.
1222  *
1223  * Return: 0 on success, appropriate error on failure.
1224  */
mpi3mr_bring_ioc_ready(struct mpi3mr_softc * sc,U64 * start_time)1225 static int mpi3mr_bring_ioc_ready(struct mpi3mr_softc *sc,
1226 				  U64 *start_time)
1227 {
1228 	enum mpi3mr_iocstate current_state;
1229 	U32 ioc_status;
1230 	int retval;
1231 
1232 	U32 ioc_config = mpi3mr_regread(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET);
1233 	ioc_config |= MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC;
1234 	mpi3mr_regwrite(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET, ioc_config);
1235 
1236 	if (*start_time == 0)
1237 		*start_time = ticks;
1238 
1239 	do {
1240 		ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);
1241 		if (ioc_status & (MPI3_SYSIF_IOC_STATUS_FAULT | MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)) {
1242 		    if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) {
1243 			    mpi3mr_print_fault_info(sc);
1244 			    retval = mpi3mr_issue_reset(sc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET, MPI3MR_RESET_FROM_BRINGUP);
1245 			    if (retval) {
1246 				    mpi3mr_dprint(sc, MPI3MR_ERROR, "%s: Failed to soft reset the IOC, error 0x%d\n", __func__, retval);
1247 				    return -1;
1248 			    }
1249 		    }
1250 		    mpi3mr_clear_reset_history(sc);
1251 		    return EAGAIN;
1252 		}
1253 
1254 		current_state = mpi3mr_get_iocstate(sc);
1255 		if (current_state == MRIOC_STATE_READY)
1256 			return 0;
1257 
1258 		DELAY(100 * 1000);
1259 
1260 	} while (((ticks - *start_time) / hz) < sc->ready_timeout);
1261 
1262 	return -1;
1263 }
1264 
/* IOC state to human-readable name mapper table */
static const struct {
	enum mpi3mr_iocstate value;
	char *name;
} mrioc_states[] = {
	{ MRIOC_STATE_READY, "ready" },
	{ MRIOC_STATE_FAULT, "fault" },
	{ MRIOC_STATE_RESET, "reset" },
	{ MRIOC_STATE_BECOMING_READY, "becoming ready" },
	{ MRIOC_STATE_RESET_REQUESTED, "reset requested" },
	{ MRIOC_STATE_COUNT, "Count" },
};
1276 
mpi3mr_iocstate_name(enum mpi3mr_iocstate mrioc_state)1277 static const char *mpi3mr_iocstate_name(enum mpi3mr_iocstate mrioc_state)
1278 {
1279 	int i;
1280 	char *name = NULL;
1281 
1282 	for (i = 0; i < MRIOC_STATE_COUNT; i++) {
1283 		if (mrioc_states[i].value == mrioc_state){
1284 			name = mrioc_states[i].name;
1285 			break;
1286 		}
1287 	}
1288 	return name;
1289 }
1290 
1291 /* Reset reason to name mapper structure*/
1292 static const struct {
1293 	enum mpi3mr_reset_reason value;
1294 	char *name;
1295 } mpi3mr_reset_reason_codes[] = {
1296 	{ MPI3MR_RESET_FROM_BRINGUP, "timeout in bringup" },
1297 	{ MPI3MR_RESET_FROM_FAULT_WATCH, "fault" },
1298 	{ MPI3MR_RESET_FROM_IOCTL, "application" },
1299 	{ MPI3MR_RESET_FROM_EH_HOS, "error handling" },
1300 	{ MPI3MR_RESET_FROM_TM_TIMEOUT, "TM timeout" },
1301 	{ MPI3MR_RESET_FROM_IOCTL_TIMEOUT, "IOCTL timeout" },
1302 	{ MPI3MR_RESET_FROM_SCSIIO_TIMEOUT, "SCSIIO timeout" },
1303 	{ MPI3MR_RESET_FROM_MUR_FAILURE, "MUR failure" },
1304 	{ MPI3MR_RESET_FROM_CTLR_CLEANUP, "timeout in controller cleanup" },
1305 	{ MPI3MR_RESET_FROM_CIACTIV_FAULT, "component image activation fault" },
1306 	{ MPI3MR_RESET_FROM_PE_TIMEOUT, "port enable timeout" },
1307 	{ MPI3MR_RESET_FROM_TSU_TIMEOUT, "time stamp update timeout" },
1308 	{ MPI3MR_RESET_FROM_DELREQQ_TIMEOUT, "delete request queue timeout" },
1309 	{ MPI3MR_RESET_FROM_DELREPQ_TIMEOUT, "delete reply queue timeout" },
1310 	{
1311 		MPI3MR_RESET_FROM_CREATEREPQ_TIMEOUT,
1312 		"create request queue timeout"
1313 	},
1314 	{
1315 		MPI3MR_RESET_FROM_CREATEREQQ_TIMEOUT,
1316 		"create reply queue timeout"
1317 	},
1318 	{ MPI3MR_RESET_FROM_IOCFACTS_TIMEOUT, "IOC facts timeout" },
1319 	{ MPI3MR_RESET_FROM_IOCINIT_TIMEOUT, "IOC init timeout" },
1320 	{ MPI3MR_RESET_FROM_EVTNOTIFY_TIMEOUT, "event notify timeout" },
1321 	{ MPI3MR_RESET_FROM_EVTACK_TIMEOUT, "event acknowledgment timeout" },
1322 	{
1323 		MPI3MR_RESET_FROM_CIACTVRST_TIMER,
1324 		"component image activation timeout"
1325 	},
1326 	{
1327 		MPI3MR_RESET_FROM_GETPKGVER_TIMEOUT,
1328 		"get package version timeout"
1329 	},
1330 	{
1331 		MPI3MR_RESET_FROM_PELABORT_TIMEOUT,
1332 		"persistent event log abort timeout"
1333 	},
1334 	{ MPI3MR_RESET_FROM_SYSFS, "sysfs invocation" },
1335 	{ MPI3MR_RESET_FROM_SYSFS_TIMEOUT, "sysfs TM timeout" },
1336 	{
1337 		MPI3MR_RESET_FROM_DIAG_BUFFER_POST_TIMEOUT,
1338 		"diagnostic buffer post timeout"
1339 	},
1340 	{ MPI3MR_RESET_FROM_FIRMWARE, "firmware asynchronus reset" },
1341 	{ MPI3MR_RESET_FROM_CFG_REQ_TIMEOUT, "configuration request timeout" },
1342 	{ MPI3MR_RESET_REASON_COUNT, "Reset reason count" },
1343 };
1344 
1345 /**
1346  * mpi3mr_reset_rc_name - get reset reason code name
1347  * @reason_code: reset reason code value
1348  *
1349  * Map reset reason to an NULL terminated ASCII string
1350  *
1351  * Return: Name corresponding to reset reason value or NULL.
1352  */
mpi3mr_reset_rc_name(enum mpi3mr_reset_reason reason_code)1353 static const char *mpi3mr_reset_rc_name(enum mpi3mr_reset_reason reason_code)
1354 {
1355 	int i;
1356 	char *name = NULL;
1357 
1358 	for (i = 0; i < MPI3MR_RESET_REASON_COUNT; i++) {
1359 		if (mpi3mr_reset_reason_codes[i].value == reason_code) {
1360 			name = mpi3mr_reset_reason_codes[i].name;
1361 			break;
1362 		}
1363 	}
1364 	return name;
1365 }
1366 
/* Number of entries in mpi3mr_reset_types[] (loop bound for lookups) */
#define MAX_RESET_TYPE 3
/* Reset type to name mapper structure*/
static const struct {
	U16 reset_type;
	char *name;
} mpi3mr_reset_types[] = {
	{ MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET, "soft" },
	{ MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, "diag fault" },
	{ MAX_RESET_TYPE, "count"}
};
1377 
1378 /**
1379  * mpi3mr_reset_type_name - get reset type name
1380  * @reset_type: reset type value
1381  *
1382  * Map reset type to an NULL terminated ASCII string
1383  *
1384  * Return: Name corresponding to reset type value or NULL.
1385  */
mpi3mr_reset_type_name(U16 reset_type)1386 static const char *mpi3mr_reset_type_name(U16 reset_type)
1387 {
1388 	int i;
1389 	char *name = NULL;
1390 
1391 	for (i = 0; i < MAX_RESET_TYPE; i++) {
1392 		if (mpi3mr_reset_types[i].reset_type == reset_type) {
1393 			name = mpi3mr_reset_types[i].name;
1394 			break;
1395 		}
1396 	}
1397 	return name;
1398 }
1399 
1400 /**
1401  * mpi3mr_soft_reset_success - Check softreset is success or not
1402  * @ioc_status: IOC status register value
1403  * @ioc_config: IOC config register value
1404  *
1405  * Check whether the soft reset is successful or not based on
1406  * IOC status and IOC config register values.
1407  *
1408  * Return: True when the soft reset is success, false otherwise.
1409  */
1410 static inline bool
mpi3mr_soft_reset_success(U32 ioc_status,U32 ioc_config)1411 mpi3mr_soft_reset_success(U32 ioc_status, U32 ioc_config)
1412 {
1413 	if (!((ioc_status & MPI3_SYSIF_IOC_STATUS_READY) ||
1414 	    (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) ||
1415 	    (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC)))
1416 		return true;
1417 	return false;
1418 }
1419 
1420 /**
1421  * mpi3mr_diagfault_success - Check diag fault is success or not
1422  * @sc: Adapter reference
1423  * @ioc_status: IOC status register value
1424  *
1425  * Check whether the controller hit diag reset fault code.
1426  *
1427  * Return: True when there is diag fault, false otherwise.
1428  */
mpi3mr_diagfault_success(struct mpi3mr_softc * sc,U32 ioc_status)1429 static inline bool mpi3mr_diagfault_success(struct mpi3mr_softc *sc,
1430 	U32 ioc_status)
1431 {
1432 	if (!(ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT))
1433 		return false;
1434 	mpi3mr_print_fault_info(sc);
1435 	return true;
1436 }
1437 
1438 /**
1439  * mpi3mr_issue_iocfacts - Send IOC Facts
1440  * @sc: Adapter instance reference
1441  * @facts_data: Cached IOC facts data
1442  *
1443  * Issue IOC Facts MPI request through admin queue and wait for
1444  * the completion of it or time out.
1445  *
1446  * Return: 0 on success, non-zero on failures.
1447  */
mpi3mr_issue_iocfacts(struct mpi3mr_softc * sc,Mpi3IOCFactsData_t * facts_data)1448 static int mpi3mr_issue_iocfacts(struct mpi3mr_softc *sc,
1449     Mpi3IOCFactsData_t *facts_data)
1450 {
1451 	Mpi3IOCFactsRequest_t iocfacts_req;
1452 	bus_dma_tag_t data_tag = NULL;
1453 	bus_dmamap_t data_map = NULL;
1454 	bus_addr_t data_phys = 0;
1455 	void *data = NULL;
1456 	U32 data_len = sizeof(*facts_data);
1457 	int retval = 0;
1458 
1459 	U8 sgl_flags = (MPI3_SGE_FLAGS_ELEMENT_TYPE_SIMPLE |
1460                 	MPI3_SGE_FLAGS_DLAS_SYSTEM |
1461 			MPI3_SGE_FLAGS_END_OF_LIST);
1462 
1463 
1464 	/*
1465 	 * We can't use sc->dma_loaddr here.  We set those only after we get the
1466 	 * iocfacts.  So allocate in the lower 4GB.  The amount of data is tiny
1467 	 * and we don't do this that often, so any bouncing we might have to do
1468 	 * isn't a cause for concern.
1469 	 */
1470         if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,    /* parent */
1471 				4, 0,			/* algnmnt, boundary */
1472 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
1473 				BUS_SPACE_MAXADDR,	/* highaddr */
1474 				NULL, NULL,		/* filter, filterarg */
1475                                 data_len,		/* maxsize */
1476                                 1,			/* nsegments */
1477                                 data_len,		/* maxsegsize */
1478                                 0,			/* flags */
1479                                 NULL, NULL,		/* lockfunc, lockarg */
1480                                 &data_tag)) {
1481 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate request DMA tag\n");
1482 		return (ENOMEM);
1483         }
1484 
1485         if (bus_dmamem_alloc(data_tag, (void **)&data,
1486 	    BUS_DMA_NOWAIT, &data_map)) {
1487 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Func: %s line: %d Data  DMA mem alloc failed\n",
1488 			__func__, __LINE__);
1489 		return (ENOMEM);
1490         }
1491 
1492         bzero(data, data_len);
1493         bus_dmamap_load(data_tag, data_map, data, data_len,
1494 	    mpi3mr_memaddr_cb, &data_phys, BUS_DMA_NOWAIT);
1495 	mpi3mr_dprint(sc, MPI3MR_XINFO, "Func: %s line: %d IOCfacts data phys addr= %#016jx size= %d\n",
1496 	    __func__, __LINE__, (uintmax_t)data_phys, data_len);
1497 
1498 	if (!data)
1499 	{
1500 		retval = -1;
1501 		printf(IOCNAME "Memory alloc for IOCFactsData: failed\n",
1502 		    sc->name);
1503 		goto out;
1504 	}
1505 
1506 	mtx_lock(&sc->init_cmds.completion.lock);
1507 	memset(&iocfacts_req, 0, sizeof(iocfacts_req));
1508 
1509 	if (sc->init_cmds.state & MPI3MR_CMD_PENDING) {
1510 		retval = -1;
1511 		printf(IOCNAME "Issue IOCFacts: Init command is in use\n",
1512 		    sc->name);
1513 		mtx_unlock(&sc->init_cmds.completion.lock);
1514 		goto out;
1515 	}
1516 
1517 	sc->init_cmds.state = MPI3MR_CMD_PENDING;
1518 	sc->init_cmds.is_waiting = 1;
1519 	sc->init_cmds.callback = NULL;
1520 	iocfacts_req.HostTag = (MPI3MR_HOSTTAG_INITCMDS);
1521 	iocfacts_req.Function = MPI3_FUNCTION_IOC_FACTS;
1522 
1523 	mpi3mr_add_sg_single(&iocfacts_req.SGL, sgl_flags, data_len,
1524 	    data_phys);
1525 
1526 	init_completion(&sc->init_cmds.completion);
1527 
1528 	retval = mpi3mr_submit_admin_cmd(sc, &iocfacts_req,
1529 	    sizeof(iocfacts_req));
1530 
1531 	if (retval) {
1532 		printf(IOCNAME "Issue IOCFacts: Admin Post failed\n",
1533 		    sc->name);
1534 		goto out_unlock;
1535 	}
1536 
1537 	wait_for_completion_timeout(&sc->init_cmds.completion,
1538 	    (MPI3MR_INTADMCMD_TIMEOUT));
1539 	if (!(sc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
1540 		printf(IOCNAME "Issue IOCFacts: command timed out\n",
1541 		    sc->name);
1542 		mpi3mr_check_rh_fault_ioc(sc,
1543 		    MPI3MR_RESET_FROM_IOCFACTS_TIMEOUT);
1544 		sc->unrecoverable = 1;
1545 		retval = -1;
1546 		goto out_unlock;
1547 	}
1548 
1549 	if ((sc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
1550 	     != MPI3_IOCSTATUS_SUCCESS ) {
1551 		printf(IOCNAME "Issue IOCFacts: Failed IOCStatus(0x%04x) "
1552 		    " Loginfo(0x%08x) \n" , sc->name,
1553 		    (sc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
1554 		    sc->init_cmds.ioc_loginfo);
1555 		retval = -1;
1556 		goto out_unlock;
1557 	}
1558 
1559 	memcpy(facts_data, (U8 *)data, data_len);
1560 out_unlock:
1561 	sc->init_cmds.state = MPI3MR_CMD_NOTUSED;
1562 	mtx_unlock(&sc->init_cmds.completion.lock);
1563 
1564 out:
1565 	if (data_phys != 0)
1566 		bus_dmamap_unload(data_tag, data_map);
1567 	if (data != NULL)
1568 		bus_dmamem_free(data_tag, data, data_map);
1569 	if (data_tag != NULL)
1570 		bus_dma_tag_destroy(data_tag);
1571 	return retval;
1572 }
1573 
/**
 * mpi3mr_process_factsdata - Process IOC facts data
 * @sc: Adapter instance reference
 * @facts_data: Cached IOC facts data
 *
 * Convert IOC facts data into cpu endianness and cache it in
 * the driver.  Also derives dependent driver limits from the
 * facts: max host I/Os, I/O throttling thresholds, max data
 * length and the DMA low address used for later allocations.
 *
 * Return: 0 (retval is never set to a non-zero value today).
 */
static int mpi3mr_process_factsdata(struct mpi3mr_softc *sc,
    Mpi3IOCFactsData_t *facts_data)
{
	int retval = 0;
	U32 ioc_config, req_sz, facts_flags;
        struct mpi3mr_compimg_ver *fwver;

	/*
	 * Warn (but continue) when the firmware-reported facts length
	 * differs from the driver's structure size (in 4-byte words).
	 * NOTE(review): %ld with sizeof() (size_t) relies on LP64; %zu
	 * would be the portable specifier — confirm before changing.
	 */
	if (le16toh(facts_data->IOCFactsDataLength) !=
	    (sizeof(*facts_data) / 4)) {
		mpi3mr_dprint(sc, MPI3MR_INFO, "IOCFacts data length mismatch "
		    " driver_sz(%ld) firmware_sz(%d) \n",
		    sizeof(*facts_data),
		    facts_data->IOCFactsDataLength);
	}

	/* Operational request entry size is encoded as a power of two. */
	ioc_config = mpi3mr_regread(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET);
        req_sz = 1 << ((ioc_config & MPI3_SYSIF_IOC_CONFIG_OPER_REQ_ENT_SZ) >>
                  MPI3_SYSIF_IOC_CONFIG_OPER_REQ_ENT_SZ_SHIFT);

	/* Warn when the firmware-reported frame size disagrees with hw. */
	if (facts_data->IOCRequestFrameSize != (req_sz/4)) {
		 mpi3mr_dprint(sc, MPI3MR_INFO, "IOCFacts data reqFrameSize mismatch "
		    " hw_size(%d) firmware_sz(%d) \n" , req_sz/4,
		    facts_data->IOCRequestFrameSize);
	}

	memset(&sc->facts, 0, sizeof(sc->facts));

	facts_flags = le32toh(facts_data->Flags);
	sc->facts.op_req_sz = req_sz;
	/* Operational reply entry size is likewise a power of two. */
	sc->op_reply_sz = 1 << ((ioc_config &
                                  MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ) >>
                                  MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ_SHIFT);

	/* Cache the raw facts fields for use throughout the driver. */
	sc->facts.ioc_num = facts_data->IOCNumber;
        sc->facts.who_init = facts_data->WhoInit;
        sc->facts.max_msix_vectors = facts_data->MaxMSIxVectors;
	sc->facts.personality = (facts_flags &
	    MPI3_IOCFACTS_FLAGS_PERSONALITY_MASK);
	/* Number of address bits the controller can decode for DMA. */
	sc->facts.dma_mask = (facts_flags &
	    MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_MASK) >>
	    MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_SHIFT;
        sc->facts.protocol_flags = facts_data->ProtocolFlags;
        sc->facts.mpi_version = (facts_data->MPIVersion.Word);
        sc->facts.max_reqs = (facts_data->MaxOutstandingRequests);
        sc->facts.product_id = (facts_data->ProductID);
	sc->facts.reply_sz = (facts_data->ReplyFrameSize) * 4;
        sc->facts.exceptions = (facts_data->IOCExceptions);
        sc->facts.max_perids = (facts_data->MaxPersistentID);
        sc->facts.max_vds = (facts_data->MaxVDs);
        sc->facts.max_hpds = (facts_data->MaxHostPDs);
        sc->facts.max_advhpds = (facts_data->MaxAdvHostPDs);
        sc->facts.max_raidpds = (facts_data->MaxRAIDPDs);
        sc->facts.max_nvme = (facts_data->MaxNVMe);
        sc->facts.max_pcieswitches =
                (facts_data->MaxPCIeSwitches);
        sc->facts.max_sasexpanders =
                (facts_data->MaxSASExpanders);
        sc->facts.max_data_length = facts_data->MaxDataLength;
        sc->facts.max_sasinitiators =
                (facts_data->MaxSASInitiators);
        sc->facts.max_enclosures = (facts_data->MaxEnclosures);
        sc->facts.min_devhandle = (facts_data->MinDevHandle);
        sc->facts.max_devhandle = (facts_data->MaxDevHandle);
	sc->facts.max_op_req_q =
                (facts_data->MaxOperationalRequestQueues);
	sc->facts.max_op_reply_q =
                (facts_data->MaxOperationalReplyQueues);
        sc->facts.ioc_capabilities =
                (facts_data->IOCCapabilities);
        sc->facts.fw_ver.build_num =
                (facts_data->FWVersion.BuildNum);
        sc->facts.fw_ver.cust_id =
                (facts_data->FWVersion.CustomerID);
        sc->facts.fw_ver.ph_minor = facts_data->FWVersion.PhaseMinor;
        sc->facts.fw_ver.ph_major = facts_data->FWVersion.PhaseMajor;
        sc->facts.fw_ver.gen_minor = facts_data->FWVersion.GenMinor;
        sc->facts.fw_ver.gen_major = facts_data->FWVersion.GenMajor;
	/* Never use more MSI-X vectors than the controller supports. */
        sc->max_msix_vectors = min(sc->max_msix_vectors,
            sc->facts.max_msix_vectors);
        sc->facts.sge_mod_mask = facts_data->SGEModifierMask;
        sc->facts.sge_mod_value = facts_data->SGEModifierValue;
        sc->facts.sge_mod_shift = facts_data->SGEModifierShift;
        sc->facts.shutdown_timeout =
                (facts_data->ShutdownTimeout);
	sc->facts.max_dev_per_tg = facts_data->MaxDevicesPerThrottleGroup;
	sc->facts.io_throttle_data_length =
	    facts_data->IOThrottleDataLength;
	sc->facts.max_io_throttle_group =
	    facts_data->MaxIOThrottleGroup;
	sc->facts.io_throttle_low = facts_data->IOThrottleLow;
	sc->facts.io_throttle_high = facts_data->IOThrottleHigh;

	/* Facts report max data length in 4K units; fall back to default. */
	if (sc->facts.max_data_length == MPI3_IOCFACTS_MAX_DATA_LENGTH_NOT_REPORTED)
		sc->facts.max_data_length = MPI3MR_DEFAULT_MAX_IO_SIZE;
	else
		sc->facts.max_data_length *= MPI3MR_PAGE_SIZE_4K;
	/*Store in 512b block count*/
	if (sc->facts.io_throttle_data_length)
		sc->io_throttle_data_length =
		    (sc->facts.io_throttle_data_length * 2 * 4);
	else
		/* set the length to 1MB + 1K to disable throttle*/
		sc->io_throttle_data_length = MPI3MR_MAX_SECTORS + 2;

	sc->io_throttle_high = (sc->facts.io_throttle_high * 2 * 1024);
	sc->io_throttle_low = (sc->facts.io_throttle_low * 2 * 1024);

	/* Format firmware version string for logging/sysctl consumption. */
	fwver = &sc->facts.fw_ver;
	snprintf(sc->fw_version, sizeof(sc->fw_version),
	    "%d.%d.%d.%d.%05d-%05d",
	    fwver->gen_major, fwver->gen_minor, fwver->ph_major,
	    fwver->ph_minor, fwver->cust_id, fwver->build_num);

	mpi3mr_dprint(sc, MPI3MR_INFO, "ioc_num(%d), maxopQ(%d), maxopRepQ(%d), maxdh(%d),"
            "maxreqs(%d), mindh(%d) maxPDs(%d) maxvectors(%d) maxperids(%d)\n",
	    sc->facts.ioc_num, sc->facts.max_op_req_q,
	    sc->facts.max_op_reply_q, sc->facts.max_devhandle,
            sc->facts.max_reqs, sc->facts.min_devhandle,
            sc->facts.max_pds, sc->facts.max_msix_vectors,
            sc->facts.max_perids);
        mpi3mr_dprint(sc, MPI3MR_INFO, "SGEModMask 0x%x SGEModVal 0x%x SGEModShift 0x%x\n",
            sc->facts.sge_mod_mask, sc->facts.sge_mod_value,
            sc->facts.sge_mod_shift);
	mpi3mr_dprint(sc, MPI3MR_INFO,
	    "max_dev_per_throttle_group(%d), max_throttle_groups(%d), io_throttle_data_len(%dKiB), io_throttle_high(%dMiB), io_throttle_low(%dMiB)\n",
	    sc->facts.max_dev_per_tg, sc->facts.max_io_throttle_group,
	    sc->facts.io_throttle_data_length * 4,
	    sc->facts.io_throttle_high, sc->facts.io_throttle_low);

	/* Reserve slots for internal/admin commands out of the request pool. */
	sc->max_host_ios = sc->facts.max_reqs -
	    (MPI3MR_INTERNALCMDS_RESVD + 1);

	/*
	 * Set the DMA mask for the card.  dma_mask is the number of bits that
	 * can have bits set in them.  Translate this into bus_dma loaddr args.
	 * Add sanity for more bits than address space or other overflow
	 * situations.
	 */
	if (sc->facts.dma_mask == 0 ||
	    (sc->facts.dma_mask >= sizeof(bus_addr_t) * 8))
		sc->dma_loaddr = BUS_SPACE_MAXADDR;
	else
		sc->dma_loaddr = ~((1ull << sc->facts.dma_mask) - 1);
	mpi3mr_dprint(sc, MPI3MR_INFO,
	    "dma_mask bits: %d loaddr 0x%jx\n",
	    sc->facts.dma_mask, sc->dma_loaddr);

	return retval;
}
1733 
mpi3mr_setup_reply_free_queues(struct mpi3mr_softc * sc)1734 static inline void mpi3mr_setup_reply_free_queues(struct mpi3mr_softc *sc)
1735 {
1736 	int i;
1737 	bus_addr_t phys_addr;
1738 
1739 	/* initialize Reply buffer Queue */
1740 	for (i = 0, phys_addr = sc->reply_buf_phys;
1741 	    i < sc->num_reply_bufs; i++, phys_addr += sc->reply_sz)
1742 		sc->reply_free_q[i] = phys_addr;
1743 	sc->reply_free_q[i] = (0);
1744 
1745 	/* initialize Sense Buffer Queue */
1746 	for (i = 0, phys_addr = sc->sense_buf_phys;
1747 	    i < sc->num_sense_bufs; i++, phys_addr += MPI3MR_SENSEBUF_SZ)
1748 		sc->sense_buf_q[i] = phys_addr;
1749 	sc->sense_buf_q[i] = (0);
1750 
1751 }
1752 
/*
 * mpi3mr_reply_dma_alloc - Allocate reply/sense pools and free queues
 * @sc: Adapter instance reference
 *
 * Allocate DMAable memory for four regions, each as one contiguous
 * segment with its own tag/map: the reply buffer pool (16-byte
 * aligned), the reply free queue (8-byte aligned), the sense buffer
 * pool (4-byte aligned) and the sense buffer free queue (8-byte
 * aligned).  The bus address of each region is captured through
 * mpi3mr_memaddr_cb.
 *
 * Return: 0 on success, ENOMEM on any allocation failure.  Resources
 * allocated before a failure are left in place for the caller's error
 * path to release.
 */
static int mpi3mr_reply_dma_alloc(struct mpi3mr_softc *sc)
{
	U32 sz;

	/* One reply buffer per host request plus room for event replies. */
	sc->num_reply_bufs = sc->facts.max_reqs + MPI3MR_NUM_EVTREPLIES;
	/* The free queues carry one extra slot that always stays empty. */
	sc->reply_free_q_sz = sc->num_reply_bufs + 1;
	sc->num_sense_bufs = sc->facts.max_reqs / MPI3MR_SENSEBUF_FACTOR;
	sc->sense_buf_q_sz = sc->num_sense_bufs + 1;

	sz = sc->num_reply_bufs * sc->reply_sz;

	/* Reply buffer pool, 16 byte align */
	if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,  /* parent */
				16, 0,			/* algnmnt, boundary */
				sc->dma_loaddr,		/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				sz,			/* maxsize */
				1,			/* nsegments */
				sz,			/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->reply_buf_tag)) {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate request DMA tag\n");
		return (ENOMEM);
	}

	if (bus_dmamem_alloc(sc->reply_buf_tag, (void **)&sc->reply_buf,
	    BUS_DMA_NOWAIT, &sc->reply_buf_dmamap)) {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "Func: %s line: %d  DMA mem alloc failed\n",
			__func__, __LINE__);
		return (ENOMEM);
	}

	bzero(sc->reply_buf, sz);
	/* Single segment: mpi3mr_memaddr_cb records the one bus address. */
	bus_dmamap_load(sc->reply_buf_tag, sc->reply_buf_dmamap, sc->reply_buf, sz,
	    mpi3mr_memaddr_cb, &sc->reply_buf_phys, BUS_DMA_NOWAIT);

	/* Range used later to validate reply addresses posted by the IOC. */
	sc->reply_buf_dma_min_address = sc->reply_buf_phys;
	sc->reply_buf_dma_max_address = sc->reply_buf_phys + sz;
	mpi3mr_dprint(sc, MPI3MR_XINFO, "reply buf (0x%p): depth(%d), frame_size(%d), "
	    "pool_size(%d kB), reply_buf_dma(0x%llx)\n",
	    sc->reply_buf, sc->num_reply_bufs, sc->reply_sz,
	    (sz / 1024), (unsigned long long)sc->reply_buf_phys);

	/* reply free queue, 8 byte align */
	sz = sc->reply_free_q_sz * 8;

	if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,    /* parent */
				8, 0,			/* algnmnt, boundary */
				sc->dma_loaddr,		/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				sz,			/* maxsize */
				1,			/* nsegments */
				sz,			/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->reply_free_q_tag)) {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate reply free queue DMA tag\n");
		return (ENOMEM);
	}

	if (bus_dmamem_alloc(sc->reply_free_q_tag, (void **)&sc->reply_free_q,
	    BUS_DMA_NOWAIT, &sc->reply_free_q_dmamap)) {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "Func: %s line: %d  DMA mem alloc failed\n",
			__func__, __LINE__);
		return (ENOMEM);
	}

	bzero(sc->reply_free_q, sz);
	bus_dmamap_load(sc->reply_free_q_tag, sc->reply_free_q_dmamap, sc->reply_free_q, sz,
	    mpi3mr_memaddr_cb, &sc->reply_free_q_phys, BUS_DMA_NOWAIT);

	mpi3mr_dprint(sc, MPI3MR_XINFO, "reply_free_q (0x%p): depth(%d), frame_size(%d), "
	    "pool_size(%d kB), reply_free_q_dma(0x%llx)\n",
	    sc->reply_free_q, sc->reply_free_q_sz, 8, (sz / 1024),
	    (unsigned long long)sc->reply_free_q_phys);

	/* sense buffer pool,  4 byte align */
	sz = sc->num_sense_bufs * MPI3MR_SENSEBUF_SZ;

	if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,    /* parent */
				4, 0,			/* algnmnt, boundary */
				sc->dma_loaddr,		/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				sz,			/* maxsize */
				1,			/* nsegments */
				sz,			/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->sense_buf_tag)) {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate Sense buffer DMA tag\n");
		return (ENOMEM);
	}

	if (bus_dmamem_alloc(sc->sense_buf_tag, (void **)&sc->sense_buf,
	    BUS_DMA_NOWAIT, &sc->sense_buf_dmamap)) {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "Func: %s line: %d  DMA mem alloc failed\n",
			__func__, __LINE__);
		return (ENOMEM);
	}

	bzero(sc->sense_buf, sz);
	bus_dmamap_load(sc->sense_buf_tag, sc->sense_buf_dmamap, sc->sense_buf, sz,
	    mpi3mr_memaddr_cb, &sc->sense_buf_phys, BUS_DMA_NOWAIT);

	mpi3mr_dprint(sc, MPI3MR_XINFO, "sense_buf (0x%p): depth(%d), frame_size(%d), "
	    "pool_size(%d kB), sense_dma(0x%llx)\n",
	    sc->sense_buf, sc->num_sense_bufs, MPI3MR_SENSEBUF_SZ,
	    (sz / 1024), (unsigned long long)sc->sense_buf_phys);

	/* sense buffer queue, 8 byte align */
	sz = sc->sense_buf_q_sz * 8;

	if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,    /* parent */
				8, 0,			/* algnmnt, boundary */
				sc->dma_loaddr,		/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				sz,			/* maxsize */
				1,			/* nsegments */
				sz,			/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->sense_buf_q_tag)) {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate Sense buffer Queue DMA tag\n");
		return (ENOMEM);
	}

	if (bus_dmamem_alloc(sc->sense_buf_q_tag, (void **)&sc->sense_buf_q,
	    BUS_DMA_NOWAIT, &sc->sense_buf_q_dmamap)) {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "Func: %s line: %d  DMA mem alloc failed\n",
			__func__, __LINE__);
		return (ENOMEM);
	}

	bzero(sc->sense_buf_q, sz);
	bus_dmamap_load(sc->sense_buf_q_tag, sc->sense_buf_q_dmamap, sc->sense_buf_q, sz,
	    mpi3mr_memaddr_cb, &sc->sense_buf_q_phys, BUS_DMA_NOWAIT);

	mpi3mr_dprint(sc, MPI3MR_XINFO, "sense_buf_q (0x%p): depth(%d), frame_size(%d), "
	    "pool_size(%d kB), sense_dma(0x%llx)\n",
	    sc->sense_buf_q, sc->sense_buf_q_sz, 8, (sz / 1024),
	    (unsigned long long)sc->sense_buf_q_phys);

	return 0;
}
1901 
/*
 * mpi3mr_reply_alloc - Allocate internal command replies and bitmaps
 * @sc: Adapter instance reference
 *
 * Allocate reply buffers for the driver's internal commands (init,
 * cfg, ioctl, task management, device-removal and event-ack command
 * slots), the device handle / removal / event-ack tracking bitmaps,
 * and the DMA pools via mpi3mr_reply_dma_alloc(), then prime the
 * reply and sense free queues.
 *
 * Return: 0 on success, -1 on failure.  On failure interrupts are
 * torn down and all driver memory is freed.
 */
static int mpi3mr_reply_alloc(struct mpi3mr_softc *sc)
{
	int retval = 0;
	U32 i;

	/*
	 * Replies already allocated (e.g. re-initialization after a
	 * controller reset): skip straight to reposting the free queues.
	 */
	if (sc->init_cmds.reply)
		goto post_reply_sbuf;

	sc->init_cmds.reply = malloc(sc->reply_sz,
		M_MPI3MR, M_NOWAIT | M_ZERO);

	if (!sc->init_cmds.reply) {
		printf(IOCNAME "Cannot allocate memory for init_cmds.reply\n",
		    sc->name);
		goto out_failed;
	}

	sc->cfg_cmds.reply = malloc(sc->reply_sz,
		M_MPI3MR, M_NOWAIT | M_ZERO);

	if (!sc->cfg_cmds.reply) {
		printf(IOCNAME "Cannot allocate memory for cfg_cmds.reply\n",
		    sc->name);
		goto out_failed;
	}

	sc->ioctl_cmds.reply = malloc(sc->reply_sz, M_MPI3MR, M_NOWAIT | M_ZERO);
	if (!sc->ioctl_cmds.reply) {
		printf(IOCNAME "Cannot allocate memory for ioctl_cmds.reply\n",
		    sc->name);
		goto out_failed;
	}

	sc->host_tm_cmds.reply = malloc(sc->reply_sz, M_MPI3MR, M_NOWAIT | M_ZERO);
	if (!sc->host_tm_cmds.reply) {
		printf(IOCNAME "Cannot allocate memory for host_tm.reply\n",
		    sc->name);
		goto out_failed;
	}
	/* One reply buffer per device-removal command slot. */
	for (i=0; i<MPI3MR_NUM_DEVRMCMD; i++) {
		sc->dev_rmhs_cmds[i].reply = malloc(sc->reply_sz,
		    M_MPI3MR, M_NOWAIT | M_ZERO);
		if (!sc->dev_rmhs_cmds[i].reply) {
			printf(IOCNAME "Cannot allocate memory for"
			    " dev_rmhs_cmd[%d].reply\n",
			    sc->name, i);
			goto out_failed;
		}
	}

	/* One reply buffer per event-acknowledgement command slot. */
	for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) {
		sc->evtack_cmds[i].reply = malloc(sc->reply_sz,
			M_MPI3MR, M_NOWAIT | M_ZERO);
		if (!sc->evtack_cmds[i].reply)
			goto out_failed;
	}

	/* One bit per possible device handle, rounded up to whole bytes. */
	sc->dev_handle_bitmap_sz = MPI3MR_DIV_ROUND_UP(sc->facts.max_devhandle, 8);

	sc->removepend_bitmap = malloc(sc->dev_handle_bitmap_sz,
	    M_MPI3MR, M_NOWAIT | M_ZERO);
	if (!sc->removepend_bitmap) {
		printf(IOCNAME "Cannot alloc memory for remove pend bitmap\n",
		    sc->name);
		goto out_failed;
	}

	sc->devrem_bitmap_sz = MPI3MR_DIV_ROUND_UP(MPI3MR_NUM_DEVRMCMD, 8);
	sc->devrem_bitmap = malloc(sc->devrem_bitmap_sz,
	    M_MPI3MR, M_NOWAIT | M_ZERO);
	if (!sc->devrem_bitmap) {
		printf(IOCNAME "Cannot alloc memory for dev remove bitmap\n",
		    sc->name);
		goto out_failed;
	}

	sc->evtack_cmds_bitmap_sz = MPI3MR_DIV_ROUND_UP(MPI3MR_NUM_EVTACKCMD, 8);

	sc->evtack_cmds_bitmap = malloc(sc->evtack_cmds_bitmap_sz,
		M_MPI3MR, M_NOWAIT | M_ZERO);
	if (!sc->evtack_cmds_bitmap)
		goto out_failed;

	if (mpi3mr_reply_dma_alloc(sc)) {
		printf(IOCNAME "func:%s line:%d DMA memory allocation failed\n",
		    sc->name, __func__, __LINE__);
		goto out_failed;
	}

post_reply_sbuf:
	mpi3mr_setup_reply_free_queues(sc);
	return retval;
out_failed:
	mpi3mr_cleanup_interrupts(sc);
	mpi3mr_free_mem(sc);
	retval = -1;
	return retval;
}
2000 
2001 static void
mpi3mr_print_fw_pkg_ver(struct mpi3mr_softc * sc)2002 mpi3mr_print_fw_pkg_ver(struct mpi3mr_softc *sc)
2003 {
2004 	int retval = 0;
2005 	void *fw_pkg_ver = NULL;
2006 	bus_dma_tag_t fw_pkg_ver_tag;
2007 	bus_dmamap_t fw_pkg_ver_map;
2008 	bus_addr_t fw_pkg_ver_dma;
2009 	Mpi3CIUploadRequest_t ci_upload;
2010 	Mpi3ComponentImageHeader_t *ci_header;
2011 	U32 fw_pkg_ver_len = sizeof(*ci_header);
2012 	U8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
2013 
2014 	if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,  /* parent */
2015 				4, 0,			/* algnmnt, boundary */
2016 				sc->dma_loaddr,		/* lowaddr */
2017 				BUS_SPACE_MAXADDR,	/* highaddr */
2018 				NULL, NULL,		/* filter, filterarg */
2019 				fw_pkg_ver_len,		/* maxsize */
2020 				1,			/* nsegments */
2021 				fw_pkg_ver_len,		/* maxsegsize */
2022 				0,			/* flags */
2023 				NULL, NULL,		/* lockfunc, lockarg */
2024 				&fw_pkg_ver_tag)) {
2025 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate fw package version request DMA tag\n");
2026 		return;
2027 	}
2028 
2029 	if (bus_dmamem_alloc(fw_pkg_ver_tag, (void **)&fw_pkg_ver, BUS_DMA_NOWAIT, &fw_pkg_ver_map)) {
2030 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Func: %s line: %d fw package version DMA mem alloc failed\n",
2031 			      __func__, __LINE__);
2032 		return;
2033 	}
2034 
2035 	bzero(fw_pkg_ver, fw_pkg_ver_len);
2036 
2037 	bus_dmamap_load(fw_pkg_ver_tag, fw_pkg_ver_map, fw_pkg_ver, fw_pkg_ver_len,
2038 	    mpi3mr_memaddr_cb, &fw_pkg_ver_dma, BUS_DMA_NOWAIT);
2039 
2040 	mpi3mr_dprint(sc, MPI3MR_XINFO, "Func: %s line: %d fw package version phys addr= %#016jx size= %d\n",
2041 		      __func__, __LINE__, (uintmax_t)fw_pkg_ver_dma, fw_pkg_ver_len);
2042 
2043 	if (!fw_pkg_ver) {
2044 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Memory alloc for fw package version failed\n");
2045 		goto out;
2046 	}
2047 
2048 	memset(&ci_upload, 0, sizeof(ci_upload));
2049 	mtx_lock(&sc->init_cmds.completion.lock);
2050 	if (sc->init_cmds.state & MPI3MR_CMD_PENDING) {
2051 		mpi3mr_dprint(sc, MPI3MR_INFO,"Issue CI Header Upload: command is in use\n");
2052 		mtx_unlock(&sc->init_cmds.completion.lock);
2053 		goto out;
2054 	}
2055 	sc->init_cmds.state = MPI3MR_CMD_PENDING;
2056 	sc->init_cmds.is_waiting = 1;
2057 	sc->init_cmds.callback = NULL;
2058 	ci_upload.HostTag = htole16(MPI3MR_HOSTTAG_INITCMDS);
2059 	ci_upload.Function = MPI3_FUNCTION_CI_UPLOAD;
2060 	ci_upload.MsgFlags = MPI3_CI_UPLOAD_MSGFLAGS_LOCATION_PRIMARY;
2061 	ci_upload.ImageOffset = MPI3_IMAGE_HEADER_SIGNATURE0_OFFSET;
2062 	ci_upload.SegmentSize = MPI3_IMAGE_HEADER_SIZE;
2063 
2064 	mpi3mr_add_sg_single(&ci_upload.SGL, sgl_flags, fw_pkg_ver_len,
2065 	    fw_pkg_ver_dma);
2066 
2067 	init_completion(&sc->init_cmds.completion);
2068 	if ((retval = mpi3mr_submit_admin_cmd(sc, &ci_upload, sizeof(ci_upload)))) {
2069 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Issue CI Header Upload: Admin Post failed\n");
2070 		goto out_unlock;
2071 	}
2072 	wait_for_completion_timeout(&sc->init_cmds.completion,
2073 		(MPI3MR_INTADMCMD_TIMEOUT));
2074 	if (!(sc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
2075 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Issue CI Header Upload: command timed out\n");
2076 		sc->init_cmds.is_waiting = 0;
2077 		if (!(sc->init_cmds.state & MPI3MR_CMD_RESET))
2078 			mpi3mr_check_rh_fault_ioc(sc,
2079 				MPI3MR_RESET_FROM_GETPKGVER_TIMEOUT);
2080 		goto out_unlock;
2081 	}
2082 	if ((GET_IOC_STATUS(sc->init_cmds.ioc_status)) != MPI3_IOCSTATUS_SUCCESS) {
2083 		mpi3mr_dprint(sc, MPI3MR_ERROR,
2084 			      "Issue CI Header Upload: Failed IOCStatus(0x%04x) Loginfo(0x%08x)\n",
2085 			      GET_IOC_STATUS(sc->init_cmds.ioc_status), sc->init_cmds.ioc_loginfo);
2086 		goto out_unlock;
2087 	}
2088 
2089 	ci_header = (Mpi3ComponentImageHeader_t *) fw_pkg_ver;
2090 	mpi3mr_dprint(sc, MPI3MR_XINFO,
2091 		      "Issue CI Header Upload:EnvVariableOffset(0x%x) \
2092 		      HeaderSize(0x%x) Signature1(0x%x)\n",
2093 		      ci_header->EnvironmentVariableOffset,
2094 		      ci_header->HeaderSize,
2095 		      ci_header->Signature1);
2096 	mpi3mr_dprint(sc, MPI3MR_INFO, "FW Package Version: %02d.%02d.%02d.%02d\n",
2097 		      ci_header->ComponentImageVersion.GenMajor,
2098 		      ci_header->ComponentImageVersion.GenMinor,
2099 		      ci_header->ComponentImageVersion.PhaseMajor,
2100 		      ci_header->ComponentImageVersion.PhaseMinor);
2101 out_unlock:
2102 	sc->init_cmds.state = MPI3MR_CMD_NOTUSED;
2103 	mtx_unlock(&sc->init_cmds.completion.lock);
2104 
2105 out:
2106 	if (fw_pkg_ver_dma != 0)
2107 		bus_dmamap_unload(fw_pkg_ver_tag, fw_pkg_ver_map);
2108 	if (fw_pkg_ver)
2109 		bus_dmamem_free(fw_pkg_ver_tag, fw_pkg_ver, fw_pkg_ver_map);
2110 	if (fw_pkg_ver_tag)
2111 		bus_dma_tag_destroy(fw_pkg_ver_tag);
2112 
2113 }
2114 
2115 /**
2116  * mpi3mr_issue_iocinit - Send IOC Init
2117  * @sc: Adapter instance reference
2118  *
2119  * Issue IOC Init MPI request through admin queue and wait for
2120  * the completion of it or time out.
2121  *
2122  * Return: 0 on success, non-zero on failures.
2123  */
mpi3mr_issue_iocinit(struct mpi3mr_softc * sc)2124 static int mpi3mr_issue_iocinit(struct mpi3mr_softc *sc)
2125 {
2126 	Mpi3IOCInitRequest_t iocinit_req;
2127 	Mpi3DriverInfoLayout_t *drvr_info = NULL;
2128 	bus_dma_tag_t drvr_info_tag;
2129 	bus_dmamap_t drvr_info_map;
2130 	bus_addr_t drvr_info_phys;
2131 	U32 drvr_info_len = sizeof(*drvr_info);
2132 	int retval = 0;
2133 	struct timeval now;
2134 	uint64_t time_in_msec;
2135 
2136 	if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,  /* parent */
2137 				4, 0,			/* algnmnt, boundary */
2138 				sc->dma_loaddr,		/* lowaddr */
2139 				BUS_SPACE_MAXADDR,	/* highaddr */
2140 				NULL, NULL,		/* filter, filterarg */
2141                                 drvr_info_len,		/* maxsize */
2142                                 1,			/* nsegments */
2143                                 drvr_info_len,		/* maxsegsize */
2144                                 0,			/* flags */
2145                                 NULL, NULL,		/* lockfunc, lockarg */
2146                                 &drvr_info_tag)) {
2147 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate request DMA tag\n");
2148 		return (ENOMEM);
2149         }
2150 
2151 	if (bus_dmamem_alloc(drvr_info_tag, (void **)&drvr_info,
2152 	    BUS_DMA_NOWAIT, &drvr_info_map)) {
2153 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Func: %s line: %d Data  DMA mem alloc failed\n",
2154 			__func__, __LINE__);
2155 		return (ENOMEM);
2156         }
2157 
2158 	bzero(drvr_info, drvr_info_len);
2159         bus_dmamap_load(drvr_info_tag, drvr_info_map, drvr_info, drvr_info_len,
2160 	    mpi3mr_memaddr_cb, &drvr_info_phys, BUS_DMA_NOWAIT);
2161 	mpi3mr_dprint(sc, MPI3MR_XINFO, "Func: %s line: %d IOCfacts drvr_info phys addr= %#016jx size= %d\n",
2162 	    __func__, __LINE__, (uintmax_t)drvr_info_phys, drvr_info_len);
2163 
2164 	if (!drvr_info)
2165 	{
2166 		retval = -1;
2167 		printf(IOCNAME "Memory alloc for Driver Info failed\n",
2168 		    sc->name);
2169 		goto out;
2170 	}
2171 	drvr_info->InformationLength = (drvr_info_len);
2172 	strcpy(drvr_info->DriverSignature, "Broadcom");
2173 	strcpy(drvr_info->OsName, "FreeBSD");
2174 	strcpy(drvr_info->OsVersion, fmt_os_ver);
2175 	strcpy(drvr_info->DriverName, MPI3MR_DRIVER_NAME);
2176 	strcpy(drvr_info->DriverVersion, MPI3MR_DRIVER_VERSION);
2177 	strcpy(drvr_info->DriverReleaseDate, MPI3MR_DRIVER_RELDATE);
2178 	drvr_info->DriverCapabilities = MPI3_IOCINIT_DRIVERCAP_OSEXPOSURE_NO_SPECIAL;
2179 	memcpy((U8 *)&sc->driver_info, (U8 *)drvr_info, sizeof(sc->driver_info));
2180 
2181 	memset(&iocinit_req, 0, sizeof(iocinit_req));
2182 	mtx_lock(&sc->init_cmds.completion.lock);
2183 	if (sc->init_cmds.state & MPI3MR_CMD_PENDING) {
2184 		retval = -1;
2185 		printf(IOCNAME "Issue IOCInit: Init command is in use\n",
2186 		    sc->name);
2187 		mtx_unlock(&sc->init_cmds.completion.lock);
2188 		goto out;
2189 	}
2190 	sc->init_cmds.state = MPI3MR_CMD_PENDING;
2191 	sc->init_cmds.is_waiting = 1;
2192 	sc->init_cmds.callback = NULL;
2193         iocinit_req.HostTag = MPI3MR_HOSTTAG_INITCMDS;
2194         iocinit_req.Function = MPI3_FUNCTION_IOC_INIT;
2195         iocinit_req.MPIVersion.Struct.Dev = MPI3_VERSION_DEV;
2196         iocinit_req.MPIVersion.Struct.Unit = MPI3_VERSION_UNIT;
2197         iocinit_req.MPIVersion.Struct.Major = MPI3_VERSION_MAJOR;
2198         iocinit_req.MPIVersion.Struct.Minor = MPI3_VERSION_MINOR;
2199         iocinit_req.WhoInit = MPI3_WHOINIT_HOST_DRIVER;
2200         iocinit_req.ReplyFreeQueueDepth = sc->reply_free_q_sz;
2201         iocinit_req.ReplyFreeQueueAddress =
2202                 sc->reply_free_q_phys;
2203         iocinit_req.SenseBufferLength = MPI3MR_SENSEBUF_SZ;
2204         iocinit_req.SenseBufferFreeQueueDepth =
2205                 sc->sense_buf_q_sz;
2206         iocinit_req.SenseBufferFreeQueueAddress =
2207                 sc->sense_buf_q_phys;
2208         iocinit_req.DriverInformationAddress = drvr_info_phys;
2209 
2210 	getmicrotime(&now);
2211 	time_in_msec = (now.tv_sec * 1000 + now.tv_usec/1000);
2212 	iocinit_req.TimeStamp = htole64(time_in_msec);
2213 
2214 	iocinit_req.MsgFlags |= MPI3_IOCINIT_MSGFLAGS_WRITESAMEDIVERT_SUPPORTED;
2215 
2216 	init_completion(&sc->init_cmds.completion);
2217 	retval = mpi3mr_submit_admin_cmd(sc, &iocinit_req,
2218 	    sizeof(iocinit_req));
2219 
2220 	if (retval) {
2221 		printf(IOCNAME "Issue IOCInit: Admin Post failed\n",
2222 		    sc->name);
2223 		goto out_unlock;
2224 	}
2225 
2226 	wait_for_completion_timeout(&sc->init_cmds.completion,
2227 	    (MPI3MR_INTADMCMD_TIMEOUT));
2228 	if (!(sc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
2229 		printf(IOCNAME "Issue IOCInit: command timed out\n",
2230 		    sc->name);
2231 		mpi3mr_check_rh_fault_ioc(sc,
2232 		    MPI3MR_RESET_FROM_IOCINIT_TIMEOUT);
2233 		sc->unrecoverable = 1;
2234 		retval = -1;
2235 		goto out_unlock;
2236 	}
2237 
2238 	if ((sc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
2239 	     != MPI3_IOCSTATUS_SUCCESS ) {
2240 		printf(IOCNAME "Issue IOCInit: Failed IOCStatus(0x%04x) "
2241 		    " Loginfo(0x%08x) \n" , sc->name,
2242 		    (sc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
2243 		    sc->init_cmds.ioc_loginfo);
2244 		retval = -1;
2245 		goto out_unlock;
2246 	}
2247 
2248 out_unlock:
2249 	sc->init_cmds.state = MPI3MR_CMD_NOTUSED;
2250 	mtx_unlock(&sc->init_cmds.completion.lock);
2251 
2252 out:
2253 	if (drvr_info_phys != 0)
2254 		bus_dmamap_unload(drvr_info_tag, drvr_info_map);
2255 	if (drvr_info != NULL)
2256 		bus_dmamem_free(drvr_info_tag, drvr_info, drvr_info_map);
2257 	if (drvr_info_tag != NULL)
2258 		bus_dma_tag_destroy(drvr_info_tag);
2259 	return retval;
2260 }
2261 
2262 static void
mpi3mr_display_ioc_info(struct mpi3mr_softc * sc)2263 mpi3mr_display_ioc_info(struct mpi3mr_softc *sc)
2264 {
2265         int i = 0;
2266         char personality[16];
2267 
2268         switch (sc->facts.personality) {
2269         case MPI3_IOCFACTS_FLAGS_PERSONALITY_EHBA:
2270                 strcpy(personality, "Enhanced HBA");
2271                 break;
2272         case MPI3_IOCFACTS_FLAGS_PERSONALITY_RAID_DDR:
2273                 strcpy(personality, "RAID");
2274                 break;
2275         default:
2276                 strcpy(personality, "Unknown");
2277                 break;
2278         }
2279 
2280 	mpi3mr_dprint(sc, MPI3MR_INFO, "Current Personality: %s\n", personality);
2281 
2282 	mpi3mr_dprint(sc, MPI3MR_INFO, "%s\n", sc->fw_version);
2283 
2284         mpi3mr_dprint(sc, MPI3MR_INFO, "Protocol=(");
2285 
2286         if (sc->facts.protocol_flags &
2287             MPI3_IOCFACTS_PROTOCOL_SCSI_INITIATOR) {
2288                 printf("Initiator");
2289                 i++;
2290         }
2291 
2292         if (sc->facts.protocol_flags &
2293             MPI3_IOCFACTS_PROTOCOL_SCSI_TARGET) {
2294                 printf("%sTarget", i ? "," : "");
2295                 i++;
2296         }
2297 
2298         if (sc->facts.protocol_flags &
2299             MPI3_IOCFACTS_PROTOCOL_NVME) {
2300                 printf("%sNVMe attachment", i ? "," : "");
2301                 i++;
2302         }
2303         i = 0;
2304         printf("), ");
2305         printf("Capabilities=(");
2306 
2307         if (sc->facts.ioc_capabilities &
2308 	    MPI3_IOCFACTS_CAPABILITY_RAID_SUPPORTED) {
2309                 printf("RAID");
2310                 i++;
2311         }
2312 
2313         printf(")\n");
2314 }
2315 
2316 /**
2317  * mpi3mr_unmask_events - Unmask events in event mask bitmap
2318  * @sc: Adapter instance reference
2319  * @event: MPI event ID
2320  *
 * Unmask the specific event by clearing its bit in the event_mask
 * bitmap.
2323  *
2324  * Return: None.
2325  */
static void mpi3mr_unmask_events(struct mpi3mr_softc *sc, U16 event)
{
	/* Only events 0..127 fit in the four 32-bit event mask words. */
	if (event >= 128)
		return;

	/*
	 * Clear the event's bit in the word that holds it.  Use an
	 * unsigned constant: (1 << 31) left-shifts into the sign bit of
	 * a signed int, which is undefined behavior in C.
	 */
	sc->event_masks[event / 32] &= ~(1U << (event % 32));
}
2344 
mpi3mr_set_events_mask(struct mpi3mr_softc * sc)2345 static void mpi3mr_set_events_mask(struct mpi3mr_softc *sc)
2346 {
2347 	int i;
2348 	for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
2349 		sc->event_masks[i] = -1;
2350 
2351         mpi3mr_unmask_events(sc, MPI3_EVENT_DEVICE_ADDED);
2352         mpi3mr_unmask_events(sc, MPI3_EVENT_DEVICE_INFO_CHANGED);
2353         mpi3mr_unmask_events(sc, MPI3_EVENT_DEVICE_STATUS_CHANGE);
2354 
2355         mpi3mr_unmask_events(sc, MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE);
2356 
2357         mpi3mr_unmask_events(sc, MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
2358         mpi3mr_unmask_events(sc, MPI3_EVENT_SAS_DISCOVERY);
2359         mpi3mr_unmask_events(sc, MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR);
2360         mpi3mr_unmask_events(sc, MPI3_EVENT_SAS_BROADCAST_PRIMITIVE);
2361 
2362         mpi3mr_unmask_events(sc, MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST);
2363         mpi3mr_unmask_events(sc, MPI3_EVENT_PCIE_ENUMERATION);
2364 
2365         mpi3mr_unmask_events(sc, MPI3_EVENT_PREPARE_FOR_RESET);
2366         mpi3mr_unmask_events(sc, MPI3_EVENT_CABLE_MGMT);
2367         mpi3mr_unmask_events(sc, MPI3_EVENT_ENERGY_PACK_CHANGE);
2368 }
2369 
2370 /**
2371  * mpi3mr_issue_event_notification - Send event notification
2372  * @sc: Adapter instance reference
2373  *
2374  * Issue event notification MPI request through admin queue and
2375  * wait for the completion of it or time out.
2376  *
2377  * Return: 0 on success, non-zero on failures.
2378  */
int mpi3mr_issue_event_notification(struct mpi3mr_softc *sc)
{
	Mpi3EventNotificationRequest_t evtnotify_req;
	int retval = 0;
	U8 i;

	memset(&evtnotify_req, 0, sizeof(evtnotify_req));
	/* Serialize use of the shared init_cmds slot. */
	mtx_lock(&sc->init_cmds.completion.lock);
	if (sc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		printf(IOCNAME "Issue EvtNotify: Init command is in use\n",
		    sc->name);
		mtx_unlock(&sc->init_cmds.completion.lock);
		goto out;
	}
	sc->init_cmds.state = MPI3MR_CMD_PENDING;
	sc->init_cmds.is_waiting = 1;
	sc->init_cmds.callback = NULL;
	evtnotify_req.HostTag = (MPI3MR_HOSTTAG_INITCMDS);
	evtnotify_req.Function = MPI3_FUNCTION_EVENT_NOTIFICATION;
	/* Copy the mask built by mpi3mr_set_events_mask() into the request. */
	for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
		evtnotify_req.EventMasks[i] =
		    (sc->event_masks[i]);
	init_completion(&sc->init_cmds.completion);
	retval = mpi3mr_submit_admin_cmd(sc, &evtnotify_req,
	    sizeof(evtnotify_req));
	if (retval) {
		printf(IOCNAME "Issue EvtNotify: Admin Post failed\n",
		    sc->name);
		goto out_unlock;
	}

	/*
	 * Poll (rather than sleep) for completion: this may be called
	 * before interrupt delivery is fully operational.
	 */
	poll_for_command_completion(sc,
				    &sc->init_cmds,
				    (MPI3MR_INTADMCMD_TIMEOUT));
	if (!(sc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		printf(IOCNAME "Issue EvtNotify: command timed out\n",
		    sc->name);
		mpi3mr_check_rh_fault_ioc(sc,
		    MPI3MR_RESET_FROM_EVTNOTIFY_TIMEOUT);
		retval = -1;
		goto out_unlock;
	}

	if ((sc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	     != MPI3_IOCSTATUS_SUCCESS ) {
		printf(IOCNAME "Issue EvtNotify: Failed IOCStatus(0x%04x) "
		    " Loginfo(0x%08x) \n" , sc->name,
		    (sc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    sc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}

out_unlock:
	/* Release the init_cmds slot for the next internal command. */
	sc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mtx_unlock(&sc->init_cmds.completion.lock);

out:
	return retval;
}
2440 
2441 int
mpi3mr_register_events(struct mpi3mr_softc * sc)2442 mpi3mr_register_events(struct mpi3mr_softc *sc)
2443 {
2444 	int error;
2445 
2446 	mpi3mr_set_events_mask(sc);
2447 
2448 	error = mpi3mr_issue_event_notification(sc);
2449 
2450 	if (error) {
2451 		printf(IOCNAME "Failed to issue event notification %d\n",
2452 		    sc->name, error);
2453 	}
2454 
2455 	return error;
2456 }
2457 
2458 /**
2459  * mpi3mr_process_event_ack - Process event acknowledgment
2460  * @sc: Adapter instance reference
2461  * @event: MPI3 event ID
2462  * @event_ctx: Event context
2463  *
2464  * Send event acknowledgement through admin queue and wait for
2465  * it to complete.
2466  *
2467  * Return: 0 on success, non-zero on failures.
2468  */
int mpi3mr_process_event_ack(struct mpi3mr_softc *sc, U8 event,
	U32 event_ctx)
{
	Mpi3EventAckRequest_t evtack_req;
	int retval = 0;

	memset(&evtack_req, 0, sizeof(evtack_req));
	/* Serialize use of the shared init_cmds slot. */
	mtx_lock(&sc->init_cmds.completion.lock);
	if (sc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		printf(IOCNAME "Issue EvtAck: Init command is in use\n",
		    sc->name);
		mtx_unlock(&sc->init_cmds.completion.lock);
		goto out;
	}
	sc->init_cmds.state = MPI3MR_CMD_PENDING;
	sc->init_cmds.is_waiting = 1;
	sc->init_cmds.callback = NULL;
	evtack_req.HostTag = htole16(MPI3MR_HOSTTAG_INITCMDS);
	evtack_req.Function = MPI3_FUNCTION_EVENT_ACK;
	/* Echo the event and its context back to the IOC. */
	evtack_req.Event = event;
	evtack_req.EventContext = htole32(event_ctx);

	init_completion(&sc->init_cmds.completion);
	retval = mpi3mr_submit_admin_cmd(sc, &evtack_req,
	    sizeof(evtack_req));
	if (retval) {
		printf(IOCNAME "Issue EvtAck: Admin Post failed\n",
		    sc->name);
		goto out_unlock;
	}

	/* Sleep until the reply arrives or the timeout expires. */
	wait_for_completion_timeout(&sc->init_cmds.completion,
	    (MPI3MR_INTADMCMD_TIMEOUT));
	if (!(sc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		printf(IOCNAME "Issue EvtAck: command timed out\n",
		    sc->name);
		retval = -1;
		goto out_unlock;
	}

	if ((sc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	     != MPI3_IOCSTATUS_SUCCESS ) {
		printf(IOCNAME "Issue EvtAck: Failed IOCStatus(0x%04x) "
		    " Loginfo(0x%08x) \n" , sc->name,
		    (sc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    sc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}

out_unlock:
	/* Release the init_cmds slot for the next internal command. */
	sc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mtx_unlock(&sc->init_cmds.completion.lock);

out:
	return retval;
}
2527 
2528 
mpi3mr_alloc_chain_bufs(struct mpi3mr_softc * sc)2529 static int mpi3mr_alloc_chain_bufs(struct mpi3mr_softc *sc)
2530 {
2531 	int retval = 0;
2532 	U32 sz, i;
2533 	U16 num_chains;
2534 
2535 	num_chains = sc->max_host_ios;
2536 
2537 	sc->chain_buf_count = num_chains;
2538 	sz = sizeof(struct mpi3mr_chain) * num_chains;
2539 
2540 	sc->chain_sgl_list = malloc(sz, M_MPI3MR, M_NOWAIT | M_ZERO);
2541 
2542 	if (!sc->chain_sgl_list) {
2543 		printf(IOCNAME "Cannot allocate memory for chain SGL list\n",
2544 		    sc->name);
2545 		retval = -1;
2546 		goto out_failed;
2547 	}
2548 
2549 	if (sc->max_sgl_entries > sc->facts.max_data_length / PAGE_SIZE)
2550 		sc->max_sgl_entries = sc->facts.max_data_length / PAGE_SIZE;
2551 	sz = sc->max_sgl_entries * sizeof(Mpi3SGESimple_t);
2552 
2553         if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,  /* parent */
2554 				4096, 0,		/* algnmnt, boundary */
2555 				sc->dma_loaddr,		/* lowaddr */
2556 				BUS_SPACE_MAXADDR,	/* highaddr */
2557 				NULL, NULL,		/* filter, filterarg */
2558                                 sz,			/* maxsize */
2559                                 1,			/* nsegments */
2560                                 sz,			/* maxsegsize */
2561                                 0,			/* flags */
2562                                 NULL, NULL,		/* lockfunc, lockarg */
2563                                 &sc->chain_sgl_list_tag)) {
2564 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate Chain buffer DMA tag\n");
2565 		return (ENOMEM);
2566         }
2567 
2568 	for (i = 0; i < num_chains; i++) {
2569 		if (bus_dmamem_alloc(sc->chain_sgl_list_tag, (void **)&sc->chain_sgl_list[i].buf,
2570 		    BUS_DMA_NOWAIT, &sc->chain_sgl_list[i].buf_dmamap)) {
2571 			mpi3mr_dprint(sc, MPI3MR_ERROR, "Func: %s line: %d  DMA mem alloc failed\n",
2572 				__func__, __LINE__);
2573 			return (ENOMEM);
2574 		}
2575 
2576 		bzero(sc->chain_sgl_list[i].buf, sz);
2577 		bus_dmamap_load(sc->chain_sgl_list_tag, sc->chain_sgl_list[i].buf_dmamap, sc->chain_sgl_list[i].buf, sz,
2578 		    mpi3mr_memaddr_cb, &sc->chain_sgl_list[i].buf_phys, BUS_DMA_NOWAIT);
2579 		mpi3mr_dprint(sc, MPI3MR_XINFO, "Func: %s line: %d phys addr= %#016jx size= %d\n",
2580 		    __func__, __LINE__, (uintmax_t)sc->chain_sgl_list[i].buf_phys, sz);
2581 	}
2582 
2583 	sc->chain_bitmap_sz = MPI3MR_DIV_ROUND_UP(num_chains, 8);
2584 
2585 	sc->chain_bitmap = malloc(sc->chain_bitmap_sz, M_MPI3MR, M_NOWAIT | M_ZERO);
2586 	if (!sc->chain_bitmap) {
2587 		mpi3mr_dprint(sc, MPI3MR_INFO, "Cannot alloc memory for chain bitmap\n");
2588 		retval = -1;
2589 		goto out_failed;
2590 	}
2591 	return retval;
2592 
2593 out_failed:
2594 	for (i = 0; i < num_chains; i++) {
2595 		if (sc->chain_sgl_list[i].buf_phys != 0)
2596 			bus_dmamap_unload(sc->chain_sgl_list_tag, sc->chain_sgl_list[i].buf_dmamap);
2597 		if (sc->chain_sgl_list[i].buf != NULL)
2598 			bus_dmamem_free(sc->chain_sgl_list_tag, sc->chain_sgl_list[i].buf, sc->chain_sgl_list[i].buf_dmamap);
2599 	}
2600 	if (sc->chain_sgl_list_tag != NULL)
2601 		bus_dma_tag_destroy(sc->chain_sgl_list_tag);
2602 	return retval;
2603 }
2604 
mpi3mr_pel_alloc(struct mpi3mr_softc * sc)2605 static int mpi3mr_pel_alloc(struct mpi3mr_softc *sc)
2606 {
2607 	int retval = 0;
2608 
2609 	if (!sc->pel_cmds.reply) {
2610 		sc->pel_cmds.reply = malloc(sc->reply_sz, M_MPI3MR, M_NOWAIT | M_ZERO);
2611 		if (!sc->pel_cmds.reply) {
2612 			printf(IOCNAME "Cannot allocate memory for pel_cmds.reply\n",
2613 			    sc->name);
2614 			goto out_failed;
2615 		}
2616 	}
2617 
2618 	if (!sc->pel_abort_cmd.reply) {
2619 		sc->pel_abort_cmd.reply = malloc(sc->reply_sz, M_MPI3MR, M_NOWAIT | M_ZERO);
2620 		if (!sc->pel_abort_cmd.reply) {
2621 			printf(IOCNAME "Cannot allocate memory for pel_abort_cmd.reply\n",
2622 			    sc->name);
2623 			goto out_failed;
2624 		}
2625 	}
2626 
2627 	if (!sc->pel_seq_number) {
2628 		sc->pel_seq_number_sz = sizeof(Mpi3PELSeq_t);
2629 		if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,   /* parent */
2630 				 4, 0,                           /* alignment, boundary */
2631 				 sc->dma_loaddr,	         /* lowaddr */
2632 				 BUS_SPACE_MAXADDR,		 /* highaddr */
2633 				 NULL, NULL,                     /* filter, filterarg */
2634 				 sc->pel_seq_number_sz,		 /* maxsize */
2635 				 1,                              /* nsegments */
2636 				 sc->pel_seq_number_sz,          /* maxsegsize */
2637 				 0,                              /* flags */
2638 				 NULL, NULL,                     /* lockfunc, lockarg */
2639 				 &sc->pel_seq_num_dmatag)) {
2640 			 mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot create PEL seq number dma memory tag\n");
2641 			 retval = -ENOMEM;
2642 			 goto out_failed;
2643 		}
2644 
2645 		if (bus_dmamem_alloc(sc->pel_seq_num_dmatag, (void **)&sc->pel_seq_number,
2646 		    BUS_DMA_NOWAIT, &sc->pel_seq_num_dmamap)) {
2647 			mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate PEL seq number kernel buffer dma memory\n");
2648 			retval = -ENOMEM;
2649 			goto out_failed;
2650 		}
2651 
2652 		bzero(sc->pel_seq_number, sc->pel_seq_number_sz);
2653 
2654 		bus_dmamap_load(sc->pel_seq_num_dmatag, sc->pel_seq_num_dmamap, sc->pel_seq_number,
2655 		    sc->pel_seq_number_sz, mpi3mr_memaddr_cb, &sc->pel_seq_number_dma, BUS_DMA_NOWAIT);
2656 
2657 		if (!sc->pel_seq_number) {
2658 			printf(IOCNAME "%s:%d Cannot load PEL seq number dma memory for size: %d\n", sc->name,
2659 				__func__, __LINE__, sc->pel_seq_number_sz);
2660 			retval = -ENOMEM;
2661 			goto out_failed;
2662 		}
2663 	}
2664 
2665 out_failed:
2666 	return retval;
2667 }
2668 
2669 /**
2670  * mpi3mr_validate_fw_update - validate IOCFacts post adapter reset
2671  * @sc: Adapter instance reference
2672  *
2673  * Return zero if the new IOCFacts is compatible with previous values
2674  * else return appropriate error
2675  */
2676 static int
mpi3mr_validate_fw_update(struct mpi3mr_softc * sc)2677 mpi3mr_validate_fw_update(struct mpi3mr_softc *sc)
2678 {
2679 	U16 dev_handle_bitmap_sz;
2680 	U8 *removepend_bitmap;
2681 
2682 	if (sc->facts.reply_sz > sc->reply_sz) {
2683 		mpi3mr_dprint(sc, MPI3MR_ERROR,
2684 		    "Cannot increase reply size from %d to %d\n",
2685 		    sc->reply_sz, sc->reply_sz);
2686 		return -EPERM;
2687 	}
2688 
2689 	if (sc->num_io_throttle_group != sc->facts.max_io_throttle_group) {
2690 		mpi3mr_dprint(sc, MPI3MR_ERROR,
2691 		    "max io throttle group doesn't match old(%d), new(%d)\n",
2692 		    sc->num_io_throttle_group,
2693 		    sc->facts.max_io_throttle_group);
2694 		return -EPERM;
2695 	}
2696 
2697 	if (sc->facts.max_op_reply_q < sc->num_queues) {
2698 		mpi3mr_dprint(sc, MPI3MR_ERROR,
2699 		    "Cannot reduce number of operational reply queues from %d to %d\n",
2700 		    sc->num_queues,
2701 		    sc->facts.max_op_reply_q);
2702 		return -EPERM;
2703 	}
2704 
2705 	if (sc->facts.max_op_req_q < sc->num_queues) {
2706 		mpi3mr_dprint(sc, MPI3MR_ERROR,
2707 		    "Cannot reduce number of operational request queues from %d to %d\n",
2708 		    sc->num_queues, sc->facts.max_op_req_q);
2709 		return -EPERM;
2710 	}
2711 
2712 	dev_handle_bitmap_sz = MPI3MR_DIV_ROUND_UP(sc->facts.max_devhandle, 8);
2713 
2714 	if (dev_handle_bitmap_sz > sc->dev_handle_bitmap_sz) {
2715 		removepend_bitmap = realloc(sc->removepend_bitmap,
2716 		    dev_handle_bitmap_sz, M_MPI3MR, M_NOWAIT);
2717 
2718 		if (!removepend_bitmap) {
2719 			mpi3mr_dprint(sc, MPI3MR_ERROR,
2720 			    "failed to increase removepend_bitmap sz from: %d to %d\n",
2721 			    sc->dev_handle_bitmap_sz, dev_handle_bitmap_sz);
2722 			return -ENOMEM;
2723 		}
2724 
2725 		memset(removepend_bitmap + sc->dev_handle_bitmap_sz, 0,
2726 		    dev_handle_bitmap_sz - sc->dev_handle_bitmap_sz);
2727 		sc->removepend_bitmap = removepend_bitmap;
2728 		mpi3mr_dprint(sc, MPI3MR_INFO,
2729 		    "increased dev_handle_bitmap_sz from %d to %d\n",
2730 		    sc->dev_handle_bitmap_sz, dev_handle_bitmap_sz);
2731 		sc->dev_handle_bitmap_sz = dev_handle_bitmap_sz;
2732 	}
2733 
2734 	return 0;
2735 }
2736 
2737 /*
2738  * mpi3mr_initialize_ioc - Controller initialization
2739  * @dev: pointer to device struct
2740  *
2741  * This function allocates the controller wide resources and brings
2742  * the controller to operational state
2743  *
2744  * Return: 0 on success and proper error codes on failure
2745  */
int mpi3mr_initialize_ioc(struct mpi3mr_softc *sc, U8 init_type)
{
	int retval = 0;
	enum mpi3mr_iocstate ioc_state;
	U64 ioc_info, start_ticks = 0;
	U32 ioc_status, ioc_control, i, timeout;
	Mpi3IOCFactsData_t facts_data;
	char str[32];
	U32 size;
	U8 retry = 0;

	sc->cpu_count = mp_ncpus;

retry_init:
	/* Snapshot controller state registers before deciding how to proceed. */
	ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);
	ioc_control = mpi3mr_regread(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET);
	ioc_info = mpi3mr_regread64(sc, MPI3_SYSIF_IOC_INFO_LOW_OFFSET);

	mpi3mr_dprint(sc, MPI3MR_INFO, "SOD ioc_status: 0x%x ioc_control: 0x%x "
	    "ioc_info: 0x%lx\n", ioc_status, ioc_control, ioc_info);

	/*The timeout value is in 2sec unit, changing it to seconds*/
	sc->ready_timeout =
                ((ioc_info & MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_MASK) >>
                    MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_SHIFT) * 2;

	ioc_state = mpi3mr_get_iocstate(sc);
	mpi3mr_dprint(sc, MPI3MR_INFO, "IOC state: %s   IOC ready timeout: %d\n",
	    mpi3mr_iocstate_name(ioc_state), sc->ready_timeout);

	/* Poll in 100ms steps until the IOC leaves any transitional state. */
	timeout = sc->ready_timeout * 10;
	do {
		ioc_state = mpi3mr_get_iocstate(sc);

		if (ioc_state != MRIOC_STATE_BECOMING_READY &&
		    ioc_state != MRIOC_STATE_RESET_REQUESTED)
			break;

		DELAY(1000 * 100);
	} while (--timeout);

	/* A READY controller is message-unit reset first to start clean. */
	if (ioc_state == MRIOC_STATE_READY) {
                retval = mpi3mr_mur_ioc(sc, MPI3MR_RESET_FROM_BRINGUP);
                if (retval) {
                        mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to MU reset IOC, error 0x%x\n",
                                retval);
                }
                ioc_state = mpi3mr_get_iocstate(sc);
        }

        if (ioc_state != MRIOC_STATE_RESET) {
		if (ioc_state == MRIOC_STATE_FAULT) {
			mpi3mr_print_fault_info(sc);

			U32 fault = mpi3mr_regread(sc, MPI3_SYSIF_FAULT_OFFSET) &
						   MPI3_SYSIF_FAULT_CODE_MASK;
			/* Power faults are not recoverable by resetting. */
			if (fault == MPI3_SYSIF_FAULT_CODE_INSUFFICIENT_PCI_SLOT_POWER) {
				mpi3mr_dprint(sc, MPI3MR_INFO,
					      "controller faulted due to insufficient power, try by connecting it in a different slot\n");
				goto err;
			}

			/* Let any in-progress firmware diag save finish first. */
			U32 host_diagnostic;
			timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10;
			do {
				host_diagnostic = mpi3mr_regread(sc, MPI3_SYSIF_HOST_DIAG_OFFSET);
				if (!(host_diagnostic & MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS))
					break;
				DELAY(100 * 1000);
			} while (--timeout);
		}
		mpi3mr_dprint(sc, MPI3MR_ERROR, "issuing soft reset to bring to reset state\n");
		retval = mpi3mr_issue_reset(sc,
		     MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET,
		     MPI3MR_RESET_FROM_BRINGUP);
		if (retval) {
			mpi3mr_dprint(sc, MPI3MR_ERROR,
			    "%s :Failed to soft reset IOC, error 0x%d\n",
			    __func__, retval);
			goto err_retry;
		}
	}

	ioc_state = mpi3mr_get_iocstate(sc);

        if (ioc_state != MRIOC_STATE_RESET) {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot bring IOC to reset state\n");
		goto err_retry;
        }

	/* From here on the controller is in RESET; build it back up. */
	retval = mpi3mr_setup_admin_qpair(sc);
	if (retval) {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to setup Admin queues, error 0x%x\n",
		    retval);
		if (retval == ENOMEM)
			goto err;
		goto err_retry;
	}

	retval = mpi3mr_bring_ioc_ready(sc, &start_ticks);
	if (retval) {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to bring IOC ready, error 0x%x\n", retval);
		if (retval == EAGAIN)
			goto err_retry;
		goto err;
	}


	/* First-time init: a single interrupt vector is enough until IOCFacts. */
	if (init_type == MPI3MR_INIT_TYPE_INIT) {
		retval = mpi3mr_alloc_interrupts(sc, 1);
		if (retval) {
			mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to allocate interrupts, error 0x%x\n",
			    retval);
			goto err;
		}

		retval = mpi3mr_setup_irqs(sc);
		if (retval) {
			mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to setup ISR, error 0x%x\n",
			    retval);
			goto err;
		}
	}

	mpi3mr_enable_interrupts(sc);

	/* First-time init: create all driver locks and command tracker state. */
	if (init_type == MPI3MR_INIT_TYPE_INIT) {
		mtx_init(&sc->mpi3mr_mtx, "SIM lock", NULL, MTX_DEF);
		mtx_init(&sc->io_lock, "IO lock", NULL, MTX_DEF);
		mtx_init(&sc->admin_req_lock, "Admin Request Queue lock", NULL, MTX_SPIN);
		mtx_init(&sc->reply_free_q_lock, "Reply free Queue lock", NULL, MTX_SPIN);
		mtx_init(&sc->sense_buf_q_lock, "Sense buffer Queue lock", NULL, MTX_SPIN);
		mtx_init(&sc->chain_buf_lock, "Chain buffer lock", NULL, MTX_SPIN);
		mtx_init(&sc->cmd_pool_lock, "Command pool lock", NULL, MTX_DEF);
		mtx_init(&sc->fwevt_lock, "Firmware Event lock", NULL, MTX_DEF);
		mtx_init(&sc->target_lock, "Target lock", NULL, MTX_SPIN);
		mtx_init(&sc->reset_mutex, "Reset lock", NULL, MTX_DEF);

		mtx_init(&sc->init_cmds.completion.lock, "Init commands lock", NULL, MTX_DEF);
		sc->init_cmds.reply = NULL;
		sc->init_cmds.state = MPI3MR_CMD_NOTUSED;
		sc->init_cmds.dev_handle = MPI3MR_INVALID_DEV_HANDLE;
		sc->init_cmds.host_tag = MPI3MR_HOSTTAG_INITCMDS;

		mtx_init(&sc->cfg_cmds.completion.lock, "CFG commands lock", NULL, MTX_DEF);
		sc->cfg_cmds.reply = NULL;
		sc->cfg_cmds.state = MPI3MR_CMD_NOTUSED;
		sc->cfg_cmds.dev_handle = MPI3MR_INVALID_DEV_HANDLE;
		sc->cfg_cmds.host_tag = MPI3MR_HOSTTAG_CFGCMDS;

		mtx_init(&sc->ioctl_cmds.completion.lock, "IOCTL commands lock", NULL, MTX_DEF);
		sc->ioctl_cmds.reply = NULL;
		sc->ioctl_cmds.state = MPI3MR_CMD_NOTUSED;
		sc->ioctl_cmds.dev_handle = MPI3MR_INVALID_DEV_HANDLE;
		sc->ioctl_cmds.host_tag = MPI3MR_HOSTTAG_IOCTLCMDS;

		mtx_init(&sc->pel_abort_cmd.completion.lock, "PEL Abort command lock", NULL, MTX_DEF);
		sc->pel_abort_cmd.reply = NULL;
		sc->pel_abort_cmd.state = MPI3MR_CMD_NOTUSED;
		sc->pel_abort_cmd.dev_handle = MPI3MR_INVALID_DEV_HANDLE;
		sc->pel_abort_cmd.host_tag = MPI3MR_HOSTTAG_PELABORT;

		mtx_init(&sc->host_tm_cmds.completion.lock, "TM commands lock", NULL, MTX_DEF);
		sc->host_tm_cmds.reply = NULL;
		sc->host_tm_cmds.state = MPI3MR_CMD_NOTUSED;
		sc->host_tm_cmds.dev_handle = MPI3MR_INVALID_DEV_HANDLE;
		sc->host_tm_cmds.host_tag = MPI3MR_HOSTTAG_TMS;

		TAILQ_INIT(&sc->cmd_list_head);
		TAILQ_INIT(&sc->event_list);
		TAILQ_INIT(&sc->delayed_rmhs_list);
		TAILQ_INIT(&sc->delayed_evtack_cmds_list);

		/* Each device-removal handshake command gets its own lock/tag. */
		for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
			snprintf(str, 32, "Dev REMHS commands lock[%d]", i);
			mtx_init(&sc->dev_rmhs_cmds[i].completion.lock, str, NULL, MTX_DEF);
			sc->dev_rmhs_cmds[i].reply = NULL;
			sc->dev_rmhs_cmds[i].state = MPI3MR_CMD_NOTUSED;
			sc->dev_rmhs_cmds[i].dev_handle = MPI3MR_INVALID_DEV_HANDLE;
			sc->dev_rmhs_cmds[i].host_tag = MPI3MR_HOSTTAG_DEVRMCMD_MIN
							    + i;
		}
	}

	retval = mpi3mr_issue_iocfacts(sc, &facts_data);
	if (retval) {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to Issue IOC Facts, error: 0x%x\n",
		    retval);
		if (retval == ENOMEM)
			goto err;
		goto err_retry;
	}

	retval = mpi3mr_process_factsdata(sc, &facts_data);
	if (retval) {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "IOC Facts data processing failed, error: 0x%x\n",
		    retval);
		goto err_retry;
	}

	sc->num_io_throttle_group = sc->facts.max_io_throttle_group;
	mpi3mr_atomic_set(&sc->pend_large_data_sz, 0);

	if (init_type == MPI3MR_INIT_TYPE_RESET) {
		/*
		 * NOTE(review): mpi3mr_validate_fw_update() returns -EPERM /
		 * -ENOMEM (negative), so "retval == ENOMEM" below can never
		 * match and ENOMEM failures take the retry path — confirm
		 * intended.
		 */
		retval = mpi3mr_validate_fw_update(sc);
		if (retval) {
			if (retval == ENOMEM)
				goto err;
			goto err_retry;
		}
	} else {
		sc->reply_sz = sc->facts.reply_sz;
	}

	mpi3mr_display_ioc_info(sc);

	retval = mpi3mr_reply_alloc(sc);
	if (retval) {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to allocated reply and sense buffers, error: 0x%x\n",
		    retval);
		goto err;
	}

	if (init_type == MPI3MR_INIT_TYPE_INIT) {
		retval = mpi3mr_alloc_chain_bufs(sc);
		if (retval) {
			mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to allocated chain buffers, error: 0x%x\n",
				      retval);
			goto err;
		}
	}

	retval = mpi3mr_issue_iocinit(sc);
	if (retval) {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to Issue IOC Init, error: 0x%x\n",
			      retval);
		if (retval == ENOMEM)
			goto err;
		goto err_retry;
	}

	mpi3mr_print_fw_pkg_ver(sc);

	/* Hand all reply and sense buffers to the firmware. */
	sc->reply_free_q_host_index = sc->num_reply_bufs;
	mpi3mr_regwrite(sc, MPI3_SYSIF_REPLY_FREE_HOST_INDEX_OFFSET,
		sc->reply_free_q_host_index);

	sc->sense_buf_q_host_index = sc->num_sense_bufs;

	mpi3mr_regwrite(sc, MPI3_SYSIF_SENSE_BUF_FREE_HOST_INDEX_OFFSET,
		sc->sense_buf_q_host_index);

	/* Now switch to the full per-queue interrupt vector set. */
	if (init_type == MPI3MR_INIT_TYPE_INIT) {
		retval = mpi3mr_alloc_interrupts(sc, 0);
		if (retval) {
			mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to allocate interrupts, error: 0x%x\n",
			    retval);
			goto err;
		}

		retval = mpi3mr_setup_irqs(sc);
		if (retval) {
			mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to setup ISR, error: 0x%x\n", retval);
			goto err;
		}

		mpi3mr_enable_interrupts(sc);

	} else
		mpi3mr_enable_interrupts(sc);

	retval = mpi3mr_create_op_queues(sc);
	if (retval) {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to create operational queues, error: %d\n",
		    retval);
		if (retval == ENOMEM)
			goto err;
		goto err_retry;
	}

	if (!sc->throttle_groups && sc->num_io_throttle_group) {
		size = sizeof(struct mpi3mr_throttle_group_info);
		sc->throttle_groups = (struct mpi3mr_throttle_group_info *)
					  malloc(sc->num_io_throttle_group *
					      size, M_MPI3MR, M_NOWAIT | M_ZERO);
		if (!sc->throttle_groups) {
			mpi3mr_dprint(sc, MPI3MR_ERROR, "throttle groups memory allocation failed\n");
			goto err;
		}
	}

	/* After a reset, re-arm event notifications and re-enable the port. */
	if (init_type == MPI3MR_INIT_TYPE_RESET) {
		mpi3mr_dprint(sc, MPI3MR_XINFO, "Re-register events\n");
		retval = mpi3mr_register_events(sc);
		if (retval) {
			mpi3mr_dprint(sc, MPI3MR_INFO, "Failed to re-register events, error: 0x%x\n",
			    retval);
			goto err_retry;
		}

		mpi3mr_dprint(sc, MPI3MR_INFO, "Issuing Port Enable\n");
		retval = mpi3mr_issue_port_enable(sc, 0);
		if (retval) {
			mpi3mr_dprint(sc, MPI3MR_INFO, "Failed to issue port enable, error: 0x%x\n",
			    retval);
			goto err_retry;
		}
	}
	retval = mpi3mr_pel_alloc(sc);
	if (retval) {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to allocate memory for PEL, error: 0x%x\n",
		    retval);
		goto err;
	}

	/* Driver page 1 failure is non-fatal; log and carry on. */
	if (mpi3mr_cfg_get_driver_pg1(sc) != 0)
		mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to get the cfg driver page1\n");

	return retval;

err_retry:
	/*
	 * Retry the whole bring-up at most twice, and only if enough of the
	 * ready-timeout budget remains.  NOTE(review): ready_timeout - 60
	 * can wrap if the firmware reports a timeout below 60s — confirm the
	 * field's type/range.
	 */
	if ((retry++ < 2) && (((ticks - start_ticks) / hz) < (sc->ready_timeout - 60))) {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "Retrying controller initialization,"
			      "retry_count: %d\n", retry);
		goto retry_init;
	}
err:
	retval = -1;
	return retval;
}
3076 
mpi3mr_port_enable_complete(struct mpi3mr_softc * sc,struct mpi3mr_drvr_cmd * drvrcmd)3077 static void mpi3mr_port_enable_complete(struct mpi3mr_softc *sc,
3078     struct mpi3mr_drvr_cmd *drvrcmd)
3079 {
3080 	drvrcmd->state = MPI3MR_CMD_NOTUSED;
3081 	drvrcmd->callback = NULL;
3082 	printf(IOCNAME "Completing Port Enable Request\n", sc->name);
3083 	sc->mpi3mr_flags |= MPI3MR_FLAGS_PORT_ENABLE_DONE;
3084 	mpi3mr_startup_decrement(sc->cam_sc);
3085 }
3086 
/**
 * mpi3mr_issue_port_enable - Send a Port Enable admin request
 * @sc: Adapter instance reference
 * @async: non-zero to return immediately and complete via callback,
 *         zero to block until completion or timeout
 *
 * Uses the shared init_cmds driver command slot.  In async mode the
 * completion is handled by mpi3mr_port_enable_complete(); in sync mode
 * this waits up to MPI3MR_PORTENABLE_TIMEOUT and triggers a reset/fault
 * check on timeout.
 *
 * Return: 0 on success, -1 if the slot is busy, the post fails or the
 * command times out.
 */
int mpi3mr_issue_port_enable(struct mpi3mr_softc *sc, U8 async)
{
	Mpi3PortEnableRequest_t pe_req;
	int retval = 0;

	memset(&pe_req, 0, sizeof(pe_req));
	mtx_lock(&sc->init_cmds.completion.lock);
	if (sc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		printf(IOCNAME "Issue PortEnable: Init command is in use\n", sc->name);
		mtx_unlock(&sc->init_cmds.completion.lock);
		goto out;
	}

	sc->init_cmds.state = MPI3MR_CMD_PENDING;

	if (async) {
		sc->init_cmds.is_waiting = 0;
		sc->init_cmds.callback = mpi3mr_port_enable_complete;
	} else {
		sc->init_cmds.is_waiting = 1;
		sc->init_cmds.callback = NULL;
		init_completion(&sc->init_cmds.completion);
	}
	pe_req.HostTag = MPI3MR_HOSTTAG_INITCMDS;
	pe_req.Function = MPI3_FUNCTION_PORT_ENABLE;

	printf(IOCNAME "Sending Port Enable Request\n", sc->name);
	retval = mpi3mr_submit_admin_cmd(sc, &pe_req, sizeof(pe_req));
	if (retval) {
		printf(IOCNAME "Issue PortEnable: Admin Post failed\n",
		    sc->name);
		/*
		 * NOTE(review): init_cmds.state stays PENDING here and on the
		 * timeout path below — presumably cleared later by the
		 * completion callback or reset handling; confirm.
		 */
		goto out_unlock;
	}

	if (!async) {
		wait_for_completion_timeout(&sc->init_cmds.completion,
		    MPI3MR_PORTENABLE_TIMEOUT);
		if (!(sc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
			printf(IOCNAME "Issue PortEnable: command timed out\n",
			    sc->name);
			retval = -1;
			/* Escalate: check for a firmware fault / request reset. */
			mpi3mr_check_rh_fault_ioc(sc, MPI3MR_RESET_FROM_PE_TIMEOUT);
			goto out_unlock;
		}
		/* Sync mode: run the completion handler inline. */
		mpi3mr_port_enable_complete(sc, &sc->init_cmds);
	}
out_unlock:
	mtx_unlock(&sc->init_cmds.completion.lock);

out:
	return retval;
}
3140 
mpi3mr_timestamp_sync(struct mpi3mr_softc * sc)3141 static int mpi3mr_timestamp_sync(struct mpi3mr_softc *sc)
3142 {
3143 	int retval = 0;
3144 	struct timeval current_time;
3145 	int64_t time_in_msec;
3146 	Mpi3IoUnitControlRequest_t iou_ctrl = {0};
3147 
3148 	mtx_lock(&sc->init_cmds.completion.lock);
3149 	if (sc->init_cmds.state & MPI3MR_CMD_PENDING) {
3150 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Issue timestamp sync: command is in use\n");
3151 		mtx_unlock(&sc->init_cmds.completion.lock);
3152 		return -1;
3153 	}
3154 
3155 	sc->init_cmds.state = MPI3MR_CMD_PENDING;
3156 	sc->init_cmds.is_waiting = 1;
3157 	sc->init_cmds.callback = NULL;
3158 	iou_ctrl.HostTag = htole64(MPI3MR_HOSTTAG_INITCMDS);
3159 	iou_ctrl.Function = MPI3_FUNCTION_IO_UNIT_CONTROL;
3160 	iou_ctrl.Operation = MPI3_CTRL_OP_UPDATE_TIMESTAMP;
3161 	getmicrotime(&current_time);
3162 	time_in_msec = (int64_t)current_time.tv_sec * 1000 + current_time.tv_usec/1000;
3163 	iou_ctrl.Param64[0] = htole64(time_in_msec);
3164 
3165 	init_completion(&sc->init_cmds.completion);
3166 
3167 	retval = mpi3mr_submit_admin_cmd(sc, &iou_ctrl, sizeof(iou_ctrl));
3168 	if (retval) {
3169 		mpi3mr_dprint(sc, MPI3MR_ERROR, "timestamp sync: Admin Post failed\n");
3170 		goto out_unlock;
3171 	}
3172 
3173 	wait_for_completion_timeout(&sc->init_cmds.completion,
3174 				    (MPI3MR_INTADMCMD_TIMEOUT));
3175 
3176 	if (!(sc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
3177 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Issue timestamp sync: command timed out\n");
3178 		sc->init_cmds.is_waiting = 0;
3179 
3180 		if (!(sc->init_cmds.state & MPI3MR_CMD_RESET))
3181 			mpi3mr_check_rh_fault_ioc(sc, MPI3MR_RESET_FROM_TSU_TIMEOUT);
3182 
3183 		retval = -1;
3184 		goto out_unlock;
3185 	}
3186 
3187 	if (((sc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK) != MPI3_IOCSTATUS_SUCCESS) &&
3188 	     (sc->init_cmds.ioc_status != MPI3_IOCSTATUS_SUPERVISOR_ONLY)) {
3189 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Issue timestamp sync: Failed IOCStatus(0x%04x) Loginfo(0x%08x)\n",
3190 			      (sc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK), sc->init_cmds.ioc_loginfo);
3191 		retval = -1;
3192 	}
3193 
3194 out_unlock:
3195 	sc->init_cmds.state = MPI3MR_CMD_NOTUSED;
3196 	mtx_unlock(&sc->init_cmds.completion.lock);
3197 
3198 	return retval;
3199 }
3200 
/*
 * Kernel thread that periodically re-synchronizes the controller timestamp
 * with host time (every ts_update_interval minutes).  Exits on shutdown or
 * when the controller is marked unrecoverable.
 */
void
mpi3mr_timestamp_thread(void *arg)
{
	struct mpi3mr_softc *sc = (struct mpi3mr_softc *)arg;
	U64 elapsed_time = 0;	/* seconds since last timestamp sync (~1s per loop) */

	sc->timestamp_thread_active = 1;
	mtx_lock(&sc->reset_mutex);
	while (1) {

		/* Exit conditions are checked with reset_mutex held. */
		if (sc->mpi3mr_flags & MPI3MR_FLAGS_SHUTDOWN ||
		    (sc->unrecoverable == 1)) {
			mpi3mr_dprint(sc, MPI3MR_INFO,
				      "Exit due to %s from %s\n",
				      sc->mpi3mr_flags & MPI3MR_FLAGS_SHUTDOWN ? "Shutdown" :
				      "Hardware critical error", __func__);
			break;
		}
		mtx_unlock(&sc->reset_mutex);

		/* Hold off (and restart the interval) while a reset is running. */
		while (sc->reset_in_progress) {
			if (elapsed_time)
				elapsed_time = 0;
			if (sc->unrecoverable)
				break;
			pause("mpi3mr_timestamp_thread", hz / 5);
		}

		/* ts_update_interval is in minutes; elapsed_time in seconds. */
		if (elapsed_time++ >= sc->ts_update_interval * 60) {
			mpi3mr_timestamp_sync(sc);
			elapsed_time = 0;
		}

		/*
		 * Sleep for 1 second if we're not exiting, then loop to top
		 * to poll exit status and hardware health.
		 */
		mtx_lock(&sc->reset_mutex);
		if (((sc->mpi3mr_flags & MPI3MR_FLAGS_SHUTDOWN) == 0) &&
		    (!sc->unrecoverable) && (!sc->reset_in_progress)) {
			msleep(&sc->timestamp_chan, &sc->reset_mutex, PRIBIO,
			       "mpi3mr_timestamp", 1 * hz);
		}
	}
	mtx_unlock(&sc->reset_mutex);
	sc->timestamp_thread_active = 0;
	kproc_exit(0);
}
3249 
/*
 * Watchdog kernel thread: polls controller health roughly once a second,
 * escalating to soft resets on reset-history, fault states or stuck
 * prepare-for-reset handshakes, and marks the controller unrecoverable for
 * fault codes that a reset cannot fix.  Exits on shutdown or unrecoverable.
 */
void
mpi3mr_watchdog_thread(void *arg)
{
	struct mpi3mr_softc *sc;
	enum mpi3mr_iocstate ioc_state;
	U32 fault, host_diagnostic, ioc_status;

	sc = (struct mpi3mr_softc *)arg;

	mpi3mr_dprint(sc, MPI3MR_XINFO, "%s\n", __func__);

	sc->watchdog_thread_active = 1;
	mtx_lock(&sc->reset_mutex);
	for (;;) {
		/* Exit conditions are checked with reset_mutex held. */
		if (sc->mpi3mr_flags & MPI3MR_FLAGS_SHUTDOWN ||
		    (sc->unrecoverable == 1)) {
			mpi3mr_dprint(sc, MPI3MR_INFO,
			    "Exit due to %s from %s\n",
			   sc->mpi3mr_flags & MPI3MR_FLAGS_SHUTDOWN ? "Shutdown" :
			    "Hardware critical error", __func__);
			break;
		}
		mtx_unlock(&sc->reset_mutex);

		/* Firmware asked us to prepare for reset but never followed up. */
		if ((sc->prepare_for_reset) &&
		    ((sc->prepare_for_reset_timeout_counter++) >=
		     MPI3MR_PREPARE_FOR_RESET_TIMEOUT)) {
			mpi3mr_soft_reset_handler(sc,
			    MPI3MR_RESET_FROM_CIACTVRST_TIMER, 1);
			goto sleep;
		}

		ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);

		/* Firmware reset itself behind our back: resync driver state. */
		if (ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) {
			mpi3mr_soft_reset_handler(sc, MPI3MR_RESET_FROM_FIRMWARE, 0);
			goto sleep;
		}

		ioc_state = mpi3mr_get_iocstate(sc);
		if (ioc_state == MRIOC_STATE_FAULT) {
			fault = mpi3mr_regread(sc, MPI3_SYSIF_FAULT_OFFSET) &
			    MPI3_SYSIF_FAULT_CODE_MASK;

			/* Give a firmware diag save time to finish before resetting. */
			host_diagnostic = mpi3mr_regread(sc, MPI3_SYSIF_HOST_DIAG_OFFSET);
			if (host_diagnostic & MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS) {
				if (!sc->diagsave_timeout) {
					mpi3mr_print_fault_info(sc);
					mpi3mr_dprint(sc, MPI3MR_INFO,
						"diag save in progress\n");
				}
				if ((sc->diagsave_timeout++) <= MPI3_SYSIF_DIAG_SAVE_TIMEOUT)
					goto sleep;
			}
			mpi3mr_print_fault_info(sc);
			sc->diagsave_timeout = 0;

			/* Faults that no soft reset can clear: give up on the HW. */
			if ((fault == MPI3_SYSIF_FAULT_CODE_POWER_CYCLE_REQUIRED) ||
			    (fault == MPI3_SYSIF_FAULT_CODE_COMPLETE_RESET_NEEDED)) {
				mpi3mr_dprint(sc, MPI3MR_INFO,
				    "Controller requires system power cycle or complete reset is needed,"
				    "fault code: 0x%x. marking controller as unrecoverable\n", fault);
				sc->unrecoverable = 1;
				break;
			}

			if (fault == MPI3_SYSIF_FAULT_CODE_INSUFFICIENT_PCI_SLOT_POWER) {
				mpi3mr_dprint(sc, MPI3MR_INFO,
					      "controller faulted due to insufficient power, marking controller as unrecoverable\n");
				sc->unrecoverable = 1;
				break;
			}

			/* Reset already underway (or expected): let it handle things. */
			if ((fault == MPI3_SYSIF_FAULT_CODE_DIAG_FAULT_RESET)
			    || (fault == MPI3_SYSIF_FAULT_CODE_SOFT_RESET_IN_PROGRESS)
			    || (sc->reset_in_progress))
				break;
			if (fault == MPI3_SYSIF_FAULT_CODE_CI_ACTIVATION_RESET)
				mpi3mr_soft_reset_handler(sc,
				    MPI3MR_RESET_FROM_CIACTIV_FAULT, 0);
			else
				mpi3mr_soft_reset_handler(sc,
				    MPI3MR_RESET_FROM_FAULT_WATCH, 0);

		}

		/* A reset was requested from elsewhere (e.g. ioctl/trigger). */
		if (sc->reset.type == MPI3MR_TRIGGER_SOFT_RESET) {
			mpi3mr_print_fault_info(sc);
			mpi3mr_soft_reset_handler(sc, sc->reset.reason, 1);
		}
sleep:
		mtx_lock(&sc->reset_mutex);
		/*
		 * Sleep for 1 second if we're not exiting, then loop to top
		 * to poll exit status and hardware health.
		 */
		if ((sc->mpi3mr_flags & MPI3MR_FLAGS_SHUTDOWN) == 0 &&
		    !sc->unrecoverable) {
			msleep(&sc->watchdog_chan, &sc->reset_mutex, PRIBIO,
			    "mpi3mr_watchdog", 1 * hz);
		}
	}
	mtx_unlock(&sc->reset_mutex);
	sc->watchdog_thread_active = 0;
	mpi3mr_kproc_exit(0);
}
3356 
/*
 * Log a human-readable description of a firmware event notification.
 * Events with payload worth decoding get dedicated handling; the rest map
 * to a fixed description string.  Unknown events are silently ignored.
 */
static void mpi3mr_display_event_data(struct mpi3mr_softc *sc,
	Mpi3EventNotificationReply_t *event_rep)
{
	/* Events that only need a fixed description string. */
	static const struct {
		U16 code;
		const char *name;
	} simple_events[] = {
		{ MPI3_EVENT_LOG_DATA,			"Log Data" },
		{ MPI3_EVENT_CHANGE,			"Event Change" },
		{ MPI3_EVENT_GPIO_INTERRUPT,		"GPIO Interrupt" },
		{ MPI3_EVENT_CABLE_MGMT,		"Cable Management" },
		{ MPI3_EVENT_ENERGY_PACK_CHANGE,	"Energy Pack Change" },
		{ MPI3_EVENT_SAS_BROADCAST_PRIMITIVE,	"SAS Broadcast Primitive" },
		{ MPI3_EVENT_SAS_NOTIFY_PRIMITIVE,	"SAS Notify Primitive" },
		{ MPI3_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE, "SAS Init Device Status Change" },
		{ MPI3_EVENT_SAS_INIT_TABLE_OVERFLOW,	"SAS Init Table Overflow" },
		{ MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST,	"SAS Topology Change List" },
		{ MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE,	"Enclosure Device Status Change" },
		{ MPI3_EVENT_HARD_RESET_RECEIVED,	"Hard Reset Received" },
		{ MPI3_EVENT_SAS_PHY_COUNTER,		"SAS PHY Counter" },
		{ MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR, "SAS Device Discovery Error" },
		{ MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST,	"PCIE Topology Change List" },
		{ MPI3_EVENT_PREPARE_FOR_RESET,		"Prepare For Reset" },
	};
	U16 event = event_rep->Event;
	size_t idx;

	switch (event) {
	case MPI3_EVENT_DEVICE_ADDED:
	{
		Mpi3DevicePage0_t *event_data =
		    (Mpi3DevicePage0_t *)event_rep->EventData;
		mpi3mr_dprint(sc, MPI3MR_EVENT, "Device Added: Dev=0x%04x Form=0x%x Perst id: 0x%x\n",
			event_data->DevHandle, event_data->DeviceForm, event_data->PersistentID);
		return;
	}
	case MPI3_EVENT_DEVICE_INFO_CHANGED:
	{
		Mpi3DevicePage0_t *event_data =
		    (Mpi3DevicePage0_t *)event_rep->EventData;
		mpi3mr_dprint(sc, MPI3MR_EVENT, "Device Info Changed: Dev=0x%04x Form=0x%x\n",
			event_data->DevHandle, event_data->DeviceForm);
		return;
	}
	case MPI3_EVENT_DEVICE_STATUS_CHANGE:
	{
		Mpi3EventDataDeviceStatusChange_t *event_data =
		    (Mpi3EventDataDeviceStatusChange_t *)event_rep->EventData;
		mpi3mr_dprint(sc, MPI3MR_EVENT, "Device Status Change: Dev=0x%04x RC=0x%x\n",
			event_data->DevHandle, event_data->ReasonCode);
		return;
	}
	case MPI3_EVENT_SAS_DISCOVERY:
	{
		Mpi3EventDataSasDiscovery_t *event_data =
		    (Mpi3EventDataSasDiscovery_t *)event_rep->EventData;
		mpi3mr_dprint(sc, MPI3MR_EVENT, "SAS Discovery: (%s)",
			(event_data->ReasonCode == MPI3_EVENT_SAS_DISC_RC_STARTED) ?
		    "start" : "stop");
		if (event_data->DiscoveryStatus &&
		    (sc->mpi3mr_debug & MPI3MR_EVENT))
			printf("discovery_status(0x%08x)",
			    event_data->DiscoveryStatus);
		if (sc->mpi3mr_debug & MPI3MR_EVENT)
			printf("\n");
		return;
	}
	case MPI3_EVENT_PCIE_ENUMERATION:
	{
		Mpi3EventDataPcieEnumeration_t *event_data =
			(Mpi3EventDataPcieEnumeration_t *)event_rep->EventData;
		mpi3mr_dprint(sc, MPI3MR_EVENT, "PCIE Enumeration: (%s)",
			(event_data->ReasonCode ==
			    MPI3_EVENT_PCIE_ENUM_RC_STARTED) ? "start" :
			    "stop");
		if (event_data->EnumerationStatus)
			mpi3mr_dprint(sc, MPI3MR_EVENT, "enumeration_status(0x%08x)",
			   event_data->EnumerationStatus);
		if (sc->mpi3mr_debug & MPI3MR_EVENT)
			printf("\n");
		return;
	}
	}

	for (idx = 0; idx < sizeof(simple_events) / sizeof(simple_events[0]); idx++) {
		if (simple_events[idx].code == event) {
			mpi3mr_dprint(sc, MPI3MR_EVENT, "%s\n", simple_events[idx].name);
			return;
		}
	}
}
3478 
3479 struct mpi3mr_target *
mpi3mr_find_target_by_per_id(struct mpi3mr_cam_softc * cam_sc,uint16_t per_id)3480 mpi3mr_find_target_by_per_id(struct mpi3mr_cam_softc *cam_sc,
3481     uint16_t per_id)
3482 {
3483 	struct mpi3mr_target *target = NULL;
3484 
3485 	mtx_lock_spin(&cam_sc->sc->target_lock);
3486 	TAILQ_FOREACH(target, &cam_sc->tgt_list, tgt_next) {
3487 		if (target->per_id == per_id)
3488 			break;
3489 	}
3490 
3491 	mtx_unlock_spin(&cam_sc->sc->target_lock);
3492 	return target;
3493 }
3494 
3495 struct mpi3mr_target *
mpi3mr_find_target_by_dev_handle(struct mpi3mr_cam_softc * cam_sc,uint16_t handle)3496 mpi3mr_find_target_by_dev_handle(struct mpi3mr_cam_softc *cam_sc,
3497     uint16_t handle)
3498 {
3499 	struct mpi3mr_target *target = NULL;
3500 
3501 	mtx_lock_spin(&cam_sc->sc->target_lock);
3502 	TAILQ_FOREACH(target, &cam_sc->tgt_list, tgt_next) {
3503 		if (target->dev_handle == handle)
3504 			break;
3505 
3506 	}
3507 	mtx_unlock_spin(&cam_sc->sc->target_lock);
3508 	return target;
3509 }
3510 
/**
 * mpi3mr_update_device - Refresh a driver target from Device Page 0
 * @sc: Adapter instance reference
 * @tgtdev: Driver-side target structure to update
 * @dev_pg0: MPI3 Device Page 0 reported by the firmware
 * @is_added: true when invoked for a newly added device
 *
 * Copies the generic attributes and the device-form specific
 * (SAS/SATA, PCIe, VD) attributes from the firmware's Device Page 0
 * into the driver's target structure and derives the hidden and
 * I/O-throttling state from them.
 */
void mpi3mr_update_device(struct mpi3mr_softc *sc,
    struct mpi3mr_target *tgtdev, Mpi3DevicePage0_t *dev_pg0,
    bool is_added)
{
	U16 device_flags;

	/* Attributes common to all device forms. */
	tgtdev->per_id = dev_pg0->PersistentID;
	tgtdev->dev_handle = dev_pg0->DevHandle;
	tgtdev->dev_type = dev_pg0->DeviceForm;
	tgtdev->encl_handle = dev_pg0->EnclosureHandle;
	tgtdev->parent_handle = dev_pg0->ParentDevHandle;
	tgtdev->slot = dev_pg0->Slot;
	tgtdev->qdepth = dev_pg0->QueueDepth;
	tgtdev->wwid = dev_pg0->WWID;

	device_flags = dev_pg0->Flags;
	tgtdev->is_hidden = (device_flags & MPI3_DEVICE0_FLAGS_HIDDEN);
	/* Throttling eligibility is only (re)latched when the device is added. */
	if (is_added == true)
		tgtdev->io_throttle_enabled =
		    (device_flags & MPI3_DEVICE0_FLAGS_IO_THROTTLING_REQUIRED) ? 1 : 0;

	/* Any access status outside the known-good set hides the device. */
	switch (dev_pg0->AccessStatus) {
	case MPI3_DEVICE0_ASTATUS_NO_ERRORS:
	case MPI3_DEVICE0_ASTATUS_PREPARE:
	case MPI3_DEVICE0_ASTATUS_NEEDS_INITIALIZATION:
	case MPI3_DEVICE0_ASTATUS_DEVICE_MISSING_DELAY:
		break;
	default:
		tgtdev->is_hidden = 1;
		break;
	}

	/* Map the WRITE SAME limit flags to a block count (0 == no limit). */
	switch (device_flags & MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_MASK) {
	case MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_256_LB:
		tgtdev->ws_len = 256;
		break;
	case MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_2048_LB:
		tgtdev->ws_len = 2048;
		break;
	case MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_NO_LIMIT:
	default:
		tgtdev->ws_len = 0;
		break;
	}

	/* Device-form specific attributes. */
	switch (tgtdev->dev_type) {
	case MPI3_DEVICE_DEVFORM_SAS_SATA:
	{
		Mpi3Device0SasSataFormat_t *sas_info =
		    &dev_pg0->DeviceSpecific.SasSataFormat;
		U16 dev_info = sas_info->DeviceInfo;

		tgtdev->dev_spec.sassata_inf.dev_info = dev_info;
		tgtdev->dev_spec.sassata_inf.sas_address = sas_info->SASAddress;
		/* Expose only SSP/STP-SATA end devices; hide everything else. */
		if ((dev_info & MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_MASK) !=
		    MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_END_DEVICE)
			tgtdev->is_hidden = 1;
		else if (!(dev_info & (MPI3_SAS_DEVICE_INFO_STP_SATA_TARGET |
			    MPI3_SAS_DEVICE_INFO_SSP_TARGET)))
			tgtdev->is_hidden = 1;
		break;
	}
	case MPI3_DEVICE_DEVFORM_PCIE:
	{
		Mpi3Device0PcieFormat_t *pcie_info =
		    &dev_pg0->DeviceSpecific.PcieFormat;
		U16 dev_info = pcie_info->DeviceInfo;

		tgtdev->q_depth = dev_pg0->QueueDepth;
		tgtdev->dev_spec.pcie_inf.dev_info = dev_info;
		tgtdev->dev_spec.pcie_inf.capb = pcie_info->Capabilities;
		tgtdev->dev_spec.pcie_inf.mdts = MPI3MR_DEFAULT_MDTS;
		/* Detailed limits are only valid when the device has no errors. */
		if (dev_pg0->AccessStatus == MPI3_DEVICE0_ASTATUS_NO_ERRORS) {
			tgtdev->dev_spec.pcie_inf.mdts =
			    pcie_info->MaximumDataTransferSize;
			tgtdev->dev_spec.pcie_inf.pgsz = pcie_info->PageSize;
			tgtdev->dev_spec.pcie_inf.reset_to =
			    pcie_info->ControllerResetTO;
			tgtdev->dev_spec.pcie_inf.abort_to =
			    pcie_info->NVMeAbortTO;
		}
		/* Cap the maximum data transfer size at 1 MiB. */
		if (tgtdev->dev_spec.pcie_inf.mdts > (1024 * 1024))
			tgtdev->dev_spec.pcie_inf.mdts = (1024 * 1024);

		/* Expose only NVMe and PCIe-SCSI device types. */
		if (((dev_info & MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) !=
		    MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) &&
		    ((dev_info & MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) !=
		    MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_SCSI_DEVICE))
			tgtdev->is_hidden = 1;
		break;
	}
	case MPI3_DEVICE_DEVFORM_VD:
	{
		Mpi3Device0VdFormat_t *vd_info =
		    &dev_pg0->DeviceSpecific.VdFormat;
		struct mpi3mr_throttle_group_info *tg = NULL;

		tgtdev->dev_spec.vol_inf.state = vd_info->VdState;
		if (vd_info->VdState == MPI3_DEVICE0_VD_STATE_OFFLINE)
			tgtdev->is_hidden = 1;
		tgtdev->dev_spec.vol_inf.tg_id = vd_info->IOThrottleGroup;
		tgtdev->dev_spec.vol_inf.tg_high =
		    vd_info->IOThrottleGroupHigh * 2048;
		tgtdev->dev_spec.vol_inf.tg_low =
		    vd_info->IOThrottleGroupLow * 2048;
		/* Bind the volume to its adapter-level throttle group, if valid. */
		if (vd_info->IOThrottleGroup < sc->num_io_throttle_group) {
			tg = sc->throttle_groups + vd_info->IOThrottleGroup;
			tg->id = vd_info->IOThrottleGroup;
			tg->high = tgtdev->dev_spec.vol_inf.tg_high;
			tg->low = tgtdev->dev_spec.vol_inf.tg_low;
			if (is_added == true)
				tg->fw_qd = tgtdev->q_depth;
			tg->modified_qd = tgtdev->q_depth;
		}
		tgtdev->dev_spec.vol_inf.tg = tg;
		tgtdev->throttle_group = tg;
		break;
	}
	default:
		break;
	}
}
3638 
/**
 * mpi3mr_create_device - Create or refresh a driver target
 * @sc: Adapter instance reference
 * @dev_pg0: MPI3 Device Page 0 reported by the firmware
 *
 * Looks up the target by persistent ID; if found it is refreshed
 * from @dev_pg0, otherwise a new target is allocated, initialized
 * and appended to the CAM target list.
 *
 * Return: 0 on success, -1 on allocation failure.
 */
int mpi3mr_create_device(struct mpi3mr_softc *sc,
    Mpi3DevicePage0_t *dev_pg0)
{
	struct mpi3mr_target *tgt = NULL;
	U16 per_id = dev_pg0->PersistentID;

	mtx_lock_spin(&sc->target_lock);
	TAILQ_FOREACH(tgt, &sc->cam_sc->tgt_list, tgt_next) {
		if (tgt->per_id == per_id) {
			tgt->state = MPI3MR_DEV_CREATED;
			break;
		}
	}
	mtx_unlock_spin(&sc->target_lock);

	/* Known target: just refresh its attributes. */
	if (tgt != NULL) {
		mpi3mr_update_device(sc, tgt, dev_pg0, true);
		return (0);
	}

	/* New target: allocate, populate and link it into the list. */
	tgt = malloc(sizeof(*tgt), M_MPI3MR, M_NOWAIT | M_ZERO);
	if (tgt == NULL)
		return (-1);

	tgt->exposed_to_os = 0;
	mpi3mr_update_device(sc, tgt, dev_pg0, true);
	mtx_lock_spin(&sc->target_lock);
	TAILQ_INSERT_TAIL(&sc->cam_sc->tgt_list, tgt, tgt_next);
	tgt->state = MPI3MR_DEV_CREATED;
	mtx_unlock_spin(&sc->target_lock);

	return (0);
}
3678 
3679 /**
3680  * mpi3mr_dev_rmhs_complete_iou - Device removal IOUC completion
3681  * @sc: Adapter instance reference
3682  * @drv_cmd: Internal command tracker
3683  *
3684  * Issues a target reset TM to the firmware from the device
3685  * removal TM pend list or retry the removal handshake sequence
3686  * based on the IOU control request IOC status.
3687  *
3688  * Return: Nothing
3689  */
static void mpi3mr_dev_rmhs_complete_iou(struct mpi3mr_softc *sc,
	struct mpi3mr_drvr_cmd *drv_cmd)
{
	/* Index of this tracker within the dev-removal command pool. */
	U16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
	struct delayed_dev_rmhs_node *delayed_dev_rmhs = NULL;
	struct mpi3mr_target *tgtdev = NULL;

	mpi3mr_dprint(sc, MPI3MR_EVENT,
	    "%s :dev_rmhs_iouctrl_complete:handle(0x%04x), ioc_status(0x%04x), loginfo(0x%08x)\n",
	    __func__, drv_cmd->dev_handle, drv_cmd->ioc_status,
	    drv_cmd->ioc_loginfo);
	if (drv_cmd->ioc_status != MPI3_IOCSTATUS_SUCCESS) {
		/*
		 * IOU control failed: restart the whole removal handshake
		 * (TM followed by IOU control) on the same tracker, up to
		 * MPI3MR_DEVRMHS_RETRYCOUNT attempts.
		 */
		if (drv_cmd->retry_count < MPI3MR_DEVRMHS_RETRYCOUNT) {
			drv_cmd->retry_count++;
			mpi3mr_dprint(sc, MPI3MR_EVENT,
			    "%s :dev_rmhs_iouctrl_complete: handle(0x%04x)retrying handshake retry=%d\n",
			    __func__, drv_cmd->dev_handle,
			    drv_cmd->retry_count);
			mpi3mr_dev_rmhs_send_tm(sc, drv_cmd->dev_handle,
			    drv_cmd, drv_cmd->iou_rc);
			return;
		}
		mpi3mr_dprint(sc, MPI3MR_ERROR,
		    "%s :dev removal handshake failed after all retries: handle(0x%04x)\n",
		    __func__, drv_cmd->dev_handle);
	} else {
		/*
		 * Handshake succeeded: mark every matching target as having
		 * completed the removal handshake and clear its pending bit.
		 */
		mtx_lock_spin(&sc->target_lock);
		TAILQ_FOREACH(tgtdev, &sc->cam_sc->tgt_list, tgt_next) {
		       if (tgtdev->dev_handle == drv_cmd->dev_handle)
			       tgtdev->state = MPI3MR_DEV_REMOVE_HS_COMPLETED;
		}
		mtx_unlock_spin(&sc->target_lock);

		mpi3mr_dprint(sc, MPI3MR_INFO,
		    "%s :dev removal handshake completed successfully: handle(0x%04x)\n",
		    __func__, drv_cmd->dev_handle);
		mpi3mr_clear_bit(drv_cmd->dev_handle, sc->removepend_bitmap);
	}

	/*
	 * If another removal was postponed for lack of a free tracker,
	 * reuse this tracker for it instead of releasing it.  The TM is
	 * issued before the node is unlinked and freed.
	 */
	if (!TAILQ_EMPTY(&sc->delayed_rmhs_list)) {
		delayed_dev_rmhs = TAILQ_FIRST(&sc->delayed_rmhs_list);
		drv_cmd->dev_handle = delayed_dev_rmhs->handle;
		drv_cmd->retry_count = 0;
		drv_cmd->iou_rc = delayed_dev_rmhs->iou_rc;
		mpi3mr_dprint(sc, MPI3MR_EVENT,
		    "%s :dev_rmhs_iouctrl_complete: processing delayed TM: handle(0x%04x)\n",
		    __func__, drv_cmd->dev_handle);
		mpi3mr_dev_rmhs_send_tm(sc, drv_cmd->dev_handle, drv_cmd,
		    drv_cmd->iou_rc);
		TAILQ_REMOVE(&sc->delayed_rmhs_list, delayed_dev_rmhs, list);
		free(delayed_dev_rmhs, M_MPI3MR);
		return;
	}
	/* Nothing pending: return the tracker to the free pool. */
	drv_cmd->state = MPI3MR_CMD_NOTUSED;
	drv_cmd->callback = NULL;
	drv_cmd->retry_count = 0;
	drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
	mpi3mr_clear_bit(cmd_idx, sc->devrem_bitmap);
}
3749 
3750 /**
3751  * mpi3mr_dev_rmhs_complete_tm - Device removal TM completion
3752  * @sc: Adapter instance reference
3753  * @drv_cmd: Internal command tracker
3754  *
3755  * Issues a target reset TM to the firmware from the device
3756  * removal TM pend list or issue IO Unit control request as
3757  * part of device removal or hidden acknowledgment handshake.
3758  *
3759  * Return: Nothing
3760  */
static void mpi3mr_dev_rmhs_complete_tm(struct mpi3mr_softc *sc,
	struct mpi3mr_drvr_cmd *drv_cmd)
{
	Mpi3IoUnitControlRequest_t iou_ctrl;
	/* Index of this tracker within the dev-removal command pool. */
	U16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
	Mpi3SCSITaskMgmtReply_t *tm_reply = NULL;
	int retval;

	/* The TM reply is only present when the firmware posted one. */
	if (drv_cmd->state & MPI3MR_CMD_REPLYVALID)
		tm_reply = (Mpi3SCSITaskMgmtReply_t *)drv_cmd->reply;

	if (tm_reply)
		printf(IOCNAME
		    "dev_rmhs_tr_complete:handle(0x%04x), ioc_status(0x%04x), loginfo(0x%08x), term_count(%d)\n",
		    sc->name, drv_cmd->dev_handle, drv_cmd->ioc_status,
		    drv_cmd->ioc_loginfo,
		    le32toh(tm_reply->TerminationCount));

	printf(IOCNAME "Issuing IOU CTL: handle(0x%04x) dev_rmhs idx(%d)\n",
	    sc->name, drv_cmd->dev_handle, cmd_idx);

	memset(&iou_ctrl, 0, sizeof(iou_ctrl));

	/*
	 * Second stage of the removal handshake: reuse the same tracker
	 * for the IO unit control request; its completion continues in
	 * mpi3mr_dev_rmhs_complete_iou().
	 */
	drv_cmd->state = MPI3MR_CMD_PENDING;
	drv_cmd->is_waiting = 0;
	drv_cmd->callback = mpi3mr_dev_rmhs_complete_iou;
	iou_ctrl.Operation = drv_cmd->iou_rc;
	iou_ctrl.Param16[0] = htole16(drv_cmd->dev_handle);
	iou_ctrl.HostTag = htole16(drv_cmd->host_tag);
	iou_ctrl.Function = MPI3_FUNCTION_IO_UNIT_CONTROL;

	retval = mpi3mr_submit_admin_cmd(sc, &iou_ctrl, sizeof(iou_ctrl));
	if (retval) {
		printf(IOCNAME "Issue DevRmHsTMIOUCTL: Admin post failed\n",
		    sc->name);
		goto out_failed;
	}

	return;
out_failed:
	/* Posting failed: reset the tracker and release it to the pool. */
	drv_cmd->state = MPI3MR_CMD_NOTUSED;
	drv_cmd->callback = NULL;
	drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
	drv_cmd->retry_count = 0;
	mpi3mr_clear_bit(cmd_idx, sc->devrem_bitmap);
}
3807 
3808 /**
3809  * mpi3mr_dev_rmhs_send_tm - Issue TM for device removal
3810  * @sc: Adapter instance reference
3811  * @handle: Device handle
3812  * @cmdparam: Internal command tracker
3813  * @iou_rc: IO Unit reason code
3814  *
3815  * Issues a target reset TM to the firmware or add it to a pend
3816  * list as part of device removal or hidden acknowledgment
3817  * handshake.
3818  *
3819  * Return: Nothing
3820  */
static void mpi3mr_dev_rmhs_send_tm(struct mpi3mr_softc *sc, U16 handle,
	struct mpi3mr_drvr_cmd *cmdparam, U8 iou_rc)
{
	Mpi3SCSITaskMgmtRequest_t tm_req;
	int retval = 0;
	U16 cmd_idx = MPI3MR_NUM_DEVRMCMD;
	U8 retrycount = 5;
	struct mpi3mr_drvr_cmd *drv_cmd = cmdparam;
	struct delayed_dev_rmhs_node *delayed_dev_rmhs = NULL;

	/* Caller supplied a tracker (retry/delayed path): skip allocation. */
	if (drv_cmd)
		goto issue_cmd;
	/*
	 * Claim a free dev-removal tracker: find a zero bit and try to set
	 * it atomically; retry a few times if another context races us.
	 */
	do {
		cmd_idx = mpi3mr_find_first_zero_bit(sc->devrem_bitmap,
		    MPI3MR_NUM_DEVRMCMD);
		if (cmd_idx < MPI3MR_NUM_DEVRMCMD) {
			if (!mpi3mr_test_and_set_bit(cmd_idx, sc->devrem_bitmap))
				break;
			cmd_idx = MPI3MR_NUM_DEVRMCMD;
		}
	} while (retrycount--);

	/*
	 * No tracker available: queue the request on the delayed list; it
	 * is replayed from mpi3mr_dev_rmhs_complete_iou() when a tracker
	 * frees up.  Allocation failure silently drops the request.
	 */
	if (cmd_idx >= MPI3MR_NUM_DEVRMCMD) {
		delayed_dev_rmhs = malloc(sizeof(*delayed_dev_rmhs),M_MPI3MR,
		     M_ZERO|M_NOWAIT);

		if (!delayed_dev_rmhs)
			return;
		delayed_dev_rmhs->handle = handle;
		delayed_dev_rmhs->iou_rc = iou_rc;
		TAILQ_INSERT_TAIL(&(sc->delayed_rmhs_list), delayed_dev_rmhs, list);
		mpi3mr_dprint(sc, MPI3MR_EVENT, "%s :DevRmHs: tr:handle(0x%04x) is postponed\n",
		    __func__, handle);


		return;
	}
	drv_cmd = &sc->dev_rmhs_cmds[cmd_idx];

issue_cmd:
	cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
	mpi3mr_dprint(sc, MPI3MR_EVENT,
	    "%s :Issuing TR TM: for devhandle 0x%04x with dev_rmhs %d\n",
	    __func__, handle, cmd_idx);

	memset(&tm_req, 0, sizeof(tm_req));
	if (drv_cmd->state & MPI3MR_CMD_PENDING) {
		mpi3mr_dprint(sc, MPI3MR_EVENT, "%s :Issue TM: Command is in use\n", __func__);
		goto out;
	}
	/*
	 * First stage of the removal handshake: target-reset TM; its
	 * completion (mpi3mr_dev_rmhs_complete_tm) issues the IOU control.
	 */
	drv_cmd->state = MPI3MR_CMD_PENDING;
	drv_cmd->is_waiting = 0;
	drv_cmd->callback = mpi3mr_dev_rmhs_complete_tm;
	drv_cmd->dev_handle = handle;
	drv_cmd->iou_rc = iou_rc;
	tm_req.DevHandle = htole16(handle);
	tm_req.TaskType = MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
	tm_req.HostTag = htole16(drv_cmd->host_tag);
	tm_req.TaskHostTag = htole16(MPI3MR_HOSTTAG_INVALID);
	tm_req.Function = MPI3_FUNCTION_SCSI_TASK_MGMT;

	/* Mark the handle as having a removal in flight before posting. */
	mpi3mr_set_bit(handle, sc->removepend_bitmap);
	retval = mpi3mr_submit_admin_cmd(sc, &tm_req, sizeof(tm_req));
	if (retval) {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "%s :Issue DevRmHsTM: Admin Post failed\n",
		    __func__);
		goto out_failed;
	}
out:
	return;
out_failed:
	/* Posting failed: reset the tracker and release it to the pool. */
	drv_cmd->state = MPI3MR_CMD_NOTUSED;
	drv_cmd->callback = NULL;
	drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
	drv_cmd->retry_count = 0;
	mpi3mr_clear_bit(cmd_idx, sc->devrem_bitmap);
}
3898 
3899 /**
3900  * mpi3mr_complete_evt_ack - Event ack request completion
3901  * @sc: Adapter instance reference
3902  * @drv_cmd: Internal command tracker
3903  *
3904  * This is the completion handler for non blocking event
3905  * acknowledgment sent to the firmware and this will issue any
3906  * pending event acknowledgment request.
3907  *
3908  * Return: Nothing
3909  */
mpi3mr_complete_evt_ack(struct mpi3mr_softc * sc,struct mpi3mr_drvr_cmd * drv_cmd)3910 static void mpi3mr_complete_evt_ack(struct mpi3mr_softc *sc,
3911 	struct mpi3mr_drvr_cmd *drv_cmd)
3912 {
3913 	U16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN;
3914 	struct delayed_evtack_node *delayed_evtack = NULL;
3915 
3916 	if (drv_cmd->ioc_status != MPI3_IOCSTATUS_SUCCESS) {
3917 		mpi3mr_dprint(sc, MPI3MR_EVENT,
3918 		    "%s: Failed IOCStatus(0x%04x) Loginfo(0x%08x)\n", __func__,
3919 		    (drv_cmd->ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
3920 		    drv_cmd->ioc_loginfo);
3921 	}
3922 
3923 	if (!TAILQ_EMPTY(&sc->delayed_evtack_cmds_list)) {
3924 		delayed_evtack = TAILQ_FIRST(&sc->delayed_evtack_cmds_list);
3925 		mpi3mr_dprint(sc, MPI3MR_EVENT,
3926 		    "%s: processing delayed event ack for event %d\n",
3927 		    __func__, delayed_evtack->event);
3928 		mpi3mr_send_evt_ack(sc, delayed_evtack->event, drv_cmd,
3929 		    delayed_evtack->event_ctx);
3930 		TAILQ_REMOVE(&sc->delayed_evtack_cmds_list, delayed_evtack, list);
3931 		free(delayed_evtack, M_MPI3MR);
3932 		return;
3933 	}
3934 	drv_cmd->state = MPI3MR_CMD_NOTUSED;
3935 	drv_cmd->callback = NULL;
3936 	mpi3mr_clear_bit(cmd_idx, sc->evtack_cmds_bitmap);
3937 }
3938 
3939 /**
3940  * mpi3mr_send_evt_ack - Issue event acknowledgment request
3941  * @sc: Adapter instance reference
3942  * @event: MPI3 event id
3943  * @cmdparam: Internal command tracker
3944  * @event_ctx: Event context
3945  *
3946  * Issues an event acknowledgment request to the firmware if a
3947  * free command tracker is available; otherwise the request is
3948  * added to a pend list and processed upon completion of a prior
3949  * event acknowledgment.
3950  *
3951  * Return: Nothing
3952  */
static void mpi3mr_send_evt_ack(struct mpi3mr_softc *sc, U8 event,
	struct mpi3mr_drvr_cmd *cmdparam, U32 event_ctx)
{
	Mpi3EventAckRequest_t evtack_req;
	int retval = 0;
	U8 retrycount = 5;
	U16 cmd_idx = MPI3MR_NUM_EVTACKCMD;
	struct mpi3mr_drvr_cmd *drv_cmd = cmdparam;
	struct delayed_evtack_node *delayed_evtack = NULL;

	/* Caller supplied a tracker (delayed-ack path): skip allocation. */
	if (drv_cmd)
		goto issue_cmd;
	/*
	 * Claim a free event-ack tracker: find a zero bit and try to set
	 * it atomically; retry a few times if another context races us.
	 */
	do {
		cmd_idx = mpi3mr_find_first_zero_bit(sc->evtack_cmds_bitmap,
		    MPI3MR_NUM_EVTACKCMD);
		if (cmd_idx < MPI3MR_NUM_EVTACKCMD) {
			if (!mpi3mr_test_and_set_bit(cmd_idx,
			    sc->evtack_cmds_bitmap))
				break;
			cmd_idx = MPI3MR_NUM_EVTACKCMD;
		}
	} while (retrycount--);

	/*
	 * No tracker available: queue the ack on the delayed list; it is
	 * replayed from mpi3mr_complete_evt_ack() when a tracker frees up.
	 * Allocation failure silently drops the ack.
	 */
	if (cmd_idx >= MPI3MR_NUM_EVTACKCMD) {
		delayed_evtack = malloc(sizeof(*delayed_evtack),M_MPI3MR,
		     M_ZERO | M_NOWAIT);
		if (!delayed_evtack)
			return;
		delayed_evtack->event = event;
		delayed_evtack->event_ctx = event_ctx;
		TAILQ_INSERT_TAIL(&(sc->delayed_evtack_cmds_list), delayed_evtack, list);
		mpi3mr_dprint(sc, MPI3MR_EVENT, "%s : Event ack for event:%d is postponed\n",
		    __func__, event);
		return;
	}
	drv_cmd = &sc->evtack_cmds[cmd_idx];

issue_cmd:
	cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN;

	memset(&evtack_req, 0, sizeof(evtack_req));
	if (drv_cmd->state & MPI3MR_CMD_PENDING) {
		mpi3mr_dprint(sc, MPI3MR_EVENT, "%s: Command is in use\n", __func__);
		goto out;
	}
	/* Build and post the non-blocking event acknowledgment request. */
	drv_cmd->state = MPI3MR_CMD_PENDING;
	drv_cmd->is_waiting = 0;
	drv_cmd->callback = mpi3mr_complete_evt_ack;
	evtack_req.HostTag = htole16(drv_cmd->host_tag);
	evtack_req.Function = MPI3_FUNCTION_EVENT_ACK;
	evtack_req.Event = event;
	evtack_req.EventContext = htole32(event_ctx);
	retval = mpi3mr_submit_admin_cmd(sc, &evtack_req,
	    sizeof(evtack_req));

	if (retval) {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "%s: Admin Post failed\n", __func__);
		goto out_failed;
	}
out:
	return;
out_failed:
	/* Posting failed: reset the tracker and release it to the pool. */
	drv_cmd->state = MPI3MR_CMD_NOTUSED;
	drv_cmd->callback = NULL;
	mpi3mr_clear_bit(cmd_idx, sc->evtack_cmds_bitmap);
}
4019 
4020 /*
4021  * mpi3mr_pcietopochg_evt_th - PCIETopologyChange evt tophalf
4022  * @sc: Adapter instance reference
4023  * @event_reply: Event data
4024  *
4025  * Checks for the reason code and based on that either block I/O
4026  * to device, or unblock I/O to the device, or start the device
4027  * removal handshake with reason as remove with the firmware for
4028  * PCIe devices.
4029  *
4030  * Return: Nothing
4031  */
static void mpi3mr_pcietopochg_evt_th(struct mpi3mr_softc *sc,
	Mpi3EventNotificationReply_t *event_reply)
{
	Mpi3EventDataPcieTopologyChangeList_t *topo_evt =
	    (Mpi3EventDataPcieTopologyChangeList_t *) event_reply->EventData;
	struct mpi3mr_target *target;
	U8 reason_code;
	U16 handle;
	int entry;

	for (entry = 0; entry < topo_evt->NumEntries; entry++) {
		handle = le16toh(topo_evt->PortEntry[entry].AttachedDevHandle);
		if (!handle)
			continue;

		reason_code = topo_evt->PortEntry[entry].PortStatus;
		target = mpi3mr_find_target_by_dev_handle(sc->cam_sc, handle);

		switch (reason_code) {
		case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
			/*
			 * Device is gone: mark it removed, unblock pending
			 * I/O and kick off the removal handshake (even if no
			 * driver target exists for the handle).
			 */
			if (target) {
				target->dev_removed = 1;
				target->dev_removedelay = 0;
				mpi3mr_atomic_set(&target->block_io, 0);
			}
			mpi3mr_dev_rmhs_send_tm(sc, handle, NULL,
			    MPI3_CTRL_OP_REMOVE_DEVICE);
			break;
		case MPI3_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING:
			/* Device may come back: block I/O while we wait. */
			if (target) {
				target->dev_removedelay = 1;
				mpi3mr_atomic_inc(&target->block_io);
			}
			break;
		case MPI3_EVENT_PCIE_TOPO_PS_RESPONDING:
			/* Device returned: undo the delayed-removal block. */
			if (target && target->dev_removedelay) {
				target->dev_removedelay = 0;
				if (mpi3mr_atomic_read(&target->block_io) > 0)
					mpi3mr_atomic_dec(&target->block_io);
			}
			break;
		case MPI3_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
		default:
			break;
		}
	}
}
4078 
4079 /**
4080  * mpi3mr_sastopochg_evt_th - SASTopologyChange evt tophalf
4081  * @sc: Adapter instance reference
4082  * @event_reply: Event data
4083  *
4084  * Checks for the reason code and based on that either block I/O
4085  * to device, or unblock I/O to the device, or start the device
4086  * removal handshake with reason as remove with the firmware for
4087  * SAS/SATA devices.
4088  *
4089  * Return: Nothing
4090  */
static void mpi3mr_sastopochg_evt_th(struct mpi3mr_softc *sc,
	Mpi3EventNotificationReply_t *event_reply)
{
	Mpi3EventDataSasTopologyChangeList_t *topo_evt =
	    (Mpi3EventDataSasTopologyChangeList_t *)event_reply->EventData;
	int i;
	U16 handle;
	U8 reason_code;
	struct mpi3mr_target *tgtdev = NULL;

	for (i = 0; i < topo_evt->NumEntries; i++) {
		handle = le16toh(topo_evt->PhyEntry[i].AttachedDevHandle);
		if (!handle)
			continue;
		reason_code = topo_evt->PhyEntry[i].PhyStatus &
		    MPI3_EVENT_SAS_TOPO_PHY_RC_MASK;
		tgtdev = mpi3mr_find_target_by_dev_handle(sc->cam_sc, handle);
		switch (reason_code) {
		case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING:
			/*
			 * Device is gone: mark it removed, unblock pending
			 * I/O and kick off the removal handshake (even if no
			 * driver target exists for the handle).
			 */
			if (tgtdev) {
				tgtdev->dev_removed = 1;
				tgtdev->dev_removedelay = 0;
				mpi3mr_atomic_set(&tgtdev->block_io, 0);
			}
			mpi3mr_dev_rmhs_send_tm(sc, handle, NULL,
			    MPI3_CTRL_OP_REMOVE_DEVICE);
			break;
		case MPI3_EVENT_SAS_TOPO_PHY_RC_DELAY_NOT_RESPONDING:
			/* Device may come back: block I/O while we wait. */
			if (tgtdev) {
				tgtdev->dev_removedelay = 1;
				mpi3mr_atomic_inc(&tgtdev->block_io);
			}
			break;
		case MPI3_EVENT_SAS_TOPO_PHY_RC_RESPONDING:
			/* Device returned: undo the delayed-removal block. */
			if (tgtdev &&
			    tgtdev->dev_removedelay) {
				tgtdev->dev_removedelay = 0;
				if (mpi3mr_atomic_read(&tgtdev->block_io) > 0)
					mpi3mr_atomic_dec(&tgtdev->block_io);
			}
			/*
			 * Terminate the case explicitly.  The previous
			 * fall-through into the no-op cases below was benign
			 * but fragile, and inconsistent with the matching
			 * PCIe handler mpi3mr_pcietopochg_evt_th().
			 */
			break;
		case MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED:
		default:
			break;
		}
	}
}
4138 /**
4139  * mpi3mr_devstatuschg_evt_th - DeviceStatusChange evt tophalf
4140  * @sc: Adapter instance reference
4141  * @event_reply: Event data
4142  *
4143  * Checks for the reason code and based on that either block I/O
4144  * to device, or unblock I/O to the device, or start the device
4145  * removal handshake with reason as remove/hide acknowledgment
4146  * with the firmware.
4147  *
4148  * Return: Nothing
4149  */
static void mpi3mr_devstatuschg_evt_th(struct mpi3mr_softc *sc,
	Mpi3EventNotificationReply_t *event_reply)
{
	Mpi3EventDataDeviceStatusChange_t *state_change =
	    (Mpi3EventDataDeviceStatusChange_t *) event_reply->EventData;
	struct mpi3mr_target *target = NULL;
	U16 dev_handle = le16toh(state_change->DevHandle);
	/* Action flags derived from the reason code. */
	U8 ublock = 0, block = 0, hide = 0, uhide = 0, delete = 0, remove = 0;

	switch (state_change->ReasonCode) {
	case MPI3_EVENT_DEV_STAT_RC_INT_DEVICE_RESET_STRT:
	case MPI3_EVENT_DEV_STAT_RC_INT_IT_NEXUS_RESET_STRT:
		/* Internal reset started: block I/O until it completes. */
		block = 1;
		break;
	case MPI3_EVENT_DEV_STAT_RC_HIDDEN:
		delete = 1;
		hide = 1;
		break;
	case MPI3_EVENT_DEV_STAT_RC_NOT_HIDDEN:
		uhide = 1;
		break;
	case MPI3_EVENT_DEV_STAT_RC_VD_NOT_RESPONDING:
		delete = 1;
		remove = 1;
		break;
	case MPI3_EVENT_DEV_STAT_RC_INT_DEVICE_RESET_CMP:
	case MPI3_EVENT_DEV_STAT_RC_INT_IT_NEXUS_RESET_CMP:
		/* Internal reset completed: unblock I/O. */
		ublock = 1;
		break;
	default:
		break;
	}

	target = mpi3mr_find_target_by_dev_handle(sc->cam_sc, dev_handle);
	if (!target) {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "%s :target with dev_handle:0x%x not found\n",
		    __func__, dev_handle);
		return;
	}

	if (block)
		mpi3mr_atomic_inc(&target->block_io);

	if (hide)
		target->is_hidden = hide;

	if (uhide) {
		target->is_hidden = 0;
		target->dev_removed = 0;
	}

	if (delete)
		target->dev_removed = 1;

	if (ublock && mpi3mr_atomic_read(&target->block_io) > 0)
		mpi3mr_atomic_dec(&target->block_io);

	/* Start the removal or hidden-ack handshake with the firmware. */
	if (remove)
		mpi3mr_dev_rmhs_send_tm(sc, dev_handle, NULL,
					MPI3_CTRL_OP_REMOVE_DEVICE);
	if (hide)
		mpi3mr_dev_rmhs_send_tm(sc, dev_handle, NULL,
					MPI3_CTRL_OP_HIDDEN_ACK);
}
4220 
4221 /**
4222  * mpi3mr_preparereset_evt_th - Prepareforreset evt tophalf
4223  * @sc: Adapter instance reference
4224  * @event_reply: Event data
4225  *
4226  * Blocks and unblocks host level I/O based on the reason code
4227  *
4228  * Return: Nothing
4229  */
static void mpi3mr_preparereset_evt_th(struct mpi3mr_softc *sc,
	Mpi3EventNotificationReply_t *event_reply)
{
	Mpi3EventDataPrepareForReset_t *evtdata =
	    (Mpi3EventDataPrepareForReset_t *)event_reply->EventData;

	/* Fixed log-message typo: "Recieved" -> "Received" (both branches). */
	if (evtdata->ReasonCode == MPI3_EVENT_PREPARE_RESET_RC_START) {
		mpi3mr_dprint(sc, MPI3MR_EVENT, "%s :Received PrepForReset Event with RC=START\n",
		    __func__);
		/* Already preparing: nothing to do. */
		if (sc->prepare_for_reset)
			return;
		sc->prepare_for_reset = 1;
		sc->prepare_for_reset_timeout_counter = 0;
	} else if (evtdata->ReasonCode == MPI3_EVENT_PREPARE_RESET_RC_ABORT) {
		mpi3mr_dprint(sc, MPI3MR_EVENT, "%s :Received PrepForReset Event with RC=ABORT\n",
		    __func__);
		sc->prepare_for_reset = 0;
		sc->prepare_for_reset_timeout_counter = 0;
	}
	/* This event may require an explicit acknowledgment to the firmware. */
	if ((event_reply->MsgFlags & MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_MASK)
	    == MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_REQUIRED)
		mpi3mr_send_evt_ack(sc, event_reply->Event, NULL,
		    le32toh(event_reply->EventContext));
}
4254 
4255 /**
4256  * mpi3mr_energypackchg_evt_th - Energypackchange evt tophalf
4257  * @sc: Adapter instance reference
4258  * @event_reply: Event data
4259  *
4260  * Identifies the new shutdown timeout value and update.
4261  *
4262  * Return: Nothing
4263  */
static void mpi3mr_energypackchg_evt_th(struct mpi3mr_softc *sc,
	Mpi3EventNotificationReply_t *event_reply)
{
	Mpi3EventDataEnergyPackChange_t *evtdata =
	    (Mpi3EventDataEnergyPackChange_t *)event_reply->EventData;
	U16 shutdown_timeout = le16toh(evtdata->ShutdownTimeout);

	/*
	 * shutdown_timeout is unsigned, so the previous "<= 0" test could
	 * only ever match zero; use an explicit zero check (same behavior,
	 * no misleading signed comparison / compiler warning).
	 */
	if (shutdown_timeout == 0) {
		mpi3mr_dprint(sc, MPI3MR_ERROR,
		    "%s :Invalid Shutdown Timeout received = %d\n",
		    __func__, shutdown_timeout);
		return;
	}

	mpi3mr_dprint(sc, MPI3MR_EVENT,
	    "%s :Previous Shutdown Timeout Value = %d New Shutdown Timeout Value = %d\n",
	    __func__, sc->facts.shutdown_timeout, shutdown_timeout);
	sc->facts.shutdown_timeout = shutdown_timeout;
}
4283 
4284 /**
4285  * mpi3mr_cablemgmt_evt_th - Cable mgmt evt tophalf
4286  * @sc: Adapter instance reference
4287  * @event_reply: Event data
4288  *
4289  * Displays cable management event details.
4290  *
4291  * Return: Nothing
4292  */
static void mpi3mr_cablemgmt_evt_th(struct mpi3mr_softc *sc,
	Mpi3EventNotificationReply_t *event_reply)
{
	Mpi3EventDataCableManagement_t *evt_data =
	    (Mpi3EventDataCableManagement_t *)event_reply->EventData;

	/* Purely informational: log the cable condition and move on. */
	switch (evt_data->Status) {
	case MPI3_EVENT_CABLE_MGMT_STATUS_INSUFFICIENT_POWER:
		mpi3mr_dprint(sc, MPI3MR_INFO, "An active cable with ReceptacleID %d cannot be powered.\n"
		    "Devices connected to this cable are not detected.\n"
		    "This cable requires %d mW of power.\n",
		    evt_data->ReceptacleID,
		    le32toh(evt_data->ActiveCablePowerRequirement));
		break;
	case MPI3_EVENT_CABLE_MGMT_STATUS_DEGRADED:
		mpi3mr_dprint(sc, MPI3MR_INFO, "A cable with ReceptacleID %d is not running at optimal speed\n",
		    evt_data->ReceptacleID);
		break;
	default:
		break;
	}
}
4319 
4320 /**
4321  * mpi3mr_process_events - Event's top-half handler
4322  * @sc: Adapter instance reference
4323  * @event_reply: Event data
4324  *
4325  * Top half of event processing.
4326  *
4327  * Return: Nothing
4328  */
mpi3mr_process_events(struct mpi3mr_softc * sc,uintptr_t data,Mpi3EventNotificationReply_t * event_reply)4329 static void mpi3mr_process_events(struct mpi3mr_softc *sc,
4330     uintptr_t data, Mpi3EventNotificationReply_t *event_reply)
4331 {
4332 	U16 evt_type;
4333 	bool ack_req = 0, process_evt_bh = 0;
4334 	struct mpi3mr_fw_event_work *fw_event;
4335 	U16 sz;
4336 
4337 	if (sc->mpi3mr_flags & MPI3MR_FLAGS_SHUTDOWN)
4338 		goto out;
4339 
4340 	if ((event_reply->MsgFlags & MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_MASK)
4341 	    == MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_REQUIRED)
4342 		ack_req = 1;
4343 
4344 	evt_type = event_reply->Event;
4345 
4346 	switch (evt_type) {
4347 	case MPI3_EVENT_DEVICE_ADDED:
4348 	{
4349 		Mpi3DevicePage0_t *dev_pg0 =
4350 			(Mpi3DevicePage0_t *) event_reply->EventData;
4351 		if (mpi3mr_create_device(sc, dev_pg0))
4352 			mpi3mr_dprint(sc, MPI3MR_ERROR,
4353 			"%s :Failed to add device in the device add event\n",
4354 			__func__);
4355 		else
4356 			process_evt_bh = 1;
4357 		break;
4358 	}
4359 
4360 	case MPI3_EVENT_DEVICE_STATUS_CHANGE:
4361 	{
4362 		process_evt_bh = 1;
4363 		mpi3mr_devstatuschg_evt_th(sc, event_reply);
4364 		break;
4365 	}
4366 	case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
4367 	{
4368 		process_evt_bh = 1;
4369 		mpi3mr_sastopochg_evt_th(sc, event_reply);
4370 		break;
4371 	}
4372 	case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
4373 	{
4374 		process_evt_bh = 1;
4375 		mpi3mr_pcietopochg_evt_th(sc, event_reply);
4376 		break;
4377 	}
4378 	case MPI3_EVENT_PREPARE_FOR_RESET:
4379 	{
4380 		mpi3mr_preparereset_evt_th(sc, event_reply);
4381 		ack_req = 0;
4382 		break;
4383 	}
4384 	case MPI3_EVENT_DEVICE_INFO_CHANGED:
4385 	{
4386 		process_evt_bh = 1;
4387 		break;
4388 	}
4389 	case MPI3_EVENT_LOG_DATA:
4390 	{
4391 		mpi3mr_app_save_logdata(sc, (char*)event_reply->EventData,
4392 					le16toh(event_reply->EventDataLength) * 4);
4393 		break;
4394 	}
4395 	case MPI3_EVENT_ENERGY_PACK_CHANGE:
4396 	{
4397 		mpi3mr_energypackchg_evt_th(sc, event_reply);
4398 		break;
4399 	}
4400 	case MPI3_EVENT_CABLE_MGMT:
4401 	{
4402 		mpi3mr_cablemgmt_evt_th(sc, event_reply);
4403 		break;
4404 	}
4405 
4406 	case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE:
4407 	case MPI3_EVENT_SAS_DISCOVERY:
4408 	case MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
4409 	case MPI3_EVENT_SAS_BROADCAST_PRIMITIVE:
4410 	case MPI3_EVENT_PCIE_ENUMERATION:
4411 		break;
4412 	default:
4413 		mpi3mr_dprint(sc, MPI3MR_INFO, "%s :Event 0x%02x is not handled by driver\n",
4414 		    __func__, evt_type);
4415 		break;
4416 	}
4417 
4418 	if (process_evt_bh || ack_req) {
4419 		fw_event = malloc(sizeof(struct mpi3mr_fw_event_work), M_MPI3MR,
4420 		     M_ZERO|M_NOWAIT);
4421 
4422 		if (!fw_event) {
4423 			printf("%s: allocate failed for fw_event\n", __func__);
4424 			return;
4425 		}
4426 
4427 		sz = le16toh(event_reply->EventDataLength) * 4;
4428 		fw_event->event_data = malloc(sz, M_MPI3MR, M_ZERO|M_NOWAIT);
4429 
4430 		if (!fw_event->event_data) {
4431 			printf("%s: allocate failed for event_data\n", __func__);
4432 			free(fw_event, M_MPI3MR);
4433 			return;
4434 		}
4435 
4436 		bcopy(event_reply->EventData, fw_event->event_data, sz);
4437 		fw_event->event = event_reply->Event;
4438 		if ((event_reply->Event == MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST ||
4439 		    event_reply->Event == MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST ||
4440 		    event_reply->Event == MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE ) &&
4441 		    sc->track_mapping_events)
4442 			sc->pending_map_events++;
4443 
4444 		/*
4445 		 * Events should be processed after Port enable is completed.
4446 		 */
4447 		if ((event_reply->Event == MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST ||
4448 		    event_reply->Event == MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST ) &&
4449 		    !(sc->mpi3mr_flags & MPI3MR_FLAGS_PORT_ENABLE_DONE))
4450 			mpi3mr_startup_increment(sc->cam_sc);
4451 
4452 		fw_event->send_ack = ack_req;
4453 		fw_event->event_context = le32toh(event_reply->EventContext);
4454 		fw_event->event_data_size = sz;
4455 		fw_event->process_event = process_evt_bh;
4456 
4457 		mtx_lock(&sc->fwevt_lock);
4458 		TAILQ_INSERT_TAIL(&sc->cam_sc->ev_queue, fw_event, ev_link);
4459 		taskqueue_enqueue(sc->cam_sc->ev_tq, &sc->cam_sc->ev_task);
4460 		mtx_unlock(&sc->fwevt_lock);
4461 
4462 	}
4463 out:
4464 	return;
4465 }
4466 
/*
 * mpi3mr_handle_events - Entry point for firmware event replies
 * @sc: Adapter instance reference
 * @data: Reply frame DMA address
 * @def_reply: Default reply frame carrying the event notification
 *
 * Records the IOC change count, logs the event and forwards it to the
 * top-half event processor.
 */
static void mpi3mr_handle_events(struct mpi3mr_softc *sc, uintptr_t data,
    Mpi3DefaultReply_t *def_reply)
{
	Mpi3EventNotificationReply_t *event_reply;

	event_reply = (Mpi3EventNotificationReply_t *)def_reply;
	sc->change_count = event_reply->IOCChangeCount;

	mpi3mr_display_event_data(sc, event_reply);
	mpi3mr_process_events(sc, data, event_reply);
}
4478 
/*
 * mpi3mr_process_admin_reply_desc - Process one admin reply descriptor
 * @sc:		Adapter's soft instance
 * @reply_desc:	Admin reply descriptor to decode
 * @reply_dma:	Out parameter; set to the reply frame DMA address for
 *		ADDRESS_REPLY descriptors (so the caller can repost the
 *		frame), 0 otherwise
 *
 * Decodes the descriptor type to recover the host tag, IOC status and
 * log info, routes event notifications to the event handler, and
 * completes the internal driver command tracked by the host tag.
 */
static void mpi3mr_process_admin_reply_desc(struct mpi3mr_softc *sc,
    Mpi3DefaultReplyDescriptor_t *reply_desc, U64 *reply_dma)
{
	U16 reply_desc_type, host_tag = 0, idx;
	U16 ioc_status = MPI3_IOCSTATUS_SUCCESS;
	U32 ioc_loginfo = 0;
	Mpi3StatusReplyDescriptor_t *status_desc;
	Mpi3AddressReplyDescriptor_t *addr_desc;
	Mpi3SuccessReplyDescriptor_t *success_desc;
	Mpi3DefaultReply_t *def_reply = NULL;
	struct mpi3mr_drvr_cmd *cmdptr = NULL;
	Mpi3SCSIIOReply_t *scsi_reply = NULL;
	U8 *sense_buf = NULL;

	*reply_dma = 0;
	reply_desc_type = reply_desc->ReplyFlags &
			    MPI3_REPLY_DESCRIPT_FLAGS_TYPE_MASK;
	switch (reply_desc_type) {
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_STATUS:
		status_desc = (Mpi3StatusReplyDescriptor_t *)reply_desc;
		host_tag = status_desc->HostTag;
		ioc_status = status_desc->IOCStatus;
		/* Capture log info only when a non-success status is reported. */
		if (ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
			ioc_loginfo = status_desc->IOCLogInfo;
		ioc_status &= MPI3_IOCSTATUS_STATUS_MASK;
		break;
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_ADDRESS_REPLY:
		addr_desc = (Mpi3AddressReplyDescriptor_t *)reply_desc;
		*reply_dma = addr_desc->ReplyFrameAddress;
		def_reply = mpi3mr_get_reply_virt_addr(sc, *reply_dma);
		if (def_reply == NULL)
			goto out;
		host_tag = def_reply->HostTag;
		ioc_status = def_reply->IOCStatus;
		if (ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
			ioc_loginfo = def_reply->IOCLogInfo;
		ioc_status &= MPI3_IOCSTATUS_STATUS_MASK;
		/* SCSI IO replies carry a separate sense buffer to copy out. */
		if (def_reply->Function == MPI3_FUNCTION_SCSI_IO) {
			scsi_reply = (Mpi3SCSIIOReply_t *)def_reply;
			sense_buf = mpi3mr_get_sensebuf_virt_addr(sc,
			    scsi_reply->SenseDataBufferAddress);
		}
		break;
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SUCCESS:
		success_desc = (Mpi3SuccessReplyDescriptor_t *)reply_desc;
		host_tag = success_desc->HostTag;
		break;
	default:
		break;
	}
	/* Map the host tag back to the internal driver command it tracks. */
	switch (host_tag) {
	case MPI3MR_HOSTTAG_INITCMDS:
		cmdptr = &sc->init_cmds;
		break;
	case MPI3MR_HOSTTAG_CFGCMDS:
		cmdptr = &sc->cfg_cmds;
		break;
	case MPI3MR_HOSTTAG_IOCTLCMDS:
		cmdptr = &sc->ioctl_cmds;
		break;
	case MPI3MR_HOSTTAG_TMS:
		cmdptr = &sc->host_tm_cmds;
		wakeup((void *)&sc->tm_chan);
		break;
	case MPI3MR_HOSTTAG_PELABORT:
		cmdptr = &sc->pel_abort_cmd;
		break;
	case MPI3MR_HOSTTAG_PELWAIT:
		cmdptr = &sc->pel_cmds;
		break;
	case MPI3MR_HOSTTAG_INVALID:
		/* Unsolicited replies: only event notifications are expected. */
		if (def_reply && def_reply->Function ==
		    MPI3_FUNCTION_EVENT_NOTIFICATION)
			mpi3mr_handle_events(sc, *reply_dma ,def_reply);
		/* FALLTHROUGH */
	default:
		break;
	}

	/* Device-removal and event-ack commands use tag ranges, not fixed tags. */
	if (host_tag >= MPI3MR_HOSTTAG_DEVRMCMD_MIN &&
	    host_tag <= MPI3MR_HOSTTAG_DEVRMCMD_MAX ) {
		idx = host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
		cmdptr = &sc->dev_rmhs_cmds[idx];
	}

	if (host_tag >= MPI3MR_HOSTTAG_EVTACKCMD_MIN &&
	    host_tag <= MPI3MR_HOSTTAG_EVTACKCMD_MAX) {
		idx = host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN;
		cmdptr = &sc->evtack_cmds[idx];
	}

	if (cmdptr) {
		if (cmdptr->state & MPI3MR_CMD_PENDING) {
			cmdptr->state |= MPI3MR_CMD_COMPLETE;
			cmdptr->ioc_loginfo = ioc_loginfo;
			cmdptr->ioc_status = ioc_status;
			cmdptr->state &= ~MPI3MR_CMD_PENDING;
			if (def_reply) {
				cmdptr->state |= MPI3MR_CMD_REPLYVALID;
				memcpy((U8 *)cmdptr->reply, (U8 *)def_reply,
				    sc->reply_sz);
			}
			if (sense_buf && cmdptr->sensebuf) {
				cmdptr->is_senseprst = 1;
				memcpy(cmdptr->sensebuf, sense_buf,
				    MPI3MR_SENSEBUF_SZ);
			}
			/* Wake a sleeping issuer, or fire its async callback. */
			if (cmdptr->is_waiting) {
				complete(&cmdptr->completion);
				cmdptr->is_waiting = 0;
			} else if (cmdptr->callback)
				cmdptr->callback(sc, cmdptr);
		}
	}
out:
	/* Return the consumed sense buffer to the firmware's free pool. */
	if (scsi_reply != NULL && sense_buf != NULL)
		mpi3mr_repost_sense_buf(sc,
		    scsi_reply->SenseDataBufferAddress);
	return;
}
4598 
/*
 * mpi3mr_complete_admin_cmd:	ISR routine for admin commands
 * @sc:				Adapter's soft instance
 *
 * This function processes admin command completions.
 *
 * Return: Number of admin replies processed; 0 when another context is
 * already draining the queue or no new reply has been posted.
 */
static int mpi3mr_complete_admin_cmd(struct mpi3mr_softc *sc)
{
	U32 exp_phase = sc->admin_reply_ephase;
	U32 adm_reply_ci = sc->admin_reply_ci;
	U32 num_adm_reply = 0;
	U64 reply_dma = 0;
	Mpi3DefaultReplyDescriptor_t *reply_desc;
	U16 threshold_comps = 0;

	/*
	 * Single-consumer guard: only one context may drain the admin
	 * reply queue at a time.
	 */
	mtx_lock_spin(&sc->admin_reply_lock);
	if (sc->admin_in_use == false) {
		sc->admin_in_use = true;
		mtx_unlock_spin(&sc->admin_reply_lock);
	} else {
		mtx_unlock_spin(&sc->admin_reply_lock);
		return 0;
	}

	reply_desc = (Mpi3DefaultReplyDescriptor_t *)sc->admin_reply +
		adm_reply_ci;

	/* A phase-bit mismatch means the firmware posted nothing new. */
	if ((reply_desc->ReplyFlags &
	     MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase) {
		mtx_lock_spin(&sc->admin_reply_lock);
		sc->admin_in_use = false;
		mtx_unlock_spin(&sc->admin_reply_lock);
		return 0;
	}

	do {
		sc->admin_req_ci = reply_desc->RequestQueueCI;
		mpi3mr_process_admin_reply_desc(sc, reply_desc, &reply_dma);
		if (reply_dma)
			mpi3mr_repost_reply_buf(sc, reply_dma);
		num_adm_reply++;
		/* Wrap the consumer index; expected phase flips on each wrap. */
		if (++adm_reply_ci == sc->num_admin_replies) {
			adm_reply_ci = 0;
			exp_phase ^= 1;
		}
		reply_desc =
			(Mpi3DefaultReplyDescriptor_t *)sc->admin_reply +
			    adm_reply_ci;
		if ((reply_desc->ReplyFlags &
		     MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
			break;

		/*
		 * Periodically publish the consumer index so the IOC can
		 * reuse reply slots during long completion bursts.
		 */
		if (++threshold_comps == MPI3MR_THRESHOLD_REPLY_COUNT) {
			mpi3mr_regwrite(sc, MPI3_SYSIF_ADMIN_REPLY_Q_CI_OFFSET, adm_reply_ci);
			threshold_comps = 0;
		}
	} while (1);

	mpi3mr_regwrite(sc, MPI3_SYSIF_ADMIN_REPLY_Q_CI_OFFSET, adm_reply_ci);
	sc->admin_reply_ci = adm_reply_ci;
	sc->admin_reply_ephase = exp_phase;
	mtx_lock_spin(&sc->admin_reply_lock);
	sc->admin_in_use = false;
	mtx_unlock_spin(&sc->admin_reply_lock);
	return num_adm_reply;
}
4665 
/*
 * mpi3mr_cmd_done - Finish a CAM-originated I/O command
 * @sc:  Adapter's soft instance
 * @cmd: Driver command being completed
 *
 * Unmaps the request's DMA buffers, cancels the pending timeout
 * callout, hands the CCB back to CAM and returns the command to the
 * free pool.
 */
static void
mpi3mr_cmd_done(struct mpi3mr_softc *sc, struct mpi3mr_cmd *cmd)
{
	mpi3mr_unmap_request(sc, cmd);

	mtx_lock(&sc->mpi3mr_mtx);
	/* Disarm the timeout watchdog if it is still owned by this command. */
	if (cmd->callout_owner) {
		callout_stop(&cmd->callout);
		cmd->callout_owner = false;
	}

	/* A dead controller fails everything back to CAM. */
	if (sc->unrecoverable)
		mpi3mr_set_ccbstatus(cmd->ccb, CAM_DEV_NOT_THERE);

	xpt_done(cmd->ccb);
	cmd->ccb = NULL;
	mtx_unlock(&sc->mpi3mr_mtx);
	mpi3mr_release_command(cmd);
}
4685 
/*
 * mpi3mr_process_op_reply_desc - Process one operational reply descriptor
 * @sc:		Adapter's soft instance
 * @reply_desc:	Operational reply descriptor to decode
 * @reply_dma:	Out parameter; set to the reply frame DMA address for
 *		ADDRESS_REPLY descriptors, 0 otherwise
 *
 * Decodes an I/O completion: recovers the host tag and SCSI status,
 * updates the I/O-throttling accounting, translates the IOC/SCSI
 * status into a CAM CCB status (copying out sense data when valid)
 * and completes the command back to CAM.
 */
void mpi3mr_process_op_reply_desc(struct mpi3mr_softc *sc,
    Mpi3DefaultReplyDescriptor_t *reply_desc, U64 *reply_dma)
{
	U16 reply_desc_type, host_tag = 0;
	U16 ioc_status = MPI3_IOCSTATUS_SUCCESS;
	U32 ioc_loginfo = 0;
	Mpi3StatusReplyDescriptor_t *status_desc = NULL;
	Mpi3AddressReplyDescriptor_t *addr_desc = NULL;
	Mpi3SuccessReplyDescriptor_t *success_desc = NULL;
	Mpi3SCSIIOReply_t *scsi_reply = NULL;
	U8 *sense_buf = NULL;
	U8 scsi_state = 0, scsi_status = 0, sense_state = 0;
	U32 xfer_count = 0, sense_count =0, resp_data = 0;
	struct mpi3mr_cmd *cm = NULL;
	union ccb *ccb;
	struct ccb_scsiio *csio;
	struct mpi3mr_cam_softc *cam_sc;
	U32 target_id;
	U8 *scsi_cdb;
	struct mpi3mr_target *target = NULL;
	U32 ioc_pend_data_len = 0, tg_pend_data_len = 0, data_len_blks = 0;
	struct mpi3mr_throttle_group_info *tg = NULL;
	U8 throttle_enabled_dev = 0;
	/*
	 * NOTE(review): ratelimit starts at 0 and is only incremented inside
	 * the "% 1000" branches below, so "ratelimit % 1000" is initially
	 * false and those throttle-log messages may never fire — verify
	 * whether "ratelimit++ % 1000 == 0" was intended.
	 */
	static int ratelimit;

	*reply_dma = 0;
	reply_desc_type = reply_desc->ReplyFlags &
			    MPI3_REPLY_DESCRIPT_FLAGS_TYPE_MASK;
	switch (reply_desc_type) {
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_STATUS:
		status_desc = (Mpi3StatusReplyDescriptor_t *)reply_desc;
		host_tag = status_desc->HostTag;
		ioc_status = status_desc->IOCStatus;
		/* Capture log info only when a non-success status is reported. */
		if (ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
			ioc_loginfo = status_desc->IOCLogInfo;
		ioc_status &= MPI3_IOCSTATUS_STATUS_MASK;
		break;
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_ADDRESS_REPLY:
		addr_desc = (Mpi3AddressReplyDescriptor_t *)reply_desc;
		*reply_dma = addr_desc->ReplyFrameAddress;
		scsi_reply = mpi3mr_get_reply_virt_addr(sc,
		    *reply_dma);
		if (scsi_reply == NULL) {
			mpi3mr_dprint(sc, MPI3MR_ERROR, "scsi_reply is NULL, "
			    "this shouldn't happen, reply_desc: %p\n",
			    reply_desc);
			goto out;
		}

		host_tag = scsi_reply->HostTag;
		ioc_status = scsi_reply->IOCStatus;
		scsi_status = scsi_reply->SCSIStatus;
		scsi_state = scsi_reply->SCSIState;
		sense_state = (scsi_state & MPI3_SCSI_STATE_SENSE_MASK);
		xfer_count = scsi_reply->TransferCount;
		sense_count = scsi_reply->SenseCount;
		resp_data = scsi_reply->ResponseData;
		sense_buf = mpi3mr_get_sensebuf_virt_addr(sc,
		    scsi_reply->SenseDataBufferAddress);
		if (ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
			ioc_loginfo = scsi_reply->IOCLogInfo;
		ioc_status &= MPI3_IOCSTATUS_STATUS_MASK;
		if (sense_state == MPI3_SCSI_STATE_SENSE_BUFF_Q_EMPTY)
			mpi3mr_dprint(sc, MPI3MR_ERROR, "Ran out of sense buffers\n");

		break;
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SUCCESS:
		success_desc = (Mpi3SuccessReplyDescriptor_t *)reply_desc;
		host_tag = success_desc->HostTag;
		/* FALLTHROUGH */
	default:
		break;
	}

	/*
	 * NOTE(review): host_tag is not range-checked before indexing
	 * cmd_list — presumably the firmware only returns tags the driver
	 * issued; confirm, since an out-of-range tag would index past the
	 * array.
	 */
	cm = sc->cmd_list[host_tag];

	/* Ignore completions for commands already returned to the free pool. */
	if (cm->state == MPI3MR_CMD_STATE_FREE)
		goto out;

	cam_sc = sc->cam_sc;
	ccb = cm->ccb;
	csio = &ccb->csio;
	target_id = csio->ccb_h.target_id;

	scsi_cdb = scsiio_cdb_ptr(csio);

	target = mpi3mr_find_target_by_per_id(cam_sc, target_id);
	/* I/O-throttling bookkeeping: release this I/O's pending-data credit. */
	if (sc->iot_enable) {
		/* Transfer length in 512-byte blocks. */
		data_len_blks = csio->dxfer_len >> 9;

		if (target) {
			tg = target->throttle_group;
			throttle_enabled_dev =
				target->io_throttle_enabled;
		}

		if ((data_len_blks >= sc->io_throttle_data_length) &&
		     throttle_enabled_dev) {
			mpi3mr_atomic_sub(&sc->pend_large_data_sz, data_len_blks);
			ioc_pend_data_len = mpi3mr_atomic_read(
			    &sc->pend_large_data_sz);
			if (tg) {
				mpi3mr_atomic_sub(&tg->pend_large_data_sz,
					data_len_blks);
				tg_pend_data_len = mpi3mr_atomic_read(&tg->pend_large_data_sz);
				if (ratelimit % 1000) {
					mpi3mr_dprint(sc, MPI3MR_IOT,
						"large vd_io completion persist_id(%d), handle(0x%04x), data_len(%d),"
						"ioc_pending(%d), tg_pending(%d), ioc_low(%d), tg_low(%d)\n",
						    target->per_id,
						    target->dev_handle,
						    data_len_blks, ioc_pend_data_len,
						    tg_pend_data_len,
						    sc->io_throttle_low,
						    tg->low);
					ratelimit++;
				}
				/* Leave divert mode once both IOC-wide and
				 * group-wide pending data drop below the low
				 * watermarks. */
				if (tg->io_divert  && ((ioc_pend_data_len <=
				    sc->io_throttle_low) &&
				    (tg_pend_data_len <= tg->low))) {
					tg->io_divert = 0;
					mpi3mr_dprint(sc, MPI3MR_IOT,
						"VD: Coming out of divert perst_id(%d) tg_id(%d)\n",
						target->per_id, tg->id);
					mpi3mr_set_io_divert_for_all_vd_in_tg(
					    sc, tg, 0);
				}
			} else {
				if (ratelimit % 1000) {
					mpi3mr_dprint(sc, MPI3MR_IOT,
					    "large pd_io completion persist_id(%d), handle(0x%04x), data_len(%d), ioc_pending(%d), ioc_low(%d)\n",
					    target->per_id,
					    target->dev_handle,
					    data_len_blks, ioc_pend_data_len,
					    sc->io_throttle_low);
					ratelimit++;
				}

				if (ioc_pend_data_len <= sc->io_throttle_low) {
					target->io_divert = 0;
					mpi3mr_dprint(sc, MPI3MR_IOT,
						"PD: Coming out of divert perst_id(%d)\n",
						target->per_id);
				}
			}

			} else if (target->io_divert) {
			/* Small I/O on a diverted device: re-evaluate divert exit. */
			ioc_pend_data_len = mpi3mr_atomic_read(&sc->pend_large_data_sz);
			if (!tg) {
				if (ratelimit % 1000) {
					mpi3mr_dprint(sc, MPI3MR_IOT,
					    "pd_io completion persist_id(%d), handle(0x%04x), data_len(%d), ioc_pending(%d), ioc_low(%d)\n",
					    target->per_id,
					    target->dev_handle,
					    data_len_blks, ioc_pend_data_len,
					    sc->io_throttle_low);
					ratelimit++;
				}

				if ( ioc_pend_data_len <= sc->io_throttle_low) {
					mpi3mr_dprint(sc, MPI3MR_IOT,
						"PD: Coming out of divert perst_id(%d)\n",
						target->per_id);
					target->io_divert = 0;
				}

			} else if (ioc_pend_data_len <= sc->io_throttle_low) {
				tg_pend_data_len = mpi3mr_atomic_read(&tg->pend_large_data_sz);
				if (ratelimit % 1000) {
					mpi3mr_dprint(sc, MPI3MR_IOT,
						"vd_io completion persist_id(%d), handle(0x%04x), data_len(%d),"
						"ioc_pending(%d), tg_pending(%d), ioc_low(%d), tg_low(%d)\n",
						    target->per_id,
						    target->dev_handle,
						    data_len_blks, ioc_pend_data_len,
						    tg_pend_data_len,
						    sc->io_throttle_low,
						    tg->low);
					ratelimit++;
				}
				if (tg->io_divert  && (tg_pend_data_len <= tg->low)) {
					tg->io_divert = 0;
					mpi3mr_dprint(sc, MPI3MR_IOT,
						"VD: Coming out of divert perst_id(%d) tg_id(%d)\n",
						target->per_id, tg->id);
					mpi3mr_set_io_divert_for_all_vd_in_tg(
					    sc, tg, 0);
				}

			}
		}
	}

	/* Fast path: SUCCESS descriptor means the I/O completed cleanly. */
	if (success_desc) {
		mpi3mr_set_ccbstatus(ccb, CAM_REQ_CMP);
		goto out_success;
	}

	/*
	 * Treat a zero-transfer underrun with BUSY/RESERVATION CONFLICT/
	 * TASK SET FULL as success so the SCSI status (handled below)
	 * drives the outcome.
	 */
	if (ioc_status == MPI3_IOCSTATUS_SCSI_DATA_UNDERRUN
	    && xfer_count == 0 && (scsi_status == MPI3_SCSI_STATUS_BUSY ||
	    scsi_status == MPI3_SCSI_STATUS_RESERVATION_CONFLICT ||
	    scsi_status == MPI3_SCSI_STATUS_TASK_SET_FULL))
		ioc_status = MPI3_IOCSTATUS_SUCCESS;

	/* Copy autosense data into the CCB when the firmware supplied it. */
	if ((sense_state == MPI3_SCSI_STATE_SENSE_VALID) && sense_count
	    && sense_buf) {
		int sense_len, returned_sense_len;

		returned_sense_len = min(le32toh(sense_count),
		    sizeof(struct scsi_sense_data));
		if (returned_sense_len < csio->sense_len)
			csio->sense_resid = csio->sense_len -
			    returned_sense_len;
		else
			csio->sense_resid = 0;

		sense_len = min(returned_sense_len,
		    csio->sense_len - csio->sense_resid);
		bzero(&csio->sense_data, sizeof(csio->sense_data));
		bcopy(sense_buf, &csio->sense_data, sense_len);
		ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
	}

	/* Translate the IOC status into a CAM CCB status. */
	switch (ioc_status) {
	case MPI3_IOCSTATUS_BUSY:
	case MPI3_IOCSTATUS_INSUFFICIENT_RESOURCES:
		mpi3mr_set_ccbstatus(ccb, CAM_REQUEUE_REQ);
		break;
	case MPI3_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		/*
		 * If devinfo is 0 this will be a volume.  In that case don't
		 * tell CAM that the volume is not there.  We want volumes to
		 * be enumerated until they are deleted/removed, not just
		 * failed.
		 */
		if (cm->targ->devinfo == 0)
			mpi3mr_set_ccbstatus(ccb, CAM_REQ_CMP);
		else
			mpi3mr_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		break;
	case MPI3_IOCSTATUS_SCSI_TASK_TERMINATED:
	case MPI3_IOCSTATUS_SCSI_IOC_TERMINATED:
	case MPI3_IOCSTATUS_SCSI_EXT_TERMINATED:
		mpi3mr_set_ccbstatus(ccb, CAM_SCSI_BUSY);
		mpi3mr_dprint(sc, MPI3MR_TRACE,
		    "func: %s line:%d tgt %u Hosttag %u loginfo %x\n",
		    __func__, __LINE__,
		    target_id, cm->hosttag,
		    le32toh(scsi_reply->IOCLogInfo));
		mpi3mr_dprint(sc, MPI3MR_TRACE,
		    "SCSIStatus %x SCSIState %x xfercount %u\n",
		    scsi_reply->SCSIStatus, scsi_reply->SCSIState,
		    le32toh(xfer_count));
		break;
	case MPI3_IOCSTATUS_SCSI_DATA_OVERRUN:
		/* resid is ignored for this condition */
		csio->resid = 0;
		mpi3mr_set_ccbstatus(ccb, CAM_DATA_RUN_ERR);
		break;
	case MPI3_IOCSTATUS_SCSI_DATA_UNDERRUN:
		csio->resid = cm->length - le32toh(xfer_count);
		/* FALLTHROUGH */
	case MPI3_IOCSTATUS_SCSI_RECOVERED_ERROR:
	case MPI3_IOCSTATUS_SUCCESS:
		if ((scsi_reply->IOCStatus & MPI3_IOCSTATUS_STATUS_MASK) ==
		    MPI3_IOCSTATUS_SCSI_RECOVERED_ERROR)
			mpi3mr_dprint(sc, MPI3MR_XINFO, "func: %s line: %d recovered error\n",  __func__, __LINE__);

		/* Completion failed at the transport level. */
		if (scsi_reply->SCSIState & (MPI3_SCSI_STATE_NO_SCSI_STATUS |
		    MPI3_SCSI_STATE_TERMINATED)) {
			mpi3mr_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
			break;
		}

		/* In a modern packetized environment, an autosense failure
		 * implies that there's not much else that can be done to
		 * recover the command.
		 */
		if (scsi_reply->SCSIState & MPI3_SCSI_STATE_SENSE_VALID) {
			mpi3mr_set_ccbstatus(ccb, CAM_AUTOSENSE_FAIL);
			break;
		}

		/*
		 * Intentionally override the normal SCSI status reporting
		 * for these two cases.  These are likely to happen in a
		 * multi-initiator environment, and we want to make sure that
		 * CAM retries these commands rather than fail them.
		 */
		if ((scsi_reply->SCSIStatus == MPI3_SCSI_STATUS_COMMAND_TERMINATED) ||
		    (scsi_reply->SCSIStatus == MPI3_SCSI_STATUS_TASK_ABORTED)) {
			mpi3mr_set_ccbstatus(ccb, CAM_REQ_ABORTED);
			break;
		}

		/* Handle normal status and sense */
		csio->scsi_status = scsi_reply->SCSIStatus;
		if (scsi_reply->SCSIStatus == MPI3_SCSI_STATUS_GOOD)
			mpi3mr_set_ccbstatus(ccb, CAM_REQ_CMP);
		else
			mpi3mr_set_ccbstatus(ccb, CAM_SCSI_STATUS_ERROR);

		if (scsi_reply->SCSIState & MPI3_SCSI_STATE_SENSE_VALID) {
			int sense_len, returned_sense_len;

			returned_sense_len = min(le32toh(scsi_reply->SenseCount),
			    sizeof(struct scsi_sense_data));
			if (returned_sense_len < csio->sense_len)
				csio->sense_resid = csio->sense_len -
				    returned_sense_len;
			else
				csio->sense_resid = 0;

			sense_len = min(returned_sense_len,
			    csio->sense_len - csio->sense_resid);
			bzero(&csio->sense_data, sizeof(csio->sense_data));
			bcopy(cm->sense, &csio->sense_data, sense_len);
			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
		}

		break;
	case MPI3_IOCSTATUS_INVALID_SGL:
		mpi3mr_set_ccbstatus(ccb, CAM_UNREC_HBA_ERROR);
		break;
	case MPI3_IOCSTATUS_EEDP_GUARD_ERROR:
	case MPI3_IOCSTATUS_EEDP_REF_TAG_ERROR:
	case MPI3_IOCSTATUS_EEDP_APP_TAG_ERROR:
	case MPI3_IOCSTATUS_SCSI_PROTOCOL_ERROR:
	case MPI3_IOCSTATUS_INVALID_FUNCTION:
	case MPI3_IOCSTATUS_INTERNAL_ERROR:
	case MPI3_IOCSTATUS_INVALID_FIELD:
	case MPI3_IOCSTATUS_INVALID_STATE:
	case MPI3_IOCSTATUS_SCSI_IO_DATA_ERROR:
	case MPI3_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
	case MPI3_IOCSTATUS_INSUFFICIENT_POWER:
	case MPI3_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
	default:
		csio->resid = cm->length;
		mpi3mr_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		break;
	}

out_success:
	/* Freeze the device queue on any non-successful completion. */
	if (mpi3mr_get_ccbstatus(ccb) != CAM_REQ_CMP) {
		ccb->ccb_h.status |= CAM_DEV_QFRZN;
		xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
	}

	mpi3mr_atomic_dec(&cm->targ->outstanding);
	mpi3mr_cmd_done(sc, cm);
	mpi3mr_dprint(sc, MPI3MR_TRACE, "Completion IO path :"
		" cdb[0]: %x targetid: 0x%x SMID: %x ioc_status: 0x%x ioc_loginfo: 0x%x scsi_status: 0x%x "
		"scsi_state: 0x%x response_data: 0x%x\n", scsi_cdb[0], target_id, host_tag,
		ioc_status, ioc_loginfo, scsi_status, scsi_state, resp_data);
	mpi3mr_atomic_dec(&sc->fw_outstanding);
out:

	/* Return the consumed sense buffer to the firmware's free pool. */
	if (sense_buf)
		mpi3mr_repost_sense_buf(sc,
		    scsi_reply->SenseDataBufferAddress);
	return;
}
5048 
/*
 * mpi3mr_complete_io_cmd:	ISR routine for IO commands
 * @sc:				Adapter's soft instance
 * @irq_ctx:			Driver's internal per IRQ structure
 *
 * This function processes IO command completions.
 *
 * Return: Number of operational replies processed; 0 when another
 * context already owns the queue or no new reply has been posted.
 */
int mpi3mr_complete_io_cmd(struct mpi3mr_softc *sc,
    struct mpi3mr_irq_context *irq_ctx)
{
	struct mpi3mr_op_reply_queue *op_reply_q = irq_ctx->op_reply_q;
	U32 exp_phase = op_reply_q->ephase;
	U32 reply_ci = op_reply_q->ci;
	U32 num_op_replies = 0;
	U64 reply_dma = 0;
	Mpi3DefaultReplyDescriptor_t *reply_desc;
	U16 req_qid = 0, threshold_comps = 0;

	/*
	 * Single-consumer guard: only one context may drain this
	 * operational reply queue at a time.
	 */
	mtx_lock_spin(&op_reply_q->q_lock);
	if (op_reply_q->in_use == false) {
		op_reply_q->in_use = true;
		mtx_unlock_spin(&op_reply_q->q_lock);
	} else {
		mtx_unlock_spin(&op_reply_q->q_lock);
		return 0;
	}

	reply_desc = (Mpi3DefaultReplyDescriptor_t *)op_reply_q->q_base + reply_ci;
	mpi3mr_dprint(sc, MPI3MR_TRACE, "[QID:%d]:reply_desc: (%pa) reply_ci: %x"
		" reply_desc->ReplyFlags: 0x%x\n"
		"reply_q_base_phys: %#016jx reply_q_base: (%pa) exp_phase: %x\n",
		op_reply_q->qid, reply_desc, reply_ci, reply_desc->ReplyFlags, op_reply_q->q_base_phys,
		op_reply_q->q_base, exp_phase);

	/* Bail if no new reply is posted (phase mismatch) or the queue is dead. */
	if (((reply_desc->ReplyFlags &
	     MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase) || !op_reply_q->qid) {
		mtx_lock_spin(&op_reply_q->q_lock);
		op_reply_q->in_use = false;
		mtx_unlock_spin(&op_reply_q->q_lock);
		return 0;
	}

	do {
		/* Propagate the request-queue consumer index (qid is 1-based). */
		req_qid = reply_desc->RequestQueueID;
		sc->op_req_q[req_qid - 1].ci =
		    reply_desc->RequestQueueCI;

		mpi3mr_process_op_reply_desc(sc, reply_desc, &reply_dma);
		mpi3mr_atomic_dec(&op_reply_q->pend_ios);
		if (reply_dma)
			mpi3mr_repost_reply_buf(sc, reply_dma);
		num_op_replies++;
		/* Wrap the consumer index; expected phase flips on each wrap. */
		if (++reply_ci == op_reply_q->num_replies) {
			reply_ci = 0;
			exp_phase ^= 1;
		}
		reply_desc =
		    (Mpi3DefaultReplyDescriptor_t *)op_reply_q->q_base + reply_ci;
		if ((reply_desc->ReplyFlags &
		     MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
			break;

		/*
		 * Periodically publish the consumer index so the IOC can
		 * reuse reply slots during long completion bursts.
		 */
		if (++threshold_comps == MPI3MR_THRESHOLD_REPLY_COUNT) {
			mpi3mr_regwrite(sc, MPI3_SYSIF_OPER_REPLY_Q_N_CI_OFFSET(op_reply_q->qid), reply_ci);
			threshold_comps = 0;
		}

	} while (1);


	mpi3mr_regwrite(sc, MPI3_SYSIF_OPER_REPLY_Q_N_CI_OFFSET(op_reply_q->qid), reply_ci);
	op_reply_q->ci = reply_ci;
	op_reply_q->ephase = exp_phase;
	mtx_lock_spin(&op_reply_q->q_lock);
	op_reply_q->in_use = false;
	mtx_unlock_spin(&op_reply_q->q_lock);
	return num_op_replies;
}
5127 
5128 /*
5129  * mpi3mr_isr:			Primary ISR function
5130  * privdata:			Driver's internal per IRQ structure
5131  *
5132  * This is driver's primary ISR function which is being called whenever any admin/IO
5133  * command completion.
5134  */
mpi3mr_isr(void * privdata)5135 void mpi3mr_isr(void *privdata)
5136 {
5137 	struct mpi3mr_irq_context *irq_ctx = (struct mpi3mr_irq_context *)privdata;
5138 	struct mpi3mr_softc *sc = irq_ctx->sc;
5139 	U16 msi_idx;
5140 
5141 	if (!irq_ctx)
5142 		return;
5143 
5144 	msi_idx = irq_ctx->msix_index;
5145 
5146 	if (!sc->intr_enabled)
5147 		return;
5148 
5149 	if (!msi_idx)
5150 		mpi3mr_complete_admin_cmd(sc);
5151 
5152 	if (irq_ctx->op_reply_q && irq_ctx->op_reply_q->qid) {
5153 		mpi3mr_complete_io_cmd(sc, irq_ctx);
5154 	}
5155 }
5156 
/*
 * mpi3mr_alloc_requests - Allocates host commands
 * @sc: Adapter reference
 *
 * This function allocates controller supported host commands
 *
 * Return: 0 on success and proper error codes on failure
 */
int
mpi3mr_alloc_requests(struct mpi3mr_softc *sc)
{
	struct mpi3mr_cmd *cmd;
	int i, j, nsegs, ret;

	/* One DMA tag shared by all I/O commands, sized for the max SGL. */
	nsegs = sc->max_sgl_entries;
	ret = bus_dma_tag_create( sc->mpi3mr_parent_dmat,    /* parent */
				1, 0,			/* algnmnt, boundary */
				sc->dma_loaddr,		/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				BUS_SPACE_MAXSIZE,	/* maxsize */
                                nsegs,			/* nsegments */
				BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
                                BUS_DMA_ALLOCNOW,	/* flags */
                                busdma_lock_mutex,	/* lockfunc */
				&sc->io_lock,	/* lockarg */
				&sc->buffer_dmat);
	if (ret) {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate buffer DMA tag ret: %d\n", ret);
		return (ENOMEM);
        }

	/*
	 * sc->cmd_list is an array of struct mpi3mr_cmd pointers.
	 * Allocate the dynamic array first and then allocate individual
	 * commands.
	 */
	sc->cmd_list = malloc(sizeof(struct mpi3mr_cmd *) * sc->max_host_ios,
	    M_MPI3MR, M_NOWAIT | M_ZERO);

	if (!sc->cmd_list) {
		device_printf(sc->mpi3mr_dev, "Cannot alloc memory for mpt_cmd_list.\n");
		return (ENOMEM);
	}

	for (i = 0; i < sc->max_host_ios; i++) {
		sc->cmd_list[i] = malloc(sizeof(struct mpi3mr_cmd),
		    M_MPI3MR, M_NOWAIT | M_ZERO);
		if (!sc->cmd_list[i]) {
			/* Unwind the partially built array on failure. */
			for (j = 0; j < i; j++)
				free(sc->cmd_list[j], M_MPI3MR);
			free(sc->cmd_list, M_MPI3MR);
			sc->cmd_list = NULL;
			return (ENOMEM);
		}
	}

	/*
	 * Index 0 is skipped — hosttag 0 is presumably reserved, so
	 * cmd_list[0] is allocated but never placed on the free list.
	 * NOTE(review): on bus_dmamap_create failure below, nothing
	 * allocated above is freed here — presumably the caller's teardown
	 * path reclaims it; verify.
	 */
	for (i = 1; i < sc->max_host_ios; i++) {
		cmd = sc->cmd_list[i];
		cmd->hosttag = i;
		cmd->sc = sc;
		cmd->state = MPI3MR_CMD_STATE_BUSY;
		callout_init_mtx(&cmd->callout, &sc->mpi3mr_mtx, 0);
		cmd->ccb = NULL;
		TAILQ_INSERT_TAIL(&(sc->cmd_list_head), cmd, next);
		if (bus_dmamap_create(sc->buffer_dmat, 0, &cmd->dmamap))
			return ENOMEM;
	}
	return (0);
}
5227 
5228 /*
5229  * mpi3mr_get_command:		Get a coomand structure from free command pool
5230  * @sc:				Adapter soft instance
5231  * Return:			MPT command reference
5232  *
5233  * This function returns an MPT command to the caller.
5234  */
5235 struct mpi3mr_cmd *
mpi3mr_get_command(struct mpi3mr_softc * sc)5236 mpi3mr_get_command(struct mpi3mr_softc *sc)
5237 {
5238 	struct mpi3mr_cmd *cmd = NULL;
5239 
5240 	mtx_lock(&sc->cmd_pool_lock);
5241 	if (!TAILQ_EMPTY(&sc->cmd_list_head)) {
5242 		cmd = TAILQ_FIRST(&sc->cmd_list_head);
5243 		TAILQ_REMOVE(&sc->cmd_list_head, cmd, next);
5244 	} else {
5245 		goto out;
5246 	}
5247 
5248 	mpi3mr_dprint(sc, MPI3MR_TRACE, "Get command SMID: 0x%x\n", cmd->hosttag);
5249 
5250 	memset((uint8_t *)&cmd->io_request, 0, MPI3MR_AREQ_FRAME_SZ);
5251 	cmd->data_dir = 0;
5252 	cmd->ccb = NULL;
5253 	cmd->targ = NULL;
5254 	cmd->state = MPI3MR_CMD_STATE_BUSY;
5255 	cmd->data = NULL;
5256 	cmd->length = 0;
5257 out:
5258 	mtx_unlock(&sc->cmd_pool_lock);
5259 	return cmd;
5260 }
5261 
5262 /*
5263  * mpi3mr_release_command:	Return a cmd to free command pool
5264  * input:			Command packet for return to free command pool
5265  *
5266  * This function returns an MPT command to the free command list.
5267  */
5268 void
mpi3mr_release_command(struct mpi3mr_cmd * cmd)5269 mpi3mr_release_command(struct mpi3mr_cmd *cmd)
5270 {
5271 	struct mpi3mr_softc *sc = cmd->sc;
5272 
5273 	mtx_lock(&sc->cmd_pool_lock);
5274 	TAILQ_INSERT_HEAD(&(sc->cmd_list_head), cmd, next);
5275 	cmd->state = MPI3MR_CMD_STATE_FREE;
5276 	cmd->req_qidx = 0;
5277 	mpi3mr_dprint(sc, MPI3MR_TRACE, "Release command SMID: 0x%x\n", cmd->hosttag);
5278 	mtx_unlock(&sc->cmd_pool_lock);
5279 
5280 	return;
5281 }
5282 
5283  /**
5284  * mpi3mr_free_ioctl_dma_memory - free memory for ioctl dma
5285  * @sc: Adapter instance reference
5286  *
5287  * Free the DMA memory allocated for IOCTL handling purpose.
5288  *
5289  * Return: None
5290  */
mpi3mr_free_ioctl_dma_memory(struct mpi3mr_softc * sc)5291 static void mpi3mr_free_ioctl_dma_memory(struct mpi3mr_softc *sc)
5292 {
5293 	U16 i;
5294 	struct dma_memory_desc *mem_desc;
5295 
5296 	for (i=0; i<MPI3MR_NUM_IOCTL_SGE; i++) {
5297 		mem_desc = &sc->ioctl_sge[i];
5298 		if (mem_desc->addr && mem_desc->dma_addr) {
5299 			bus_dmamap_unload(mem_desc->tag, mem_desc->dmamap);
5300 			bus_dmamem_free(mem_desc->tag, mem_desc->addr, mem_desc->dmamap);
5301 			mem_desc->addr = NULL;
5302 			if (mem_desc->tag != NULL)
5303 				bus_dma_tag_destroy(mem_desc->tag);
5304 		}
5305 	}
5306 
5307 	mem_desc = &sc->ioctl_chain_sge;
5308 	if (mem_desc->addr && mem_desc->dma_addr) {
5309 		bus_dmamap_unload(mem_desc->tag, mem_desc->dmamap);
5310 		bus_dmamem_free(mem_desc->tag, mem_desc->addr, mem_desc->dmamap);
5311 		mem_desc->addr = NULL;
5312 		if (mem_desc->tag != NULL)
5313 			bus_dma_tag_destroy(mem_desc->tag);
5314 	}
5315 
5316 	mem_desc = &sc->ioctl_resp_sge;
5317 	if (mem_desc->addr && mem_desc->dma_addr) {
5318 		bus_dmamap_unload(mem_desc->tag, mem_desc->dmamap);
5319 		bus_dmamem_free(mem_desc->tag, mem_desc->addr, mem_desc->dmamap);
5320 		mem_desc->addr = NULL;
5321 		if (mem_desc->tag != NULL)
5322 			bus_dma_tag_destroy(mem_desc->tag);
5323 	}
5324 
5325 	sc->ioctl_sges_allocated = false;
5326 }
5327 
5328 /**
5329  * mpi3mr_alloc_ioctl_dma_memory - Alloc memory for ioctl dma
5330  * @sc: Adapter instance reference
5331  *
5332  * This function allocates dmaable memory required to handle the
5333  * application issued MPI3 IOCTL requests.
5334  *
5335  * Return: None
5336  */
mpi3mr_alloc_ioctl_dma_memory(struct mpi3mr_softc * sc)5337 void mpi3mr_alloc_ioctl_dma_memory(struct mpi3mr_softc *sc)
5338 {
5339 	struct dma_memory_desc *mem_desc;
5340 	U16 i;
5341 
5342 	for (i=0; i<MPI3MR_NUM_IOCTL_SGE; i++) {
5343 		mem_desc = &sc->ioctl_sge[i];
5344 		mem_desc->size = MPI3MR_IOCTL_SGE_SIZE;
5345 
5346 		if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,    /* parent */
5347 					4, 0,			/* algnmnt, boundary */
5348 					sc->dma_loaddr,		/* lowaddr */
5349 					BUS_SPACE_MAXADDR,	/* highaddr */
5350 					NULL, NULL,		/* filter, filterarg */
5351 					mem_desc->size,		/* maxsize */
5352 					1,			/* nsegments */
5353 					mem_desc->size,		/* maxsegsize */
5354 					0,			/* flags */
5355 					NULL, NULL,		/* lockfunc, lockarg */
5356 					&mem_desc->tag)) {
5357 			mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate request DMA tag\n");
5358 			goto out_failed;
5359 		}
5360 
5361 		if (bus_dmamem_alloc(mem_desc->tag, (void **)&mem_desc->addr,
5362 		    BUS_DMA_NOWAIT, &mem_desc->dmamap)) {
5363 			mpi3mr_dprint(sc, MPI3MR_ERROR, "%s: Cannot allocate replies memory\n", __func__);
5364 			goto out_failed;
5365 		}
5366 		bzero(mem_desc->addr, mem_desc->size);
5367 		bus_dmamap_load(mem_desc->tag, mem_desc->dmamap, mem_desc->addr, mem_desc->size,
5368 		    mpi3mr_memaddr_cb, &mem_desc->dma_addr, BUS_DMA_NOWAIT);
5369 
5370 		if (!mem_desc->addr)
5371 			goto out_failed;
5372 	}
5373 
5374 	mem_desc = &sc->ioctl_chain_sge;
5375 	mem_desc->size = MPI3MR_4K_PGSZ;
5376 	if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,    /* parent */
5377 				4, 0,			/* algnmnt, boundary */
5378 				sc->dma_loaddr,		/* lowaddr */
5379 				BUS_SPACE_MAXADDR,	/* highaddr */
5380 				NULL, NULL,		/* filter, filterarg */
5381 				mem_desc->size,		/* maxsize */
5382 				1,			/* nsegments */
5383 				mem_desc->size,		/* maxsegsize */
5384 				0,			/* flags */
5385 				NULL, NULL,		/* lockfunc, lockarg */
5386 				&mem_desc->tag)) {
5387 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate request DMA tag\n");
5388 		goto out_failed;
5389 	}
5390 
5391 	if (bus_dmamem_alloc(mem_desc->tag, (void **)&mem_desc->addr,
5392 	    BUS_DMA_NOWAIT, &mem_desc->dmamap)) {
5393 		mpi3mr_dprint(sc, MPI3MR_ERROR, "%s: Cannot allocate replies memory\n", __func__);
5394 		goto out_failed;
5395 	}
5396 	bzero(mem_desc->addr, mem_desc->size);
5397 	bus_dmamap_load(mem_desc->tag, mem_desc->dmamap, mem_desc->addr, mem_desc->size,
5398 	    mpi3mr_memaddr_cb, &mem_desc->dma_addr, BUS_DMA_NOWAIT);
5399 
5400 	if (!mem_desc->addr)
5401 		goto out_failed;
5402 
5403 	mem_desc = &sc->ioctl_resp_sge;
5404 	mem_desc->size = MPI3MR_4K_PGSZ;
5405 	if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,    /* parent */
5406 				4, 0,			/* algnmnt, boundary */
5407 				sc->dma_loaddr,		/* lowaddr */
5408 				BUS_SPACE_MAXADDR,	/* highaddr */
5409 				NULL, NULL,		/* filter, filterarg */
5410 				mem_desc->size,		/* maxsize */
5411 				1,			/* nsegments */
5412 				mem_desc->size,		/* maxsegsize */
5413 				0,			/* flags */
5414 				NULL, NULL,		/* lockfunc, lockarg */
5415 				&mem_desc->tag)) {
5416 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate request DMA tag\n");
5417 		goto out_failed;
5418 	}
5419 
5420 	if (bus_dmamem_alloc(mem_desc->tag, (void **)&mem_desc->addr,
5421 	    BUS_DMA_NOWAIT, &mem_desc->dmamap)) {
5422 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot allocate replies memory\n");
5423 		goto out_failed;
5424 	}
5425 	bzero(mem_desc->addr, mem_desc->size);
5426 	bus_dmamap_load(mem_desc->tag, mem_desc->dmamap, mem_desc->addr, mem_desc->size,
5427 	    mpi3mr_memaddr_cb, &mem_desc->dma_addr, BUS_DMA_NOWAIT);
5428 
5429 	if (!mem_desc->addr)
5430 		goto out_failed;
5431 
5432 	sc->ioctl_sges_allocated = true;
5433 
5434 	return;
5435 out_failed:
5436 	printf("cannot allocate DMA memory for the mpt commands"
5437 	    "  from the applications, application interface for MPT command is disabled\n");
5438 	mpi3mr_free_ioctl_dma_memory(sc);
5439 }
5440 
5441 static void inline
mpi3mr_free_dma_mem(struct mpi3mr_softc * sc,struct dma_memory_desc * mem_desc)5442 mpi3mr_free_dma_mem(struct mpi3mr_softc *sc,
5443 		    struct dma_memory_desc *mem_desc)
5444 {
5445 	if (mem_desc->dma_addr)
5446 		bus_dmamap_unload(mem_desc->tag, mem_desc->dmamap);
5447 
5448 	if (mem_desc->addr != NULL) {
5449 		bus_dmamem_free(mem_desc->tag, mem_desc->addr, mem_desc->dmamap);
5450 		mem_desc->addr = NULL;
5451 	}
5452 
5453 	if (mem_desc->tag != NULL)
5454 		bus_dma_tag_destroy(mem_desc->tag);
5455 }
5456 
5457 static int
mpi3mr_alloc_dma_mem(struct mpi3mr_softc * sc,struct dma_memory_desc * mem_desc)5458 mpi3mr_alloc_dma_mem(struct mpi3mr_softc *sc,
5459 		     struct dma_memory_desc *mem_desc)
5460 {
5461 	int retval;
5462 
5463 	if (bus_dma_tag_create(sc->mpi3mr_parent_dmat,  /* parent */
5464 				4, 0,			/* algnmnt, boundary */
5465 				sc->dma_loaddr,		/* lowaddr */
5466 				sc->dma_hiaddr,		/* highaddr */
5467 				NULL, NULL,		/* filter, filterarg */
5468 				mem_desc->size,		/* maxsize */
5469 				1,			/* nsegments */
5470 				mem_desc->size,		/* maxsize */
5471 				0,			/* flags */
5472 				NULL, NULL,		/* lockfunc, lockarg */
5473 				&mem_desc->tag)) {
5474 		mpi3mr_dprint(sc, MPI3MR_ERROR, "%s: Cannot allocate DMA tag\n", __func__);
5475 		return ENOMEM;
5476 	}
5477 
5478 	if (bus_dmamem_alloc(mem_desc->tag, (void **)&mem_desc->addr,
5479 			     BUS_DMA_NOWAIT, &mem_desc->dmamap)) {
5480 		mpi3mr_dprint(sc, MPI3MR_ERROR, "%s: Cannot allocate DMA memory\n", __func__);
5481 		retval = ENOMEM;
5482 		goto out;
5483 	}
5484 
5485 	bzero(mem_desc->addr, mem_desc->size);
5486 
5487 	bus_dmamap_load(mem_desc->tag, mem_desc->dmamap, mem_desc->addr, mem_desc->size,
5488 			mpi3mr_memaddr_cb, &mem_desc->dma_addr, BUS_DMA_NOWAIT);
5489 
5490 	if (!mem_desc->addr) {
5491 		mpi3mr_dprint(sc, MPI3MR_ERROR, "%s: Cannot load DMA map\n", __func__);
5492 		retval = ENOMEM;
5493 		goto out;
5494 	}
5495 	return 0;
5496 out:
5497 	mpi3mr_free_dma_mem(sc, mem_desc);
5498 	return retval;
5499 }
5500 
/*
 * mpi3mr_post_cfg_req:	Post a config request through the admin queue
 * @sc:			Adapter soft instance
 * @cfg_req:		Config request frame; caller sets Action and the SGL,
 *			everything else is filled in here
 *
 * Issues the request using the single sc->cfg_cmds tracker and waits
 * (with MPI3MR_INTADMCMD_TIMEOUT) for completion.  NOTE: HostTag,
 * Function and the page identification fields (PageType = DRIVER,
 * PageNumber = 1, PageAddress = 0) are hardcoded here, so this path
 * currently fetches driver page 1 only.
 *
 * Return: 0 on success, -1 on busy tracker, post failure, timeout or
 * non-SUCCESS IOC status.
 */
static int
mpi3mr_post_cfg_req(struct mpi3mr_softc *sc, Mpi3ConfigRequest_t *cfg_req)
{
	int retval;

	/* The completion lock serializes use of the single cfg_cmds tracker. */
	mtx_lock(&sc->cfg_cmds.completion.lock);
	if (sc->cfg_cmds.state & MPI3MR_CMD_PENDING) {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "Issue cfg request: cfg command is in use\n");
		mtx_unlock(&sc->cfg_cmds.completion.lock);
		return -1;
	}

	/* Claim the tracker and clear status from any previous request. */
	sc->cfg_cmds.state = MPI3MR_CMD_PENDING;
	sc->cfg_cmds.is_waiting = 1;
	sc->cfg_cmds.callback = NULL;
	sc->cfg_cmds.ioc_status = 0;
	sc->cfg_cmds.ioc_loginfo = 0;

	cfg_req->HostTag = htole16(MPI3MR_HOSTTAG_CFGCMDS);
	cfg_req->Function = MPI3_FUNCTION_CONFIG;
	cfg_req->PageType = MPI3_CONFIG_PAGETYPE_DRIVER;
	cfg_req->PageNumber = 1;
	cfg_req->PageAddress = 0;

	init_completion(&sc->cfg_cmds.completion);

	retval = mpi3mr_submit_admin_cmd(sc, cfg_req, sizeof(*cfg_req));
	if (retval) {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "Issue cfg request: Admin Post failed\n");
		goto out;
	}

	wait_for_completion_timeout(&sc->cfg_cmds.completion,
				   (MPI3MR_INTADMCMD_TIMEOUT));

	/*
	 * No completion: escalate to a fault/reset unless a controller reset
	 * already flushed this command (MPI3MR_CMD_RESET set).
	 */
	if (!(sc->cfg_cmds.state & MPI3MR_CMD_COMPLETE)) {
		if (!(sc->cfg_cmds.state & MPI3MR_CMD_RESET)) {
			mpi3mr_dprint(sc, MPI3MR_ERROR, "config request command timed out\n");
			mpi3mr_check_rh_fault_ioc(sc, MPI3MR_RESET_FROM_CFG_REQ_TIMEOUT);
		}
		retval = -1;
		sc->cfg_cmds.is_waiting = 0;
		goto out;
	}

	if ((sc->cfg_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK) !=
	     MPI3_IOCSTATUS_SUCCESS ) {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "config request failed, IOCStatus(0x%04x) "
			      " Loginfo(0x%08x) \n",(sc->cfg_cmds.ioc_status &
			      MPI3_IOCSTATUS_STATUS_MASK), sc->cfg_cmds.ioc_loginfo);
		retval = -1;
	}

out:
	/* Release the tracker for the next config request. */
	sc->cfg_cmds.state = MPI3MR_CMD_NOTUSED;
	mtx_unlock(&sc->cfg_cmds.completion.lock);
	return retval;
}
5559 
/*
 * mpi3mr_process_cfg_req:	Run one config request and copy out the result
 * @sc:				Adapter soft instance
 * @cfg_req:			Config request frame with Action set by caller
 * @cfg_hdr:			Page header from a prior PAGE_HEADER action,
 *				or NULL when this request is the header fetch
 * @cfg_buf:			Caller buffer receiving the page data
 * @cfg_buf_sz:			Size of @cfg_buf in bytes
 *
 * Allocates a DMA buffer sized from the action (header size, or the
 * page length advertised in @cfg_hdr), hooks it into the request SGL,
 * posts the request and, on success, copies the result into @cfg_buf.
 *
 * Return: 0 on success, non-zero on failure.
 */
static int mpi3mr_process_cfg_req(struct mpi3mr_softc *sc,
				  Mpi3ConfigRequest_t *cfg_req,
				  Mpi3ConfigPageHeader_t *cfg_hdr,
				  void *cfg_buf, U32 cfg_buf_sz)
{
	struct dma_memory_desc mem_desc = {0};
	int rc;

	if (cfg_req->Action == MPI3_CONFIG_ACTION_PAGE_HEADER) {
		mem_desc.size = sizeof(Mpi3ConfigPageHeader_t);
	} else {
		/* PageLength is in units of 4-byte dwords. */
		mem_desc.size = le16toh(cfg_hdr->PageLength) * 4;
		cfg_req->PageLength = cfg_hdr->PageLength;
		cfg_req->PageVersion = cfg_hdr->PageVersion;
	}

	rc = mpi3mr_alloc_dma_mem(sc, &mem_desc);
	if (rc) {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "%s: Failed to allocate DMA memory\n", __func__);
		return rc;
	}

	mpi3mr_add_sg_single(&cfg_req->SGL, MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST,
			     mem_desc.size, mem_desc.dma_addr);

	rc = mpi3mr_post_cfg_req(sc, cfg_req);
	if (rc == 0)
		memcpy(cfg_buf, mem_desc.addr, min(mem_desc.size, cfg_buf_sz));
	else
		mpi3mr_dprint(sc, MPI3MR_ERROR, "%s: Failed to post config request\n", __func__);

	mpi3mr_free_dma_mem(sc, &mem_desc);
	return rc;
}
5594 
mpi3mr_cfg_get_driver_pg1(struct mpi3mr_softc * sc)5595 int mpi3mr_cfg_get_driver_pg1(struct mpi3mr_softc *sc)
5596 {
5597 	int retval;
5598 	Mpi3DriverPage1_t driver_pg1 = {0};
5599 	Mpi3ConfigPageHeader_t cfg_hdr = {0};
5600 	Mpi3ConfigRequest_t cfg_req = {0};
5601 
5602 	cfg_req.Action = MPI3_CONFIG_ACTION_PAGE_HEADER;
5603 	retval = mpi3mr_process_cfg_req(sc, &cfg_req, NULL, &cfg_hdr, sizeof(cfg_hdr));
5604 	if (retval)
5605 		goto error;
5606 
5607 	cfg_req.Action = MPI3_CONFIG_ACTION_READ_CURRENT;
5608 	retval = mpi3mr_process_cfg_req(sc, &cfg_req, &cfg_hdr, &driver_pg1, sizeof(driver_pg1));
5609 
5610 error:
5611 	if (!retval && driver_pg1.TimeStampUpdate)
5612 		sc->ts_update_interval = driver_pg1.TimeStampUpdate;
5613 	else
5614 		sc->ts_update_interval = MPI3MR_TSUPDATE_INTERVAL;
5615 
5616 	return retval;
5617 }
5618 
5619 void
mpi3mr_destory_mtx(struct mpi3mr_softc * sc)5620 mpi3mr_destory_mtx(struct mpi3mr_softc *sc)
5621 {
5622 	int i;
5623 	struct mpi3mr_op_req_queue *op_req_q;
5624 	struct mpi3mr_op_reply_queue *op_reply_q;
5625 
5626 	if (sc->admin_reply) {
5627 		if (mtx_initialized(&sc->admin_reply_lock))
5628 			mtx_destroy(&sc->admin_reply_lock);
5629 	}
5630 
5631 	if (sc->op_reply_q) {
5632 		for(i = 0; i < sc->num_queues; i++) {
5633 			op_reply_q = sc->op_reply_q + i;
5634 			if (mtx_initialized(&op_reply_q->q_lock))
5635 				mtx_destroy(&op_reply_q->q_lock);
5636 		}
5637 	}
5638 
5639 	if (sc->op_req_q) {
5640 		for(i = 0; i < sc->num_queues; i++) {
5641 			op_req_q = sc->op_req_q + i;
5642 			if (mtx_initialized(&op_req_q->q_lock))
5643 				mtx_destroy(&op_req_q->q_lock);
5644 		}
5645 	}
5646 
5647 	if (mtx_initialized(&sc->init_cmds.completion.lock))
5648 		mtx_destroy(&sc->init_cmds.completion.lock);
5649 
5650 	if (mtx_initialized(&sc->cfg_cmds.completion.lock))
5651 		mtx_destroy(&sc->cfg_cmds.completion.lock);
5652 
5653 	if (mtx_initialized(&sc->ioctl_cmds.completion.lock))
5654 		mtx_destroy(&sc->ioctl_cmds.completion.lock);
5655 
5656 	if (mtx_initialized(&sc->host_tm_cmds.completion.lock))
5657 		mtx_destroy(&sc->host_tm_cmds.completion.lock);
5658 
5659 	for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
5660 		if (mtx_initialized(&sc->dev_rmhs_cmds[i].completion.lock))
5661 			mtx_destroy(&sc->dev_rmhs_cmds[i].completion.lock);
5662 	}
5663 
5664 	if (mtx_initialized(&sc->reset_mutex))
5665 		mtx_destroy(&sc->reset_mutex);
5666 
5667 	if (mtx_initialized(&sc->target_lock))
5668 		mtx_destroy(&sc->target_lock);
5669 
5670 	if (mtx_initialized(&sc->fwevt_lock))
5671 		mtx_destroy(&sc->fwevt_lock);
5672 
5673 	if (mtx_initialized(&sc->cmd_pool_lock))
5674 		mtx_destroy(&sc->cmd_pool_lock);
5675 
5676 	if (mtx_initialized(&sc->reply_free_q_lock))
5677 		mtx_destroy(&sc->reply_free_q_lock);
5678 
5679 	if (mtx_initialized(&sc->sense_buf_q_lock))
5680 		mtx_destroy(&sc->sense_buf_q_lock);
5681 
5682 	if (mtx_initialized(&sc->chain_buf_lock))
5683 		mtx_destroy(&sc->chain_buf_lock);
5684 
5685 	if (mtx_initialized(&sc->admin_req_lock))
5686 		mtx_destroy(&sc->admin_req_lock);
5687 
5688 	if (mtx_initialized(&sc->mpi3mr_mtx))
5689 		mtx_destroy(&sc->mpi3mr_mtx);
5690 }
5691 
5692 /**
5693  * mpi3mr_free_mem - Freeup adapter level data structures
5694  * @sc: Adapter reference
5695  *
5696  * Return: Nothing.
5697  */
5698 void
mpi3mr_free_mem(struct mpi3mr_softc * sc)5699 mpi3mr_free_mem(struct mpi3mr_softc *sc)
5700 {
5701 	int i;
5702 	struct mpi3mr_op_req_queue *op_req_q;
5703 	struct mpi3mr_op_reply_queue *op_reply_q;
5704 	struct mpi3mr_irq_context *irq_ctx;
5705 
5706 	if (sc->cmd_list) {
5707 		for (i = 0; i < sc->max_host_ios; i++) {
5708 			free(sc->cmd_list[i], M_MPI3MR);
5709 		}
5710 		free(sc->cmd_list, M_MPI3MR);
5711 		sc->cmd_list = NULL;
5712 	}
5713 
5714 	if (sc->pel_seq_number && sc->pel_seq_number_dma) {
5715 		bus_dmamap_unload(sc->pel_seq_num_dmatag, sc->pel_seq_num_dmamap);
5716 		bus_dmamem_free(sc->pel_seq_num_dmatag, sc->pel_seq_number, sc->pel_seq_num_dmamap);
5717 		sc->pel_seq_number = NULL;
5718 		if (sc->pel_seq_num_dmatag != NULL)
5719 			bus_dma_tag_destroy(sc->pel_seq_num_dmatag);
5720 	}
5721 
5722 	if (sc->throttle_groups) {
5723 		free(sc->throttle_groups, M_MPI3MR);
5724 		sc->throttle_groups = NULL;
5725 	}
5726 
5727 	/* Free up operational queues*/
5728 	if (sc->op_req_q) {
5729 		for (i = 0; i < sc->num_queues; i++) {
5730 			op_req_q = sc->op_req_q + i;
5731 			if (op_req_q->q_base && op_req_q->q_base_phys) {
5732 				bus_dmamap_unload(op_req_q->q_base_tag, op_req_q->q_base_dmamap);
5733 				bus_dmamem_free(op_req_q->q_base_tag, op_req_q->q_base, op_req_q->q_base_dmamap);
5734 				op_req_q->q_base = NULL;
5735 				if (op_req_q->q_base_tag != NULL)
5736 					bus_dma_tag_destroy(op_req_q->q_base_tag);
5737 			}
5738 		}
5739 		free(sc->op_req_q, M_MPI3MR);
5740 		sc->op_req_q = NULL;
5741 	}
5742 
5743 	if (sc->op_reply_q) {
5744 		for (i = 0; i < sc->num_queues; i++) {
5745 			op_reply_q = sc->op_reply_q + i;
5746 			if (op_reply_q->q_base && op_reply_q->q_base_phys) {
5747 				bus_dmamap_unload(op_reply_q->q_base_tag, op_reply_q->q_base_dmamap);
5748 				bus_dmamem_free(op_reply_q->q_base_tag, op_reply_q->q_base, op_reply_q->q_base_dmamap);
5749 				op_reply_q->q_base = NULL;
5750 				if (op_reply_q->q_base_tag != NULL)
5751 					bus_dma_tag_destroy(op_reply_q->q_base_tag);
5752 			}
5753 		}
5754 		free(sc->op_reply_q, M_MPI3MR);
5755 		sc->op_reply_q = NULL;
5756 	}
5757 
5758 	/* Free up chain buffers*/
5759 	if (sc->chain_sgl_list) {
5760 		for (i = 0; i < sc->chain_buf_count; i++) {
5761 			if (sc->chain_sgl_list[i].buf && sc->chain_sgl_list[i].buf_phys) {
5762 				bus_dmamap_unload(sc->chain_sgl_list_tag, sc->chain_sgl_list[i].buf_dmamap);
5763 				bus_dmamem_free(sc->chain_sgl_list_tag, sc->chain_sgl_list[i].buf,
5764 						sc->chain_sgl_list[i].buf_dmamap);
5765 				sc->chain_sgl_list[i].buf = NULL;
5766 			}
5767 		}
5768 		if (sc->chain_sgl_list_tag != NULL)
5769 			bus_dma_tag_destroy(sc->chain_sgl_list_tag);
5770 		free(sc->chain_sgl_list, M_MPI3MR);
5771 		sc->chain_sgl_list = NULL;
5772 	}
5773 
5774 	if (sc->chain_bitmap) {
5775 		free(sc->chain_bitmap, M_MPI3MR);
5776 		sc->chain_bitmap = NULL;
5777 	}
5778 
5779 	for (i = 0; i < sc->msix_count; i++) {
5780 		irq_ctx = sc->irq_ctx + i;
5781 		if (irq_ctx)
5782 			irq_ctx->op_reply_q = NULL;
5783 	}
5784 
5785 	/* Free reply_buf_tag */
5786 	if (sc->reply_buf && sc->reply_buf_phys) {
5787 		bus_dmamap_unload(sc->reply_buf_tag, sc->reply_buf_dmamap);
5788 		bus_dmamem_free(sc->reply_buf_tag, sc->reply_buf,
5789 				sc->reply_buf_dmamap);
5790 		sc->reply_buf = NULL;
5791 		if (sc->reply_buf_tag != NULL)
5792 			bus_dma_tag_destroy(sc->reply_buf_tag);
5793 	}
5794 
5795 	/* Free reply_free_q_tag */
5796 	if (sc->reply_free_q && sc->reply_free_q_phys) {
5797 		bus_dmamap_unload(sc->reply_free_q_tag, sc->reply_free_q_dmamap);
5798 		bus_dmamem_free(sc->reply_free_q_tag, sc->reply_free_q,
5799 				sc->reply_free_q_dmamap);
5800 		sc->reply_free_q = NULL;
5801 		if (sc->reply_free_q_tag != NULL)
5802 			bus_dma_tag_destroy(sc->reply_free_q_tag);
5803 	}
5804 
5805 	/* Free sense_buf_tag */
5806 	if (sc->sense_buf && sc->sense_buf_phys) {
5807 		bus_dmamap_unload(sc->sense_buf_tag, sc->sense_buf_dmamap);
5808 		bus_dmamem_free(sc->sense_buf_tag, sc->sense_buf,
5809 				sc->sense_buf_dmamap);
5810 		sc->sense_buf = NULL;
5811 		if (sc->sense_buf_tag != NULL)
5812 			bus_dma_tag_destroy(sc->sense_buf_tag);
5813 	}
5814 
5815 	/* Free sense_buf_q_tag */
5816 	if (sc->sense_buf_q && sc->sense_buf_q_phys) {
5817 		bus_dmamap_unload(sc->sense_buf_q_tag, sc->sense_buf_q_dmamap);
5818 		bus_dmamem_free(sc->sense_buf_q_tag, sc->sense_buf_q,
5819 				sc->sense_buf_q_dmamap);
5820 		sc->sense_buf_q = NULL;
5821 		if (sc->sense_buf_q_tag != NULL)
5822 			bus_dma_tag_destroy(sc->sense_buf_q_tag);
5823 	}
5824 
5825 	/* Free up internal(non-IO) commands*/
5826 	if (sc->init_cmds.reply) {
5827 		free(sc->init_cmds.reply, M_MPI3MR);
5828 		sc->init_cmds.reply = NULL;
5829 	}
5830 
5831 	if (sc->cfg_cmds.reply) {
5832 		free(sc->cfg_cmds.reply, M_MPI3MR);
5833 		sc->cfg_cmds.reply = NULL;
5834 	}
5835 
5836 	if (sc->ioctl_cmds.reply) {
5837 		free(sc->ioctl_cmds.reply, M_MPI3MR);
5838 		sc->ioctl_cmds.reply = NULL;
5839 	}
5840 
5841 	if (sc->pel_cmds.reply) {
5842 		free(sc->pel_cmds.reply, M_MPI3MR);
5843 		sc->pel_cmds.reply = NULL;
5844 	}
5845 
5846 	if (sc->pel_abort_cmd.reply) {
5847 		free(sc->pel_abort_cmd.reply, M_MPI3MR);
5848 		sc->pel_abort_cmd.reply = NULL;
5849 	}
5850 
5851 	if (sc->host_tm_cmds.reply) {
5852 		free(sc->host_tm_cmds.reply, M_MPI3MR);
5853 		sc->host_tm_cmds.reply = NULL;
5854 	}
5855 
5856 	if (sc->log_data_buffer) {
5857 		free(sc->log_data_buffer, M_MPI3MR);
5858 		sc->log_data_buffer = NULL;
5859 	}
5860 
5861 	for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
5862 		if (sc->dev_rmhs_cmds[i].reply) {
5863 			free(sc->dev_rmhs_cmds[i].reply, M_MPI3MR);
5864 			sc->dev_rmhs_cmds[i].reply = NULL;
5865 		}
5866 	}
5867 
5868 	for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) {
5869 		if (sc->evtack_cmds[i].reply) {
5870 			free(sc->evtack_cmds[i].reply, M_MPI3MR);
5871 			sc->evtack_cmds[i].reply = NULL;
5872 		}
5873 	}
5874 
5875 	if (sc->removepend_bitmap) {
5876 		free(sc->removepend_bitmap, M_MPI3MR);
5877 		sc->removepend_bitmap = NULL;
5878 	}
5879 
5880 	if (sc->devrem_bitmap) {
5881 		free(sc->devrem_bitmap, M_MPI3MR);
5882 		sc->devrem_bitmap = NULL;
5883 	}
5884 
5885 	if (sc->evtack_cmds_bitmap) {
5886 		free(sc->evtack_cmds_bitmap, M_MPI3MR);
5887 		sc->evtack_cmds_bitmap = NULL;
5888 	}
5889 
5890 	/* Free Admin reply*/
5891 	if (sc->admin_reply && sc->admin_reply_phys) {
5892 		bus_dmamap_unload(sc->admin_reply_tag, sc->admin_reply_dmamap);
5893 		bus_dmamem_free(sc->admin_reply_tag, sc->admin_reply,
5894 				sc->admin_reply_dmamap);
5895 		sc->admin_reply = NULL;
5896 		if (sc->admin_reply_tag != NULL)
5897 			bus_dma_tag_destroy(sc->admin_reply_tag);
5898 	}
5899 
5900 	/* Free Admin request*/
5901 	if (sc->admin_req && sc->admin_req_phys) {
5902 		bus_dmamap_unload(sc->admin_req_tag, sc->admin_req_dmamap);
5903 		bus_dmamem_free(sc->admin_req_tag, sc->admin_req,
5904 				sc->admin_req_dmamap);
5905 		sc->admin_req = NULL;
5906 		if (sc->admin_req_tag != NULL)
5907 			bus_dma_tag_destroy(sc->admin_req_tag);
5908 	}
5909 	mpi3mr_free_ioctl_dma_memory(sc);
5910 
5911 }
5912 
5913 /**
5914  * mpi3mr_drv_cmd_comp_reset - Flush a internal driver command
5915  * @sc: Adapter instance reference
5916  * @cmdptr: Internal command tracker
5917  *
5918  * Complete an internal driver commands with state indicating it
5919  * is completed due to reset.
5920  *
5921  * Return: Nothing.
5922  */
mpi3mr_drv_cmd_comp_reset(struct mpi3mr_softc * sc,struct mpi3mr_drvr_cmd * cmdptr)5923 static inline void mpi3mr_drv_cmd_comp_reset(struct mpi3mr_softc *sc,
5924 	struct mpi3mr_drvr_cmd *cmdptr)
5925 {
5926 	if (cmdptr->state & MPI3MR_CMD_PENDING) {
5927 		cmdptr->state |= MPI3MR_CMD_RESET;
5928 		cmdptr->state &= ~MPI3MR_CMD_PENDING;
5929 		if (cmdptr->is_waiting) {
5930 			complete(&cmdptr->completion);
5931 			cmdptr->is_waiting = 0;
5932 		} else if (cmdptr->callback)
5933 			cmdptr->callback(sc, cmdptr);
5934 	}
5935 }
5936 
5937 /**
5938  * mpi3mr_flush_drv_cmds - Flush internal driver commands
5939  * @sc: Adapter instance reference
5940  *
5941  * Flush all internal driver commands post reset
5942  *
5943  * Return: Nothing.
5944  */
mpi3mr_flush_drv_cmds(struct mpi3mr_softc * sc)5945 static void mpi3mr_flush_drv_cmds(struct mpi3mr_softc *sc)
5946 {
5947 	int i = 0;
5948 	struct mpi3mr_drvr_cmd *cmdptr;
5949 
5950 	cmdptr = &sc->init_cmds;
5951 	mpi3mr_drv_cmd_comp_reset(sc, cmdptr);
5952 
5953 	cmdptr = &sc->cfg_cmds;
5954 	mpi3mr_drv_cmd_comp_reset(sc, cmdptr);
5955 
5956 	cmdptr = &sc->ioctl_cmds;
5957 	mpi3mr_drv_cmd_comp_reset(sc, cmdptr);
5958 
5959 	cmdptr = &sc->host_tm_cmds;
5960 	mpi3mr_drv_cmd_comp_reset(sc, cmdptr);
5961 
5962 	for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
5963 		cmdptr = &sc->dev_rmhs_cmds[i];
5964 		mpi3mr_drv_cmd_comp_reset(sc, cmdptr);
5965 	}
5966 
5967 	for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) {
5968 		cmdptr = &sc->evtack_cmds[i];
5969 		mpi3mr_drv_cmd_comp_reset(sc, cmdptr);
5970 	}
5971 
5972 	cmdptr = &sc->pel_cmds;
5973 	mpi3mr_drv_cmd_comp_reset(sc, cmdptr);
5974 
5975 	cmdptr = &sc->pel_abort_cmd;
5976 	mpi3mr_drv_cmd_comp_reset(sc, cmdptr);
5977 }
5978 
5979 
5980 /**
5981  * mpi3mr_memset_buffers - memset memory for a controller
5982  * @sc: Adapter instance reference
5983  *
5984  * clear all the memory allocated for a controller, typically
5985  * called post reset to reuse the memory allocated during the
5986  * controller init.
5987  *
5988  * Return: Nothing.
5989  */
mpi3mr_memset_buffers(struct mpi3mr_softc * sc)5990 static void mpi3mr_memset_buffers(struct mpi3mr_softc *sc)
5991 {
5992 	U16 i;
5993 	struct mpi3mr_throttle_group_info *tg;
5994 
5995 	memset(sc->admin_req, 0, sc->admin_req_q_sz);
5996 	memset(sc->admin_reply, 0, sc->admin_reply_q_sz);
5997 
5998 	memset(sc->init_cmds.reply, 0, sc->reply_sz);
5999 	memset(sc->cfg_cmds.reply, 0, sc->reply_sz);
6000 	memset(sc->ioctl_cmds.reply, 0, sc->reply_sz);
6001 	memset(sc->host_tm_cmds.reply, 0, sc->reply_sz);
6002 	memset(sc->pel_cmds.reply, 0, sc->reply_sz);
6003 	memset(sc->pel_abort_cmd.reply, 0, sc->reply_sz);
6004 	for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++)
6005 		memset(sc->dev_rmhs_cmds[i].reply, 0, sc->reply_sz);
6006 	for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++)
6007 		memset(sc->evtack_cmds[i].reply, 0, sc->reply_sz);
6008 	memset(sc->removepend_bitmap, 0, sc->dev_handle_bitmap_sz);
6009 	memset(sc->devrem_bitmap, 0, sc->devrem_bitmap_sz);
6010 	memset(sc->evtack_cmds_bitmap, 0, sc->evtack_cmds_bitmap_sz);
6011 
6012 	for (i = 0; i < sc->num_queues; i++) {
6013 		sc->op_reply_q[i].qid = 0;
6014 		sc->op_reply_q[i].ci = 0;
6015 		sc->op_reply_q[i].num_replies = 0;
6016 		sc->op_reply_q[i].ephase = 0;
6017 		mpi3mr_atomic_set(&sc->op_reply_q[i].pend_ios, 0);
6018 		memset(sc->op_reply_q[i].q_base, 0, sc->op_reply_q[i].qsz);
6019 
6020 		sc->op_req_q[i].ci = 0;
6021 		sc->op_req_q[i].pi = 0;
6022 		sc->op_req_q[i].num_reqs = 0;
6023 		sc->op_req_q[i].qid = 0;
6024 		sc->op_req_q[i].reply_qid = 0;
6025 		memset(sc->op_req_q[i].q_base, 0, sc->op_req_q[i].qsz);
6026 	}
6027 
6028 	mpi3mr_atomic_set(&sc->pend_large_data_sz, 0);
6029 	if (sc->throttle_groups) {
6030 		tg = sc->throttle_groups;
6031 		for (i = 0; i < sc->num_io_throttle_group; i++, tg++) {
6032 			tg->id = 0;
6033 			tg->fw_qd = 0;
6034 			tg->modified_qd = 0;
6035 			tg->io_divert= 0;
6036 			tg->high = 0;
6037 			tg->low = 0;
6038 			mpi3mr_atomic_set(&tg->pend_large_data_sz, 0);
6039 		}
6040  	}
6041 }
6042 
6043 /**
6044  * mpi3mr_invalidate_devhandles -Invalidate device handles
6045  * @sc: Adapter instance reference
6046  *
6047  * Invalidate the device handles in the target device structures
6048  * . Called post reset prior to reinitializing the controller.
6049  *
6050  * Return: Nothing.
6051  */
mpi3mr_invalidate_devhandles(struct mpi3mr_softc * sc)6052 static void mpi3mr_invalidate_devhandles(struct mpi3mr_softc *sc)
6053 {
6054 	struct mpi3mr_target *target = NULL;
6055 
6056 	mtx_lock_spin(&sc->target_lock);
6057 	TAILQ_FOREACH(target, &sc->cam_sc->tgt_list, tgt_next) {
6058 		if (target) {
6059 			target->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
6060 			target->io_throttle_enabled = 0;
6061 			target->io_divert = 0;
6062 			target->throttle_group = NULL;
6063 			target->ws_len = 0;
6064 		}
6065 	}
6066 	mtx_unlock_spin(&sc->target_lock);
6067 }
6068 
6069 /**
6070  * mpi3mr_rfresh_tgtdevs - Refresh target device exposure
6071  * @sc: Adapter instance reference
6072  *
6073  * This is executed post controller reset to identify any
6074  * missing devices during reset and remove from the upper layers
6075  * or expose any newly detected device to the upper layers.
6076  *
6077  * Return: Nothing.
6078  */
6079 
mpi3mr_rfresh_tgtdevs(struct mpi3mr_softc * sc)6080 static void mpi3mr_rfresh_tgtdevs(struct mpi3mr_softc *sc)
6081 {
6082 	struct mpi3mr_target *target = NULL;
6083 	struct mpi3mr_target *target_temp = NULL;
6084 
6085 	TAILQ_FOREACH_SAFE(target, &sc->cam_sc->tgt_list, tgt_next, target_temp) {
6086 		if (target->dev_handle == MPI3MR_INVALID_DEV_HANDLE) {
6087 			if (target->exposed_to_os)
6088 				mpi3mr_remove_device_from_os(sc, target->dev_handle);
6089 			mpi3mr_remove_device_from_list(sc, target, true);
6090 		} else if (target->is_hidden && target->exposed_to_os) {
6091 				mpi3mr_remove_device_from_os(sc, target->dev_handle);
6092 		}
6093 	}
6094 
6095 	TAILQ_FOREACH(target, &sc->cam_sc->tgt_list, tgt_next) {
6096 		if ((target->dev_handle != MPI3MR_INVALID_DEV_HANDLE) &&
6097 		    !target->is_hidden && !target->exposed_to_os) {
6098 			mpi3mr_add_device(sc, target->per_id);
6099 		}
6100 	}
6101 
6102 }
6103 
mpi3mr_flush_io(struct mpi3mr_softc * sc)6104 static void mpi3mr_flush_io(struct mpi3mr_softc *sc)
6105 {
6106 	int i;
6107 	struct mpi3mr_cmd *cmd = NULL;
6108 	union ccb *ccb = NULL;
6109 
6110 	for (i = 0; i < sc->max_host_ios; i++) {
6111 		cmd = sc->cmd_list[i];
6112 
6113 		if (cmd && cmd->ccb) {
6114 			if (cmd->callout_owner) {
6115 				ccb = (union ccb *)(cmd->ccb);
6116 				ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
6117 				mpi3mr_atomic_dec(&sc->fw_outstanding);
6118 				mpi3mr_atomic_dec(&cmd->targ->outstanding);
6119 				mpi3mr_cmd_done(sc, cmd);
6120 			} else {
6121 				cmd->ccb = NULL;
6122 				mpi3mr_release_command(cmd);
6123 			}
6124 		}
6125 	}
6126 }
6127 
6128 /**
6129  * mpi3mr_set_diagsave - Set diag save bit for snapdump
6130  * @sc: Adapter reference
6131  *
6132  * Set diag save bit in IOC configuration register to enable
6133  * snapdump.
6134  *
6135  * Return: Nothing.
6136  */
mpi3mr_set_diagsave(struct mpi3mr_softc * sc)6137 static inline void mpi3mr_set_diagsave(struct mpi3mr_softc *sc)
6138 {
6139 	U32 ioc_config;
6140 
6141 	ioc_config =
6142 	    mpi3mr_regread(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET);
6143 	ioc_config |= MPI3_SYSIF_IOC_CONFIG_DIAG_SAVE;
6144 	mpi3mr_regwrite(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET, ioc_config);
6145 }
6146 
6147 /**
6148  * mpi3mr_issue_reset - Issue reset to the controller
6149  * @sc: Adapter reference
6150  * @reset_type: Reset type
6151  * @reset_reason: Reset reason code
6152  *
6153  * Unlock the host diagnostic registers and write the specific
6154  * reset type to that, wait for reset acknowledgement from the
6155  * controller, if the reset is not successful retry for the
6156  * predefined number of times.
6157  *
6158  * Return: 0 on success, non-zero on failure.
6159  */
mpi3mr_issue_reset(struct mpi3mr_softc * sc,U16 reset_type,U16 reset_reason)6160 static int mpi3mr_issue_reset(struct mpi3mr_softc *sc, U16 reset_type,
6161 	U16 reset_reason)
6162 {
6163 	int retval = -1;
6164 	U8 unlock_retry_count = 0;
6165 	U32 host_diagnostic = 0, ioc_status, ioc_config, scratch_pad0;
6166 	U32 timeout = MPI3MR_RESET_ACK_TIMEOUT * 10;
6167 
6168 	if ((reset_type != MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET) &&
6169 	    (reset_type != MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT))
6170 		return retval;
6171 	if (sc->unrecoverable)
6172 		return retval;
6173 
6174 	if (reset_reason == MPI3MR_RESET_FROM_FIRMWARE) {
6175 		retval = 0;
6176 		return retval;
6177 	}
6178 
6179 	mpi3mr_dprint(sc, MPI3MR_INFO, "%s reset due to %s(0x%x)\n",
6180 	    mpi3mr_reset_type_name(reset_type),
6181 	    mpi3mr_reset_rc_name(reset_reason), reset_reason);
6182 
6183 	mpi3mr_clear_reset_history(sc);
6184 	do {
6185 		mpi3mr_dprint(sc, MPI3MR_INFO,
6186 		    "Write magic sequence to unlock host diag register (retry=%d)\n",
6187 		    ++unlock_retry_count);
6188 		if (unlock_retry_count >= MPI3MR_HOSTDIAG_UNLOCK_RETRY_COUNT) {
6189 			mpi3mr_dprint(sc, MPI3MR_ERROR,
6190 			    "%s reset failed! due to host diag register unlock failure"
6191 			    "host_diagnostic(0x%08x)\n", mpi3mr_reset_type_name(reset_type),
6192 			    host_diagnostic);
6193 			sc->unrecoverable = 1;
6194 			return retval;
6195 		}
6196 
6197 		mpi3mr_regwrite(sc, MPI3_SYSIF_WRITE_SEQUENCE_OFFSET,
6198 			MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_FLUSH);
6199 		mpi3mr_regwrite(sc, MPI3_SYSIF_WRITE_SEQUENCE_OFFSET,
6200 			MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_1ST);
6201 		mpi3mr_regwrite(sc, MPI3_SYSIF_WRITE_SEQUENCE_OFFSET,
6202 			MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND);
6203 		mpi3mr_regwrite(sc, MPI3_SYSIF_WRITE_SEQUENCE_OFFSET,
6204 			MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_3RD);
6205 		mpi3mr_regwrite(sc, MPI3_SYSIF_WRITE_SEQUENCE_OFFSET,
6206 			MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_4TH);
6207 		mpi3mr_regwrite(sc, MPI3_SYSIF_WRITE_SEQUENCE_OFFSET,
6208 			MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_5TH);
6209 		mpi3mr_regwrite(sc, MPI3_SYSIF_WRITE_SEQUENCE_OFFSET,
6210 			MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_6TH);
6211 
6212 		DELAY(1000); /* delay in usec */
6213 		host_diagnostic = mpi3mr_regread(sc, MPI3_SYSIF_HOST_DIAG_OFFSET);
6214 		mpi3mr_dprint(sc, MPI3MR_INFO,
6215 		    "wrote magic sequence: retry_count(%d), host_diagnostic(0x%08x)\n",
6216 		    unlock_retry_count, host_diagnostic);
6217 	} while (!(host_diagnostic & MPI3_SYSIF_HOST_DIAG_DIAG_WRITE_ENABLE));
6218 
6219 	if (reset_type == MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT)
6220 		mpi3mr_set_diagsave(sc);
6221 
6222 	scratch_pad0 = ((MPI3MR_RESET_REASON_OSTYPE_FREEBSD <<
6223 			MPI3MR_RESET_REASON_OSTYPE_SHIFT) |
6224 			(sc->facts.ioc_num <<
6225 			MPI3MR_RESET_REASON_IOCNUM_SHIFT) | reset_reason);
6226 	mpi3mr_regwrite(sc, MPI3_SYSIF_SCRATCHPAD0_OFFSET, scratch_pad0);
6227 	mpi3mr_regwrite(sc, MPI3_SYSIF_HOST_DIAG_OFFSET, host_diagnostic | reset_type);
6228 
6229 	if (reset_type == MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET) {
6230 		do {
6231 			ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);
6232 			if (ioc_status &
6233 			    MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) {
6234 				ioc_config =
6235 				    mpi3mr_regread(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET);
6236 				if (mpi3mr_soft_reset_success(ioc_status,
6237 				    ioc_config)) {
6238 					mpi3mr_clear_reset_history(sc);
6239 					retval = 0;
6240 					break;
6241 				}
6242 			}
6243 			DELAY(100 * 1000);
6244 		} while (--timeout);
6245 	} else if (reset_type == MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT) {
6246 		do {
6247 			ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);
6248 			if (mpi3mr_diagfault_success(sc, ioc_status)) {
6249 				retval = 0;
6250 				break;
6251 			}
6252 			DELAY(100 * 1000);
6253 		} while (--timeout);
6254 	}
6255 
6256 	mpi3mr_regwrite(sc, MPI3_SYSIF_WRITE_SEQUENCE_OFFSET,
6257 		MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND);
6258 
6259 	ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);
6260 	ioc_config = mpi3mr_regread(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET);
6261 
6262 	mpi3mr_dprint(sc, MPI3MR_INFO,
6263 	    "IOC Status/Config after %s reset is (0x%x)/(0x%x)\n",
6264 	    !retval ? "successful":"failed", ioc_status,
6265 	    ioc_config);
6266 
6267 	if (retval)
6268 		sc->unrecoverable = 1;
6269 
6270 	return retval;
6271 }
6272 
mpi3mr_cleanup_event_taskq(struct mpi3mr_softc * sc)6273 inline void mpi3mr_cleanup_event_taskq(struct mpi3mr_softc *sc)
6274 {
6275 	/*
6276 	 * Block the taskqueue before draining.  This means any new tasks won't
6277 	 * be queued to the taskqueue worker thread.  But it doesn't stop the
6278 	 * current workers that are running.  taskqueue_drain waits for those
6279 	 * correctly in the case of thread backed taskqueues.  The while loop
6280 	 * ensures that all taskqueue threads have finished their current tasks.
6281 	 */
6282 	taskqueue_block(sc->cam_sc->ev_tq);
6283 	while (taskqueue_cancel(sc->cam_sc->ev_tq, &sc->cam_sc->ev_task, NULL) != 0) {
6284 		taskqueue_drain(sc->cam_sc->ev_tq, &sc->cam_sc->ev_task);
6285 	}
6286 }
6287 
/**
 * mpi3mr_soft_reset_handler - Reset the controller
 * @sc: Adapter instance reference
 * @reset_reason: Reset reason code
 * @snapdump: snapdump enable/disable bit
 *
 * This is a handler for recovering controller by issuing soft
 * reset or diag fault reset. This is a blocking function and
 * when one reset is executed if any other resets they will be
 * blocked. All IOCTLs/IO will be blocked during the reset. If
 * controller reset is successful then the controller will be
 * reinitialized, otherwise the controller will be marked as not
 * recoverable
 *
 * Return: 0 on success, non-zero on failure.
 */
int mpi3mr_soft_reset_handler(struct mpi3mr_softc *sc,
	U16 reset_reason, bool snapdump)
{
	int retval = 0, i = 0;
	enum mpi3mr_iocstate ioc_state;

	mpi3mr_dprint(sc, MPI3MR_INFO, "soft reset invoked: reason code: %s\n",
	    mpi3mr_reset_rc_name(reset_reason));

	/* IOCTL-initiated resets collect a snapdump only when explicitly requested. */
	if ((reset_reason == MPI3MR_RESET_FROM_IOCTL) &&
	     (sc->reset.ioctl_reset_snapdump != true))
		snapdump = false;

	/* Wait (1 s per poll) for any in-progress diag save to finish. */
	mpi3mr_dprint(sc, MPI3MR_INFO,
	    "soft_reset_handler: wait if diag save is in progress\n");
	while (sc->diagsave_timeout)
		DELAY(1000 * 1000);

	/* Bail out early if the controller can no longer be recovered. */
	ioc_state = mpi3mr_get_iocstate(sc);
	if (ioc_state == MRIOC_STATE_UNRECOVERABLE) {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "controller is in unrecoverable state, exit\n");
		sc->reset.type = MPI3MR_NO_RESET;
		sc->reset.reason = MPI3MR_DEFAULT_RESET_REASON;
		sc->reset.status = -1;
		sc->reset.ioctl_reset_snapdump = false;
		return -1;
	}

	/* Only one reset may run at a time; concurrent requests are rejected. */
	if (sc->reset_in_progress) {
		mpi3mr_dprint(sc, MPI3MR_INFO, "reset is already in progress, exit\n");
		return -1;
	}

	/* Pause IOs, drain and block the event taskqueue */
	xpt_freeze_simq(sc->cam_sc->sim, 1);

	mpi3mr_cleanup_event_taskq(sc);

	sc->reset_in_progress = 1;
	sc->block_ioctls = 1;

	/* Kick the timestamp sync thread so it notices the reset state. */
	if (sc->timestamp_thread_active)
		wakeup(&sc->timestamp_chan);

	/*
	 * Wait (1 s per iteration, bounded by PEND_IOCTLS_COMP_WAIT_TIME)
	 * for pending IOCTLs to complete; give up early if the controller
	 * faults, since those IOCTLs will never finish.
	 */
	while (mpi3mr_atomic_read(&sc->pend_ioctls) && (i < PEND_IOCTLS_COMP_WAIT_TIME)) {
		ioc_state = mpi3mr_get_iocstate(sc);
		if (ioc_state == MRIOC_STATE_FAULT)
			break;
		i++;
		if (!(i % 5)) {
			mpi3mr_dprint(sc, MPI3MR_INFO,
			    "[%2ds]waiting for IOCTL to be finished from %s\n", i, __func__);
		}
		DELAY(1000 * 1000);
	}

	/*
	 * Mask all events before resetting, except for reset reasons where
	 * the controller is already faulted/resetting and the notification
	 * request would not be serviced anyway.
	 */
	if ((!snapdump) && (reset_reason != MPI3MR_RESET_FROM_FAULT_WATCH) &&
	    (reset_reason != MPI3MR_RESET_FROM_FIRMWARE) &&
	    (reset_reason != MPI3MR_RESET_FROM_CIACTIV_FAULT)) {

		mpi3mr_dprint(sc, MPI3MR_INFO, "Turn off events prior to reset\n");

		for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
			sc->event_masks[i] = -1;
		mpi3mr_issue_event_notification(sc);
	}

	mpi3mr_disable_interrupts(sc);

	/* Collect a snapdump (diag fault) before the actual soft reset. */
	if (snapdump)
		mpi3mr_trigger_snapdump(sc, reset_reason);

	retval = mpi3mr_issue_reset(sc,
	    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET, reset_reason);
	if (retval) {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to issue soft reset to the ioc\n");
		goto out;
	}

	/* Reset succeeded: fail back outstanding work and wipe host state. */
	mpi3mr_flush_drv_cmds(sc);
	mpi3mr_flush_io(sc);
	mpi3mr_invalidate_devhandles(sc);
	mpi3mr_memset_buffers(sc);

	if (sc->prepare_for_reset) {
		sc->prepare_for_reset = 0;
		sc->prepare_for_reset_timeout_counter = 0;
	}

	retval = mpi3mr_initialize_ioc(sc, MPI3MR_INIT_TYPE_RESET);
	if (retval) {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "reinit after soft reset failed: reason %d\n",
		    reset_reason);
		goto out;
	}

	/* 10 s pause after reinit — presumably settle time before resuming
	 * I/O / device rediscovery; TODO confirm the intent of this delay. */
	DELAY((1000 * 1000) * 10);
out:
	if (!retval) {
		/* Success: unblock IOCTLs, refresh targets, re-arm PEL wait. */
		sc->diagsave_timeout = 0;
		sc->reset_in_progress = 0;
		mpi3mr_rfresh_tgtdevs(sc);
		sc->ts_update_counter = 0;
		sc->block_ioctls = 0;
		sc->pel_abort_requested = 0;
		if (sc->pel_wait_pend) {
			sc->pel_cmds.retry_count = 0;
			mpi3mr_issue_pel_wait(sc, &sc->pel_cmds);
			mpi3mr_app_send_aen(sc);
		}
	} else {
		/*
		 * Failure: force a diag fault (if not already faulted) so a
		 * snapdump is available, then mark the adapter unrecoverable.
		 */
		ioc_state = mpi3mr_get_iocstate(sc);
		if (ioc_state != MRIOC_STATE_FAULT)
			mpi3mr_issue_reset(sc,
			    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, reset_reason);

		sc->unrecoverable = 1;
		sc->reset_in_progress = 0;
		sc->block_ioctls = 0;
	}

	mpi3mr_dprint(sc, MPI3MR_INFO, "Soft Reset: %s\n", ((retval == 0) ? "SUCCESS" : "FAILED"));

	/* Resume event processing and I/O regardless of the outcome. */
	taskqueue_unblock(sc->cam_sc->ev_tq);
	xpt_release_simq(sc->cam_sc->sim, 1);

	sc->reset.type = MPI3MR_NO_RESET;
	sc->reset.reason = MPI3MR_DEFAULT_RESET_REASON;
	sc->reset.status = retval;
	sc->reset.ioctl_reset_snapdump = false;

	return retval;
}
6437 
/**
 * mpi3mr_issue_ioc_shutdown - shutdown controller
 * @sc: Adapter instance reference
 *
 * Send shutdown notification to the controller and wait for the
 * shutdown_timeout for it to be completed.
 *
 * Return: Nothing.
 */
static void mpi3mr_issue_ioc_shutdown(struct mpi3mr_softc *sc)
{
	U32 ioc_config, ioc_status;
	U8 retval = 1, retry = 0;
	/* Poll interval is 100 ms, so timeout counts tenths of a second. */
	U32 timeout = MPI3MR_DEFAULT_SHUTDOWN_TIME * 10;

	mpi3mr_dprint(sc, MPI3MR_INFO, "sending shutdown notification\n");
	if (sc->unrecoverable) {
		mpi3mr_dprint(sc, MPI3MR_ERROR,
		    "controller is unrecoverable, shutdown not issued\n");
		return;
	}
	/* Do not re-issue if the controller already reports shutdown in progress. */
	ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);
	if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
	    == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_IN_PROGRESS) {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "shutdown already in progress\n");
		return;
	}

	/* Request a normal device shutdown via the IOC config register. */
	ioc_config = mpi3mr_regread(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET);
	ioc_config |= MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_NORMAL;
	ioc_config |= MPI3_SYSIF_IOC_CONFIG_DEVICE_SHUTDOWN_SEND_REQ;

	mpi3mr_regwrite(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET, ioc_config);

	/* Honor the firmware-advertised shutdown timeout when available. */
	if (sc->facts.shutdown_timeout)
		timeout = sc->facts.shutdown_timeout * 10;

	do {
		ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);
		if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
		    == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_COMPLETE) {
			retval = 0;
			break;
		}

		if (sc->unrecoverable)
			break;

		/*
		 * If the controller faults mid-shutdown, attempt a soft
		 * reset and re-issue the shutdown request, up to
		 * MPI3MR_MAX_SHUTDOWN_RETRY_COUNT times.
		 */
		if ((ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) {
			mpi3mr_print_fault_info(sc);

			if (retry >= MPI3MR_MAX_SHUTDOWN_RETRY_COUNT)
				break;

			if (mpi3mr_issue_reset(sc,
			    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET,
			    MPI3MR_RESET_FROM_CTLR_CLEANUP))
				break;

			ioc_config = mpi3mr_regread(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET);
			ioc_config |= MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_NORMAL;
			ioc_config |= MPI3_SYSIF_IOC_CONFIG_DEVICE_SHUTDOWN_SEND_REQ;

			mpi3mr_regwrite(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET, ioc_config);

			/* Restart the wait window for the re-issued request. */
			if (sc->facts.shutdown_timeout)
				timeout = sc->facts.shutdown_timeout * 10;

			retry++;
		}

                DELAY(100 * 1000);

	} while (--timeout);

	ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);
	ioc_config = mpi3mr_regread(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET);

	if (retval) {
		if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
		    == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_IN_PROGRESS)
			mpi3mr_dprint(sc, MPI3MR_ERROR,
			    "shutdown still in progress after timeout\n");
	}

	mpi3mr_dprint(sc, MPI3MR_INFO,
	    "ioc_status/ioc_config after %s shutdown is (0x%x)/(0x%x)\n",
	    (!retval)?"successful":"failed", ioc_status,
	    ioc_config);
}
6528 
6529 /**
6530  * mpi3mr_cleanup_ioc - Cleanup controller
6531  * @sc: Adapter instance reference
6532 
6533  * controller cleanup handler, Message unit reset or soft reset
6534  * and shutdown notification is issued to the controller.
6535  *
6536  * Return: Nothing.
6537  */
mpi3mr_cleanup_ioc(struct mpi3mr_softc * sc)6538 void mpi3mr_cleanup_ioc(struct mpi3mr_softc *sc)
6539 {
6540 	enum mpi3mr_iocstate ioc_state;
6541 
6542 	mpi3mr_dprint(sc, MPI3MR_INFO, "cleaning up the controller\n");
6543 	mpi3mr_disable_interrupts(sc);
6544 
6545 	ioc_state = mpi3mr_get_iocstate(sc);
6546 
6547 	if ((!sc->unrecoverable) && (!sc->reset_in_progress) &&
6548 	    (ioc_state == MRIOC_STATE_READY)) {
6549 		if (mpi3mr_mur_ioc(sc,
6550 		    MPI3MR_RESET_FROM_CTLR_CLEANUP))
6551 			mpi3mr_issue_reset(sc,
6552 			    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET,
6553 			    MPI3MR_RESET_FROM_MUR_FAILURE);
6554 		mpi3mr_issue_ioc_shutdown(sc);
6555 	}
6556 
6557 	mpi3mr_dprint(sc, MPI3MR_INFO, "controller cleanup completed\n");
6558 }
6559