xref: /freebsd/sys/dev/mpi3mr/mpi3mr_cam.c (revision 3208a189c1e2c4ef35daa432fe45629a043d7047)
1 /*
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2020-2023, Broadcom Inc. All rights reserved.
5  * Support: <fbsd-storage-driver.pdl@broadcom.com>
6  *
7  * Authors: Sumit Saxena <sumit.saxena@broadcom.com>
8  *	    Chandrakanth Patil <chandrakanth.patil@broadcom.com>
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions are
12  * met:
13  *
14  * 1. Redistributions of source code must retain the above copyright notice,
15  *    this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright notice,
17  *    this list of conditions and the following disclaimer in the documentation and/or other
18  *    materials provided with the distribution.
19  * 3. Neither the name of the Broadcom Inc. nor the names of its contributors
20  *    may be used to endorse or promote products derived from this software without
21  *    specific prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
24  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
27  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
30  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
31  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
32  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
33  * POSSIBILITY OF SUCH DAMAGE.
34  *
35  * The views and conclusions contained in the software and documentation are
36  * those of the authors and should not be interpreted as representing
37  * official policies,either expressed or implied, of the FreeBSD Project.
38  *
39  * Mail to: Broadcom Inc 1320 Ridder Park Dr, San Jose, CA 95131
40  *
41  * Broadcom Inc. (Broadcom) MPI3MR Adapter FreeBSD
42  */
43 
44 #include <sys/types.h>
45 #include <sys/param.h>
46 #include <sys/systm.h>
47 #include <sys/kernel.h>
48 #include <sys/selinfo.h>
49 #include <sys/module.h>
50 #include <sys/bus.h>
51 #include <sys/conf.h>
52 #include <sys/bio.h>
53 #include <sys/malloc.h>
54 #include <sys/uio.h>
55 #include <sys/sysctl.h>
56 #include <sys/endian.h>
57 #include <sys/queue.h>
58 #include <sys/kthread.h>
59 #include <sys/taskqueue.h>
60 #include <sys/sbuf.h>
61 
62 #include <machine/bus.h>
63 #include <machine/resource.h>
64 #include <sys/rman.h>
65 
66 #include <machine/stdarg.h>
67 
68 #include <cam/cam.h>
69 #include <cam/cam_ccb.h>
70 #include <cam/cam_debug.h>
71 #include <cam/cam_sim.h>
72 #include <cam/cam_xpt_sim.h>
73 #include <cam/cam_xpt_periph.h>
74 #include <cam/cam_periph.h>
75 #include <cam/scsi/scsi_all.h>
76 #include <cam/scsi/scsi_message.h>
77 #include <cam/scsi/smp_all.h>
78 
79 #include <dev/nvme/nvme.h>
80 #include "mpi/mpi30_api.h"
81 #include "mpi3mr_cam.h"
82 #include "mpi3mr.h"
83 #include <sys/time.h>			/* XXX for pcpu.h */
84 #include <sys/pcpu.h>			/* XXX for PCPU_GET */
85 
/* Linux-style helper: id of the CPU currently executing. */
#define	smp_processor_id()  PCPU_GET(cpuid)

/* Forward declarations for static routines defined later in this file. */
static void
mpi3mr_enqueue_request(struct mpi3mr_softc *sc, struct mpi3mr_cmd *cm);
static void
mpi3mr_map_request(struct mpi3mr_softc *sc, struct mpi3mr_cmd *cm);
void
mpi3mr_release_simq_reinit(struct mpi3mr_cam_softc *cam_sc);
static void
mpi3mr_freeup_events(struct mpi3mr_softc *sc);

/* Helpers implemented in other translation units of the driver. */
extern int
mpi3mr_register_events(struct mpi3mr_softc *sc);
extern void mpi3mr_add_sg_single(void *paddr, U8 flags, U32 length,
    bus_addr_t dma_addr);
extern void mpi3mr_build_zero_len_sge(void *paddr);

/* Running event counter; NOTE(review): its consumers are outside this chunk. */
static U32 event_count;
104 
/*
 * mpi3mr_prepare_sgls - bus_dma load callback: build the SGL for a SCSI IO.
 *
 * Called by bus_dmamap_load_ccb() (from mpi3mr_map_request) once the data
 * buffer has been mapped into bus space.  Builds the MPI3 scatter/gather
 * list directly into the IO request frame; when the segments do not fit in
 * the request frame, all remaining SGEs go into the per-command chain
 * buffer, linked via a LAST_CHAIN element.  On success the command is
 * enqueued to the hardware; on mapping error the CCB is completed here.
 *
 * arg:   the struct mpi3mr_cmd being mapped
 * segs:  array of DMA segments produced by busdma
 * nsegs: number of valid entries in segs
 * error: busdma error (0, EFBIG, EINPROGRESS never reaches here, ...)
 */
static void mpi3mr_prepare_sgls(void *arg,
	bus_dma_segment_t *segs, int nsegs, int error)
{
	struct mpi3mr_softc *sc;
	struct mpi3mr_cmd *cm;
	u_int i;
	bus_addr_t chain_dma;
	void *chain;
	U8 *sg_local;
	U32 chain_length;
	int sges_left;
	U32 sges_in_segment;
	U8 simple_sgl_flags;
	U8 simple_sgl_flags_last;
	U8 last_chain_sgl_flags;
	struct mpi3mr_chain *chain_req;
	Mpi3SCSIIORequest_t *scsiio_req;
	union ccb *ccb;

	cm = (struct mpi3mr_cmd *)arg;
	sc = cm->sc;
	scsiio_req = (Mpi3SCSIIORequest_t *) &cm->io_request;
	ccb = cm->ccb;

	/* Mapping failed: complete the CCB with an appropriate CAM status. */
	if (error) {
		device_printf(sc->mpi3mr_dev, "%s: error=%d\n",__func__, error);
		if (error == EFBIG) {
			mpi3mr_set_ccbstatus(ccb, CAM_REQ_TOO_BIG);
		} else {
			mpi3mr_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		}
		mpi3mr_release_command(cm);
		xpt_done(ccb);
		return;
	}

	/* Sync the buffer for the DMA direction before handing it to hardware. */
	if (cm->data_dir == MPI3MR_READ)
		bus_dmamap_sync(sc->buffer_dmat, cm->dmamap,
		    BUS_DMASYNC_PREREAD);
	if (cm->data_dir == MPI3MR_WRITE)
		bus_dmamap_sync(sc->buffer_dmat, cm->dmamap,
		    BUS_DMASYNC_PREWRITE);

	KASSERT(nsegs <= MPI3MR_SG_DEPTH && nsegs > 0,
	    ("%s: bad SGE count: %d\n", device_get_nameunit(sc->mpi3mr_dev), nsegs));

	/* SGE flag templates: plain element, final element, and chain pointer. */
	simple_sgl_flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_SIMPLE |
	    MPI3_SGE_FLAGS_DLAS_SYSTEM;
	simple_sgl_flags_last = simple_sgl_flags |
	    MPI3_SGE_FLAGS_END_OF_LIST;
	last_chain_sgl_flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_LAST_CHAIN |
	    MPI3_SGE_FLAGS_DLAS_SYSTEM;

	sg_local = (U8 *)&scsiio_req->SGL;

	if (scsiio_req->DataLength == 0) {
		/* XXX we don't ever get here when DataLength == 0, right? cm->data is NULL */
		/* This whole if can likely be removed -- we handle it in mpi3mr_request_map */
		mpi3mr_build_zero_len_sge(sg_local);
		goto enqueue;
	}

	sges_left = nsegs;

	/* Number of simple SGEs that fit in the request frame itself. */
	sges_in_segment = (sc->facts.op_req_sz -
	    offsetof(Mpi3SCSIIORequest_t, SGL))/sizeof(Mpi3SGESimple_t);

	i = 0;

	mpi3mr_dprint(sc, MPI3MR_TRACE, "SGE count: %d IO size: %d\n",
		nsegs, scsiio_req->DataLength);

	/* Everything fits inline in the request frame; no chain needed. */
	if (sges_left <= sges_in_segment)
		goto fill_in_last_segment;

	/* fill in main message segment when there is a chain following */
	while (sges_in_segment > 1) {
		mpi3mr_add_sg_single(sg_local, simple_sgl_flags,
		    segs[i].ds_len, segs[i].ds_addr);
		sg_local += sizeof(Mpi3SGESimple_t);
		sges_left--;
		sges_in_segment--;
		i++;
	}

	/* One pre-allocated chain buffer per host tag. */
	chain_req = &sc->chain_sgl_list[cm->hosttag];

	chain = chain_req->buf;
	chain_dma = chain_req->buf_phys;
	memset(chain_req->buf, 0, PAGE_SIZE);
	/*
	 * All remaining SGEs go into the chain buffer.  NOTE(review): this
	 * assumes they fit in one PAGE_SIZE buffer — presumably guaranteed
	 * by the MPI3MR_SG_DEPTH limit asserted above; confirm.
	 */
	sges_in_segment = sges_left;
	chain_length = sges_in_segment * sizeof(Mpi3SGESimple_t);

	/* Last inline slot becomes the chain pointer to the chain buffer. */
	mpi3mr_add_sg_single(sg_local, last_chain_sgl_flags,
	    chain_length, chain_dma);

	sg_local = chain;

fill_in_last_segment:
	/* Emit remaining SGEs; the final one carries END_OF_LIST. */
	while (sges_left > 0) {
		if (sges_left == 1)
			mpi3mr_add_sg_single(sg_local,
			    simple_sgl_flags_last, segs[i].ds_len,
			    segs[i].ds_addr);
		else
			mpi3mr_add_sg_single(sg_local, simple_sgl_flags,
			    segs[i].ds_len, segs[i].ds_addr);
		sg_local += sizeof(Mpi3SGESimple_t);
		sges_left--;
		i++;
	}

enqueue:
	/*
	 * Now that we've created the sgls, we send the request to the device.
	 * Unlike in Linux, dmaload isn't guaranteed to load every time, but
	 * this function is always called when the resources are available, so
	 * we can send the request to hardware here always. mpi3mr_map_request
	 * knows about this quirk and will only take evasive action when an
	 * error other than EINPROGRESS is returned from dmaload.
	 */
	mpi3mr_enqueue_request(sc, cm);

	return;
}
230 
231 static void
232 mpi3mr_map_request(struct mpi3mr_softc *sc, struct mpi3mr_cmd *cm)
233 {
234 	u_int32_t retcode = 0;
235 	union ccb *ccb;
236 
237 	ccb = cm->ccb;
238 	if (cm->data != NULL) {
239 		mtx_lock(&sc->io_lock);
240 		/* Map data buffer into bus space */
241 		retcode = bus_dmamap_load_ccb(sc->buffer_dmat, cm->dmamap,
242 		    ccb, mpi3mr_prepare_sgls, cm, 0);
243 		mtx_unlock(&sc->io_lock);
244 		if (retcode != 0 && retcode != EINPROGRESS) {
245 			device_printf(sc->mpi3mr_dev,
246 			    "bus_dmamap_load(): retcode = %d\n", retcode);
247 			/*
248 			 * Any other error means prepare_sgls wasn't called, and
249 			 * will never be called, so we have to mop up. This error
250 			 * should never happen, though.
251 			 */
252 			mpi3mr_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
253 			mpi3mr_release_command(cm);
254 			xpt_done(ccb);
255 		}
256 	} else {
257 		/*
258 		 * No data, we enqueue it directly here.
259 		 */
260 		mpi3mr_enqueue_request(sc, cm);
261 	}
262 }
263 
264 void
265 mpi3mr_unmap_request(struct mpi3mr_softc *sc, struct mpi3mr_cmd *cmd)
266 {
267 	if (cmd->data != NULL) {
268 		if (cmd->data_dir == MPI3MR_READ)
269 			bus_dmamap_sync(sc->buffer_dmat, cmd->dmamap, BUS_DMASYNC_POSTREAD);
270 		if (cmd->data_dir == MPI3MR_WRITE)
271 			bus_dmamap_sync(sc->buffer_dmat, cmd->dmamap, BUS_DMASYNC_POSTWRITE);
272 		mtx_lock(&sc->io_lock);
273 		bus_dmamap_unload(sc->buffer_dmat, cmd->dmamap);
274 		mtx_unlock(&sc->io_lock);
275 	}
276 }
277 
278 /**
279  * mpi3mr_allow_unmap_to_fw - Whether an unmap is allowed to fw
280  * @sc: Adapter instance reference
281  * @ccb: SCSI Command reference
282  *
 * The controller hardware cannot handle certain UNMAP commands
 * for NVMe drives. This routine checks for those cases; when the
 * command cannot be passed to the firmware, it completes the SCSI
 * command itself with the proper status and sense data.
 *
 * Return: TRUE if the unmap may be sent to firmware, FALSE otherwise.
289  */
290 static bool mpi3mr_allow_unmap_to_fw(struct mpi3mr_softc *sc,
291 	union ccb *ccb)
292 {
293 	struct ccb_scsiio *csio;
294 	uint16_t param_list_len, block_desc_len, trunc_param_len = 0;
295 
296 	csio = &ccb->csio;
297 	param_list_len = (uint16_t) ((scsiio_cdb_ptr(csio)[7] << 8) | scsiio_cdb_ptr(csio)[8]);
298 
299 	switch(pci_get_revid(sc->mpi3mr_dev)) {
300 	case SAS4116_CHIP_REV_A0:
301 		if (!param_list_len) {
302 			mpi3mr_dprint(sc, MPI3MR_ERROR,
303 			    "%s: CDB received with zero parameter length\n",
304 			    __func__);
305 			mpi3mr_print_cdb(ccb);
306 			mpi3mr_set_ccbstatus(ccb, CAM_REQ_CMP);
307 			xpt_done(ccb);
308 			return false;
309 		}
310 
311 		if (param_list_len < 24) {
312 			mpi3mr_dprint(sc, MPI3MR_ERROR,
313 			    "%s: CDB received with invalid param_list_len: %d\n",
314 			    __func__, param_list_len);
315 			mpi3mr_print_cdb(ccb);
316 			scsi_set_sense_data(&ccb->csio.sense_data,
317 				/*sense_format*/ SSD_TYPE_FIXED,
318 				/*current_error*/ 1,
319 				/*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
320 				/*asc*/ 0x1A,
321 				/*ascq*/ 0x00,
322 				/*extra args*/ SSD_ELEM_NONE);
323 			ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
324 			ccb->ccb_h.status =
325 			    CAM_SCSI_STATUS_ERROR |
326 			    CAM_AUTOSNS_VALID;
327 			return false;
328 		}
329 
330 		if (param_list_len != csio->dxfer_len) {
331 			mpi3mr_dprint(sc, MPI3MR_ERROR,
332 			    "%s: CDB received with param_list_len: %d bufflen: %d\n",
333 			    __func__, param_list_len, csio->dxfer_len);
334 			mpi3mr_print_cdb(ccb);
335 			scsi_set_sense_data(&ccb->csio.sense_data,
336 				/*sense_format*/ SSD_TYPE_FIXED,
337 				/*current_error*/ 1,
338 				/*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
339 				/*asc*/ 0x1A,
340 				/*ascq*/ 0x00,
341 				/*extra args*/ SSD_ELEM_NONE);
342 			ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
343 			ccb->ccb_h.status =
344 			    CAM_SCSI_STATUS_ERROR |
345 			    CAM_AUTOSNS_VALID;
346 			xpt_done(ccb);
347 			return false;
348 		}
349 
350 		block_desc_len = (uint16_t) (csio->data_ptr[2] << 8 | csio->data_ptr[3]);
351 
352 		if (block_desc_len < 16) {
353 			mpi3mr_dprint(sc, MPI3MR_ERROR,
354 			    "%s: Invalid descriptor length in param list: %d\n",
355 			    __func__, block_desc_len);
356 			mpi3mr_print_cdb(ccb);
357 			scsi_set_sense_data(&ccb->csio.sense_data,
358 				/*sense_format*/ SSD_TYPE_FIXED,
359 				/*current_error*/ 1,
360 				/*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
361 				/*asc*/ 0x26,
362 				/*ascq*/ 0x00,
363 				/*extra args*/ SSD_ELEM_NONE);
364 			ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
365 			ccb->ccb_h.status =
366 			    CAM_SCSI_STATUS_ERROR |
367 			    CAM_AUTOSNS_VALID;
368 			xpt_done(ccb);
369 			return false;
370 		}
371 
372 		if (param_list_len > (block_desc_len + 8)) {
373 			mpi3mr_print_cdb(ccb);
374 			mpi3mr_dprint(sc, MPI3MR_INFO,
375 			    "%s: Truncating param_list_len(%d) to block_desc_len+8(%d)\n",
376 			    __func__, param_list_len, (block_desc_len + 8));
377 			param_list_len = block_desc_len + 8;
378 			scsiio_cdb_ptr(csio)[7] = (param_list_len >> 8) | 0xff;
379 			scsiio_cdb_ptr(csio)[8] = param_list_len | 0xff;
380 			mpi3mr_print_cdb(ccb);
381 		}
382 		break;
383 
384 	case SAS4116_CHIP_REV_B0:
385 		if ((param_list_len > 24) && ((param_list_len - 8) & 0xF)) {
386 			trunc_param_len -= (param_list_len - 8) & 0xF;
387 			mpi3mr_print_cdb(ccb);
388 			mpi3mr_dprint(sc, MPI3MR_INFO,
389 			    "%s: Truncating param_list_len from (%d) to (%d)\n",
390 			    __func__, param_list_len, trunc_param_len);
391 			scsiio_cdb_ptr(csio)[7] = (param_list_len >> 8) | 0xff;
392 			scsiio_cdb_ptr(csio)[8] = param_list_len | 0xff;
393 			mpi3mr_print_cdb(ccb);
394 		}
395 		break;
396 	}
397 
398 	return true;
399 }
400 
401 /**
402  * mpi3mr_tm_response_name -  get TM response as a string
403  * @resp_code: TM response code
404  *
405  * Convert known task management response code as a readable
406  * string.
407  *
408  * Return: response code string.
409  */
410 static const char* mpi3mr_tm_response_name(U8 resp_code)
411 {
412 	char *desc;
413 
414 	switch (resp_code) {
415 	case MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE:
416 		desc = "task management request completed";
417 		break;
418 	case MPI3_SCSITASKMGMT_RSPCODE_INVALID_FRAME:
419 		desc = "invalid frame";
420 		break;
421 	case MPI3_SCSITASKMGMT_RSPCODE_TM_FUNCTION_NOT_SUPPORTED:
422 		desc = "task management request not supported";
423 		break;
424 	case MPI3_SCSITASKMGMT_RSPCODE_TM_FAILED:
425 		desc = "task management request failed";
426 		break;
427 	case MPI3_SCSITASKMGMT_RSPCODE_TM_SUCCEEDED:
428 		desc = "task management request succeeded";
429 		break;
430 	case MPI3_SCSITASKMGMT_RSPCODE_TM_INVALID_LUN:
431 		desc = "invalid LUN";
432 		break;
433 	case MPI3_SCSITASKMGMT_RSPCODE_TM_OVERLAPPED_TAG:
434 		desc = "overlapped tag attempted";
435 		break;
436 	case MPI3_SCSITASKMGMT_RSPCODE_IO_QUEUED_ON_IOC:
437 		desc = "task queued, however not sent to target";
438 		break;
439 	case MPI3_SCSITASKMGMT_RSPCODE_TM_NVME_DENIED:
440 		desc = "task management request denied by NVMe device";
441 		break;
442 	default:
443 		desc = "unknown";
444 		break;
445 	}
446 
447 	return desc;
448 }
449 
450 void mpi3mr_poll_pend_io_completions(struct mpi3mr_softc *sc)
451 {
452 	int i;
453 	int num_of_reply_queues = sc->num_queues;
454 	struct mpi3mr_irq_context *irq_ctx;
455 
456 	for (i = 0; i < num_of_reply_queues; i++) {
457 		irq_ctx = &sc->irq_ctx[i];
458 		mpi3mr_complete_io_cmd(sc, irq_ctx);
459 	}
460 }
461 
462 void
463 trigger_reset_from_watchdog(struct mpi3mr_softc *sc, U8 reset_type, U32 reset_reason)
464 {
465 	if (sc->reset_in_progress) {
466 		mpi3mr_dprint(sc, MPI3MR_INFO, "Another reset is in progress, no need to trigger the reset\n");
467 		return;
468 	}
469 	sc->reset.type = reset_type;
470 	sc->reset.reason = reset_reason;
471 
472 	return;
473 }
474 
475 /**
476  * mpi3mr_issue_tm - Issue Task Management request
477  * @sc: Adapter instance reference
478  * @tm_type: Task Management type
479  * @handle: Device handle
480  * @lun: lun ID
481  * @htag: Host tag of the TM request
482  * @timeout: TM timeout value
483  * @drv_cmd: Internal command tracker
484  * @resp_code: Response code place holder
485  * @cmd: Timed out command reference
486  *
487  * Issues a Task Management Request to the controller for a
488  * specified target, lun and command and wait for its completion
489  * and check TM response. Recover the TM if it timed out by
490  * issuing controller reset.
491  *
492  * Return: 0 on success, non-zero on errors
493  */
static int
mpi3mr_issue_tm(struct mpi3mr_softc *sc, struct mpi3mr_cmd *cmd,
		U8 tm_type, unsigned long timeout)
{
	int retval = 0;
	MPI3_SCSI_TASK_MGMT_REQUEST tm_req;
	MPI3_SCSI_TASK_MGMT_REPLY *tm_reply = NULL;
	struct mpi3mr_drvr_cmd *drv_cmd = NULL;
	struct mpi3mr_target *tgtdev = NULL;
	struct mpi3mr_op_req_queue *op_req_q = NULL;
	union ccb *ccb;
	U8 resp_code;


	/* TM is pointless when the controller is dead or already resetting. */
	if (sc->unrecoverable) {
		mpi3mr_dprint(sc, MPI3MR_INFO,
			"Controller is in unrecoverable state!! TM not required\n");
		return retval;
	}
	if (sc->reset_in_progress) {
		mpi3mr_dprint(sc, MPI3MR_INFO,
			"controller reset in progress!! TM not required\n");
		return retval;
	}

	if (!cmd->ccb) {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "SCSIIO command timed-out with NULL ccb\n");
		return retval;
	}
	ccb = cmd->ccb;

	/* Skip TM for targets that are gone or being removed. */
	tgtdev = cmd->targ;
	if (tgtdev == NULL)  {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "Device does not exist target ID:0x%x,"
			      "TM is not required\n", ccb->ccb_h.target_id);
		return retval;
	}
	if (tgtdev->dev_removed == 1)  {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "Device(0x%x) is removed, TM is not required\n",
			      ccb->ccb_h.target_id);
		return retval;
	}

	/* Single shared TM tracker; the lock serializes TM issuance. */
	drv_cmd = &sc->host_tm_cmds;
	mtx_lock(&drv_cmd->lock);

	/* Build the TM request frame. */
	memset(&tm_req, 0, sizeof(tm_req));
	tm_req.DevHandle = htole16(tgtdev->dev_handle);
	tm_req.TaskType = tm_type;
	tm_req.HostTag = htole16(MPI3MR_HOSTTAG_TMS);
	int_to_lun(ccb->ccb_h.target_lun, tm_req.LUN);
	tm_req.Function = MPI3_FUNCTION_SCSI_TASK_MGMT;
	drv_cmd->state = MPI3MR_CMD_PENDING;
	drv_cmd->is_waiting = 1;
	drv_cmd->callback = NULL;

	/* NOTE(review): ccb was already checked/dereferenced above, so this
	 * null test is redundant. */
	if (ccb) {
		/* ABORT TASK must identify the exact request being aborted. */
		if (tm_type == MPI3_SCSITASKMGMT_TASKTYPE_ABORT_TASK) {
			op_req_q = &sc->op_req_q[cmd->req_qidx];
			tm_req.TaskHostTag = htole16(cmd->hosttag);
			tm_req.TaskRequestQueueID = htole16(op_req_q->qid);
		}
	}

	/* Block fresh I/O to the target while the TM is outstanding;
	 * undone in the out_unlock path below. */
	if (tgtdev)
		mpi3mr_atomic_inc(&tgtdev->block_io);

	/* PCIe (NVMe) devices may advertise their own abort/reset timeouts. */
	if (tgtdev && (tgtdev->dev_type == MPI3_DEVICE_DEVFORM_PCIE)) {
		if ((tm_type == MPI3_SCSITASKMGMT_TASKTYPE_ABORT_TASK)
		     && tgtdev->dev_spec.pcie_inf.abort_to)
 			timeout = tgtdev->dev_spec.pcie_inf.abort_to;
		else if ((tm_type == MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET)
			 && tgtdev->dev_spec.pcie_inf.reset_to)
			 timeout = tgtdev->dev_spec.pcie_inf.reset_to;
	}

	/* NOTE(review): this stores the address of the local pointer variable
	 * (&drv_cmd), not drv_cmd itself.  It is only used as an opaque sleep
	 * channel while this function is on the stack, but confirm that
	 * &sc->host_tm_cmds was not intended instead. */
	sc->tm_chan = (void *)&drv_cmd;

	mpi3mr_dprint(sc, MPI3MR_DEBUG_TM,
		      "posting task management request: type(%d), handle(0x%04x)\n",
		       tm_type, tgtdev->dev_handle);

	/* Post the TM over the admin queue and wait for its completion. */
	init_completion(&drv_cmd->completion);
	retval = mpi3mr_submit_admin_cmd(sc, &tm_req, sizeof(tm_req));
	if (retval) {
		mpi3mr_dprint(sc, MPI3MR_ERROR,
			      "posting task management request is failed\n");
		retval = -1;
		goto out_unlock;
	}
	wait_for_completion_timeout_tm(&drv_cmd->completion, timeout, sc);

	/* TM itself timed out: escalate to a soft reset via the watchdog,
	 * unless a reset already consumed the tracker. */
	if (!(drv_cmd->state & MPI3MR_CMD_COMPLETE)) {
		drv_cmd->is_waiting = 0;
		retval = -1;
		if (!(drv_cmd->state & MPI3MR_CMD_RESET)) {
			mpi3mr_dprint(sc, MPI3MR_ERROR,
				      "task management request timed out after %ld seconds\n", timeout);
			if (sc->mpi3mr_debug & MPI3MR_DEBUG_TM) {
				mpi3mr_dprint(sc, MPI3MR_INFO, "tm_request dump\n");
				mpi3mr_hexdump(&tm_req, sizeof(tm_req), 8);
			}
			trigger_reset_from_watchdog(sc, MPI3MR_TRIGGER_SOFT_RESET, MPI3MR_RESET_FROM_TM_TIMEOUT);
			retval = ETIMEDOUT;
		}
		goto out_unlock;
	}

	if (!(drv_cmd->state & MPI3MR_CMD_REPLYVALID)) {
		mpi3mr_dprint(sc, MPI3MR_ERROR,
			      "invalid task management reply message\n");
		retval = -1;
		goto out_unlock;
	}
	tm_reply = (MPI3_SCSI_TASK_MGMT_REPLY *)drv_cmd->reply;

	/* Map IOC status to a TM response code. */
	switch (drv_cmd->ioc_status) {
	case MPI3_IOCSTATUS_SUCCESS:
		resp_code = tm_reply->ResponseData & MPI3MR_RI_MASK_RESPCODE;
		break;
	case MPI3_IOCSTATUS_SCSI_IOC_TERMINATED:
		resp_code = MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE;
		break;
	default:
		mpi3mr_dprint(sc, MPI3MR_ERROR,
			      "task management request to handle(0x%04x) is failed with ioc_status(0x%04x) log_info(0x%08x)\n",
			       tgtdev->dev_handle, drv_cmd->ioc_status, drv_cmd->ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}

	/* Evaluate the response code; QUEUED_ON_IOC is OK only for QUERY. */
	switch (resp_code) {
	case MPI3_SCSITASKMGMT_RSPCODE_TM_SUCCEEDED:
	case MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE:
		break;
	case MPI3_SCSITASKMGMT_RSPCODE_IO_QUEUED_ON_IOC:
		if (tm_type != MPI3_SCSITASKMGMT_TASKTYPE_QUERY_TASK)
			retval = -1;
		break;
	default:
		retval = -1;
		break;
	}

	mpi3mr_dprint(sc, MPI3MR_DEBUG_TM,
		      "task management request type(%d) completed for handle(0x%04x) with ioc_status(0x%04x), log_info(0x%08x)"
		      "termination_count(%u), response:%s(0x%x)\n", tm_type, tgtdev->dev_handle, drv_cmd->ioc_status, drv_cmd->ioc_loginfo,
		      tm_reply->TerminationCount, mpi3mr_tm_response_name(resp_code), resp_code);

	if (retval)
		goto out_unlock;

	/* Drain reply queues so that completions terminated by the TM are
	 * processed before we verify the TM actually took effect. */
	mpi3mr_disable_interrupts(sc);
	mpi3mr_poll_pend_io_completions(sc);
	mpi3mr_enable_interrupts(sc);
	mpi3mr_poll_pend_io_completions(sc);

	/* Cross-check firmware's success claim against driver state. */
	switch (tm_type) {
	case MPI3_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
		if (cmd->state == MPI3MR_CMD_STATE_IN_TM) {
			mpi3mr_dprint(sc, MPI3MR_ERROR,
				      "%s: task abort returned success from firmware but corresponding CCB (%p) was not terminated"
				      "marking task abort failed!\n", sc->name, cmd->ccb);
			retval = -1;
		}
		break;
	case MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
		if (mpi3mr_atomic_read(&tgtdev->outstanding)) {
			mpi3mr_dprint(sc, MPI3MR_ERROR,
				      "%s: target reset returned success from firmware but IOs are still pending on the target (%p)"
				      "marking target reset failed!\n",
				      sc->name, tgtdev);
			retval = -1;
		}
		break;
	default:
		break;
	}

out_unlock:
	/* Release the TM tracker and unblock I/O to the target. */
	drv_cmd->state = MPI3MR_CMD_NOTUSED;
	mtx_unlock(&drv_cmd->lock);
	if (tgtdev && mpi3mr_atomic_read(&tgtdev->block_io) > 0)
		mpi3mr_atomic_dec(&tgtdev->block_io);

	return retval;
}
681 
682 /**
683  * mpi3mr_task_abort- Abort error handling callback
684  * @cmd: Timed out command reference
685  *
686  * Issue Abort Task Management if the command is in LLD scope
687  * and verify if it is aborted successfully and return status
688  * accordingly.
689  *
690  * Return: SUCCESS of successful abort the SCSI command else FAILED
691  */
692 static int mpi3mr_task_abort(struct mpi3mr_cmd *cmd)
693 {
694 	int retval = 0;
695 	struct mpi3mr_softc *sc;
696 	union ccb *ccb;
697 
698 	sc = cmd->sc;
699 
700 	if (!cmd->ccb) {
701 		mpi3mr_dprint(sc, MPI3MR_ERROR, "SCSIIO command timed-out with NULL ccb\n");
702 		return retval;
703 	}
704 	ccb = cmd->ccb;
705 
706 	mpi3mr_dprint(sc, MPI3MR_INFO,
707 		      "attempting abort task for ccb(%p)\n", ccb);
708 
709 	mpi3mr_print_cdb(ccb);
710 
711 	if (cmd->state != MPI3MR_CMD_STATE_BUSY) {
712 		mpi3mr_dprint(sc, MPI3MR_INFO,
713 			      "%s: ccb is not in driver scope, abort task is not required\n",
714 			      sc->name);
715 		return retval;
716 	}
717 	cmd->state = MPI3MR_CMD_STATE_IN_TM;
718 
719 	retval = mpi3mr_issue_tm(sc, cmd, MPI3_SCSITASKMGMT_TASKTYPE_ABORT_TASK, MPI3MR_ABORTTM_TIMEOUT);
720 
721 	mpi3mr_dprint(sc, MPI3MR_INFO,
722 		      "abort task is %s for ccb(%p)\n", ((retval == 0) ? "SUCCESS" : "FAILED"), ccb);
723 
724 	return retval;
725 }
726 
727 /**
728  * mpi3mr_target_reset - Target reset error handling callback
729  * @cmd: Timed out command reference
730  *
731  * Issue Target reset Task Management and verify the SCSI commands are
732  * terminated successfully and return status accordingly.
733  *
734  * Return: SUCCESS of successful termination of the SCSI commands else
735  *         FAILED
736  */
737 static int mpi3mr_target_reset(struct mpi3mr_cmd *cmd)
738 {
739 	int retval = 0;
740 	struct mpi3mr_softc *sc;
741 	struct mpi3mr_target *target;
742 
743 	sc = cmd->sc;
744 
745 	target = cmd->targ;
746 	if (target == NULL)  {
747 		mpi3mr_dprint(sc, MPI3MR_XINFO, "Device does not exist for target:0x%p,"
748 			      "target reset is not required\n", target);
749 		return retval;
750 	}
751 
752 	mpi3mr_dprint(sc, MPI3MR_INFO,
753 		      "attempting target reset on target(%d)\n", target->per_id);
754 
755 
756 	if (mpi3mr_atomic_read(&target->outstanding)) {
757 		mpi3mr_dprint(sc, MPI3MR_INFO,
758 			      "no outstanding IOs on the target(%d),"
759 			      " target reset not required.\n", target->per_id);
760 		return retval;
761 	}
762 
763 	retval = mpi3mr_issue_tm(sc, cmd, MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET, MPI3MR_RESETTM_TIMEOUT);
764 
765 	mpi3mr_dprint(sc, MPI3MR_INFO,
766 		      "target reset is %s for target(%d)\n", ((retval == 0) ? "SUCCESS" : "FAILED"),
767 		      target->per_id);
768 
769 	return retval;
770 }
771 
772 /**
773  * mpi3mr_get_fw_pending_ios - Calculate pending I/O count
774  * @sc: Adapter instance reference
775  *
776  * Calculate the pending I/Os for the controller and return.
777  *
778  * Return: Number of pending I/Os
779  */
780 static inline int mpi3mr_get_fw_pending_ios(struct mpi3mr_softc *sc)
781 {
782 	U16 i, pend_ios = 0;
783 
784 	for (i = 0; i < sc->num_queues; i++)
785 		pend_ios += mpi3mr_atomic_read(&sc->op_reply_q[i].pend_ios);
786 	return pend_ios;
787 }
788 
789 /**
790  * mpi3mr_wait_for_host_io - block for I/Os to complete
791  * @sc: Adapter instance reference
792  * @timeout: time out in seconds
793  *
794  * Waits for pending I/Os for the given adapter to complete or
795  * to hit the timeout.
796  *
797  * Return: Nothing
798  */
799 static int mpi3mr_wait_for_host_io(struct mpi3mr_softc *sc, U32 timeout)
800 {
801 	enum mpi3mr_iocstate iocstate;
802 
803 	iocstate = mpi3mr_get_iocstate(sc);
804 	if (iocstate != MRIOC_STATE_READY) {
805 		mpi3mr_dprint(sc, MPI3MR_XINFO, "%s :Controller is in NON-READY state! Proceed with Reset\n", __func__);
806 		return -1;
807 	}
808 
809 	if (!mpi3mr_get_fw_pending_ios(sc))
810 		return 0;
811 
812 	mpi3mr_dprint(sc, MPI3MR_INFO,
813 		      "%s :Waiting for %d seconds prior to reset for %d pending I/Os to complete\n",
814 		      __func__, timeout, mpi3mr_get_fw_pending_ios(sc));
815 
816 	int i;
817 	for (i = 0; i < timeout; i++) {
818 		if (!mpi3mr_get_fw_pending_ios(sc)) {
819 			mpi3mr_dprint(sc, MPI3MR_INFO, "%s :All pending I/Os got completed while waiting! Reset not required\n", __func__);
820 			return 0;
821 
822 		}
823 		iocstate = mpi3mr_get_iocstate(sc);
824 		if (iocstate != MRIOC_STATE_READY) {
825 			mpi3mr_dprint(sc, MPI3MR_XINFO, "%s :Controller state becomes NON-READY while waiting! dont wait further"
826 				      "Proceed with Reset\n", __func__);
827 			return -1;
828 		}
829 		DELAY(1000 * 1000);
830 	}
831 
832 	mpi3mr_dprint(sc, MPI3MR_INFO, "%s :Pending I/Os after wait exaust is %d! Proceed with Reset\n", __func__,
833 		      mpi3mr_get_fw_pending_ios(sc));
834 
835 	return -1;
836 }
837 
838 static void
839 mpi3mr_scsiio_timeout(void *data)
840 {
841 	int retval = 0;
842 	struct mpi3mr_softc *sc;
843 	struct mpi3mr_cmd *cmd;
844 	struct mpi3mr_target *targ_dev = NULL;
845 
846 	if (!data)
847 		return;
848 
849 	cmd = (struct mpi3mr_cmd *)data;
850 	sc = cmd->sc;
851 
852 	if (cmd->ccb == NULL) {
853 		mpi3mr_dprint(sc, MPI3MR_ERROR, "SCSIIO command timed-out with NULL ccb\n");
854 		return;
855 	}
856 
857 	/*
858 	 * TMs are not supported for IO timeouts on VD/LD, so directly issue controller reset
859 	 * with max timeout for outstanding IOs to complete is 180sec.
860 	 */
861 	targ_dev = cmd->targ;
862 	if (targ_dev && (targ_dev->dev_type == MPI3_DEVICE_DEVFORM_VD)) {
863 		if (mpi3mr_wait_for_host_io(sc, MPI3MR_RAID_ERRREC_RESET_TIMEOUT))
864 			trigger_reset_from_watchdog(sc, MPI3MR_TRIGGER_SOFT_RESET, MPI3MR_RESET_FROM_SCSIIO_TIMEOUT);
865 		return;
866  	}
867 
868 	/* Issue task abort to recover the timed out IO */
869 	retval = mpi3mr_task_abort(cmd);
870 	if (!retval || (retval == ETIMEDOUT))
871 		return;
872 
873 	/*
874 	 * task abort has failed to recover the timed out IO,
875 	 * try with the target reset
876 	 */
877 	retval = mpi3mr_target_reset(cmd);
878 	if (!retval || (retval == ETIMEDOUT))
879 		return;
880 
881 	/*
882 	 * task abort and target reset has failed. So issue Controller reset(soft reset)
883 	 * through OCR thread context
884 	 */
885 	trigger_reset_from_watchdog(sc, MPI3MR_TRIGGER_SOFT_RESET, MPI3MR_RESET_FROM_SCSIIO_TIMEOUT);
886 
887 	return;
888 }
889 
/*
 * int_to_lun - encode a CAM LUN number into the MPI3 request LUN field.
 *
 * Packs the 32-bit LUN into req_lun as big-endian 16-bit levels, two bytes
 * per loop iteration (SAM-style hierarchical LUN encoding).
 *
 * NOTE(review): sizeof(*req_lun) is sizeof(U8) == 1, so this memset zeroes
 * only the first byte, not the whole LUN field — the loop then overwrites
 * bytes 0..sizeof(lun)-1 anyway.  The only caller in view pre-zeroes the
 * entire request frame, so this is benign there, but confirm the intended
 * field width before relying on this routine to clear it.
 */
void int_to_lun(unsigned int lun, U8 *req_lun)
{
	int i;

	memset(req_lun, 0, sizeof(*req_lun));

	/* Two bytes (one LUN level) per iteration, most-significant first. */
	for (i = 0; i < sizeof(lun); i += 2) {
		req_lun[i] = (lun >> 8) & 0xFF;
		req_lun[i+1] = lun & 0xFF;
		lun = lun >> 16;
	}

}
903 
904 static U16 get_req_queue_index(struct mpi3mr_softc *sc)
905 {
906 	U16 i = 0, reply_q_index = 0, reply_q_pend_ios = 0;
907 
908 	reply_q_pend_ios = mpi3mr_atomic_read(&sc->op_reply_q[0].pend_ios);
909 	for (i = 0; i < sc->num_queues; i++) {
910 		if (reply_q_pend_ios > mpi3mr_atomic_read(&sc->op_reply_q[i].pend_ios)) {
911 			reply_q_pend_ios = mpi3mr_atomic_read(&sc->op_reply_q[i].pend_ios);
912 			reply_q_index = i;
913 		}
914 	}
915 
916 	return reply_q_index;
917 }
918 
/*
 * mpi3mr_action_scsiio - Handle an XPT_SCSI_IO CCB from CAM
 * @cam_sc: CAM-layer softc
 * @ccb: The SCSI IO CCB to execute
 *
 * Validates the target and controller state, builds an MPI3 SCSI IO
 * request from the CCB (direction flags, CDB, LUN, task attributes),
 * picks an operational request queue, and hands the command to
 * mpi3mr_map_request() for SGE construction and submission.  Every
 * early-exit path completes the CCB via xpt_done() with an
 * appropriate status.  Called with the softc mutex held.
 */
static void
mpi3mr_action_scsiio(struct mpi3mr_cam_softc *cam_sc, union ccb *ccb)
{
	Mpi3SCSIIORequest_t *req = NULL;
	struct ccb_scsiio *csio;
	struct mpi3mr_softc *sc;
	struct mpi3mr_target *targ;
	struct mpi3mr_cmd *cm;
	uint8_t scsi_opcode, queue_idx;
	uint32_t mpi_control;

	sc = cam_sc->sc;
	mtx_assert(&sc->mpi3mr_mtx, MA_OWNED);

	/* Controller is dead; fail everything immediately. */
	if (sc->unrecoverable) {
		mpi3mr_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}

	csio = &ccb->csio;
	KASSERT(csio->ccb_h.target_id < cam_sc->maxtargets,
	    ("Target %d out of bounds in XPT_SCSI_IO\n",
	     csio->ccb_h.target_id));

	scsi_opcode = scsiio_cdb_ptr(csio)[0];

	/*
	 * During shutdown only cache-flush and start/stop commands are
	 * allowed through; everything else is completed successfully
	 * without being sent to the device.
	 */
	if ((sc->mpi3mr_flags & MPI3MR_FLAGS_SHUTDOWN) &&
	    !((scsi_opcode == SYNCHRONIZE_CACHE) ||
	      (scsi_opcode == START_STOP_UNIT))) {
		mpi3mr_set_ccbstatus(ccb, CAM_REQ_CMP);
		xpt_done(ccb);
		return;
	}

	targ = mpi3mr_find_target_by_per_id(cam_sc, csio->ccb_h.target_id);
	if (targ == NULL)  {
		mpi3mr_dprint(sc, MPI3MR_XINFO, "Device with target ID: 0x%x does not exist\n",
			      csio->ccb_h.target_id);
		mpi3mr_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}

	/* Hidden devices (e.g. RAID members) are not exposed to the OS. */
	if (targ && targ->is_hidden)  {
		mpi3mr_dprint(sc, MPI3MR_XINFO, "Device with target ID: 0x%x is hidden\n",
			      csio->ccb_h.target_id);
		mpi3mr_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}

	if (targ->dev_removed == 1)  {
		mpi3mr_dprint(sc, MPI3MR_XINFO, "Device with target ID: 0x%x is removed\n", csio->ccb_h.target_id);
		mpi3mr_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}

	if (targ->dev_handle == 0x0) {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "%s NULL handle for target 0x%x\n",
		    __func__, csio->ccb_h.target_id);
		mpi3mr_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}

	/*
	 * Target is temporarily blocked (firmware event in flight) or a
	 * controller reset is running/pending; ask CAM to requeue.
	 */
	if (mpi3mr_atomic_read(&targ->block_io) ||
		(sc->reset_in_progress == 1) || (sc->prepare_for_reset == 1)) {
		mpi3mr_dprint(sc, MPI3MR_TRACE, "%s target is busy target_id: 0x%x\n",
		    __func__, csio->ccb_h.target_id);
		mpi3mr_set_ccbstatus(ccb, CAM_REQUEUE_REQ);
		xpt_done(ccb);
		return;
	}

	/*
	 * Sometimes, it is possible to get a command that is not "In
	 * Progress" and was actually aborted by the upper layer.  Check for
	 * this here and complete the command without error.
	 */
	if (mpi3mr_get_ccbstatus(ccb) != CAM_REQ_INPROG) {
		mpi3mr_dprint(sc, MPI3MR_TRACE, "%s Command is not in progress for "
		    "target %u\n", __func__, csio->ccb_h.target_id);
		xpt_done(ccb);
		return;
	}
	/*
	 * If devinfo is 0 this will be a volume.  In that case don't tell CAM
	 * that the volume has timed out.  We want volumes to be enumerated
	 * until they are deleted/removed, not just failed.
	 */
	if (targ->flags & MPI3MRSAS_TARGET_INREMOVAL) {
		if (targ->devinfo == 0)
			mpi3mr_set_ccbstatus(ccb, CAM_REQ_CMP);
		else
			mpi3mr_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
		xpt_done(ccb);
		return;
	}

	/*
	 * On SAS4116 NVMe devices, UNMAP may need to be translated by the
	 * driver instead of being forwarded; the helper completes the CCB
	 * itself when it returns false.
	 */
	if ((scsi_opcode == UNMAP) &&
		(pci_get_device(sc->mpi3mr_dev) == MPI3_MFGPAGE_DEVID_SAS4116) &&
		(targ->dev_type == MPI3_DEVICE_DEVFORM_PCIE) &&
		(mpi3mr_allow_unmap_to_fw(sc, ccb) == false))
		return;

	cm = mpi3mr_get_command(sc);
	if (cm == NULL || (sc->mpi3mr_flags & MPI3MR_FLAGS_DIAGRESET)) {
		if (cm != NULL) {
			mpi3mr_release_command(cm);
		}
		/* Out of command trackers: freeze the SIM queue until one frees up. */
		if ((cam_sc->flags & MPI3MRSAS_QUEUE_FROZEN) == 0) {
			xpt_freeze_simq(cam_sc->sim, 1);
			cam_sc->flags |= MPI3MRSAS_QUEUE_FROZEN;
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		mpi3mr_set_ccbstatus(ccb, CAM_REQUEUE_REQ);
		xpt_done(ccb);
		return;
	}

	/* Translate CAM data direction to MPI3 direction flags. */
	switch (csio->ccb_h.flags & CAM_DIR_MASK) {
	case CAM_DIR_IN:
		mpi_control = MPI3_SCSIIO_FLAGS_DATADIRECTION_READ;
		cm->data_dir = MPI3MR_READ;
		break;
	case CAM_DIR_OUT:
		mpi_control = MPI3_SCSIIO_FLAGS_DATADIRECTION_WRITE;
		cm->data_dir = MPI3MR_WRITE;
		break;
	case CAM_DIR_NONE:
	default:
		mpi_control = MPI3_SCSIIO_FLAGS_DATADIRECTION_NO_DATA_TRANSFER;
		break;
	}

	if (csio->cdb_len > 16)
		mpi_control |= MPI3_SCSIIO_FLAGS_CDB_GREATER_THAN_16;

	req = (Mpi3SCSIIORequest_t *)&cm->io_request;
	bzero(req, sizeof(*req));
	req->Function = MPI3_FUNCTION_SCSI_IO;
	req->HostTag = cm->hosttag;
	req->DataLength = htole32(csio->dxfer_len);
	req->DevHandle = htole16(targ->dev_handle);

	/*
	 * It looks like the hardware doesn't require an explicit tag
	 * number for each transaction.  SAM Task Management not supported
	 * at the moment.
	 */
	switch (csio->tag_action) {
	case MSG_HEAD_OF_Q_TAG:
		mpi_control |= MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_HEADOFQ;
		break;
	case MSG_ORDERED_Q_TAG:
		mpi_control |= MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_ORDEREDQ;
		break;
	case MSG_ACA_TASK:
		mpi_control |= MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_ACAQ;
		break;
	case CAM_TAG_ACTION_NONE:
	case MSG_SIMPLE_Q_TAG:
	default:
		mpi_control |= MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_SIMPLEQ;
		break;
	}

	req->Flags = htole32(mpi_control);

	/* Copy the CDB from wherever CAM put it (pointer vs inline bytes). */
	if (csio->ccb_h.flags & CAM_CDB_POINTER)
		bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
	else {
		KASSERT(csio->cdb_len <= IOCDBLEN,
		    ("cdb_len %d is greater than IOCDBLEN but CAM_CDB_POINTER "
		    "is not set", csio->cdb_len));
		bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
	}

	cm->length = csio->dxfer_len;
	cm->targ = targ;
	int_to_lun(csio->ccb_h.target_lun, req->LUN);
	cm->ccb = ccb;
	/* Timestamp for I/O latency accounting. */
	csio->ccb_h.qos.sim_data = sbinuptime();
	queue_idx = get_req_queue_index(sc);
	cm->req_qidx = queue_idx;

	mpi3mr_dprint(sc, MPI3MR_TRACE, "[QID:%d]: func: %s line:%d CDB: 0x%x targetid: %x SMID: 0x%x\n",
		(queue_idx + 1), __func__, __LINE__, scsi_opcode, csio->ccb_h.target_id, cm->hosttag);

	/* Only virtual-address or BIO payloads are supported by this driver. */
	switch ((ccb->ccb_h.flags & CAM_DATA_MASK)) {
	case CAM_DATA_PADDR:
	case CAM_DATA_SG_PADDR:
		device_printf(sc->mpi3mr_dev, "%s: physical addresses not supported\n",
		    __func__);
		mpi3mr_set_ccbstatus(ccb, CAM_REQ_INVALID);
		mpi3mr_release_command(cm);
		xpt_done(ccb);
		return;
	case CAM_DATA_SG:
		device_printf(sc->mpi3mr_dev, "%s: scatter gather is not supported\n",
		    __func__);
		mpi3mr_set_ccbstatus(ccb, CAM_REQ_INVALID);
		mpi3mr_release_command(cm);
		xpt_done(ccb);
		return;
	case CAM_DATA_VADDR:
	case CAM_DATA_BIO:
		/* Reject transfers larger than the SGL can describe. */
		if (csio->dxfer_len > (MPI3MR_SG_DEPTH * MPI3MR_4K_PGSZ)) {
			mpi3mr_set_ccbstatus(ccb, CAM_REQ_TOO_BIG);
			mpi3mr_release_command(cm);
			xpt_done(ccb);
			return;
		}
		ccb->ccb_h.status |= CAM_SIM_QUEUED;
		cm->length = csio->dxfer_len;
		if (cm->length)
			cm->data = csio->data_ptr;
		break;
	default:
		mpi3mr_set_ccbstatus(ccb, CAM_REQ_INVALID);
		mpi3mr_release_command(cm);
		xpt_done(ccb);
		return;
	}

	/* Prepare SGEs and queue to hardware */
	mpi3mr_map_request(sc, cm);
}
1149 
1150 static void
1151 mpi3mr_enqueue_request(struct mpi3mr_softc *sc, struct mpi3mr_cmd *cm)
1152 {
1153 	static int ratelimit;
1154 	struct mpi3mr_op_req_queue *opreqq = &sc->op_req_q[cm->req_qidx];
1155 	struct mpi3mr_throttle_group_info *tg = NULL;
1156 	uint32_t data_len_blks = 0;
1157 	uint32_t tracked_io_sz = 0;
1158 	uint32_t ioc_pend_data_len = 0, tg_pend_data_len = 0;
1159 	struct mpi3mr_target *targ = cm->targ;
1160 	union ccb *ccb = cm->ccb;
1161 	Mpi3SCSIIORequest_t *req = (Mpi3SCSIIORequest_t *)&cm->io_request;
1162 
1163 	if (sc->iot_enable) {
1164 		data_len_blks = ccb->csio.dxfer_len >> 9;
1165 
1166 		if ((data_len_blks >= sc->io_throttle_data_length) &&
1167 		    targ->io_throttle_enabled) {
1168 
1169 			tracked_io_sz = data_len_blks;
1170 			tg = targ->throttle_group;
1171 			if (tg) {
1172 				mpi3mr_atomic_add(&sc->pend_large_data_sz, data_len_blks);
1173 				mpi3mr_atomic_add(&tg->pend_large_data_sz, data_len_blks);
1174 
1175 				ioc_pend_data_len = mpi3mr_atomic_read(&sc->pend_large_data_sz);
1176 				tg_pend_data_len = mpi3mr_atomic_read(&tg->pend_large_data_sz);
1177 
1178 				if (ratelimit % 1000) {
1179 					mpi3mr_dprint(sc, MPI3MR_IOT,
1180 						"large vd_io persist_id(%d), handle(0x%04x), data_len(%d),"
1181 						"ioc_pending(%d), tg_pending(%d), ioc_high(%d), tg_high(%d)\n",
1182 						targ->per_id, targ->dev_handle,
1183 						data_len_blks, ioc_pend_data_len,
1184 						tg_pend_data_len, sc->io_throttle_high,
1185 						tg->high);
1186 					ratelimit++;
1187 				}
1188 
1189 				if (!tg->io_divert  && ((ioc_pend_data_len >=
1190 				    sc->io_throttle_high) ||
1191 				    (tg_pend_data_len >= tg->high))) {
1192 					tg->io_divert = 1;
1193 					mpi3mr_dprint(sc, MPI3MR_IOT,
1194 						"VD: Setting divert flag for tg_id(%d), persist_id(%d)\n",
1195 						tg->id, targ->per_id);
1196 					if (sc->mpi3mr_debug | MPI3MR_IOT)
1197 						mpi3mr_print_cdb(ccb);
1198 					mpi3mr_set_io_divert_for_all_vd_in_tg(sc,
1199 					    tg, 1);
1200 				}
1201 			} else {
1202 				mpi3mr_atomic_add(&sc->pend_large_data_sz, data_len_blks);
1203 				ioc_pend_data_len = mpi3mr_atomic_read(&sc->pend_large_data_sz);
1204 				if (ratelimit % 1000) {
1205 					mpi3mr_dprint(sc, MPI3MR_IOT,
1206 					    "large pd_io persist_id(%d), handle(0x%04x), data_len(%d), ioc_pending(%d), ioc_high(%d)\n",
1207 					    targ->per_id, targ->dev_handle,
1208 					    data_len_blks, ioc_pend_data_len,
1209 					    sc->io_throttle_high);
1210 					ratelimit++;
1211 				}
1212 
1213 				if (ioc_pend_data_len >= sc->io_throttle_high) {
1214 					targ->io_divert = 1;
1215 					mpi3mr_dprint(sc, MPI3MR_IOT,
1216 						"PD: Setting divert flag for persist_id(%d)\n",
1217 						targ->per_id);
1218 					if (sc->mpi3mr_debug | MPI3MR_IOT)
1219 						mpi3mr_print_cdb(ccb);
1220 				}
1221 			}
1222 		}
1223 
1224 		if (targ->io_divert) {
1225 			req->MsgFlags |= MPI3_SCSIIO_MSGFLAGS_DIVERT_TO_FIRMWARE;
1226 			req->Flags = htole32(le32toh(req->Flags) | MPI3_SCSIIO_FLAGS_DIVERT_REASON_IO_THROTTLING);
1227 		}
1228 	}
1229 
1230 	if (mpi3mr_submit_io(sc, opreqq, (U8 *)&cm->io_request)) {
1231 		if (tracked_io_sz) {
1232 			mpi3mr_atomic_sub(&sc->pend_large_data_sz, tracked_io_sz);
1233 			if (tg)
1234 				mpi3mr_atomic_sub(&tg->pend_large_data_sz, tracked_io_sz);
1235 		}
1236 		mpi3mr_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
1237 		mpi3mr_release_command(cm);
1238 		xpt_done(ccb);
1239 	} else {
1240 		callout_reset_sbt(&cm->callout, mstosbt(ccb->ccb_h.timeout), 0,
1241 		    mpi3mr_scsiio_timeout, cm, 0);
1242 		cm->callout_owner = true;
1243 		mpi3mr_atomic_inc(&sc->fw_outstanding);
1244 		mpi3mr_atomic_inc(&targ->outstanding);
1245 		if (mpi3mr_atomic_read(&sc->fw_outstanding) > sc->io_cmds_highwater)
1246 			sc->io_cmds_highwater++;
1247 	}
1248 
1249 	return;
1250 }
1251 
1252 static void
1253 mpi3mr_cam_poll(struct cam_sim *sim)
1254 {
1255 	struct mpi3mr_cam_softc *cam_sc;
1256 	struct mpi3mr_irq_context *irq_ctx;
1257 	struct mpi3mr_softc *sc;
1258 	int i;
1259 
1260 	cam_sc = cam_sim_softc(sim);
1261 	sc = cam_sc->sc;
1262 
1263 	mpi3mr_dprint(cam_sc->sc, MPI3MR_TRACE, "func: %s line: %d is called\n",
1264 		__func__, __LINE__);
1265 
1266 	for (i = 0; i < sc->num_queues; i++) {
1267 		irq_ctx = sc->irq_ctx + i;
1268 		if (irq_ctx->op_reply_q->qid) {
1269 			mpi3mr_complete_io_cmd(sc, irq_ctx);
1270 		}
1271 	}
1272 }
1273 
/*
 * mpi3mr_cam_action - CAM SIM action entry point
 * @sim: The SIM the CCB was issued against
 * @ccb: The CCB to service
 *
 * Dispatches on the CCB function code: path inquiry, transport
 * settings, geometry, reset/abort stubs, and SCSI IO.  All branches
 * except XPT_RESET_DEV and XPT_SCSI_IO fall through to xpt_done() at
 * the bottom.  Called with the softc mutex held.
 */
static void
mpi3mr_cam_action(struct cam_sim *sim, union ccb *ccb)
{
	struct mpi3mr_cam_softc *cam_sc;
	struct mpi3mr_target *targ;

	cam_sc = cam_sim_softc(sim);

	mpi3mr_dprint(cam_sc->sc, MPI3MR_TRACE, "ccb func_code 0x%x target id: 0x%x\n",
	    ccb->ccb_h.func_code, ccb->ccb_h.target_id);

	mtx_assert(&cam_sc->sc->mpi3mr_mtx, MA_OWNED);

	switch (ccb->ccb_h.func_code) {
	case XPT_PATH_INQ:
	{
		struct ccb_pathinq *cpi = &ccb->cpi;

		/* Report static HBA capabilities to CAM. */
		cpi->version_num = 1;
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
		cpi->target_sprt = 0;
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = cam_sc->maxtargets - 1;
		cpi->max_lun = 0;

		/*
		 * initiator_id is set here to an ID outside the set of valid
		 * target IDs (including volumes).
		 */
		cpi->initiator_id = cam_sc->maxtargets;
		strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strlcpy(cpi->hba_vid, "Broadcom", HBA_IDLEN);
		strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->bus_id = cam_sim_bus(sim);
		/*
		 * XXXSLM-I think this needs to change based on config page or
		 * something instead of hardcoded to 150000.
		 */
		cpi->base_transfer_speed = 150000;
		cpi->transport = XPORT_SAS;
		cpi->transport_version = 0;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_SPC;

		targ = mpi3mr_find_target_by_per_id(cam_sc, ccb->ccb_h.target_id);

		/*
		 * For NVMe devices, cap maxio to the device's reported MDTS;
		 * otherwise use the driver's SGL-depth-based limit.
		 */
		if (targ && (targ->dev_type == MPI3_DEVICE_DEVFORM_PCIE) &&
		    ((targ->dev_spec.pcie_inf.dev_info &
		    MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) ==
		    MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE)) {
			cpi->maxio = targ->dev_spec.pcie_inf.mdts;
			mpi3mr_dprint(cam_sc->sc, MPI3MR_XINFO,
				"PCI device target_id: %u max io size: %u\n",
				ccb->ccb_h.target_id, cpi->maxio);
		} else {
			cpi->maxio = PAGE_SIZE * (MPI3MR_SG_DEPTH - 1);
		}
		mpi3mr_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	{
		struct ccb_trans_settings	*cts;
		struct ccb_trans_settings_sas	*sas;
		struct ccb_trans_settings_scsi	*scsi;

		cts = &ccb->cts;
		sas = &cts->xport_specific.sas;
		scsi = &cts->proto_specific.scsi;

		KASSERT(cts->ccb_h.target_id < cam_sc->maxtargets,
		    ("Target %d out of bounds in XPT_GET_TRAN_SETTINGS\n",
		    cts->ccb_h.target_id));
		targ = mpi3mr_find_target_by_per_id(cam_sc, cts->ccb_h.target_id);

		if (targ == NULL) {
			mpi3mr_dprint(cam_sc->sc, MPI3MR_TRACE, "Device with target ID: 0x%x does not exist\n",
			cts->ccb_h.target_id);
			mpi3mr_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			break;
		}

		if ((targ->dev_handle == 0x0) || (targ->dev_removed == 1))  {
			mpi3mr_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			break;
		}

		cts->protocol_version = SCSI_REV_SPC2;
		cts->transport = XPORT_SAS;
		cts->transport_version = 0;

		sas->valid = CTS_SAS_VALID_SPEED;

		/* Map the MPI3 negotiated link-rate code to a CAM bitrate. */
		switch (targ->link_rate) {
		case 0x08:
			sas->bitrate = 150000;
			break;
		case 0x09:
			sas->bitrate = 300000;
			break;
		case 0x0a:
			sas->bitrate = 600000;
			break;
		case 0x0b:
			sas->bitrate = 1200000;
			break;
		default:
			/* Unknown rate: report speed as not valid. */
			sas->valid = 0;
		}

		cts->protocol = PROTO_SCSI;
		scsi->valid = CTS_SCSI_VALID_TQ;
		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;

		mpi3mr_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_CALC_GEOMETRY:
		cam_calc_geometry(&ccb->ccg, /*extended*/1);
		mpi3mr_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_RESET_DEV:
		/* Not implemented; CCB is intentionally left un-completed here. */
		mpi3mr_dprint(cam_sc->sc, MPI3MR_INFO, "mpi3mr_action "
		    "XPT_RESET_DEV\n");
		return;
	case XPT_RESET_BUS:
	case XPT_ABORT:
	case XPT_TERM_IO:
		mpi3mr_dprint(cam_sc->sc, MPI3MR_INFO, "mpi3mr_action faking success "
		    "for abort or reset\n");
		mpi3mr_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_SCSI_IO:
		/* mpi3mr_action_scsiio() completes the CCB on all its paths. */
		mpi3mr_action_scsiio(cam_sc, ccb);
		return;
	default:
		mpi3mr_set_ccbstatus(ccb, CAM_FUNC_NOTAVAIL);
		break;
	}
	xpt_done(ccb);
}
1417 
1418 void
1419 mpi3mr_startup_increment(struct mpi3mr_cam_softc *cam_sc)
1420 {
1421 	if ((cam_sc->flags & MPI3MRSAS_IN_STARTUP) != 0) {
1422 		if (cam_sc->startup_refcount++ == 0) {
1423 			/* just starting, freeze the simq */
1424 			mpi3mr_dprint(cam_sc->sc, MPI3MR_XINFO,
1425 			    "%s freezing simq\n", __func__);
1426 			xpt_hold_boot();
1427 		}
1428 		mpi3mr_dprint(cam_sc->sc, MPI3MR_XINFO, "%s refcount %u\n", __func__,
1429 		    cam_sc->startup_refcount);
1430 	}
1431 }
1432 
1433 void
1434 mpi3mr_release_simq_reinit(struct mpi3mr_cam_softc *cam_sc)
1435 {
1436 	if (cam_sc->flags & MPI3MRSAS_QUEUE_FROZEN) {
1437 		cam_sc->flags &= ~MPI3MRSAS_QUEUE_FROZEN;
1438 		xpt_release_simq(cam_sc->sim, 1);
1439 		mpi3mr_dprint(cam_sc->sc, MPI3MR_INFO, "Unfreezing SIM queue\n");
1440 	}
1441 }
1442 
1443 void
1444 mpi3mr_rescan_target(struct mpi3mr_softc *sc, struct mpi3mr_target *targ)
1445 {
1446 	struct mpi3mr_cam_softc *cam_sc = sc->cam_sc;
1447 	path_id_t pathid;
1448 	target_id_t targetid;
1449 	union ccb *ccb;
1450 
1451 	pathid = cam_sim_path(cam_sc->sim);
1452 	if (targ == NULL)
1453 		targetid = CAM_TARGET_WILDCARD;
1454 	else
1455 		targetid = targ->per_id;
1456 
1457 	/*
1458 	 * Allocate a CCB and schedule a rescan.
1459 	 */
1460 	ccb = xpt_alloc_ccb_nowait();
1461 	if (ccb == NULL) {
1462 		mpi3mr_dprint(sc, MPI3MR_ERROR, "unable to alloc CCB for rescan\n");
1463 		return;
1464 	}
1465 
1466 	if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid, targetid,
1467 	    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
1468 		mpi3mr_dprint(sc, MPI3MR_ERROR, "unable to create path for rescan\n");
1469 		xpt_free_ccb(ccb);
1470 		return;
1471 	}
1472 
1473 	if (targetid == CAM_TARGET_WILDCARD)
1474 		ccb->ccb_h.func_code = XPT_SCAN_BUS;
1475 	else
1476 		ccb->ccb_h.func_code = XPT_SCAN_TGT;
1477 
1478 	mpi3mr_dprint(sc, MPI3MR_EVENT, "%s target id 0x%x\n", __func__, targetid);
1479 	xpt_rescan(ccb);
1480 }
1481 
1482 void
1483 mpi3mr_startup_decrement(struct mpi3mr_cam_softc *cam_sc)
1484 {
1485 	if ((cam_sc->flags & MPI3MRSAS_IN_STARTUP) != 0) {
1486 		if (--cam_sc->startup_refcount == 0) {
1487 			/* finished all discovery-related actions, release
1488 			 * the simq and rescan for the latest topology.
1489 			 */
1490 			mpi3mr_dprint(cam_sc->sc, MPI3MR_XINFO,
1491 			    "%s releasing simq\n", __func__);
1492 			cam_sc->flags &= ~MPI3MRSAS_IN_STARTUP;
1493 			xpt_release_simq(cam_sc->sim, 1);
1494 			xpt_release_boot();
1495 		}
1496 		mpi3mr_dprint(cam_sc->sc, MPI3MR_XINFO, "%s refcount %u\n", __func__,
1497 		    cam_sc->startup_refcount);
1498 	}
1499 }
1500 
1501 static void
1502 mpi3mr_fw_event_free(struct mpi3mr_softc *sc, struct mpi3mr_fw_event_work *fw_event)
1503 {
1504 	if (!fw_event)
1505 		return;
1506 
1507 	if (fw_event->event_data != NULL) {
1508 		free(fw_event->event_data, M_MPI3MR);
1509 		fw_event->event_data = NULL;
1510 	}
1511 
1512 	free(fw_event, M_MPI3MR);
1513 	fw_event = NULL;
1514 }
1515 
1516 static void
1517 mpi3mr_freeup_events(struct mpi3mr_softc *sc)
1518 {
1519 	struct mpi3mr_fw_event_work *fw_event = NULL;
1520 	mtx_lock(&sc->mpi3mr_mtx);
1521 	while ((fw_event = TAILQ_FIRST(&sc->cam_sc->ev_queue)) != NULL) {
1522 		TAILQ_REMOVE(&sc->cam_sc->ev_queue, fw_event, ev_link);
1523 		mpi3mr_fw_event_free(sc, fw_event);
1524 	}
1525 	mtx_unlock(&sc->mpi3mr_mtx);
1526 }
1527 
1528 static void
1529 mpi3mr_sastopochg_evt_debug(struct mpi3mr_softc *sc,
1530 	Mpi3EventDataSasTopologyChangeList_t *event_data)
1531 {
1532 	int i;
1533 	U16 handle;
1534 	U8 reason_code, phy_number;
1535 	char *status_str = NULL;
1536 	U8 link_rate, prev_link_rate;
1537 
1538 	switch (event_data->ExpStatus) {
1539 	case MPI3_EVENT_SAS_TOPO_ES_NOT_RESPONDING:
1540 		status_str = "remove";
1541 		break;
1542 	case MPI3_EVENT_SAS_TOPO_ES_RESPONDING:
1543 		status_str =  "responding";
1544 		break;
1545 	case MPI3_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING:
1546 		status_str = "remove delay";
1547 		break;
1548 	case MPI3_EVENT_SAS_TOPO_ES_NO_EXPANDER:
1549 		status_str = "direct attached";
1550 		break;
1551 	default:
1552 		status_str = "unknown status";
1553 		break;
1554 	}
1555 
1556 	mpi3mr_dprint(sc, MPI3MR_INFO, "%s :sas topology change: (%s)\n",
1557 	    __func__, status_str);
1558 	mpi3mr_dprint(sc, MPI3MR_INFO,
1559 		"%s :\texpander_handle(0x%04x), enclosure_handle(0x%04x) "
1560 	    "start_phy(%02d), num_entries(%d)\n", __func__,
1561 	    (event_data->ExpanderDevHandle),
1562 	    (event_data->EnclosureHandle),
1563 	    event_data->StartPhyNum, event_data->NumEntries);
1564 	for (i = 0; i < event_data->NumEntries; i++) {
1565 		handle = (event_data->PhyEntry[i].AttachedDevHandle);
1566 		if (!handle)
1567 			continue;
1568 		phy_number = event_data->StartPhyNum + i;
1569 		reason_code = event_data->PhyEntry[i].Status &
1570 		    MPI3_EVENT_SAS_TOPO_PHY_RC_MASK;
1571 		switch (reason_code) {
1572 		case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING:
1573 			status_str = "target remove";
1574 			break;
1575 		case MPI3_EVENT_SAS_TOPO_PHY_RC_DELAY_NOT_RESPONDING:
1576 			status_str = "delay target remove";
1577 			break;
1578 		case MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED:
1579 			status_str = "link rate change";
1580 			break;
1581 		case MPI3_EVENT_SAS_TOPO_PHY_RC_NO_CHANGE:
1582 			status_str = "target responding";
1583 			break;
1584 		default:
1585 			status_str = "unknown";
1586 			break;
1587 		}
1588 		link_rate = event_data->PhyEntry[i].LinkRate >> 4;
1589 		prev_link_rate = event_data->PhyEntry[i].LinkRate & 0xF;
1590 		mpi3mr_dprint(sc, MPI3MR_INFO, "%s :\tphy(%02d), attached_handle(0x%04x): %s:"
1591 		    " link rate: new(0x%02x), old(0x%02x)\n", __func__,
1592 		    phy_number, handle, status_str, link_rate, prev_link_rate);
1593 	}
1594 }
1595 
1596 static void
1597 mpi3mr_process_sastopochg_evt(struct mpi3mr_softc *sc, struct mpi3mr_fw_event_work *fwevt)
1598 {
1599 
1600 	Mpi3EventDataSasTopologyChangeList_t *event_data =
1601 		    (Mpi3EventDataSasTopologyChangeList_t *)fwevt->event_data;
1602 	int i;
1603 	U16 handle;
1604 	U8 reason_code, link_rate;
1605 	struct mpi3mr_target *target = NULL;
1606 
1607 
1608 	mpi3mr_sastopochg_evt_debug(sc, event_data);
1609 
1610 	for (i = 0; i < event_data->NumEntries; i++) {
1611 		handle = le16toh(event_data->PhyEntry[i].AttachedDevHandle);
1612 		link_rate = event_data->PhyEntry[i].LinkRate >> 4;
1613 
1614 		if (!handle)
1615 			continue;
1616 		target = mpi3mr_find_target_by_dev_handle(sc->cam_sc, handle);
1617 
1618 		if (!target)
1619 			continue;
1620 
1621 		target->link_rate = link_rate;
1622 		reason_code = event_data->PhyEntry[i].Status &
1623 			MPI3_EVENT_SAS_TOPO_PHY_RC_MASK;
1624 
1625 		switch (reason_code) {
1626 		case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING:
1627 			if (target->exposed_to_os)
1628 				mpi3mr_remove_device_from_os(sc, target->dev_handle);
1629 			mpi3mr_remove_device_from_list(sc, target, false);
1630 			break;
1631 		case MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED:
1632 			break;
1633 		default:
1634 			break;
1635 		}
1636 	}
1637 
1638 	/*
1639 	 * refcount was incremented for this event in
1640 	 * mpi3mr_evt_handler. Decrement it here because the event has
1641 	 * been processed.
1642 	 */
1643 	mpi3mr_startup_decrement(sc->cam_sc);
1644 	return;
1645 }
1646 
/*
 * mpi3mr_logdata_evt_bh - Bottom-half for a MPI3_EVENT_LOG_DATA event
 * @sc: Adapter instance reference
 * @fwevt: Firmware event carrying the log data payload
 *
 * Forwards the raw event payload to the application layer for caching.
 */
static inline void
mpi3mr_logdata_evt_bh(struct mpi3mr_softc *sc,
		      struct mpi3mr_fw_event_work *fwevt)
{
	mpi3mr_app_save_logdata(sc, fwevt->event_data,
				fwevt->event_data_size);
}
1654 
/*
 * mpi3mr_pcietopochg_evt_debug - Log a PCIe topology change list event
 * @sc: Adapter instance reference
 * @event_data: The event payload from firmware
 *
 * Prints the switch status and one line per port entry: reason code,
 * attached handle, and the current/previous negotiated link rates.
 */
static void
mpi3mr_pcietopochg_evt_debug(struct mpi3mr_softc *sc,
	Mpi3EventDataPcieTopologyChangeList_t *event_data)
{
	int i;
	U16 handle;
	U16 reason_code;
	U8 port_number;
	char *status_str = NULL;
	U8 link_rate, prev_link_rate;

	/* Translate the switch status code into a human-readable tag. */
	switch (event_data->SwitchStatus) {
	case MPI3_EVENT_PCIE_TOPO_SS_NOT_RESPONDING:
		status_str = "remove";
		break;
	case MPI3_EVENT_PCIE_TOPO_SS_RESPONDING:
		status_str =  "responding";
		break;
	case MPI3_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING:
		status_str = "remove delay";
		break;
	case MPI3_EVENT_PCIE_TOPO_SS_NO_PCIE_SWITCH:
		status_str = "direct attached";
		break;
	default:
		status_str = "unknown status";
		break;
	}
	mpi3mr_dprint(sc, MPI3MR_INFO, "%s :pcie topology change: (%s)\n",
		__func__, status_str);
	mpi3mr_dprint(sc, MPI3MR_INFO,
		"%s :\tswitch_handle(0x%04x), enclosure_handle(0x%04x)"
		"start_port(%02d), num_entries(%d)\n", __func__,
		le16toh(event_data->SwitchDevHandle),
		le16toh(event_data->EnclosureHandle),
		event_data->StartPortNum, event_data->NumEntries);
	for (i = 0; i < event_data->NumEntries; i++) {
		handle =
			le16toh(event_data->PortEntry[i].AttachedDevHandle);
		/* Skip entries with no attached device. */
		if (!handle)
			continue;
		port_number = event_data->StartPortNum + i;
		reason_code = event_data->PortEntry[i].PortStatus;
		switch (reason_code) {
		case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
			status_str = "target remove";
			break;
		case MPI3_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING:
			status_str = "delay target remove";
			break;
		case MPI3_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
			status_str = "link rate change";
			break;
		case MPI3_EVENT_PCIE_TOPO_PS_NO_CHANGE:
			status_str = "target responding";
			break;
		default:
			status_str = "unknown";
			break;
		}
		/* Link rates are encoded in the port-info rate mask bits. */
		link_rate = event_data->PortEntry[i].CurrentPortInfo &
			MPI3_EVENT_PCIE_TOPO_PI_RATE_MASK;
		prev_link_rate = event_data->PortEntry[i].PreviousPortInfo &
			MPI3_EVENT_PCIE_TOPO_PI_RATE_MASK;
		mpi3mr_dprint(sc, MPI3MR_INFO, "%s :\tport(%02d), attached_handle(0x%04x): %s:"
		    " link rate: new(0x%02x), old(0x%02x)\n", __func__,
		    port_number, handle, status_str, link_rate, prev_link_rate);
	}
}
1724 
1725 static void mpi3mr_process_pcietopochg_evt(struct mpi3mr_softc *sc,
1726     struct mpi3mr_fw_event_work *fwevt)
1727 {
1728 	Mpi3EventDataPcieTopologyChangeList_t *event_data =
1729 		    (Mpi3EventDataPcieTopologyChangeList_t *)fwevt->event_data;
1730 	int i;
1731 	U16 handle;
1732 	U8 reason_code, link_rate;
1733 	struct mpi3mr_target *target = NULL;
1734 
1735 
1736 	mpi3mr_pcietopochg_evt_debug(sc, event_data);
1737 
1738 	for (i = 0; i < event_data->NumEntries; i++) {
1739 		handle =
1740 			le16toh(event_data->PortEntry[i].AttachedDevHandle);
1741 		if (!handle)
1742 			continue;
1743 		target = mpi3mr_find_target_by_dev_handle(sc->cam_sc, handle);
1744 		if (!target)
1745 			continue;
1746 
1747 		link_rate = event_data->PortEntry[i].CurrentPortInfo &
1748 			MPI3_EVENT_PCIE_TOPO_PI_RATE_MASK;
1749 		target->link_rate = link_rate;
1750 
1751 		reason_code = event_data->PortEntry[i].PortStatus;
1752 
1753 		switch (reason_code) {
1754 		case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
1755 			if (target->exposed_to_os)
1756 				mpi3mr_remove_device_from_os(sc, target->dev_handle);
1757 			mpi3mr_remove_device_from_list(sc, target, false);
1758 			break;
1759 		case MPI3_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
1760 			break;
1761 		default:
1762 			break;
1763 		}
1764 	}
1765 
1766 	/*
1767 	 * refcount was incremented for this event in
1768 	 * mpi3mr_evt_handler. Decrement it here because the event has
1769 	 * been processed.
1770 	 */
1771 	mpi3mr_startup_decrement(sc->cam_sc);
1772 	return;
1773 }
1774 
1775 void mpi3mr_add_device(struct mpi3mr_softc *sc, U16 per_id)
1776 {
1777 	struct mpi3mr_target *target;
1778 
1779 	mpi3mr_dprint(sc, MPI3MR_EVENT,
1780 		"Adding device(persistent id: 0x%x)\n", per_id);
1781 
1782 	mpi3mr_startup_increment(sc->cam_sc);
1783 	target = mpi3mr_find_target_by_per_id(sc->cam_sc, per_id);
1784 
1785 	if (!target) {
1786 		mpi3mr_dprint(sc, MPI3MR_INFO, "Not available in driver's"
1787 		    "internal target list, persistent_id: %d\n",
1788 		    per_id);
1789 		goto out;
1790 	}
1791 
1792 	if (target->is_hidden) {
1793 		mpi3mr_dprint(sc, MPI3MR_EVENT, "Target is hidden, persistent_id: %d\n",
1794 			per_id);
1795 		goto out;
1796 	}
1797 
1798 	if (!target->exposed_to_os && !sc->reset_in_progress) {
1799 		mpi3mr_rescan_target(sc, target);
1800 		mpi3mr_dprint(sc, MPI3MR_INFO,
1801 			"Added device persistent_id: %d dev_handle: %d\n", per_id, target->dev_handle);
1802 		target->exposed_to_os = 1;
1803 	}
1804 
1805 out:
1806 	mpi3mr_startup_decrement(sc->cam_sc);
1807 }
1808 
1809 int mpi3mr_remove_device_from_os(struct mpi3mr_softc *sc, U16 handle)
1810 {
1811 	U32 i = 0;
1812 	int retval = 0;
1813 	struct mpi3mr_target *target;
1814 
1815 	mpi3mr_dprint(sc, MPI3MR_EVENT,
1816 		"Removing Device (dev_handle: %d)\n", handle);
1817 
1818 	target = mpi3mr_find_target_by_dev_handle(sc->cam_sc, handle);
1819 
1820 	if (!target) {
1821 		mpi3mr_dprint(sc, MPI3MR_INFO,
1822 			"Device (persistent_id: %d dev_handle: %d) is already removed from driver's list\n",
1823 			target->per_id, handle);
1824 		mpi3mr_rescan_target(sc, NULL);
1825 		retval = -1;
1826 		goto out;
1827 	}
1828 
1829 	target->flags |= MPI3MRSAS_TARGET_INREMOVAL;
1830 
1831 	while (mpi3mr_atomic_read(&target->outstanding) && (i < 30)) {
1832 		i++;
1833 		if (!(i % 2)) {
1834 			mpi3mr_dprint(sc, MPI3MR_INFO,
1835 			    "[%2d]waiting for "
1836 			    "waiting for outstanding commands to complete on target: %d\n",
1837 			    i, target->per_id);
1838 		}
1839 		DELAY(1000 * 1000);
1840 	}
1841 
1842 	if (target->exposed_to_os && !sc->reset_in_progress) {
1843 		mpi3mr_rescan_target(sc, target);
1844 		mpi3mr_dprint(sc, MPI3MR_INFO,
1845 			"Removed device(persistent_id: %d dev_handle: %d)\n", target->per_id, handle);
1846 		target->exposed_to_os = 0;
1847 	}
1848 
1849 	target->flags &= ~MPI3MRSAS_TARGET_INREMOVAL;
1850 out:
1851 	return retval;
1852 }
1853 
1854 void mpi3mr_remove_device_from_list(struct mpi3mr_softc *sc,
1855 	struct mpi3mr_target *target, bool must_delete)
1856 {
1857 	mtx_lock_spin(&sc->target_lock);
1858 	if ((target->state == MPI3MR_DEV_REMOVE_HS_STARTED) ||
1859 	    (must_delete == true)) {
1860 		TAILQ_REMOVE(&sc->cam_sc->tgt_list, target, tgt_next);
1861 		target->state = MPI3MR_DEV_DELETED;
1862 	}
1863 	mtx_unlock_spin(&sc->target_lock);
1864 
1865 	if (target->state == MPI3MR_DEV_DELETED) {
1866  		free(target, M_MPI3MR);
1867  		target = NULL;
1868  	}
1869 
1870 	return;
1871 }
1872 
1873 /**
1874  * mpi3mr_devstatuschg_evt_bh - DevStatusChange evt bottomhalf
1875  * @sc: Adapter instance reference
1876  * @fwevt: Firmware event
1877  *
1878  * Process Device Status Change event and based on device's new
1879  * information, either expose the device to the upper layers, or
1880  * remove the device from upper layers.
1881  *
1882  * Return: Nothing.
1883  */
1884 static void mpi3mr_devstatuschg_evt_bh(struct mpi3mr_softc *sc,
1885 	struct mpi3mr_fw_event_work *fwevt)
1886 {
1887 	U16 dev_handle = 0;
1888 	U8 uhide = 0, delete = 0, cleanup = 0;
1889 	struct mpi3mr_target *tgtdev = NULL;
1890 	Mpi3EventDataDeviceStatusChange_t *evtdata =
1891 	    (Mpi3EventDataDeviceStatusChange_t *)fwevt->event_data;
1892 
1893 
1894 
1895 	dev_handle = le16toh(evtdata->DevHandle);
1896 	mpi3mr_dprint(sc, MPI3MR_INFO,
1897 	    "%s :device status change: handle(0x%04x): reason code(0x%x)\n",
1898 	    __func__, dev_handle, evtdata->ReasonCode);
1899 	switch (evtdata->ReasonCode) {
1900 	case MPI3_EVENT_DEV_STAT_RC_HIDDEN:
1901 		delete = 1;
1902 		break;
1903 	case MPI3_EVENT_DEV_STAT_RC_NOT_HIDDEN:
1904 		uhide = 1;
1905 		break;
1906 	case MPI3_EVENT_DEV_STAT_RC_VD_NOT_RESPONDING:
1907 		delete = 1;
1908 		cleanup = 1;
1909 		break;
1910 	default:
1911 		mpi3mr_dprint(sc, MPI3MR_INFO, "%s :Unhandled reason code(0x%x)\n", __func__,
1912 		    evtdata->ReasonCode);
1913 		break;
1914 	}
1915 
1916 	tgtdev = mpi3mr_find_target_by_dev_handle(sc->cam_sc, dev_handle);
1917 	if (!tgtdev)
1918 		return;
1919 
1920 	if (uhide) {
1921 		if (!tgtdev->exposed_to_os)
1922 			mpi3mr_add_device(sc, tgtdev->per_id);
1923 	}
1924 
1925 	if (delete)
1926 		mpi3mr_remove_device_from_os(sc, dev_handle);
1927 
1928 	if (cleanup)
1929 		mpi3mr_remove_device_from_list(sc, tgtdev, false);
1930 }
1931 
1932 /**
1933  * mpi3mr_devinfochg_evt_bh - DeviceInfoChange evt bottomhalf
1934  * @sc: Adapter instance reference
1935  * @dev_pg0: New device page0
1936  *
1937  * Process Device Info Change event and based on device's new
1938  * information, either expose the device to the upper layers, or
1939  * remove the device from upper layers or update the details of
1940  * the device.
1941  *
1942  * Return: Nothing.
1943  */
1944 static void mpi3mr_devinfochg_evt_bh(struct mpi3mr_softc *sc,
1945 	Mpi3DevicePage0_t *dev_pg0)
1946 {
1947 	struct mpi3mr_target *tgtdev = NULL;
1948 	U16 dev_handle = 0, perst_id = 0;
1949 
1950 	perst_id = le16toh(dev_pg0->PersistentID);
1951 	dev_handle = le16toh(dev_pg0->DevHandle);
1952 	mpi3mr_dprint(sc, MPI3MR_INFO,
1953 	    "%s :Device info change: handle(0x%04x): persist_id(0x%x)\n",
1954 	    __func__, dev_handle, perst_id);
1955 	tgtdev = mpi3mr_find_target_by_dev_handle(sc->cam_sc, dev_handle);
1956 	if (!tgtdev)
1957 		return;
1958 
1959 	mpi3mr_update_device(sc, tgtdev, dev_pg0, false);
1960 	if (!tgtdev->is_hidden && !tgtdev->exposed_to_os)
1961 		mpi3mr_add_device(sc, perst_id);
1962 
1963 	if (tgtdev->is_hidden && tgtdev->exposed_to_os)
1964 		mpi3mr_remove_device_from_os(sc, tgtdev->dev_handle);
1965 }
1966 
1967 static void
1968 mpi3mr_fw_work(struct mpi3mr_softc *sc, struct mpi3mr_fw_event_work *fw_event)
1969 {
1970 	if (sc->mpi3mr_flags & MPI3MR_FLAGS_SHUTDOWN)
1971 		goto out;
1972 
1973 	if (!fw_event->process_event)
1974 		goto evt_ack;
1975 
1976 	mpi3mr_dprint(sc, MPI3MR_EVENT, "(%d)->(%s) Working on  Event: [%x]\n",
1977 	    event_count++, __func__, fw_event->event);
1978 
1979 	switch (fw_event->event) {
1980 	case MPI3_EVENT_DEVICE_ADDED:
1981 	{
1982 		Mpi3DevicePage0_t *dev_pg0 =
1983 			(Mpi3DevicePage0_t *) fw_event->event_data;
1984 		mpi3mr_add_device(sc, dev_pg0->PersistentID);
1985 		break;
1986 	}
1987 	case MPI3_EVENT_DEVICE_INFO_CHANGED:
1988 	{
1989 		mpi3mr_devinfochg_evt_bh(sc,
1990 		    (Mpi3DevicePage0_t *) fw_event->event_data);
1991 		break;
1992 	}
1993 	case MPI3_EVENT_DEVICE_STATUS_CHANGE:
1994 	{
1995 		mpi3mr_devstatuschg_evt_bh(sc, fw_event);
1996 		break;
1997 	}
1998 	case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
1999 	{
2000 		mpi3mr_process_sastopochg_evt(sc, fw_event);
2001 		break;
2002 	}
2003 	case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
2004 	{
2005 		mpi3mr_process_pcietopochg_evt(sc, fw_event);
2006 		break;
2007 	}
2008 	case MPI3_EVENT_LOG_DATA:
2009 	{
2010 		mpi3mr_logdata_evt_bh(sc, fw_event);
2011 		break;
2012 	}
2013 	default:
2014 		mpi3mr_dprint(sc, MPI3MR_TRACE,"Unhandled event 0x%0X\n",
2015 		    fw_event->event);
2016 		break;
2017 
2018 	}
2019 
2020 evt_ack:
2021 	if (fw_event->send_ack) {
2022 		mpi3mr_dprint(sc, MPI3MR_EVENT,"Process event ACK for event 0x%0X\n",
2023 		    fw_event->event);
2024 		mpi3mr_process_event_ack(sc, fw_event->event,
2025 		    fw_event->event_context);
2026 	}
2027 
2028 out:
2029 	mpi3mr_dprint(sc, MPI3MR_EVENT, "(%d)->(%s) Event Free: [%x]\n", event_count,
2030 	    __func__, fw_event->event);
2031 
2032 	mpi3mr_fw_event_free(sc, fw_event);
2033 }
2034 
2035 void
2036 mpi3mr_firmware_event_work(void *arg, int pending)
2037 {
2038 	struct mpi3mr_fw_event_work *fw_event;
2039 	struct mpi3mr_softc *sc;
2040 
2041 	sc = (struct mpi3mr_softc *)arg;
2042 
2043 	mtx_lock(&sc->fwevt_lock);
2044 	while ((fw_event = TAILQ_FIRST(&sc->cam_sc->ev_queue)) != NULL) {
2045 		TAILQ_REMOVE(&sc->cam_sc->ev_queue, fw_event, ev_link);
2046 		mtx_unlock(&sc->fwevt_lock);
2047 		mpi3mr_fw_work(sc, fw_event);
2048 		mtx_lock(&sc->fwevt_lock);
2049 	}
2050 	mtx_unlock(&sc->fwevt_lock);
2051 }
2052 
2053 
2054 /*
2055  * mpi3mr_cam_attach - CAM layer registration
2056  * @sc: Adapter reference
2057  *
2058  * This function does simq allocation, cam registration, xpt_bus registration,
2059  * event taskqueue initialization and async event handler registration.
2060  *
2061  * Return: 0 on success and proper error codes on failure
2062  */
2063 int
2064 mpi3mr_cam_attach(struct mpi3mr_softc *sc)
2065 {
2066 	struct mpi3mr_cam_softc *cam_sc;
2067 	cam_status status;
2068 	int unit, error = 0, reqs;
2069 
2070 	mpi3mr_dprint(sc, MPI3MR_XINFO, "Starting CAM Attach\n");
2071 
2072 	cam_sc = malloc(sizeof(struct mpi3mr_cam_softc), M_MPI3MR, M_WAITOK|M_ZERO);
2073 	if (!cam_sc) {
2074 		mpi3mr_dprint(sc, MPI3MR_ERROR,
2075 		    "Failed to allocate memory for controller CAM instance\n");
2076 		return (ENOMEM);
2077 	}
2078 
2079 	cam_sc->maxtargets = sc->facts.max_perids + 1;
2080 
2081 	TAILQ_INIT(&cam_sc->tgt_list);
2082 
2083 	sc->cam_sc = cam_sc;
2084 	cam_sc->sc = sc;
2085 
2086 	reqs = sc->max_host_ios;
2087 
2088 	if ((cam_sc->devq = cam_simq_alloc(reqs)) == NULL) {
2089 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to allocate SIMQ\n");
2090 		error = ENOMEM;
2091 		goto out;
2092 	}
2093 
2094 	unit = device_get_unit(sc->mpi3mr_dev);
2095 	cam_sc->sim = cam_sim_alloc(mpi3mr_cam_action, mpi3mr_cam_poll, "mpi3mr", cam_sc,
2096 	    unit, &sc->mpi3mr_mtx, reqs, reqs, cam_sc->devq);
2097 	if (cam_sc->sim == NULL) {
2098 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to allocate SIM\n");
2099 		error = EINVAL;
2100 		goto out;
2101 	}
2102 
2103 	TAILQ_INIT(&cam_sc->ev_queue);
2104 
2105 	/* Initialize taskqueue for Event Handling */
2106 	TASK_INIT(&cam_sc->ev_task, 0, mpi3mr_firmware_event_work, sc);
2107 	cam_sc->ev_tq = taskqueue_create("mpi3mr_taskq", M_NOWAIT | M_ZERO,
2108 	    taskqueue_thread_enqueue, &cam_sc->ev_tq);
2109 	taskqueue_start_threads(&cam_sc->ev_tq, 1, PRIBIO, "%s taskq",
2110 	    device_get_nameunit(sc->mpi3mr_dev));
2111 
2112 	mtx_lock(&sc->mpi3mr_mtx);
2113 
2114 	/*
2115 	 * XXX There should be a bus for every port on the adapter, but since
2116 	 * we're just going to fake the topology for now, we'll pretend that
2117 	 * everything is just a target on a single bus.
2118 	 */
2119 	if ((error = xpt_bus_register(cam_sc->sim, sc->mpi3mr_dev, 0)) != 0) {
2120 		mpi3mr_dprint(sc, MPI3MR_ERROR,
2121 		    "Error 0x%x registering SCSI bus\n", error);
2122 		mtx_unlock(&sc->mpi3mr_mtx);
2123 		goto out;
2124 	}
2125 
2126 	/*
2127 	 * Assume that discovery events will start right away.
2128 	 *
2129 	 * Hold off boot until discovery is complete.
2130 	 */
2131 	cam_sc->flags |= MPI3MRSAS_IN_STARTUP | MPI3MRSAS_IN_DISCOVERY;
2132 	sc->cam_sc->startup_refcount = 0;
2133 	mpi3mr_startup_increment(cam_sc);
2134 
2135 	callout_init(&cam_sc->discovery_callout, 1 /*mpsafe*/);
2136 
2137 	/*
2138 	 * Register for async events so we can determine the EEDP
2139 	 * capabilities of devices.
2140 	 */
2141 	status = xpt_create_path(&cam_sc->path, /*periph*/NULL,
2142 	    cam_sim_path(sc->cam_sc->sim), CAM_TARGET_WILDCARD,
2143 	    CAM_LUN_WILDCARD);
2144 	if (status != CAM_REQ_CMP) {
2145 		mpi3mr_dprint(sc, MPI3MR_ERROR,
2146 		    "Error 0x%x creating sim path\n", status);
2147 		cam_sc->path = NULL;
2148 	}
2149 
2150 	if (status != CAM_REQ_CMP) {
2151 		/*
2152 		 * EEDP use is the exception, not the rule.
2153 		 * Warn the user, but do not fail to attach.
2154 		 */
2155 		mpi3mr_dprint(sc, MPI3MR_INFO, "EEDP capabilities disabled.\n");
2156 	}
2157 
2158 	mtx_unlock(&sc->mpi3mr_mtx);
2159 
2160 	error = mpi3mr_register_events(sc);
2161 
2162 out:
2163 	mpi3mr_dprint(sc, MPI3MR_XINFO, "%s Exiting CAM attach, error: 0x%x n", __func__, error);
2164 	return (error);
2165 }
2166 
2167 int
2168 mpi3mr_cam_detach(struct mpi3mr_softc *sc)
2169 {
2170 	struct mpi3mr_cam_softc *cam_sc;
2171 	struct mpi3mr_target *target;
2172 
2173 	mpi3mr_dprint(sc, MPI3MR_XINFO, "%s, Starting CAM detach\n", __func__);
2174 	if (sc->cam_sc == NULL)
2175 		return (0);
2176 
2177 	cam_sc = sc->cam_sc;
2178 
2179 	mpi3mr_freeup_events(sc);
2180 
2181 	/*
2182 	 * Drain and free the event handling taskqueue with the lock
2183 	 * unheld so that any parallel processing tasks drain properly
2184 	 * without deadlocking.
2185 	 */
2186 	if (cam_sc->ev_tq != NULL)
2187 		taskqueue_free(cam_sc->ev_tq);
2188 
2189 	mtx_lock(&sc->mpi3mr_mtx);
2190 
2191 	while (cam_sc->startup_refcount != 0)
2192 		mpi3mr_startup_decrement(cam_sc);
2193 
2194 	/* Deregister our async handler */
2195 	if (cam_sc->path != NULL) {
2196 		xpt_free_path(cam_sc->path);
2197 		cam_sc->path = NULL;
2198 	}
2199 
2200 	if (cam_sc->flags & MPI3MRSAS_IN_STARTUP)
2201 		xpt_release_simq(cam_sc->sim, 1);
2202 
2203 	if (cam_sc->sim != NULL) {
2204 		xpt_bus_deregister(cam_sim_path(cam_sc->sim));
2205 		cam_sim_free(cam_sc->sim, FALSE);
2206 	}
2207 
2208 	mtx_unlock(&sc->mpi3mr_mtx);
2209 
2210 	if (cam_sc->devq != NULL)
2211 		cam_simq_free(cam_sc->devq);
2212 
2213 get_target:
2214 	mtx_lock_spin(&sc->target_lock);
2215  	TAILQ_FOREACH(target, &cam_sc->tgt_list, tgt_next) {
2216  		TAILQ_REMOVE(&sc->cam_sc->tgt_list, target, tgt_next);
2217 		mtx_unlock_spin(&sc->target_lock);
2218 		goto out_tgt_free;
2219 	}
2220 	mtx_unlock_spin(&sc->target_lock);
2221 out_tgt_free:
2222 	if (target) {
2223 		free(target, M_MPI3MR);
2224 		target = NULL;
2225 		goto get_target;
2226  	}
2227 
2228 	free(cam_sc, M_MPI3MR);
2229 	sc->cam_sc = NULL;
2230 
2231 	mpi3mr_dprint(sc, MPI3MR_XINFO, "%s, Exiting CAM detach\n", __func__);
2232 	return (0);
2233 }
2234