xref: /freebsd/sys/dev/mpi3mr/mpi3mr_cam.c (revision bdd1243df58e60e85101c09001d9812a789b6bc4)
1 /*
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2020-2023, Broadcom Inc. All rights reserved.
5  * Support: <fbsd-storage-driver.pdl@broadcom.com>
6  *
7  * Authors: Sumit Saxena <sumit.saxena@broadcom.com>
8  *	    Chandrakanth Patil <chandrakanth.patil@broadcom.com>
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions are
12  * met:
13  *
14  * 1. Redistributions of source code must retain the above copyright notice,
15  *    this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright notice,
17  *    this list of conditions and the following disclaimer in the documentation and/or other
18  *    materials provided with the distribution.
19  * 3. Neither the name of the Broadcom Inc. nor the names of its contributors
20  *    may be used to endorse or promote products derived from this software without
21  *    specific prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
24  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
27  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
30  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
31  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
32  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
33  * POSSIBILITY OF SUCH DAMAGE.
34  *
35  * The views and conclusions contained in the software and documentation are
36  * those of the authors and should not be interpreted as representing
37  * official policies,either expressed or implied, of the FreeBSD Project.
38  *
39  * Mail to: Broadcom Inc 1320 Ridder Park Dr, San Jose, CA 95131
40  *
41  * Broadcom Inc. (Broadcom) MPI3MR Adapter FreeBSD
42  */
43 
44 #include <sys/cdefs.h>
45 __FBSDID("$FreeBSD$");
46 
47 #include <sys/types.h>
48 #include <sys/param.h>
49 #include <sys/systm.h>
50 #include <sys/kernel.h>
51 #include <sys/selinfo.h>
52 #include <sys/module.h>
53 #include <sys/bus.h>
54 #include <sys/conf.h>
55 #include <sys/bio.h>
56 #include <sys/malloc.h>
57 #include <sys/uio.h>
58 #include <sys/sysctl.h>
59 #include <sys/endian.h>
60 #include <sys/queue.h>
61 #include <sys/kthread.h>
62 #include <sys/taskqueue.h>
63 #include <sys/sbuf.h>
64 
65 #include <machine/bus.h>
66 #include <machine/resource.h>
67 #include <sys/rman.h>
68 
69 #include <machine/stdarg.h>
70 
71 #include <cam/cam.h>
72 #include <cam/cam_ccb.h>
73 #include <cam/cam_debug.h>
74 #include <cam/cam_sim.h>
75 #include <cam/cam_xpt_sim.h>
76 #include <cam/cam_xpt_periph.h>
77 #include <cam/cam_periph.h>
78 #include <cam/scsi/scsi_all.h>
79 #include <cam/scsi/scsi_message.h>
80 #include <cam/scsi/smp_all.h>
81 
82 #include <dev/nvme/nvme.h>
83 #include "mpi/mpi30_api.h"
84 #include "mpi3mr_cam.h"
85 #include "mpi3mr.h"
86 #include <sys/time.h>			/* XXX for pcpu.h */
87 #include <sys/pcpu.h>			/* XXX for PCPU_GET */
88 
89 #define	smp_processor_id()  PCPU_GET(cpuid)
90 
91 static int
92 mpi3mr_map_request(struct mpi3mr_softc *sc, struct mpi3mr_cmd *cm);
93 void
94 mpi3mr_release_simq_reinit(struct mpi3mr_cam_softc *cam_sc);
95 static void
96 mpi3mr_freeup_events(struct mpi3mr_softc *sc);
97 
98 extern int
99 mpi3mr_register_events(struct mpi3mr_softc *sc);
100 extern void mpi3mr_add_sg_single(void *paddr, U8 flags, U32 length,
101     bus_addr_t dma_addr);
102 extern void mpi3mr_build_zero_len_sge(void *paddr);
103 
104 static U32 event_count;
105 
/*
 * mpi3mr_prepare_sgls - busdma load callback that builds the MPI3 SGL
 * @arg:   the mpi3mr_cmd being mapped (opaque busdma callback argument)
 * @segs:  DMA segment array produced by bus_dmamap_load_ccb()
 * @nsegs: number of valid entries in @segs
 * @error: busdma load error (0 on success)
 *
 * Fills the SGL portion of the command's Mpi3SCSIIORequest with simple
 * SGEs for each DMA segment.  When the segments do not all fit in the
 * request frame, the remainder is placed in the per-command chain buffer
 * and linked in with a LAST_CHAIN SGE.  Also issues the PRE* cache syncs
 * appropriate for the data direction before the hardware sees the buffers.
 */
static void mpi3mr_prepare_sgls(void *arg,
	bus_dma_segment_t *segs, int nsegs, int error)
{
	struct mpi3mr_softc *sc;
	struct mpi3mr_cmd *cm;
	u_int i;
	bus_addr_t chain_dma;
	void *chain;
	U8 *sg_local;
	U32 chain_length;
	int sges_left;
	U32 sges_in_segment;
	U8 simple_sgl_flags;
	U8 simple_sgl_flags_last;
	U8 last_chain_sgl_flags;
	struct mpi3mr_chain *chain_req;
	Mpi3SCSIIORequest_t *scsiio_req;

	cm = (struct mpi3mr_cmd *)arg;
	sc = cm->sc;
	scsiio_req = (Mpi3SCSIIORequest_t *) &cm->io_request;

	if (error) {
		/* Record the error for mpi3mr_map_request() to report. */
		cm->error_code = error;
		device_printf(sc->mpi3mr_dev, "%s: error=%d\n",__func__, error);
		if (error == EFBIG) {
			cm->ccb->ccb_h.status = CAM_REQ_TOO_BIG;
			return;
		}
		/*
		 * NOTE(review): errors other than EFBIG fall through and the
		 * SGL is still built from @segs — confirm busdma guarantees
		 * @segs/@nsegs are usable in that case.
		 */
	}

	/* Sync CPU caches before the device reads/writes the buffers. */
	if (cm->data_dir == MPI3MR_READ)
		bus_dmamap_sync(sc->buffer_dmat, cm->dmamap,
		    BUS_DMASYNC_PREREAD);
	if (cm->data_dir == MPI3MR_WRITE)
		bus_dmamap_sync(sc->buffer_dmat, cm->dmamap,
		    BUS_DMASYNC_PREWRITE);
	if (nsegs > MPI3MR_SG_DEPTH) {
		device_printf(sc->mpi3mr_dev, "SGE count is too large or 0.\n");
		return;
	}

	/* Flag templates for ordinary, final, and chain SGEs. */
	simple_sgl_flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_SIMPLE |
	    MPI3_SGE_FLAGS_DLAS_SYSTEM;
	simple_sgl_flags_last = simple_sgl_flags |
	    MPI3_SGE_FLAGS_END_OF_LIST;
	last_chain_sgl_flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_LAST_CHAIN |
	    MPI3_SGE_FLAGS_DLAS_SYSTEM;

	sg_local = (U8 *)&scsiio_req->SGL;

	/* No payload: the firmware still expects a zero-length SGE. */
	if (!scsiio_req->DataLength) {
		mpi3mr_build_zero_len_sge(sg_local);
		return;
	}

	sges_left = nsegs;

	if (sges_left < 0) {
		printf("scsi_dma_map failed: request for %d bytes!\n",
			scsiio_req->DataLength);
		return;
	}
	if (sges_left > MPI3MR_SG_DEPTH) {
		printf("scsi_dma_map returned unsupported sge count %d!\n",
			sges_left);
		return;
	}

	/* How many simple SGEs fit in the request frame itself. */
	sges_in_segment = (sc->facts.op_req_sz -
	    offsetof(Mpi3SCSIIORequest_t, SGL))/sizeof(Mpi3SGESimple_t);

	i = 0;

	mpi3mr_dprint(sc, MPI3MR_TRACE, "SGE count: %d IO size: %d\n",
		nsegs, scsiio_req->DataLength);

	/* Everything fits inline — no chain needed. */
	if (sges_left <= sges_in_segment)
		goto fill_in_last_segment;

	/* fill in main message segment when there is a chain following */
	while (sges_in_segment > 1) {
		mpi3mr_add_sg_single(sg_local, simple_sgl_flags,
		    segs[i].ds_len, segs[i].ds_addr);
		sg_local += sizeof(Mpi3SGESimple_t);
		sges_left--;
		sges_in_segment--;
		i++;
	}

	/* Chain buffers are indexed by host tag — one per command. */
	chain_req = &sc->chain_sgl_list[cm->hosttag];

	chain = chain_req->buf;
	chain_dma = chain_req->buf_phys;
	memset(chain_req->buf, 0, PAGE_SIZE);
	sges_in_segment = sges_left;
	chain_length = sges_in_segment * sizeof(Mpi3SGESimple_t);

	/* Link the chain buffer into the request frame. */
	mpi3mr_add_sg_single(sg_local, last_chain_sgl_flags,
	    chain_length, chain_dma);

	/* Continue emitting SGEs into the chain buffer. */
	sg_local = chain;

fill_in_last_segment:
	/* Emit the remaining SGEs; the final one carries END_OF_LIST. */
	while (sges_left > 0) {
		if (sges_left == 1)
			mpi3mr_add_sg_single(sg_local,
			    simple_sgl_flags_last, segs[i].ds_len,
			    segs[i].ds_addr);
		else
			mpi3mr_add_sg_single(sg_local, simple_sgl_flags,
			    segs[i].ds_len, segs[i].ds_addr);
		sg_local += sizeof(Mpi3SGESimple_t);
		sges_left--;
		i++;
	}

	return;
}
225 
226 int
227 mpi3mr_map_request(struct mpi3mr_softc *sc, struct mpi3mr_cmd *cm)
228 {
229 	u_int32_t retcode = 0;
230 
231 	if (cm->data != NULL) {
232 		mtx_lock(&sc->io_lock);
233 		/* Map data buffer into bus space */
234 		retcode = bus_dmamap_load_ccb(sc->buffer_dmat, cm->dmamap,
235 		    cm->ccb, mpi3mr_prepare_sgls, cm, 0);
236 		mtx_unlock(&sc->io_lock);
237 		if (retcode)
238 			device_printf(sc->mpi3mr_dev, "bus_dmamap_load(): retcode = %d\n", retcode);
239 		if (retcode == EINPROGRESS) {
240 			device_printf(sc->mpi3mr_dev, "request load in progress\n");
241 			xpt_freeze_simq(sc->cam_sc->sim, 1);
242 		}
243 	}
244 	if (cm->error_code)
245 		return cm->error_code;
246 	if (retcode)
247 		mpi3mr_set_ccbstatus(cm->ccb, CAM_REQ_INVALID);
248 
249 	return (retcode);
250 }
251 
252 void
253 mpi3mr_unmap_request(struct mpi3mr_softc *sc, struct mpi3mr_cmd *cmd)
254 {
255 	if (cmd->data != NULL) {
256 		if (cmd->data_dir == MPI3MR_READ)
257 			bus_dmamap_sync(sc->buffer_dmat, cmd->dmamap, BUS_DMASYNC_POSTREAD);
258 		if (cmd->data_dir == MPI3MR_WRITE)
259 			bus_dmamap_sync(sc->buffer_dmat, cmd->dmamap, BUS_DMASYNC_POSTWRITE);
260 		mtx_lock(&sc->io_lock);
261 		bus_dmamap_unload(sc->buffer_dmat, cmd->dmamap);
262 		mtx_unlock(&sc->io_lock);
263 	}
264 }
265 
/**
 * mpi3mr_allow_unmap_to_fw - Whether an unmap is allowed to fw
 * @sc: Adapter instance reference
 * @ccb: SCSI Command reference
 *
 * The controller hardware cannot handle certain UNMAP commands
 * for NVMe drives. This routine checks for those cases; when the
 * command cannot be passed through (or can only be passed through
 * after truncation), it completes the SCSI command with the proper
 * status and sense data, or rewrites the CDB, as appropriate.
 *
 * Return: TRUE when the (possibly adjusted) unmap may be sent to
 * the firmware, FALSE when the command was completed here.
 */
278 static bool mpi3mr_allow_unmap_to_fw(struct mpi3mr_softc *sc,
279 	union ccb *ccb)
280 {
281 	struct ccb_scsiio *csio;
282 	uint16_t param_list_len, block_desc_len, trunc_param_len = 0;
283 
284 	csio = &ccb->csio;
285 	param_list_len = (uint16_t) ((scsiio_cdb_ptr(csio)[7] << 8) | scsiio_cdb_ptr(csio)[8]);
286 
287 	switch(pci_get_revid(sc->mpi3mr_dev)) {
288 	case SAS4116_CHIP_REV_A0:
289 		if (!param_list_len) {
290 			mpi3mr_dprint(sc, MPI3MR_ERROR,
291 			    "%s: CDB received with zero parameter length\n",
292 			    __func__);
293 			mpi3mr_print_cdb(ccb);
294 			mpi3mr_set_ccbstatus(ccb, CAM_REQ_CMP);
295 			xpt_done(ccb);
296 			return false;
297 		}
298 
299 		if (param_list_len < 24) {
300 			mpi3mr_dprint(sc, MPI3MR_ERROR,
301 			    "%s: CDB received with invalid param_list_len: %d\n",
302 			    __func__, param_list_len);
303 			mpi3mr_print_cdb(ccb);
304 			scsi_set_sense_data(&ccb->csio.sense_data,
305 				/*sense_format*/ SSD_TYPE_FIXED,
306 				/*current_error*/ 1,
307 				/*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
308 				/*asc*/ 0x1A,
309 				/*ascq*/ 0x00,
310 				/*extra args*/ SSD_ELEM_NONE);
311 			ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
312 			ccb->ccb_h.status =
313 			    CAM_SCSI_STATUS_ERROR |
314 			    CAM_AUTOSNS_VALID;
315 			return false;
316 		}
317 
318 		if (param_list_len != csio->dxfer_len) {
319 			mpi3mr_dprint(sc, MPI3MR_ERROR,
320 			    "%s: CDB received with param_list_len: %d bufflen: %d\n",
321 			    __func__, param_list_len, csio->dxfer_len);
322 			mpi3mr_print_cdb(ccb);
323 			scsi_set_sense_data(&ccb->csio.sense_data,
324 				/*sense_format*/ SSD_TYPE_FIXED,
325 				/*current_error*/ 1,
326 				/*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
327 				/*asc*/ 0x1A,
328 				/*ascq*/ 0x00,
329 				/*extra args*/ SSD_ELEM_NONE);
330 			ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
331 			ccb->ccb_h.status =
332 			    CAM_SCSI_STATUS_ERROR |
333 			    CAM_AUTOSNS_VALID;
334 			xpt_done(ccb);
335 			return false;
336 		}
337 
338 		block_desc_len = (uint16_t) (csio->data_ptr[2] << 8 | csio->data_ptr[3]);
339 
340 		if (block_desc_len < 16) {
341 			mpi3mr_dprint(sc, MPI3MR_ERROR,
342 			    "%s: Invalid descriptor length in param list: %d\n",
343 			    __func__, block_desc_len);
344 			mpi3mr_print_cdb(ccb);
345 			scsi_set_sense_data(&ccb->csio.sense_data,
346 				/*sense_format*/ SSD_TYPE_FIXED,
347 				/*current_error*/ 1,
348 				/*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
349 				/*asc*/ 0x26,
350 				/*ascq*/ 0x00,
351 				/*extra args*/ SSD_ELEM_NONE);
352 			ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
353 			ccb->ccb_h.status =
354 			    CAM_SCSI_STATUS_ERROR |
355 			    CAM_AUTOSNS_VALID;
356 			xpt_done(ccb);
357 			return false;
358 		}
359 
360 		if (param_list_len > (block_desc_len + 8)) {
361 			mpi3mr_print_cdb(ccb);
362 			mpi3mr_dprint(sc, MPI3MR_INFO,
363 			    "%s: Truncating param_list_len(%d) to block_desc_len+8(%d)\n",
364 			    __func__, param_list_len, (block_desc_len + 8));
365 			param_list_len = block_desc_len + 8;
366 			scsiio_cdb_ptr(csio)[7] = (param_list_len >> 8) | 0xff;
367 			scsiio_cdb_ptr(csio)[8] = param_list_len | 0xff;
368 			mpi3mr_print_cdb(ccb);
369 		}
370 		break;
371 
372 	case SAS4116_CHIP_REV_B0:
373 		if ((param_list_len > 24) && ((param_list_len - 8) & 0xF)) {
374 			trunc_param_len -= (param_list_len - 8) & 0xF;
375 			mpi3mr_print_cdb(ccb);
376 			mpi3mr_dprint(sc, MPI3MR_INFO,
377 			    "%s: Truncating param_list_len from (%d) to (%d)\n",
378 			    __func__, param_list_len, trunc_param_len);
379 			scsiio_cdb_ptr(csio)[7] = (param_list_len >> 8) | 0xff;
380 			scsiio_cdb_ptr(csio)[8] = param_list_len | 0xff;
381 			mpi3mr_print_cdb(ccb);
382 		}
383 		break;
384 	}
385 
386 	return true;
387 }
388 
389 /**
390  * mpi3mr_tm_response_name -  get TM response as a string
391  * @resp_code: TM response code
392  *
393  * Convert known task management response code as a readable
394  * string.
395  *
396  * Return: response code string.
397  */
398 static const char* mpi3mr_tm_response_name(U8 resp_code)
399 {
400 	char *desc;
401 
402 	switch (resp_code) {
403 	case MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE:
404 		desc = "task management request completed";
405 		break;
406 	case MPI3_SCSITASKMGMT_RSPCODE_INVALID_FRAME:
407 		desc = "invalid frame";
408 		break;
409 	case MPI3_SCSITASKMGMT_RSPCODE_TM_FUNCTION_NOT_SUPPORTED:
410 		desc = "task management request not supported";
411 		break;
412 	case MPI3_SCSITASKMGMT_RSPCODE_TM_FAILED:
413 		desc = "task management request failed";
414 		break;
415 	case MPI3_SCSITASKMGMT_RSPCODE_TM_SUCCEEDED:
416 		desc = "task management request succeeded";
417 		break;
418 	case MPI3_SCSITASKMGMT_RSPCODE_TM_INVALID_LUN:
419 		desc = "invalid LUN";
420 		break;
421 	case MPI3_SCSITASKMGMT_RSPCODE_TM_OVERLAPPED_TAG:
422 		desc = "overlapped tag attempted";
423 		break;
424 	case MPI3_SCSITASKMGMT_RSPCODE_IO_QUEUED_ON_IOC:
425 		desc = "task queued, however not sent to target";
426 		break;
427 	case MPI3_SCSITASKMGMT_RSPCODE_TM_NVME_DENIED:
428 		desc = "task management request denied by NVMe device";
429 		break;
430 	default:
431 		desc = "unknown";
432 		break;
433 	}
434 
435 	return desc;
436 }
437 
438 void mpi3mr_poll_pend_io_completions(struct mpi3mr_softc *sc)
439 {
440 	int i;
441 	int num_of_reply_queues = sc->num_queues;
442 	struct mpi3mr_irq_context *irq_ctx;
443 
444 	for (i = 0; i < num_of_reply_queues; i++) {
445 		irq_ctx = &sc->irq_ctx[i];
446 		mpi3mr_complete_io_cmd(sc, irq_ctx);
447 	}
448 }
449 
450 void
451 trigger_reset_from_watchdog(struct mpi3mr_softc *sc, U8 reset_type, U32 reset_reason)
452 {
453 	if (sc->reset_in_progress) {
454 		mpi3mr_dprint(sc, MPI3MR_INFO, "Another reset is in progress, no need to trigger the reset\n");
455 		return;
456 	}
457 	sc->reset.type = reset_type;
458 	sc->reset.reason = reset_reason;
459 
460 	return;
461 }
462 
/**
 * mpi3mr_issue_tm - Issue Task Management request
 * @sc: Adapter instance reference
 * @cmd: Timed out command reference
 * @tm_type: Task Management type
 * @timeout: TM timeout value in seconds
 *
 * Issues a Task Management Request to the controller for the
 * target, lun and host tag associated with the timed out command,
 * waits for its completion and checks the TM response. Recovers
 * from a TM timeout by requesting a controller reset.
 *
 * Return: 0 on success, ETIMEDOUT if the TM timed out, -1 on other
 * errors.
 */
static int
mpi3mr_issue_tm(struct mpi3mr_softc *sc, struct mpi3mr_cmd *cmd,
		U8 tm_type, unsigned long timeout)
{
	int retval = 0;
	MPI3_SCSI_TASK_MGMT_REQUEST tm_req;
	MPI3_SCSI_TASK_MGMT_REPLY *tm_reply = NULL;
	struct mpi3mr_drvr_cmd *drv_cmd = NULL;
	struct mpi3mr_target *tgtdev = NULL;
	struct mpi3mr_op_req_queue *op_req_q = NULL;
	union ccb *ccb;
	U8 resp_code;


	/* A TM cannot help once the controller is unrecoverable or resetting. */
	if (sc->unrecoverable) {
		mpi3mr_dprint(sc, MPI3MR_INFO,
			"Controller is in unrecoverable state!! TM not required\n");
		return retval;
	}
	if (sc->reset_in_progress) {
		mpi3mr_dprint(sc, MPI3MR_INFO,
			"controller reset in progress!! TM not required\n");
		return retval;
	}

	if (!cmd->ccb) {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "SCSIIO command timed-out with NULL ccb\n");
		return retval;
	}
	ccb = cmd->ccb;

	/* TM is pointless for a target that no longer exists. */
	tgtdev = cmd->targ;
	if (tgtdev == NULL)  {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "Device does not exist target ID:0x%x,"
			      "TM is not required\n", ccb->ccb_h.target_id);
		return retval;
	}
	if (tgtdev->dev_removed == 1)  {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "Device(0x%x) is removed, TM is not required\n",
			      ccb->ccb_h.target_id);
		return retval;
	}

	/* Single shared TM tracker; the lock serializes TM issuance. */
	drv_cmd = &sc->host_tm_cmds;
	mtx_lock(&drv_cmd->lock);

	memset(&tm_req, 0, sizeof(tm_req));
	tm_req.DevHandle = htole16(tgtdev->dev_handle);
	tm_req.TaskType = tm_type;
	tm_req.HostTag = htole16(MPI3MR_HOSTTAG_TMS);
	int_to_lun(ccb->ccb_h.target_lun, tm_req.LUN);
	tm_req.Function = MPI3_FUNCTION_SCSI_TASK_MGMT;
	drv_cmd->state = MPI3MR_CMD_PENDING;
	drv_cmd->is_waiting = 1;
	drv_cmd->callback = NULL;

	/* ABORT TASK must identify the exact request being aborted. */
	if (ccb) {
		if (tm_type == MPI3_SCSITASKMGMT_TASKTYPE_ABORT_TASK) {
			op_req_q = &sc->op_req_q[cmd->req_qidx];
			tm_req.TaskHostTag = htole16(cmd->hosttag);
			tm_req.TaskRequestQueueID = htole16(op_req_q->qid);
		}
	}

	/* Block new I/O to the target while the TM is outstanding. */
	if (tgtdev)
		mpi3mr_atomic_inc(&tgtdev->block_io);

	/* PCIe (NVMe) devices advertise their own abort/reset timeouts. */
	if (tgtdev && (tgtdev->dev_type == MPI3_DEVICE_DEVFORM_PCIE)) {
		if ((tm_type == MPI3_SCSITASKMGMT_TASKTYPE_ABORT_TASK)
		     && tgtdev->dev_spec.pcie_inf.abort_to)
			timeout = tgtdev->dev_spec.pcie_inf.abort_to;
		else if ((tm_type == MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET)
			 && tgtdev->dev_spec.pcie_inf.reset_to)
			 timeout = tgtdev->dev_spec.pcie_inf.reset_to;
	}

	/*
	 * NOTE(review): this stores the address of the local pointer
	 * variable, not the tracker itself; presumably only used as an
	 * opaque wakeup channel while this function sleeps — confirm.
	 */
	sc->tm_chan = (void *)&drv_cmd;

	mpi3mr_dprint(sc, MPI3MR_DEBUG_TM,
		      "posting task management request: type(%d), handle(0x%04x)\n",
		       tm_type, tgtdev->dev_handle);

	/* Post the TM on the admin queue and wait for its completion. */
	init_completion(&drv_cmd->completion);
	retval = mpi3mr_submit_admin_cmd(sc, &tm_req, sizeof(tm_req));
	if (retval) {
		mpi3mr_dprint(sc, MPI3MR_ERROR,
			      "posting task management request is failed\n");
		retval = -1;
		goto out_unlock;
	}
	wait_for_completion_timeout_tm(&drv_cmd->completion, timeout, sc);

	if (!(drv_cmd->state & MPI3MR_CMD_COMPLETE)) {
		drv_cmd->is_waiting = 0;
		retval = -1;
		/* A reset already flushed the TM; only escalate on a real timeout. */
		if (!(drv_cmd->state & MPI3MR_CMD_RESET)) {
			mpi3mr_dprint(sc, MPI3MR_ERROR,
				      "task management request timed out after %ld seconds\n", timeout);
			if (sc->mpi3mr_debug & MPI3MR_DEBUG_TM) {
				mpi3mr_dprint(sc, MPI3MR_INFO, "tm_request dump\n");
				mpi3mr_hexdump(&tm_req, sizeof(tm_req), 8);
			}
			trigger_reset_from_watchdog(sc, MPI3MR_TRIGGER_SOFT_RESET, MPI3MR_RESET_FROM_TM_TIMEOUT);
			retval = ETIMEDOUT;
		}
		goto out_unlock;
	}

	if (!(drv_cmd->state & MPI3MR_CMD_REPLYVALID)) {
		mpi3mr_dprint(sc, MPI3MR_ERROR,
			      "invalid task management reply message\n");
		retval = -1;
		goto out_unlock;
	}
	tm_reply = (MPI3_SCSI_TASK_MGMT_REPLY *)drv_cmd->reply;

	/* Map the IOC status to a TM response code. */
	switch (drv_cmd->ioc_status) {
	case MPI3_IOCSTATUS_SUCCESS:
		resp_code = tm_reply->ResponseData & MPI3MR_RI_MASK_RESPCODE;
		break;
	case MPI3_IOCSTATUS_SCSI_IOC_TERMINATED:
		/* IOC already terminated the I/O — treat as TM complete. */
		resp_code = MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE;
		break;
	default:
		mpi3mr_dprint(sc, MPI3MR_ERROR,
			      "task management request to handle(0x%04x) is failed with ioc_status(0x%04x) log_info(0x%08x)\n",
			       tgtdev->dev_handle, drv_cmd->ioc_status, drv_cmd->ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}

	/* Evaluate the TM response code itself. */
	switch (resp_code) {
	case MPI3_SCSITASKMGMT_RSPCODE_TM_SUCCEEDED:
	case MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE:
		break;
	case MPI3_SCSITASKMGMT_RSPCODE_IO_QUEUED_ON_IOC:
		/* Only acceptable for QUERY TASK. */
		if (tm_type != MPI3_SCSITASKMGMT_TASKTYPE_QUERY_TASK)
			retval = -1;
		break;
	default:
		retval = -1;
		break;
	}

	mpi3mr_dprint(sc, MPI3MR_DEBUG_TM,
		      "task management request type(%d) completed for handle(0x%04x) with ioc_status(0x%04x), log_info(0x%08x)"
		      "termination_count(%u), response:%s(0x%x)\n", tm_type, tgtdev->dev_handle, drv_cmd->ioc_status, drv_cmd->ioc_loginfo,
		      tm_reply->TerminationCount, mpi3mr_tm_response_name(resp_code), resp_code);

	if (retval)
		goto out_unlock;

	/* Drain any completions the TM flushed out, with and without interrupts. */
	mpi3mr_disable_interrupts(sc);
	mpi3mr_poll_pend_io_completions(sc);
	mpi3mr_enable_interrupts(sc);
	mpi3mr_poll_pend_io_completions(sc);

	/* Verify the TM actually cleaned up what it claimed to. */
	switch (tm_type) {
	case MPI3_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
		/* The aborted command should no longer be marked in-TM. */
		if (cmd->state == MPI3MR_CMD_STATE_IN_TM) {
			mpi3mr_dprint(sc, MPI3MR_ERROR,
				      "%s: task abort returned success from firmware but corresponding CCB (%p) was not terminated"
				      "marking task abort failed!\n", sc->name, cmd->ccb);
			retval = -1;
		}
		break;
	case MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
		/* All I/O to the target should have been terminated. */
		if (mpi3mr_atomic_read(&tgtdev->outstanding)) {
			mpi3mr_dprint(sc, MPI3MR_ERROR,
				      "%s: target reset returned success from firmware but IOs are still pending on the target (%p)"
				      "marking target reset failed!\n",
				      sc->name, tgtdev);
			retval = -1;
		}
		break;
	default:
		break;
	}

out_unlock:
	/* Release the TM tracker and unblock I/O to the target. */
	drv_cmd->state = MPI3MR_CMD_NOTUSED;
	mtx_unlock(&drv_cmd->lock);
	if (tgtdev && mpi3mr_atomic_read(&tgtdev->block_io) > 0)
		mpi3mr_atomic_dec(&tgtdev->block_io);

	return retval;
}
669 
670 /**
671  * mpi3mr_task_abort- Abort error handling callback
672  * @cmd: Timed out command reference
673  *
674  * Issue Abort Task Management if the command is in LLD scope
675  * and verify if it is aborted successfully and return status
676  * accordingly.
677  *
678  * Return: SUCCESS of successful abort the SCSI command else FAILED
679  */
680 static int mpi3mr_task_abort(struct mpi3mr_cmd *cmd)
681 {
682 	int retval = 0;
683 	struct mpi3mr_softc *sc;
684 	union ccb *ccb;
685 
686 	sc = cmd->sc;
687 
688 	if (!cmd->ccb) {
689 		mpi3mr_dprint(sc, MPI3MR_ERROR, "SCSIIO command timed-out with NULL ccb\n");
690 		return retval;
691 	}
692 	ccb = cmd->ccb;
693 
694 	mpi3mr_dprint(sc, MPI3MR_INFO,
695 		      "attempting abort task for ccb(%p)\n", ccb);
696 
697 	mpi3mr_print_cdb(ccb);
698 
699 	if (cmd->state != MPI3MR_CMD_STATE_BUSY) {
700 		mpi3mr_dprint(sc, MPI3MR_INFO,
701 			      "%s: ccb is not in driver scope, abort task is not required\n",
702 			      sc->name);
703 		return retval;
704 	}
705 	cmd->state = MPI3MR_CMD_STATE_IN_TM;
706 
707 	retval = mpi3mr_issue_tm(sc, cmd, MPI3_SCSITASKMGMT_TASKTYPE_ABORT_TASK, MPI3MR_ABORTTM_TIMEOUT);
708 
709 	mpi3mr_dprint(sc, MPI3MR_INFO,
710 		      "abort task is %s for ccb(%p)\n", ((retval == 0) ? "SUCCESS" : "FAILED"), ccb);
711 
712 	return retval;
713 }
714 
715 /**
716  * mpi3mr_target_reset - Target reset error handling callback
717  * @cmd: Timed out command reference
718  *
719  * Issue Target reset Task Management and verify the SCSI commands are
720  * terminated successfully and return status accordingly.
721  *
722  * Return: SUCCESS of successful termination of the SCSI commands else
723  *         FAILED
724  */
725 static int mpi3mr_target_reset(struct mpi3mr_cmd *cmd)
726 {
727 	int retval = 0;
728 	struct mpi3mr_softc *sc;
729 	struct mpi3mr_target *target;
730 
731 	sc = cmd->sc;
732 
733 	target = cmd->targ;
734 	if (target == NULL)  {
735 		mpi3mr_dprint(sc, MPI3MR_XINFO, "Device does not exist for target:0x%p,"
736 			      "target reset is not required\n", target);
737 		return retval;
738 	}
739 
740 	mpi3mr_dprint(sc, MPI3MR_INFO,
741 		      "attempting target reset on target(%d)\n", target->per_id);
742 
743 
744 	if (mpi3mr_atomic_read(&target->outstanding)) {
745 		mpi3mr_dprint(sc, MPI3MR_INFO,
746 			      "no outstanding IOs on the target(%d),"
747 			      " target reset not required.\n", target->per_id);
748 		return retval;
749 	}
750 
751 	retval = mpi3mr_issue_tm(sc, cmd, MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET, MPI3MR_RESETTM_TIMEOUT);
752 
753 	mpi3mr_dprint(sc, MPI3MR_INFO,
754 		      "target reset is %s for target(%d)\n", ((retval == 0) ? "SUCCESS" : "FAILED"),
755 		      target->per_id);
756 
757 	return retval;
758 }
759 
760 /**
761  * mpi3mr_get_fw_pending_ios - Calculate pending I/O count
762  * @sc: Adapter instance reference
763  *
764  * Calculate the pending I/Os for the controller and return.
765  *
766  * Return: Number of pending I/Os
767  */
768 static inline int mpi3mr_get_fw_pending_ios(struct mpi3mr_softc *sc)
769 {
770 	U16 i, pend_ios = 0;
771 
772 	for (i = 0; i < sc->num_queues; i++)
773 		pend_ios += mpi3mr_atomic_read(&sc->op_reply_q[i].pend_ios);
774 	return pend_ios;
775 }
776 
/**
 * mpi3mr_wait_for_host_io - block for I/Os to complete
 * @sc: Adapter instance reference
 * @timeout: time out in seconds
 *
 * Waits for pending I/Os for the given adapter to complete or
 * to hit the timeout.
 *
 * Return: 0 if all pending I/Os completed (or there were none),
 * -1 on timeout or if the controller leaves the READY state.
 */
787 static int mpi3mr_wait_for_host_io(struct mpi3mr_softc *sc, U32 timeout)
788 {
789 	enum mpi3mr_iocstate iocstate;
790 
791 	iocstate = mpi3mr_get_iocstate(sc);
792 	if (iocstate != MRIOC_STATE_READY) {
793 		mpi3mr_dprint(sc, MPI3MR_XINFO, "%s :Controller is in NON-READY state! Proceed with Reset\n", __func__);
794 		return -1;
795 	}
796 
797 	if (!mpi3mr_get_fw_pending_ios(sc))
798 		return 0;
799 
800 	mpi3mr_dprint(sc, MPI3MR_INFO,
801 		      "%s :Waiting for %d seconds prior to reset for %d pending I/Os to complete\n",
802 		      __func__, timeout, mpi3mr_get_fw_pending_ios(sc));
803 
804 	int i;
805 	for (i = 0; i < timeout; i++) {
806 		if (!mpi3mr_get_fw_pending_ios(sc)) {
807 			mpi3mr_dprint(sc, MPI3MR_INFO, "%s :All pending I/Os got completed while waiting! Reset not required\n", __func__);
808 			return 0;
809 
810 		}
811 		iocstate = mpi3mr_get_iocstate(sc);
812 		if (iocstate != MRIOC_STATE_READY) {
813 			mpi3mr_dprint(sc, MPI3MR_XINFO, "%s :Controller state becomes NON-READY while waiting! dont wait further"
814 				      "Proceed with Reset\n", __func__);
815 			return -1;
816 		}
817 		DELAY(1000 * 1000);
818 	}
819 
820 	mpi3mr_dprint(sc, MPI3MR_INFO, "%s :Pending I/Os after wait exaust is %d! Proceed with Reset\n", __func__,
821 		      mpi3mr_get_fw_pending_ios(sc));
822 
823 	return -1;
824 }
825 
/*
 * mpi3mr_scsiio_timeout - SCSI I/O timeout recovery handler
 * @data: the timed out mpi3mr_cmd (opaque callout argument)
 *
 * Escalating recovery for a timed out SCSI I/O: for RAID volumes a
 * controller reset is requested directly (TMs are unsupported there);
 * for other devices, try task abort, then target reset, then request a
 * controller soft reset via the watchdog context.
 */
static void
mpi3mr_scsiio_timeout(void *data)
{
	int retval = 0;
	struct mpi3mr_softc *sc;
	struct mpi3mr_cmd *cmd;
	struct mpi3mr_target *targ_dev = NULL;

	if (!data)
		return;

	cmd = (struct mpi3mr_cmd *)data;
	sc = cmd->sc;

	if (cmd->ccb == NULL) {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "SCSIIO command timed-out with NULL ccb\n");
		return;
	}

	/*
	 * TMs are not supported for IO timeouts on VD/LD, so directly issue controller reset
	 * with max timeout for outstanding IOs to complete is 180sec.
	 */
	targ_dev = cmd->targ;
	if (targ_dev && (targ_dev->dev_type == MPI3_DEVICE_DEVFORM_VD)) {
		if (mpi3mr_wait_for_host_io(sc, MPI3MR_RAID_ERRREC_RESET_TIMEOUT))
			trigger_reset_from_watchdog(sc, MPI3MR_TRIGGER_SOFT_RESET, MPI3MR_RESET_FROM_SCSIIO_TIMEOUT);
		return;
 	}

	/* Issue task abort to recover the timed out IO */
	retval = mpi3mr_task_abort(cmd);
	/* ETIMEDOUT means a reset was already requested — stop escalating. */
	if (!retval || (retval == ETIMEDOUT))
		return;

	/*
	 * task abort has failed to recover the timed out IO,
	 * try with the target reset
	 */
	retval = mpi3mr_target_reset(cmd);
	if (!retval || (retval == ETIMEDOUT))
		return;

	/*
	 * task abort and target reset has failed. So issue Controller reset(soft reset)
	 * through OCR thread context
	 */
	trigger_reset_from_watchdog(sc, MPI3MR_TRIGGER_SOFT_RESET, MPI3MR_RESET_FROM_SCSIIO_TIMEOUT);

	return;
}
877 
878 void int_to_lun(unsigned int lun, U8 *req_lun)
879 {
880 	int i;
881 
882 	memset(req_lun, 0, sizeof(*req_lun));
883 
884 	for (i = 0; i < sizeof(lun); i += 2) {
885 		req_lun[i] = (lun >> 8) & 0xFF;
886 		req_lun[i+1] = lun & 0xFF;
887 		lun = lun >> 16;
888 	}
889 
890 }
891 
892 static U16 get_req_queue_index(struct mpi3mr_softc *sc)
893 {
894 	U16 i = 0, reply_q_index = 0, reply_q_pend_ios = 0;
895 
896 	reply_q_pend_ios = mpi3mr_atomic_read(&sc->op_reply_q[0].pend_ios);
897 	for (i = 0; i < sc->num_queues; i++) {
898 		if (reply_q_pend_ios > mpi3mr_atomic_read(&sc->op_reply_q[i].pend_ios)) {
899 			reply_q_pend_ios = mpi3mr_atomic_read(&sc->op_reply_q[i].pend_ios);
900 			reply_q_index = i;
901 		}
902 	}
903 
904 	return reply_q_index;
905 }
906 
907 static void
908 mpi3mr_action_scsiio(struct mpi3mr_cam_softc *cam_sc, union ccb *ccb)
909 {
910 	Mpi3SCSIIORequest_t *req = NULL;
911 	struct ccb_scsiio *csio;
912 	struct mpi3mr_softc *sc;
913 	struct mpi3mr_target *targ;
914 	struct mpi3mr_cmd *cm;
915 	uint8_t scsi_opcode, queue_idx;
916 	uint32_t mpi_control;
917 	struct mpi3mr_op_req_queue *opreqq = NULL;
918 	U32 data_len_blks = 0;
919 	U32 tracked_io_sz = 0;
920 	U32 ioc_pend_data_len = 0, tg_pend_data_len = 0;
921 	struct mpi3mr_throttle_group_info *tg = NULL;
922 	static int ratelimit;
923 
924 	sc = cam_sc->sc;
925 	mtx_assert(&sc->mpi3mr_mtx, MA_OWNED);
926 
927 	if (sc->unrecoverable) {
928 		mpi3mr_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
929 		xpt_done(ccb);
930 		return;
931 	}
932 
933 	csio = &ccb->csio;
934 	KASSERT(csio->ccb_h.target_id < cam_sc->maxtargets,
935 	    ("Target %d out of bounds in XPT_SCSI_IO\n",
936 	     csio->ccb_h.target_id));
937 
938 	scsi_opcode = scsiio_cdb_ptr(csio)[0];
939 
940 	if ((sc->mpi3mr_flags & MPI3MR_FLAGS_SHUTDOWN) &&
941 	    !((scsi_opcode == SYNCHRONIZE_CACHE) ||
942 	      (scsi_opcode == START_STOP_UNIT))) {
943 		mpi3mr_set_ccbstatus(ccb, CAM_REQ_CMP);
944 		xpt_done(ccb);
945 		return;
946 	}
947 
948 	targ = mpi3mr_find_target_by_per_id(cam_sc, csio->ccb_h.target_id);
949 	if (targ == NULL)  {
950 		mpi3mr_dprint(sc, MPI3MR_XINFO, "Device with target ID: 0x%x does not exist\n",
951 			      csio->ccb_h.target_id);
952 		mpi3mr_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
953 		xpt_done(ccb);
954 		return;
955 	}
956 
957 	if (targ && targ->is_hidden)  {
958 		mpi3mr_dprint(sc, MPI3MR_XINFO, "Device with target ID: 0x%x is hidden\n",
959 			      csio->ccb_h.target_id);
960 		mpi3mr_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
961 		xpt_done(ccb);
962 		return;
963 	}
964 
965 	if (targ->dev_removed == 1)  {
966 		mpi3mr_dprint(sc, MPI3MR_XINFO, "Device with target ID: 0x%x is removed\n", csio->ccb_h.target_id);
967 		mpi3mr_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
968 		xpt_done(ccb);
969 		return;
970 	}
971 
972 	if (targ->dev_handle == 0x0) {
973 		mpi3mr_dprint(sc, MPI3MR_ERROR, "%s NULL handle for target 0x%x\n",
974 		    __func__, csio->ccb_h.target_id);
975 		mpi3mr_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
976 		xpt_done(ccb);
977 		return;
978 	}
979 
980 	if (mpi3mr_atomic_read(&targ->block_io) ||
981 		(sc->reset_in_progress == 1) || (sc->prepare_for_reset == 1)) {
982 		mpi3mr_dprint(sc, MPI3MR_TRACE, "%s target is busy target_id: 0x%x\n",
983 		    __func__, csio->ccb_h.target_id);
984 		mpi3mr_set_ccbstatus(ccb, CAM_REQUEUE_REQ);
985 		xpt_done(ccb);
986 		return;
987 	}
988 
989 	/*
990 	 * Sometimes, it is possible to get a command that is not "In
991 	 * Progress" and was actually aborted by the upper layer.  Check for
992 	 * this here and complete the command without error.
993 	 */
994 	if (mpi3mr_get_ccbstatus(ccb) != CAM_REQ_INPROG) {
995 		mpi3mr_dprint(sc, MPI3MR_TRACE, "%s Command is not in progress for "
996 		    "target %u\n", __func__, csio->ccb_h.target_id);
997 		xpt_done(ccb);
998 		return;
999 	}
1000 	/*
1001 	 * If devinfo is 0 this will be a volume.  In that case don't tell CAM
1002 	 * that the volume has timed out.  We want volumes to be enumerated
1003 	 * until they are deleted/removed, not just failed.
1004 	 */
1005 	if (targ->flags & MPI3MRSAS_TARGET_INREMOVAL) {
1006 		if (targ->devinfo == 0)
1007 			mpi3mr_set_ccbstatus(ccb, CAM_REQ_CMP);
1008 		else
1009 			mpi3mr_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
1010 		xpt_done(ccb);
1011 		return;
1012 	}
1013 
1014 	if ((scsi_opcode == UNMAP) &&
1015 		(pci_get_device(sc->mpi3mr_dev) == MPI3_MFGPAGE_DEVID_SAS4116) &&
1016 		(targ->dev_type == MPI3_DEVICE_DEVFORM_PCIE) &&
1017 		(mpi3mr_allow_unmap_to_fw(sc, ccb) == false))
1018 		return;
1019 
1020 	cm = mpi3mr_get_command(sc);
1021 	if (cm == NULL || (sc->mpi3mr_flags & MPI3MR_FLAGS_DIAGRESET)) {
1022 		if (cm != NULL) {
1023 			mpi3mr_release_command(cm);
1024 		}
1025 		if ((cam_sc->flags & MPI3MRSAS_QUEUE_FROZEN) == 0) {
1026 			xpt_freeze_simq(cam_sc->sim, 1);
1027 			cam_sc->flags |= MPI3MRSAS_QUEUE_FROZEN;
1028 		}
1029 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1030 		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
1031 		xpt_done(ccb);
1032 		return;
1033 	}
1034 
1035 	switch (csio->ccb_h.flags & CAM_DIR_MASK) {
1036 	case CAM_DIR_IN:
1037 		mpi_control = MPI3_SCSIIO_FLAGS_DATADIRECTION_READ;
1038 		cm->data_dir = MPI3MR_READ;
1039 		break;
1040 	case CAM_DIR_OUT:
1041 		mpi_control = MPI3_SCSIIO_FLAGS_DATADIRECTION_WRITE;
1042 		cm->data_dir = MPI3MR_WRITE;
1043 		break;
1044 	case CAM_DIR_NONE:
1045 	default:
1046 		mpi_control = MPI3_SCSIIO_FLAGS_DATADIRECTION_NO_DATA_TRANSFER;
1047 		break;
1048 	}
1049 
1050 	if (csio->cdb_len > 16)
1051 		mpi_control |= MPI3_SCSIIO_FLAGS_CDB_GREATER_THAN_16;
1052 
1053 	req = (Mpi3SCSIIORequest_t *)&cm->io_request;
1054 	bzero(req, sizeof(*req));
1055 	req->Function = MPI3_FUNCTION_SCSI_IO;
1056 	req->HostTag = cm->hosttag;
1057 	req->DataLength = htole32(csio->dxfer_len);
1058 	req->DevHandle = htole16(targ->dev_handle);
1059 
1060 	/*
1061 	 * It looks like the hardware doesn't require an explicit tag
1062 	 * number for each transaction.  SAM Task Management not supported
1063 	 * at the moment.
1064 	 */
1065 	switch (csio->tag_action) {
1066 	case MSG_HEAD_OF_Q_TAG:
1067 		mpi_control |= MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_HEADOFQ;
1068 		break;
1069 	case MSG_ORDERED_Q_TAG:
1070 		mpi_control |= MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_ORDEREDQ;
1071 		break;
1072 	case MSG_ACA_TASK:
1073 		mpi_control |= MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_ACAQ;
1074 		break;
1075 	case CAM_TAG_ACTION_NONE:
1076 	case MSG_SIMPLE_Q_TAG:
1077 	default:
1078 		mpi_control |= MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_SIMPLEQ;
1079 		break;
1080 	}
1081 
1082 	req->Flags = htole32(mpi_control);
1083 
1084 	if (csio->ccb_h.flags & CAM_CDB_POINTER)
1085 		bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
1086 	else {
1087 		KASSERT(csio->cdb_len <= IOCDBLEN,
1088 		    ("cdb_len %d is greater than IOCDBLEN but CAM_CDB_POINTER "
1089 		    "is not set", csio->cdb_len));
1090 		bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
1091 	}
1092 
1093 	cm->length = csio->dxfer_len;
1094 	cm->targ = targ;
1095 	int_to_lun(csio->ccb_h.target_lun, req->LUN);
1096 	cm->ccb = ccb;
1097 	csio->ccb_h.qos.sim_data = sbinuptime();
1098 	queue_idx = get_req_queue_index(sc);
1099 	cm->req_qidx = queue_idx;
1100 
1101 	mpi3mr_dprint(sc, MPI3MR_TRACE, "[QID:%d]: func: %s line:%d CDB: 0x%x targetid: %x SMID: 0x%x\n",
1102 		(queue_idx + 1), __func__, __LINE__, scsi_opcode, csio->ccb_h.target_id, cm->hosttag);
1103 
1104 	ccb->ccb_h.status |= CAM_SIM_QUEUED;
1105 
1106 	switch ((ccb->ccb_h.flags & CAM_DATA_MASK)) {
1107 	case CAM_DATA_PADDR:
1108 	case CAM_DATA_SG_PADDR:
1109 		device_printf(sc->mpi3mr_dev, "%s: physical addresses not supported\n",
1110 		    __func__);
1111 		mpi3mr_release_command(cm);
1112 		ccb->ccb_h.status = CAM_REQ_INVALID;
1113 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1114 		xpt_done(ccb);
1115 		return;
1116 	case CAM_DATA_SG:
1117 		device_printf(sc->mpi3mr_dev, "%s: scatter gather is not supported\n",
1118 		    __func__);
1119 		mpi3mr_release_command(cm);
1120 		ccb->ccb_h.status = CAM_REQ_INVALID;
1121 		xpt_done(ccb);
1122 		return;
1123 	case CAM_DATA_VADDR:
1124 	case CAM_DATA_BIO:
1125 		if (csio->dxfer_len > (MPI3MR_SG_DEPTH * MPI3MR_4K_PGSZ)) {
1126 			mpi3mr_release_command(cm);
1127 			ccb->ccb_h.status = CAM_REQ_TOO_BIG;
1128 			xpt_done(ccb);
1129 			return;
1130 		}
1131 		cm->length = csio->dxfer_len;
1132 		if (cm->length)
1133 			cm->data = csio->data_ptr;
1134 		break;
1135 	default:
1136 		ccb->ccb_h.status = CAM_REQ_INVALID;
1137 		xpt_done(ccb);
1138 		return;
1139 	}
1140 
1141 	/* Prepare SGEs */
1142 	if (mpi3mr_map_request(sc, cm)) {
1143 		mpi3mr_release_command(cm);
1144 		xpt_done(ccb);
1145 		printf("func: %s line: %d Build SGLs failed\n", __func__, __LINE__);
1146 		return;
1147 	}
1148 
1149 	opreqq = &sc->op_req_q[queue_idx];
1150 
1151 	if (sc->iot_enable) {
1152 		data_len_blks = csio->dxfer_len >> 9;
1153 
1154 		if ((data_len_blks >= sc->io_throttle_data_length) &&
1155 		    targ->io_throttle_enabled) {
1156 			tracked_io_sz = data_len_blks;
1157 			tg = targ->throttle_group;
1158 			if (tg) {
1159 				mpi3mr_atomic_add(&sc->pend_large_data_sz, data_len_blks);
1160 				mpi3mr_atomic_add(&tg->pend_large_data_sz, data_len_blks);
1161 
1162 				ioc_pend_data_len = mpi3mr_atomic_read(&sc->pend_large_data_sz);
1163 				tg_pend_data_len = mpi3mr_atomic_read(&tg->pend_large_data_sz);
1164 
1165 				if (ratelimit % 1000) {
1166 					mpi3mr_dprint(sc, MPI3MR_IOT,
1167 						"large vd_io persist_id(%d), handle(0x%04x), data_len(%d),"
1168 						"ioc_pending(%d), tg_pending(%d), ioc_high(%d), tg_high(%d)\n",
1169 						targ->per_id, targ->dev_handle,
1170 						data_len_blks, ioc_pend_data_len,
1171 						tg_pend_data_len, sc->io_throttle_high,
1172 						tg->high);
1173 					ratelimit++;
1174 				}
1175 
1176 				if (!tg->io_divert  && ((ioc_pend_data_len >=
1177 				    sc->io_throttle_high) ||
1178 				    (tg_pend_data_len >= tg->high))) {
1179 					tg->io_divert = 1;
1180 					mpi3mr_dprint(sc, MPI3MR_IOT,
1181 						"VD: Setting divert flag for tg_id(%d), persist_id(%d)\n",
1182 						tg->id, targ->per_id);
1183 					if (sc->mpi3mr_debug | MPI3MR_IOT)
1184 						mpi3mr_print_cdb(ccb);
1185 					mpi3mr_set_io_divert_for_all_vd_in_tg(sc,
1186 					    tg, 1);
1187 				}
1188 			} else {
1189 				mpi3mr_atomic_add(&sc->pend_large_data_sz, data_len_blks);
1190 				ioc_pend_data_len = mpi3mr_atomic_read(&sc->pend_large_data_sz);
1191 				if (ratelimit % 1000) {
1192 					mpi3mr_dprint(sc, MPI3MR_IOT,
1193 					    "large pd_io persist_id(%d), handle(0x%04x), data_len(%d), ioc_pending(%d), ioc_high(%d)\n",
1194 					    targ->per_id, targ->dev_handle,
1195 					    data_len_blks, ioc_pend_data_len,
1196 					    sc->io_throttle_high);
1197 					ratelimit++;
1198 				}
1199 
1200 				if (ioc_pend_data_len >= sc->io_throttle_high) {
1201 					targ->io_divert = 1;
1202 					mpi3mr_dprint(sc, MPI3MR_IOT,
1203 						"PD: Setting divert flag for persist_id(%d)\n",
1204 						targ->per_id);
1205 					if (sc->mpi3mr_debug | MPI3MR_IOT)
1206 						mpi3mr_print_cdb(ccb);
1207 				}
1208 			}
1209 		}
1210 
1211 		if (targ->io_divert) {
1212 			req->MsgFlags |= MPI3_SCSIIO_MSGFLAGS_DIVERT_TO_FIRMWARE;
1213 			mpi_control |= MPI3_SCSIIO_FLAGS_DIVERT_REASON_IO_THROTTLING;
1214 		}
1215 	}
1216 	req->Flags = htole32(mpi_control);
1217 
1218 	if (mpi3mr_submit_io(sc, opreqq,
1219 	    	(U8 *)&cm->io_request)) {
1220 		mpi3mr_release_command(cm);
1221 		if (tracked_io_sz) {
1222 			mpi3mr_atomic_sub(&sc->pend_large_data_sz, tracked_io_sz);
1223 			if (tg)
1224 				mpi3mr_atomic_sub(&tg->pend_large_data_sz, tracked_io_sz);
1225 		}
1226 		mpi3mr_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
1227 		xpt_done(ccb);
1228 	} else {
1229 		callout_reset_sbt(&cm->callout, SBT_1S * 90 , 0,
1230 				  mpi3mr_scsiio_timeout, cm, 0);
1231 		mpi3mr_atomic_inc(&sc->fw_outstanding);
1232 		mpi3mr_atomic_inc(&targ->outstanding);
1233 		if (mpi3mr_atomic_read(&sc->fw_outstanding) > sc->io_cmds_highwater)
1234 			sc->io_cmds_highwater++;
1235 	}
1236 
1237 	cm->callout_owner = true;
1238 	return;
1239 }
1240 
1241 static void
1242 mpi3mr_cam_poll(struct cam_sim *sim)
1243 {
1244 	struct mpi3mr_cam_softc *cam_sc;
1245 	struct mpi3mr_irq_context *irq_ctx;
1246 	struct mpi3mr_softc *sc;
1247 	int i;
1248 
1249 	cam_sc = cam_sim_softc(sim);
1250 	sc = cam_sc->sc;
1251 
1252 	mpi3mr_dprint(cam_sc->sc, MPI3MR_TRACE, "func: %s line: %d is called\n",
1253 		__func__, __LINE__);
1254 
1255 	for (i = 0; i < sc->num_queues; i++) {
1256 		irq_ctx = sc->irq_ctx + i;
1257 		if (irq_ctx->op_reply_q->qid) {
1258 			mpi3mr_complete_io_cmd(sc, irq_ctx);
1259 		}
1260 	}
1261 }
1262 
/*
 * mpi3mr_cam_action - CAM SIM action entry point.
 * @sim: SIM instance for this adapter
 * @ccb: CCB describing the requested action
 *
 * Dispatches CCBs from the CAM layer: answers XPT_PATH_INQ and
 * XPT_GET_TRAN_SETTINGS from cached target state, fakes success for
 * bus reset/abort requests, and forwards XPT_SCSI_IO to
 * mpi3mr_action_scsiio.  All paths except XPT_RESET_DEV and
 * XPT_SCSI_IO complete the CCB via the xpt_done() at the bottom.
 * Called with the adapter mutex held (asserted below).
 */
static void
mpi3mr_cam_action(struct cam_sim *sim, union ccb *ccb)
{
	struct mpi3mr_cam_softc *cam_sc;
	struct mpi3mr_target *targ;

	cam_sc = cam_sim_softc(sim);

	mpi3mr_dprint(cam_sc->sc, MPI3MR_TRACE, "ccb func_code 0x%x target id: 0x%x\n",
	    ccb->ccb_h.func_code, ccb->ccb_h.target_id);

	mtx_assert(&cam_sc->sc->mpi3mr_mtx, MA_OWNED);

	switch (ccb->ccb_h.func_code) {
	case XPT_PATH_INQ:
	{
		struct ccb_pathinq *cpi = &ccb->cpi;

		/* Describe the HBA's capabilities to CAM. */
		cpi->version_num = 1;
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
		cpi->target_sprt = 0;
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = cam_sc->maxtargets - 1;
		cpi->max_lun = 0;

		/*
		 * initiator_id is set here to an ID outside the set of valid
		 * target IDs (including volumes).
		 */
		cpi->initiator_id = cam_sc->maxtargets;
		strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strlcpy(cpi->hba_vid, "Broadcom", HBA_IDLEN);
		strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->bus_id = cam_sim_bus(sim);
		/*
		 * XXXSLM-I think this needs to change based on config page or
		 * something instead of hardcoded to 150000.
		 */
		cpi->base_transfer_speed = 150000;
		cpi->transport = XPORT_SAS;
		cpi->transport_version = 0;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_SPC;

		targ = mpi3mr_find_target_by_per_id(cam_sc, ccb->ccb_h.target_id);

		/*
		 * NVMe devices behind the controller get their maximum I/O
		 * size from the device-reported MDTS; every other target is
		 * sized from the driver's SGL depth.
		 */
		if (targ && (targ->dev_type == MPI3_DEVICE_DEVFORM_PCIE) &&
		    ((targ->dev_spec.pcie_inf.dev_info &
		    MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) ==
		    MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE)) {
			cpi->maxio = targ->dev_spec.pcie_inf.mdts;
			mpi3mr_dprint(cam_sc->sc, MPI3MR_XINFO,
				"PCI device target_id: %u max io size: %u\n",
				ccb->ccb_h.target_id, cpi->maxio);
		} else {
			cpi->maxio = PAGE_SIZE * (MPI3MR_SG_DEPTH - 1);
		}
		mpi3mr_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	{
		struct ccb_trans_settings	*cts;
		struct ccb_trans_settings_sas	*sas;
		struct ccb_trans_settings_scsi	*scsi;

		cts = &ccb->cts;
		sas = &cts->xport_specific.sas;
		scsi = &cts->proto_specific.scsi;

		KASSERT(cts->ccb_h.target_id < cam_sc->maxtargets,
		    ("Target %d out of bounds in XPT_GET_TRAN_SETTINGS\n",
		    cts->ccb_h.target_id));
		targ = mpi3mr_find_target_by_per_id(cam_sc, cts->ccb_h.target_id);

		if (targ == NULL) {
			mpi3mr_dprint(cam_sc->sc, MPI3MR_TRACE, "Device with target ID: 0x%x does not exist\n",
			cts->ccb_h.target_id);
			mpi3mr_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			break;
		}

		/* A zero handle or a removed device cannot report settings. */
		if ((targ->dev_handle == 0x0) || (targ->dev_removed == 1))  {
			mpi3mr_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			break;
		}

		cts->protocol_version = SCSI_REV_SPC2;
		cts->transport = XPORT_SAS;
		cts->transport_version = 0;

		sas->valid = CTS_SAS_VALID_SPEED;

		/*
		 * Translate the cached MPI3 negotiated link-rate code into
		 * a CAM bitrate; unknown codes invalidate the speed field.
		 */
		switch (targ->link_rate) {
		case 0x08:
			sas->bitrate = 150000;
			break;
		case 0x09:
			sas->bitrate = 300000;
			break;
		case 0x0a:
			sas->bitrate = 600000;
			break;
		case 0x0b:
			sas->bitrate = 1200000;
			break;
		default:
			sas->valid = 0;
		}

		cts->protocol = PROTO_SCSI;
		scsi->valid = CTS_SCSI_VALID_TQ;
		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;

		mpi3mr_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_CALC_GEOMETRY:
		cam_calc_geometry(&ccb->ccg, /*extended*/1);
		mpi3mr_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_RESET_DEV:
		mpi3mr_dprint(cam_sc->sc, MPI3MR_INFO, "mpi3mr_action "
		    "XPT_RESET_DEV\n");
		/*
		 * NOTE(review): this returns without completing the CCB
		 * via xpt_done() — confirm leaving the CCB uncompleted
		 * here is intentional.
		 */
		return;
	case XPT_RESET_BUS:
	case XPT_ABORT:
	case XPT_TERM_IO:
		mpi3mr_dprint(cam_sc->sc, MPI3MR_INFO, "mpi3mr_action faking success "
		    "for abort or reset\n");
		mpi3mr_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_SCSI_IO:
		/* mpi3mr_action_scsiio completes or queues the CCB itself. */
		mpi3mr_action_scsiio(cam_sc, ccb);
		return;
	default:
		mpi3mr_set_ccbstatus(ccb, CAM_FUNC_NOTAVAIL);
		break;
	}
	xpt_done(ccb);
}
1406 
1407 void
1408 mpi3mr_startup_increment(struct mpi3mr_cam_softc *cam_sc)
1409 {
1410 	if ((cam_sc->flags & MPI3MRSAS_IN_STARTUP) != 0) {
1411 		if (cam_sc->startup_refcount++ == 0) {
1412 			/* just starting, freeze the simq */
1413 			mpi3mr_dprint(cam_sc->sc, MPI3MR_XINFO,
1414 			    "%s freezing simq\n", __func__);
1415 			xpt_hold_boot();
1416 		}
1417 		mpi3mr_dprint(cam_sc->sc, MPI3MR_XINFO, "%s refcount %u\n", __func__,
1418 		    cam_sc->startup_refcount);
1419 	}
1420 }
1421 
1422 void
1423 mpi3mr_release_simq_reinit(struct mpi3mr_cam_softc *cam_sc)
1424 {
1425 	if (cam_sc->flags & MPI3MRSAS_QUEUE_FROZEN) {
1426 		cam_sc->flags &= ~MPI3MRSAS_QUEUE_FROZEN;
1427 		xpt_release_simq(cam_sc->sim, 1);
1428 		mpi3mr_dprint(cam_sc->sc, MPI3MR_INFO, "Unfreezing SIM queue\n");
1429 	}
1430 }
1431 
1432 void
1433 mpi3mr_rescan_target(struct mpi3mr_softc *sc, struct mpi3mr_target *targ)
1434 {
1435 	struct mpi3mr_cam_softc *cam_sc = sc->cam_sc;
1436 	path_id_t pathid;
1437 	target_id_t targetid;
1438 	union ccb *ccb;
1439 
1440 	pathid = cam_sim_path(cam_sc->sim);
1441 	if (targ == NULL)
1442 		targetid = CAM_TARGET_WILDCARD;
1443 	else
1444 		targetid = targ->per_id;
1445 
1446 	/*
1447 	 * Allocate a CCB and schedule a rescan.
1448 	 */
1449 	ccb = xpt_alloc_ccb_nowait();
1450 	if (ccb == NULL) {
1451 		mpi3mr_dprint(sc, MPI3MR_ERROR, "unable to alloc CCB for rescan\n");
1452 		return;
1453 	}
1454 
1455 	if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid, targetid,
1456 	    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
1457 		mpi3mr_dprint(sc, MPI3MR_ERROR, "unable to create path for rescan\n");
1458 		xpt_free_ccb(ccb);
1459 		return;
1460 	}
1461 
1462 	if (targetid == CAM_TARGET_WILDCARD)
1463 		ccb->ccb_h.func_code = XPT_SCAN_BUS;
1464 	else
1465 		ccb->ccb_h.func_code = XPT_SCAN_TGT;
1466 
1467 	mpi3mr_dprint(sc, MPI3MR_EVENT, "%s target id 0x%x\n", __func__, targetid);
1468 	xpt_rescan(ccb);
1469 }
1470 
1471 void
1472 mpi3mr_startup_decrement(struct mpi3mr_cam_softc *cam_sc)
1473 {
1474 	if ((cam_sc->flags & MPI3MRSAS_IN_STARTUP) != 0) {
1475 		if (--cam_sc->startup_refcount == 0) {
1476 			/* finished all discovery-related actions, release
1477 			 * the simq and rescan for the latest topology.
1478 			 */
1479 			mpi3mr_dprint(cam_sc->sc, MPI3MR_XINFO,
1480 			    "%s releasing simq\n", __func__);
1481 			cam_sc->flags &= ~MPI3MRSAS_IN_STARTUP;
1482 			xpt_release_simq(cam_sc->sim, 1);
1483 			xpt_release_boot();
1484 		}
1485 		mpi3mr_dprint(cam_sc->sc, MPI3MR_XINFO, "%s refcount %u\n", __func__,
1486 		    cam_sc->startup_refcount);
1487 	}
1488 }
1489 
1490 static void
1491 mpi3mr_fw_event_free(struct mpi3mr_softc *sc, struct mpi3mr_fw_event_work *fw_event)
1492 {
1493 	if (!fw_event)
1494 		return;
1495 
1496 	if (fw_event->event_data != NULL) {
1497 		free(fw_event->event_data, M_MPI3MR);
1498 		fw_event->event_data = NULL;
1499 	}
1500 
1501 	free(fw_event, M_MPI3MR);
1502 	fw_event = NULL;
1503 }
1504 
/*
 * mpi3mr_freeup_events - Drain and free all queued firmware events.
 * @sc: Adapter instance reference
 *
 * Empties the CAM softc's event queue, freeing each pending event
 * without processing it.
 *
 * NOTE(review): this walks sc->cam_sc->ev_queue under mpi3mr_mtx,
 * while mpi3mr_firmware_event_work manipulates the same list under
 * fwevt_lock — confirm which lock is intended to protect ev_queue.
 */
static void
mpi3mr_freeup_events(struct mpi3mr_softc *sc)
{
	struct mpi3mr_fw_event_work *fw_event = NULL;
	mtx_lock(&sc->mpi3mr_mtx);
	while ((fw_event = TAILQ_FIRST(&sc->cam_sc->ev_queue)) != NULL) {
		TAILQ_REMOVE(&sc->cam_sc->ev_queue, fw_event, ev_link);
		mpi3mr_fw_event_free(sc, fw_event);
	}
	mtx_unlock(&sc->mpi3mr_mtx);
}
1516 
1517 static void
1518 mpi3mr_sastopochg_evt_debug(struct mpi3mr_softc *sc,
1519 	Mpi3EventDataSasTopologyChangeList_t *event_data)
1520 {
1521 	int i;
1522 	U16 handle;
1523 	U8 reason_code, phy_number;
1524 	char *status_str = NULL;
1525 	U8 link_rate, prev_link_rate;
1526 
1527 	switch (event_data->ExpStatus) {
1528 	case MPI3_EVENT_SAS_TOPO_ES_NOT_RESPONDING:
1529 		status_str = "remove";
1530 		break;
1531 	case MPI3_EVENT_SAS_TOPO_ES_RESPONDING:
1532 		status_str =  "responding";
1533 		break;
1534 	case MPI3_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING:
1535 		status_str = "remove delay";
1536 		break;
1537 	case MPI3_EVENT_SAS_TOPO_ES_NO_EXPANDER:
1538 		status_str = "direct attached";
1539 		break;
1540 	default:
1541 		status_str = "unknown status";
1542 		break;
1543 	}
1544 
1545 	mpi3mr_dprint(sc, MPI3MR_INFO, "%s :sas topology change: (%s)\n",
1546 	    __func__, status_str);
1547 	mpi3mr_dprint(sc, MPI3MR_INFO,
1548 		"%s :\texpander_handle(0x%04x), enclosure_handle(0x%04x) "
1549 	    "start_phy(%02d), num_entries(%d)\n", __func__,
1550 	    (event_data->ExpanderDevHandle),
1551 	    (event_data->EnclosureHandle),
1552 	    event_data->StartPhyNum, event_data->NumEntries);
1553 	for (i = 0; i < event_data->NumEntries; i++) {
1554 		handle = (event_data->PhyEntry[i].AttachedDevHandle);
1555 		if (!handle)
1556 			continue;
1557 		phy_number = event_data->StartPhyNum + i;
1558 		reason_code = event_data->PhyEntry[i].Status &
1559 		    MPI3_EVENT_SAS_TOPO_PHY_RC_MASK;
1560 		switch (reason_code) {
1561 		case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING:
1562 			status_str = "target remove";
1563 			break;
1564 		case MPI3_EVENT_SAS_TOPO_PHY_RC_DELAY_NOT_RESPONDING:
1565 			status_str = "delay target remove";
1566 			break;
1567 		case MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED:
1568 			status_str = "link rate change";
1569 			break;
1570 		case MPI3_EVENT_SAS_TOPO_PHY_RC_NO_CHANGE:
1571 			status_str = "target responding";
1572 			break;
1573 		default:
1574 			status_str = "unknown";
1575 			break;
1576 		}
1577 		link_rate = event_data->PhyEntry[i].LinkRate >> 4;
1578 		prev_link_rate = event_data->PhyEntry[i].LinkRate & 0xF;
1579 		mpi3mr_dprint(sc, MPI3MR_INFO, "%s :\tphy(%02d), attached_handle(0x%04x): %s:"
1580 		    " link rate: new(0x%02x), old(0x%02x)\n", __func__,
1581 		    phy_number, handle, status_str, link_rate, prev_link_rate);
1582 	}
1583 }
1584 
1585 static void
1586 mpi3mr_process_sastopochg_evt(struct mpi3mr_softc *sc, struct mpi3mr_fw_event_work *fwevt)
1587 {
1588 
1589 	Mpi3EventDataSasTopologyChangeList_t *event_data =
1590 		    (Mpi3EventDataSasTopologyChangeList_t *)fwevt->event_data;
1591 	int i;
1592 	U16 handle;
1593 	U8 reason_code, link_rate;
1594 	struct mpi3mr_target *target = NULL;
1595 
1596 
1597 	mpi3mr_sastopochg_evt_debug(sc, event_data);
1598 
1599 	for (i = 0; i < event_data->NumEntries; i++) {
1600 		handle = le16toh(event_data->PhyEntry[i].AttachedDevHandle);
1601 		link_rate = event_data->PhyEntry[i].LinkRate >> 4;
1602 
1603 		if (!handle)
1604 			continue;
1605 		target = mpi3mr_find_target_by_dev_handle(sc->cam_sc, handle);
1606 
1607 		if (!target)
1608 			continue;
1609 
1610 		target->link_rate = link_rate;
1611 		reason_code = event_data->PhyEntry[i].Status &
1612 			MPI3_EVENT_SAS_TOPO_PHY_RC_MASK;
1613 
1614 		switch (reason_code) {
1615 		case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING:
1616 			if (target->exposed_to_os)
1617 				mpi3mr_remove_device_from_os(sc, target->dev_handle);
1618 			mpi3mr_remove_device_from_list(sc, target, false);
1619 			break;
1620 		case MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED:
1621 			break;
1622 		default:
1623 			break;
1624 		}
1625 	}
1626 
1627 	/*
1628 	 * refcount was incremented for this event in
1629 	 * mpi3mr_evt_handler. Decrement it here because the event has
1630 	 * been processed.
1631 	 */
1632 	mpi3mr_startup_decrement(sc->cam_sc);
1633 	return;
1634 }
1635 
1636 static inline void
1637 mpi3mr_logdata_evt_bh(struct mpi3mr_softc *sc,
1638 		      struct mpi3mr_fw_event_work *fwevt)
1639 {
1640 	mpi3mr_app_save_logdata(sc, fwevt->event_data,
1641 				fwevt->event_data_size);
1642 }
1643 
1644 static void
1645 mpi3mr_pcietopochg_evt_debug(struct mpi3mr_softc *sc,
1646 	Mpi3EventDataPcieTopologyChangeList_t *event_data)
1647 {
1648 	int i;
1649 	U16 handle;
1650 	U16 reason_code;
1651 	U8 port_number;
1652 	char *status_str = NULL;
1653 	U8 link_rate, prev_link_rate;
1654 
1655 	switch (event_data->SwitchStatus) {
1656 	case MPI3_EVENT_PCIE_TOPO_SS_NOT_RESPONDING:
1657 		status_str = "remove";
1658 		break;
1659 	case MPI3_EVENT_PCIE_TOPO_SS_RESPONDING:
1660 		status_str =  "responding";
1661 		break;
1662 	case MPI3_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING:
1663 		status_str = "remove delay";
1664 		break;
1665 	case MPI3_EVENT_PCIE_TOPO_SS_NO_PCIE_SWITCH:
1666 		status_str = "direct attached";
1667 		break;
1668 	default:
1669 		status_str = "unknown status";
1670 		break;
1671 	}
1672 	mpi3mr_dprint(sc, MPI3MR_INFO, "%s :pcie topology change: (%s)\n",
1673 		__func__, status_str);
1674 	mpi3mr_dprint(sc, MPI3MR_INFO,
1675 		"%s :\tswitch_handle(0x%04x), enclosure_handle(0x%04x)"
1676 		"start_port(%02d), num_entries(%d)\n", __func__,
1677 		le16toh(event_data->SwitchDevHandle),
1678 		le16toh(event_data->EnclosureHandle),
1679 		event_data->StartPortNum, event_data->NumEntries);
1680 	for (i = 0; i < event_data->NumEntries; i++) {
1681 		handle =
1682 			le16toh(event_data->PortEntry[i].AttachedDevHandle);
1683 		if (!handle)
1684 			continue;
1685 		port_number = event_data->StartPortNum + i;
1686 		reason_code = event_data->PortEntry[i].PortStatus;
1687 		switch (reason_code) {
1688 		case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
1689 			status_str = "target remove";
1690 			break;
1691 		case MPI3_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING:
1692 			status_str = "delay target remove";
1693 			break;
1694 		case MPI3_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
1695 			status_str = "link rate change";
1696 			break;
1697 		case MPI3_EVENT_PCIE_TOPO_PS_NO_CHANGE:
1698 			status_str = "target responding";
1699 			break;
1700 		default:
1701 			status_str = "unknown";
1702 			break;
1703 		}
1704 		link_rate = event_data->PortEntry[i].CurrentPortInfo &
1705 			MPI3_EVENT_PCIE_TOPO_PI_RATE_MASK;
1706 		prev_link_rate = event_data->PortEntry[i].PreviousPortInfo &
1707 			MPI3_EVENT_PCIE_TOPO_PI_RATE_MASK;
1708 		mpi3mr_dprint(sc, MPI3MR_INFO, "%s :\tport(%02d), attached_handle(0x%04x): %s:"
1709 		    " link rate: new(0x%02x), old(0x%02x)\n", __func__,
1710 		    port_number, handle, status_str, link_rate, prev_link_rate);
1711 	}
1712 }
1713 
1714 static void mpi3mr_process_pcietopochg_evt(struct mpi3mr_softc *sc,
1715     struct mpi3mr_fw_event_work *fwevt)
1716 {
1717 	Mpi3EventDataPcieTopologyChangeList_t *event_data =
1718 		    (Mpi3EventDataPcieTopologyChangeList_t *)fwevt->event_data;
1719 	int i;
1720 	U16 handle;
1721 	U8 reason_code, link_rate;
1722 	struct mpi3mr_target *target = NULL;
1723 
1724 
1725 	mpi3mr_pcietopochg_evt_debug(sc, event_data);
1726 
1727 	for (i = 0; i < event_data->NumEntries; i++) {
1728 		handle =
1729 			le16toh(event_data->PortEntry[i].AttachedDevHandle);
1730 		if (!handle)
1731 			continue;
1732 		target = mpi3mr_find_target_by_dev_handle(sc->cam_sc, handle);
1733 		if (!target)
1734 			continue;
1735 
1736 		link_rate = event_data->PortEntry[i].CurrentPortInfo &
1737 			MPI3_EVENT_PCIE_TOPO_PI_RATE_MASK;
1738 		target->link_rate = link_rate;
1739 
1740 		reason_code = event_data->PortEntry[i].PortStatus;
1741 
1742 		switch (reason_code) {
1743 		case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
1744 			if (target->exposed_to_os)
1745 				mpi3mr_remove_device_from_os(sc, target->dev_handle);
1746 			mpi3mr_remove_device_from_list(sc, target, false);
1747 			break;
1748 		case MPI3_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
1749 			break;
1750 		default:
1751 			break;
1752 		}
1753 	}
1754 
1755 	/*
1756 	 * refcount was incremented for this event in
1757 	 * mpi3mr_evt_handler. Decrement it here because the event has
1758 	 * been processed.
1759 	 */
1760 	mpi3mr_startup_decrement(sc->cam_sc);
1761 	return;
1762 }
1763 
1764 void mpi3mr_add_device(struct mpi3mr_softc *sc, U16 per_id)
1765 {
1766 	struct mpi3mr_target *target;
1767 
1768 	mpi3mr_dprint(sc, MPI3MR_EVENT,
1769 		"Adding device(persistent id: 0x%x)\n", per_id);
1770 
1771 	mpi3mr_startup_increment(sc->cam_sc);
1772 	target = mpi3mr_find_target_by_per_id(sc->cam_sc, per_id);
1773 
1774 	if (!target) {
1775 		mpi3mr_dprint(sc, MPI3MR_INFO, "Not available in driver's"
1776 		    "internal target list, persistent_id: %d\n",
1777 		    per_id);
1778 		goto out;
1779 	}
1780 
1781 	if (target->is_hidden) {
1782 		mpi3mr_dprint(sc, MPI3MR_EVENT, "Target is hidden, persistent_id: %d\n",
1783 			per_id);
1784 		goto out;
1785 	}
1786 
1787 	if (!target->exposed_to_os && !sc->reset_in_progress) {
1788 		mpi3mr_rescan_target(sc, target);
1789 		mpi3mr_dprint(sc, MPI3MR_INFO,
1790 			"Added device persistent_id: %d dev_handle: %d\n", per_id, target->dev_handle);
1791 		target->exposed_to_os = 1;
1792 	}
1793 
1794 out:
1795 	mpi3mr_startup_decrement(sc->cam_sc);
1796 }
1797 
1798 int mpi3mr_remove_device_from_os(struct mpi3mr_softc *sc, U16 handle)
1799 {
1800 	U32 i = 0;
1801 	int retval = 0;
1802 	struct mpi3mr_target *target;
1803 
1804 	mpi3mr_dprint(sc, MPI3MR_EVENT,
1805 		"Removing Device (dev_handle: %d)\n", handle);
1806 
1807 	target = mpi3mr_find_target_by_dev_handle(sc->cam_sc, handle);
1808 
1809 	if (!target) {
1810 		mpi3mr_dprint(sc, MPI3MR_INFO,
1811 			"Device (persistent_id: %d dev_handle: %d) is already removed from driver's list\n",
1812 			target->per_id, handle);
1813 		mpi3mr_rescan_target(sc, NULL);
1814 		retval = -1;
1815 		goto out;
1816 	}
1817 
1818 	target->flags |= MPI3MRSAS_TARGET_INREMOVAL;
1819 
1820 	while (mpi3mr_atomic_read(&target->outstanding) && (i < 30)) {
1821 		i++;
1822 		if (!(i % 2)) {
1823 			mpi3mr_dprint(sc, MPI3MR_INFO,
1824 			    "[%2d]waiting for "
1825 			    "waiting for outstanding commands to complete on target: %d\n",
1826 			    i, target->per_id);
1827 		}
1828 		DELAY(1000 * 1000);
1829 	}
1830 
1831 	if (target->exposed_to_os && !sc->reset_in_progress) {
1832 		mpi3mr_rescan_target(sc, target);
1833 		mpi3mr_dprint(sc, MPI3MR_INFO,
1834 			"Removed device(persistent_id: %d dev_handle: %d)\n", target->per_id, handle);
1835 		target->exposed_to_os = 0;
1836 	}
1837 
1838 	target->flags &= ~MPI3MRSAS_TARGET_INREMOVAL;
1839 out:
1840 	return retval;
1841 }
1842 
1843 void mpi3mr_remove_device_from_list(struct mpi3mr_softc *sc,
1844 	struct mpi3mr_target *target, bool must_delete)
1845 {
1846 	mtx_lock_spin(&sc->target_lock);
1847 	if ((target->state == MPI3MR_DEV_REMOVE_HS_STARTED) ||
1848 	    (must_delete == true)) {
1849 		TAILQ_REMOVE(&sc->cam_sc->tgt_list, target, tgt_next);
1850 		target->state = MPI3MR_DEV_DELETED;
1851 	}
1852 	mtx_unlock_spin(&sc->target_lock);
1853 
1854 	if (target->state == MPI3MR_DEV_DELETED) {
1855  		free(target, M_MPI3MR);
1856  		target = NULL;
1857  	}
1858 
1859 	return;
1860 }
1861 
1862 /**
1863  * mpi3mr_devstatuschg_evt_bh - DevStatusChange evt bottomhalf
1864  * @sc: Adapter instance reference
1865  * @fwevt: Firmware event
1866  *
1867  * Process Device Status Change event and based on device's new
1868  * information, either expose the device to the upper layers, or
1869  * remove the device from upper layers.
1870  *
1871  * Return: Nothing.
1872  */
1873 static void mpi3mr_devstatuschg_evt_bh(struct mpi3mr_softc *sc,
1874 	struct mpi3mr_fw_event_work *fwevt)
1875 {
1876 	U16 dev_handle = 0;
1877 	U8 uhide = 0, delete = 0, cleanup = 0;
1878 	struct mpi3mr_target *tgtdev = NULL;
1879 	Mpi3EventDataDeviceStatusChange_t *evtdata =
1880 	    (Mpi3EventDataDeviceStatusChange_t *)fwevt->event_data;
1881 
1882 
1883 
1884 	dev_handle = le16toh(evtdata->DevHandle);
1885 	mpi3mr_dprint(sc, MPI3MR_INFO,
1886 	    "%s :device status change: handle(0x%04x): reason code(0x%x)\n",
1887 	    __func__, dev_handle, evtdata->ReasonCode);
1888 	switch (evtdata->ReasonCode) {
1889 	case MPI3_EVENT_DEV_STAT_RC_HIDDEN:
1890 		delete = 1;
1891 		break;
1892 	case MPI3_EVENT_DEV_STAT_RC_NOT_HIDDEN:
1893 		uhide = 1;
1894 		break;
1895 	case MPI3_EVENT_DEV_STAT_RC_VD_NOT_RESPONDING:
1896 		delete = 1;
1897 		cleanup = 1;
1898 		break;
1899 	default:
1900 		mpi3mr_dprint(sc, MPI3MR_INFO, "%s :Unhandled reason code(0x%x)\n", __func__,
1901 		    evtdata->ReasonCode);
1902 		break;
1903 	}
1904 
1905 	tgtdev = mpi3mr_find_target_by_dev_handle(sc->cam_sc, dev_handle);
1906 	if (!tgtdev)
1907 		return;
1908 
1909 	if (uhide) {
1910 		if (!tgtdev->exposed_to_os)
1911 			mpi3mr_add_device(sc, tgtdev->per_id);
1912 	}
1913 
1914 	if (delete)
1915 		mpi3mr_remove_device_from_os(sc, dev_handle);
1916 
1917 	if (cleanup)
1918 		mpi3mr_remove_device_from_list(sc, tgtdev, false);
1919 }
1920 
1921 /**
1922  * mpi3mr_devinfochg_evt_bh - DeviceInfoChange evt bottomhalf
1923  * @sc: Adapter instance reference
1924  * @dev_pg0: New device page0
1925  *
1926  * Process Device Info Change event and based on device's new
1927  * information, either expose the device to the upper layers, or
1928  * remove the device from upper layers or update the details of
1929  * the device.
1930  *
1931  * Return: Nothing.
1932  */
1933 static void mpi3mr_devinfochg_evt_bh(struct mpi3mr_softc *sc,
1934 	Mpi3DevicePage0_t *dev_pg0)
1935 {
1936 	struct mpi3mr_target *tgtdev = NULL;
1937 	U16 dev_handle = 0, perst_id = 0;
1938 
1939 	perst_id = le16toh(dev_pg0->PersistentID);
1940 	dev_handle = le16toh(dev_pg0->DevHandle);
1941 	mpi3mr_dprint(sc, MPI3MR_INFO,
1942 	    "%s :Device info change: handle(0x%04x): persist_id(0x%x)\n",
1943 	    __func__, dev_handle, perst_id);
1944 	tgtdev = mpi3mr_find_target_by_dev_handle(sc->cam_sc, dev_handle);
1945 	if (!tgtdev)
1946 		return;
1947 
1948 	mpi3mr_update_device(sc, tgtdev, dev_pg0, false);
1949 	if (!tgtdev->is_hidden && !tgtdev->exposed_to_os)
1950 		mpi3mr_add_device(sc, perst_id);
1951 
1952 	if (tgtdev->is_hidden && tgtdev->exposed_to_os)
1953 		mpi3mr_remove_device_from_os(sc, tgtdev->dev_handle);
1954 }
1955 
1956 static void
1957 mpi3mr_fw_work(struct mpi3mr_softc *sc, struct mpi3mr_fw_event_work *fw_event)
1958 {
1959 	if (sc->mpi3mr_flags & MPI3MR_FLAGS_SHUTDOWN)
1960 		goto out;
1961 
1962 	if (!fw_event->process_event)
1963 		goto evt_ack;
1964 
1965 	mpi3mr_dprint(sc, MPI3MR_EVENT, "(%d)->(%s) Working on  Event: [%x]\n",
1966 	    event_count++, __func__, fw_event->event);
1967 
1968 	switch (fw_event->event) {
1969 	case MPI3_EVENT_DEVICE_ADDED:
1970 	{
1971 		Mpi3DevicePage0_t *dev_pg0 =
1972 			(Mpi3DevicePage0_t *) fw_event->event_data;
1973 		mpi3mr_add_device(sc, dev_pg0->PersistentID);
1974 		break;
1975 	}
1976 	case MPI3_EVENT_DEVICE_INFO_CHANGED:
1977 	{
1978 		mpi3mr_devinfochg_evt_bh(sc,
1979 		    (Mpi3DevicePage0_t *) fw_event->event_data);
1980 		break;
1981 	}
1982 	case MPI3_EVENT_DEVICE_STATUS_CHANGE:
1983 	{
1984 		mpi3mr_devstatuschg_evt_bh(sc, fw_event);
1985 		break;
1986 	}
1987 	case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
1988 	{
1989 		mpi3mr_process_sastopochg_evt(sc, fw_event);
1990 		break;
1991 	}
1992 	case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
1993 	{
1994 		mpi3mr_process_pcietopochg_evt(sc, fw_event);
1995 		break;
1996 	}
1997 	case MPI3_EVENT_LOG_DATA:
1998 	{
1999 		mpi3mr_logdata_evt_bh(sc, fw_event);
2000 		break;
2001 	}
2002 	default:
2003 		mpi3mr_dprint(sc, MPI3MR_TRACE,"Unhandled event 0x%0X\n",
2004 		    fw_event->event);
2005 		break;
2006 
2007 	}
2008 
2009 evt_ack:
2010 	if (fw_event->send_ack) {
2011 		mpi3mr_dprint(sc, MPI3MR_EVENT,"Process event ACK for event 0x%0X\n",
2012 		    fw_event->event);
2013 		mpi3mr_process_event_ack(sc, fw_event->event,
2014 		    fw_event->event_context);
2015 	}
2016 
2017 out:
2018 	mpi3mr_dprint(sc, MPI3MR_EVENT, "(%d)->(%s) Event Free: [%x]\n", event_count,
2019 	    __func__, fw_event->event);
2020 
2021 	mpi3mr_fw_event_free(sc, fw_event);
2022 }
2023 
2024 void
2025 mpi3mr_firmware_event_work(void *arg, int pending)
2026 {
2027 	struct mpi3mr_fw_event_work *fw_event;
2028 	struct mpi3mr_softc *sc;
2029 
2030 	sc = (struct mpi3mr_softc *)arg;
2031 
2032 	mtx_lock(&sc->fwevt_lock);
2033 	while ((fw_event = TAILQ_FIRST(&sc->cam_sc->ev_queue)) != NULL) {
2034 		TAILQ_REMOVE(&sc->cam_sc->ev_queue, fw_event, ev_link);
2035 		mtx_unlock(&sc->fwevt_lock);
2036 		mpi3mr_fw_work(sc, fw_event);
2037 		mtx_lock(&sc->fwevt_lock);
2038 	}
2039 	mtx_unlock(&sc->fwevt_lock);
2040 }
2041 
2042 
2043 /*
2044  * mpi3mr_cam_attach - CAM layer registration
2045  * @sc: Adapter reference
2046  *
2047  * This function does simq allocation, cam registration, xpt_bus registration,
2048  * event taskqueue initialization and async event handler registration.
2049  *
2050  * Return: 0 on success and proper error codes on failure
2051  */
2052 int
2053 mpi3mr_cam_attach(struct mpi3mr_softc *sc)
2054 {
2055 	struct mpi3mr_cam_softc *cam_sc;
2056 	cam_status status;
2057 	int unit, error = 0, reqs;
2058 
2059 	mpi3mr_dprint(sc, MPI3MR_XINFO, "Starting CAM Attach\n");
2060 
2061 	cam_sc = malloc(sizeof(struct mpi3mr_cam_softc), M_MPI3MR, M_WAITOK|M_ZERO);
2062 	if (!cam_sc) {
2063 		mpi3mr_dprint(sc, MPI3MR_ERROR,
2064 		    "Failed to allocate memory for controller CAM instance\n");
2065 		return (ENOMEM);
2066 	}
2067 
2068 	cam_sc->maxtargets = sc->facts.max_perids + 1;
2069 
2070 	TAILQ_INIT(&cam_sc->tgt_list);
2071 
2072 	sc->cam_sc = cam_sc;
2073 	cam_sc->sc = sc;
2074 
2075 	reqs = sc->max_host_ios;
2076 
2077 	if ((cam_sc->devq = cam_simq_alloc(reqs)) == NULL) {
2078 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to allocate SIMQ\n");
2079 		error = ENOMEM;
2080 		goto out;
2081 	}
2082 
2083 	unit = device_get_unit(sc->mpi3mr_dev);
2084 	cam_sc->sim = cam_sim_alloc(mpi3mr_cam_action, mpi3mr_cam_poll, "mpi3mr", cam_sc,
2085 	    unit, &sc->mpi3mr_mtx, reqs, reqs, cam_sc->devq);
2086 	if (cam_sc->sim == NULL) {
2087 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to allocate SIM\n");
2088 		error = EINVAL;
2089 		goto out;
2090 	}
2091 
2092 	TAILQ_INIT(&cam_sc->ev_queue);
2093 
2094 	/* Initialize taskqueue for Event Handling */
2095 	TASK_INIT(&cam_sc->ev_task, 0, mpi3mr_firmware_event_work, sc);
2096 	cam_sc->ev_tq = taskqueue_create("mpi3mr_taskq", M_NOWAIT | M_ZERO,
2097 	    taskqueue_thread_enqueue, &cam_sc->ev_tq);
2098 	taskqueue_start_threads(&cam_sc->ev_tq, 1, PRIBIO, "%s taskq",
2099 	    device_get_nameunit(sc->mpi3mr_dev));
2100 
2101 	mtx_lock(&sc->mpi3mr_mtx);
2102 
2103 	/*
2104 	 * XXX There should be a bus for every port on the adapter, but since
2105 	 * we're just going to fake the topology for now, we'll pretend that
2106 	 * everything is just a target on a single bus.
2107 	 */
2108 	if ((error = xpt_bus_register(cam_sc->sim, sc->mpi3mr_dev, 0)) != 0) {
2109 		mpi3mr_dprint(sc, MPI3MR_ERROR,
2110 		    "Error 0x%x registering SCSI bus\n", error);
2111 		mtx_unlock(&sc->mpi3mr_mtx);
2112 		goto out;
2113 	}
2114 
2115 	/*
2116 	 * Assume that discovery events will start right away.
2117 	 *
2118 	 * Hold off boot until discovery is complete.
2119 	 */
2120 	cam_sc->flags |= MPI3MRSAS_IN_STARTUP | MPI3MRSAS_IN_DISCOVERY;
2121 	sc->cam_sc->startup_refcount = 0;
2122 	mpi3mr_startup_increment(cam_sc);
2123 
2124 	callout_init(&cam_sc->discovery_callout, 1 /*mpsafe*/);
2125 
2126 	/*
2127 	 * Register for async events so we can determine the EEDP
2128 	 * capabilities of devices.
2129 	 */
2130 	status = xpt_create_path(&cam_sc->path, /*periph*/NULL,
2131 	    cam_sim_path(sc->cam_sc->sim), CAM_TARGET_WILDCARD,
2132 	    CAM_LUN_WILDCARD);
2133 	if (status != CAM_REQ_CMP) {
2134 		mpi3mr_dprint(sc, MPI3MR_ERROR,
2135 		    "Error 0x%x creating sim path\n", status);
2136 		cam_sc->path = NULL;
2137 	}
2138 
2139 	if (status != CAM_REQ_CMP) {
2140 		/*
2141 		 * EEDP use is the exception, not the rule.
2142 		 * Warn the user, but do not fail to attach.
2143 		 */
2144 		mpi3mr_dprint(sc, MPI3MR_INFO, "EEDP capabilities disabled.\n");
2145 	}
2146 
2147 	mtx_unlock(&sc->mpi3mr_mtx);
2148 
2149 	error = mpi3mr_register_events(sc);
2150 
2151 out:
2152 	mpi3mr_dprint(sc, MPI3MR_XINFO, "%s Exiting CAM attach, error: 0x%x n", __func__, error);
2153 	return (error);
2154 }
2155 
2156 int
2157 mpi3mr_cam_detach(struct mpi3mr_softc *sc)
2158 {
2159 	struct mpi3mr_cam_softc *cam_sc;
2160 	struct mpi3mr_target *target;
2161 
2162 	mpi3mr_dprint(sc, MPI3MR_XINFO, "%s, Starting CAM detach\n", __func__);
2163 	if (sc->cam_sc == NULL)
2164 		return (0);
2165 
2166 	cam_sc = sc->cam_sc;
2167 
2168 	mpi3mr_freeup_events(sc);
2169 
2170 	/*
2171 	 * Drain and free the event handling taskqueue with the lock
2172 	 * unheld so that any parallel processing tasks drain properly
2173 	 * without deadlocking.
2174 	 */
2175 	if (cam_sc->ev_tq != NULL)
2176 		taskqueue_free(cam_sc->ev_tq);
2177 
2178 	mtx_lock(&sc->mpi3mr_mtx);
2179 
2180 	while (cam_sc->startup_refcount != 0)
2181 		mpi3mr_startup_decrement(cam_sc);
2182 
2183 	/* Deregister our async handler */
2184 	if (cam_sc->path != NULL) {
2185 		xpt_free_path(cam_sc->path);
2186 		cam_sc->path = NULL;
2187 	}
2188 
2189 	if (cam_sc->flags & MPI3MRSAS_IN_STARTUP)
2190 		xpt_release_simq(cam_sc->sim, 1);
2191 
2192 	if (cam_sc->sim != NULL) {
2193 		xpt_bus_deregister(cam_sim_path(cam_sc->sim));
2194 		cam_sim_free(cam_sc->sim, FALSE);
2195 	}
2196 
2197 	mtx_unlock(&sc->mpi3mr_mtx);
2198 
2199 	if (cam_sc->devq != NULL)
2200 		cam_simq_free(cam_sc->devq);
2201 
2202 get_target:
2203 	mtx_lock_spin(&sc->target_lock);
2204  	TAILQ_FOREACH(target, &cam_sc->tgt_list, tgt_next) {
2205  		TAILQ_REMOVE(&sc->cam_sc->tgt_list, target, tgt_next);
2206 		mtx_unlock_spin(&sc->target_lock);
2207 		goto out_tgt_free;
2208 	}
2209 	mtx_unlock_spin(&sc->target_lock);
2210 out_tgt_free:
2211 	if (target) {
2212 		free(target, M_MPI3MR);
2213 		target = NULL;
2214 		goto get_target;
2215  	}
2216 
2217 	free(cam_sc, M_MPI3MR);
2218 	sc->cam_sc = NULL;
2219 
2220 	mpi3mr_dprint(sc, MPI3MR_XINFO, "%s, Exiting CAM detach\n", __func__);
2221 	return (0);
2222 }
2223