/* xref: /freebsd/sys/dev/mpi3mr/mpi3mr_cam.c (revision 4d65a7c6951cea0333f1a0c1b32c38489cdfa6c5) */
1 /*
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2020-2023, Broadcom Inc. All rights reserved.
5  * Support: <fbsd-storage-driver.pdl@broadcom.com>
6  *
7  * Authors: Sumit Saxena <sumit.saxena@broadcom.com>
8  *	    Chandrakanth Patil <chandrakanth.patil@broadcom.com>
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions are
12  * met:
13  *
14  * 1. Redistributions of source code must retain the above copyright notice,
15  *    this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright notice,
17  *    this list of conditions and the following disclaimer in the documentation and/or other
18  *    materials provided with the distribution.
19  * 3. Neither the name of the Broadcom Inc. nor the names of its contributors
20  *    may be used to endorse or promote products derived from this software without
21  *    specific prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
24  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
27  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
30  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
31  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
32  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
33  * POSSIBILITY OF SUCH DAMAGE.
34  *
35  * The views and conclusions contained in the software and documentation are
36  * those of the authors and should not be interpreted as representing
 * official policies, either expressed or implied, of the FreeBSD Project.
38  *
39  * Mail to: Broadcom Inc 1320 Ridder Park Dr, San Jose, CA 95131
40  *
41  * Broadcom Inc. (Broadcom) MPI3MR Adapter FreeBSD
42  */
43 
44 #include <sys/types.h>
45 #include <sys/param.h>
46 #include <sys/systm.h>
47 #include <sys/kernel.h>
48 #include <sys/selinfo.h>
49 #include <sys/module.h>
50 #include <sys/bus.h>
51 #include <sys/conf.h>
52 #include <sys/bio.h>
53 #include <sys/malloc.h>
54 #include <sys/uio.h>
55 #include <sys/sysctl.h>
56 #include <sys/endian.h>
57 #include <sys/queue.h>
58 #include <sys/kthread.h>
59 #include <sys/taskqueue.h>
60 #include <sys/sbuf.h>
61 
62 #include <machine/bus.h>
63 #include <machine/resource.h>
64 #include <sys/rman.h>
65 
66 #include <machine/stdarg.h>
67 
68 #include <cam/cam.h>
69 #include <cam/cam_ccb.h>
70 #include <cam/cam_debug.h>
71 #include <cam/cam_sim.h>
72 #include <cam/cam_xpt_sim.h>
73 #include <cam/cam_xpt_periph.h>
74 #include <cam/cam_periph.h>
75 #include <cam/scsi/scsi_all.h>
76 #include <cam/scsi/scsi_message.h>
77 #include <cam/scsi/smp_all.h>
78 
79 #include <dev/nvme/nvme.h>
80 #include "mpi/mpi30_api.h"
81 #include "mpi3mr_cam.h"
82 #include "mpi3mr.h"
83 #include <sys/time.h>			/* XXX for pcpu.h */
84 #include <sys/pcpu.h>			/* XXX for PCPU_GET */
85 
86 #define	smp_processor_id()  PCPU_GET(cpuid)
87 
88 static int
89 mpi3mr_map_request(struct mpi3mr_softc *sc, struct mpi3mr_cmd *cm);
90 void
91 mpi3mr_release_simq_reinit(struct mpi3mr_cam_softc *cam_sc);
92 static void
93 mpi3mr_freeup_events(struct mpi3mr_softc *sc);
94 
95 extern int
96 mpi3mr_register_events(struct mpi3mr_softc *sc);
97 extern void mpi3mr_add_sg_single(void *paddr, U8 flags, U32 length,
98     bus_addr_t dma_addr);
99 extern void mpi3mr_build_zero_len_sge(void *paddr);
100 
101 static U32 event_count;
102 
/*
 * mpi3mr_prepare_sgls - busdma callback that builds the SGL of a SCSI IO
 * @arg:   the mpi3mr_cmd being mapped (passed via bus_dmamap_load_ccb)
 * @segs:  DMA segment array produced by busdma
 * @nsegs: number of valid entries in @segs
 * @error: busdma load error, 0 on success
 *
 * Places as many simple SGEs as fit directly in the request frame; when
 * more segments remain, they are written into the command's pre-allocated
 * chain buffer, which is linked in with a LAST_CHAIN SGE.
 */
static void mpi3mr_prepare_sgls(void *arg,
	bus_dma_segment_t *segs, int nsegs, int error)
{
	struct mpi3mr_softc *sc;
	struct mpi3mr_cmd *cm;
	u_int i;
	bus_addr_t chain_dma;
	void *chain;
	U8 *sg_local;
	U32 chain_length;
	int sges_left;
	U32 sges_in_segment;
	U8 simple_sgl_flags;
	U8 simple_sgl_flags_last;
	U8 last_chain_sgl_flags;
	struct mpi3mr_chain *chain_req;
	Mpi3SCSIIORequest_t *scsiio_req;

	cm = (struct mpi3mr_cmd *)arg;
	sc = cm->sc;
	scsiio_req = (Mpi3SCSIIORequest_t *) &cm->io_request;

	if (error) {
		/* Record the error; mpi3mr_map_request() fails the request
		 * based on cm->error_code after this callback returns. */
		cm->error_code = error;
		device_printf(sc->mpi3mr_dev, "%s: error=%d\n",__func__, error);
		if (error == EFBIG) {
			cm->ccb->ccb_h.status = CAM_REQ_TOO_BIG;
			return;
		}
		/* NOTE(review): for errors other than EFBIG execution falls
		 * through and keeps building SGEs — confirm segs/nsegs are
		 * valid in that path. */
	}

	/* Sync the buffer for the device according to transfer direction. */
	if (cm->data_dir == MPI3MR_READ)
		bus_dmamap_sync(sc->buffer_dmat, cm->dmamap,
		    BUS_DMASYNC_PREREAD);
	if (cm->data_dir == MPI3MR_WRITE)
		bus_dmamap_sync(sc->buffer_dmat, cm->dmamap,
		    BUS_DMASYNC_PREWRITE);
	if (nsegs > MPI3MR_SG_DEPTH) {
		device_printf(sc->mpi3mr_dev, "SGE count is too large or 0.\n");
		return;
	}

	simple_sgl_flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_SIMPLE |
	    MPI3_SGE_FLAGS_DLAS_SYSTEM;
	simple_sgl_flags_last = simple_sgl_flags |
	    MPI3_SGE_FLAGS_END_OF_LIST;
	last_chain_sgl_flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_LAST_CHAIN |
	    MPI3_SGE_FLAGS_DLAS_SYSTEM;

	sg_local = (U8 *)&scsiio_req->SGL;

	/* Zero-length transfers get a single zero-length SGE. */
	if (!scsiio_req->DataLength) {
		mpi3mr_build_zero_len_sge(sg_local);
		return;
	}

	sges_left = nsegs;

	if (sges_left < 0) {
		printf("scsi_dma_map failed: request for %d bytes!\n",
			scsiio_req->DataLength);
		return;
	}
	if (sges_left > MPI3MR_SG_DEPTH) {
		printf("scsi_dma_map returned unsupported sge count %d!\n",
			sges_left);
		return;
	}

	/* Number of simple SGEs that fit in the request frame itself. */
	sges_in_segment = (sc->facts.op_req_sz -
	    offsetof(Mpi3SCSIIORequest_t, SGL))/sizeof(Mpi3SGESimple_t);

	i = 0;

	mpi3mr_dprint(sc, MPI3MR_TRACE, "SGE count: %d IO size: %d\n",
		nsegs, scsiio_req->DataLength);

	if (sges_left <= sges_in_segment)
		goto fill_in_last_segment;

	/* fill in main message segment when there is a chain following */
	while (sges_in_segment > 1) {
		mpi3mr_add_sg_single(sg_local, simple_sgl_flags,
		    segs[i].ds_len, segs[i].ds_addr);
		sg_local += sizeof(Mpi3SGESimple_t);
		sges_left--;
		sges_in_segment--;
		i++;
	}

	/* Remaining SGEs go into the per-command chain buffer, indexed by
	 * host tag; link it with a LAST_CHAIN SGE in the request frame. */
	chain_req = &sc->chain_sgl_list[cm->hosttag];

	chain = chain_req->buf;
	chain_dma = chain_req->buf_phys;
	memset(chain_req->buf, 0, PAGE_SIZE);
	sges_in_segment = sges_left;
	chain_length = sges_in_segment * sizeof(Mpi3SGESimple_t);

	mpi3mr_add_sg_single(sg_local, last_chain_sgl_flags,
	    chain_length, chain_dma);

	sg_local = chain;

fill_in_last_segment:
	/* Emit the remaining SGEs; the final one carries END_OF_LIST. */
	while (sges_left > 0) {
		if (sges_left == 1)
			mpi3mr_add_sg_single(sg_local,
			    simple_sgl_flags_last, segs[i].ds_len,
			    segs[i].ds_addr);
		else
			mpi3mr_add_sg_single(sg_local, simple_sgl_flags,
			    segs[i].ds_len, segs[i].ds_addr);
		sg_local += sizeof(Mpi3SGESimple_t);
		sges_left--;
		i++;
	}

	return;
}
222 
223 int
224 mpi3mr_map_request(struct mpi3mr_softc *sc, struct mpi3mr_cmd *cm)
225 {
226 	u_int32_t retcode = 0;
227 
228 	if (cm->data != NULL) {
229 		mtx_lock(&sc->io_lock);
230 		/* Map data buffer into bus space */
231 		retcode = bus_dmamap_load_ccb(sc->buffer_dmat, cm->dmamap,
232 		    cm->ccb, mpi3mr_prepare_sgls, cm, 0);
233 		mtx_unlock(&sc->io_lock);
234 		if (retcode)
235 			device_printf(sc->mpi3mr_dev, "bus_dmamap_load(): retcode = %d\n", retcode);
236 		if (retcode == EINPROGRESS) {
237 			device_printf(sc->mpi3mr_dev, "request load in progress\n");
238 			xpt_freeze_simq(sc->cam_sc->sim, 1);
239 		}
240 	}
241 	if (cm->error_code)
242 		return cm->error_code;
243 	if (retcode)
244 		mpi3mr_set_ccbstatus(cm->ccb, CAM_REQ_INVALID);
245 
246 	return (retcode);
247 }
248 
249 void
250 mpi3mr_unmap_request(struct mpi3mr_softc *sc, struct mpi3mr_cmd *cmd)
251 {
252 	if (cmd->data != NULL) {
253 		if (cmd->data_dir == MPI3MR_READ)
254 			bus_dmamap_sync(sc->buffer_dmat, cmd->dmamap, BUS_DMASYNC_POSTREAD);
255 		if (cmd->data_dir == MPI3MR_WRITE)
256 			bus_dmamap_sync(sc->buffer_dmat, cmd->dmamap, BUS_DMASYNC_POSTWRITE);
257 		mtx_lock(&sc->io_lock);
258 		bus_dmamap_unload(sc->buffer_dmat, cmd->dmamap);
259 		mtx_unlock(&sc->io_lock);
260 	}
261 }
262 
263 /**
264  * mpi3mr_allow_unmap_to_fw - Whether an unmap is allowed to fw
265  * @sc: Adapter instance reference
266  * @ccb: SCSI Command reference
267  *
268  * The controller hardware cannot handle certain unmap commands
269  * for NVMe drives, this routine checks those and return true
270  * and completes the SCSI command with proper status and sense
271  * data.
272  *
273  * Return: TRUE for allowed unmap, FALSE otherwise.
274  */
275 static bool mpi3mr_allow_unmap_to_fw(struct mpi3mr_softc *sc,
276 	union ccb *ccb)
277 {
278 	struct ccb_scsiio *csio;
279 	uint16_t param_list_len, block_desc_len, trunc_param_len = 0;
280 
281 	csio = &ccb->csio;
282 	param_list_len = (uint16_t) ((scsiio_cdb_ptr(csio)[7] << 8) | scsiio_cdb_ptr(csio)[8]);
283 
284 	switch(pci_get_revid(sc->mpi3mr_dev)) {
285 	case SAS4116_CHIP_REV_A0:
286 		if (!param_list_len) {
287 			mpi3mr_dprint(sc, MPI3MR_ERROR,
288 			    "%s: CDB received with zero parameter length\n",
289 			    __func__);
290 			mpi3mr_print_cdb(ccb);
291 			mpi3mr_set_ccbstatus(ccb, CAM_REQ_CMP);
292 			xpt_done(ccb);
293 			return false;
294 		}
295 
296 		if (param_list_len < 24) {
297 			mpi3mr_dprint(sc, MPI3MR_ERROR,
298 			    "%s: CDB received with invalid param_list_len: %d\n",
299 			    __func__, param_list_len);
300 			mpi3mr_print_cdb(ccb);
301 			scsi_set_sense_data(&ccb->csio.sense_data,
302 				/*sense_format*/ SSD_TYPE_FIXED,
303 				/*current_error*/ 1,
304 				/*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
305 				/*asc*/ 0x1A,
306 				/*ascq*/ 0x00,
307 				/*extra args*/ SSD_ELEM_NONE);
308 			ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
309 			ccb->ccb_h.status =
310 			    CAM_SCSI_STATUS_ERROR |
311 			    CAM_AUTOSNS_VALID;
312 			return false;
313 		}
314 
315 		if (param_list_len != csio->dxfer_len) {
316 			mpi3mr_dprint(sc, MPI3MR_ERROR,
317 			    "%s: CDB received with param_list_len: %d bufflen: %d\n",
318 			    __func__, param_list_len, csio->dxfer_len);
319 			mpi3mr_print_cdb(ccb);
320 			scsi_set_sense_data(&ccb->csio.sense_data,
321 				/*sense_format*/ SSD_TYPE_FIXED,
322 				/*current_error*/ 1,
323 				/*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
324 				/*asc*/ 0x1A,
325 				/*ascq*/ 0x00,
326 				/*extra args*/ SSD_ELEM_NONE);
327 			ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
328 			ccb->ccb_h.status =
329 			    CAM_SCSI_STATUS_ERROR |
330 			    CAM_AUTOSNS_VALID;
331 			xpt_done(ccb);
332 			return false;
333 		}
334 
335 		block_desc_len = (uint16_t) (csio->data_ptr[2] << 8 | csio->data_ptr[3]);
336 
337 		if (block_desc_len < 16) {
338 			mpi3mr_dprint(sc, MPI3MR_ERROR,
339 			    "%s: Invalid descriptor length in param list: %d\n",
340 			    __func__, block_desc_len);
341 			mpi3mr_print_cdb(ccb);
342 			scsi_set_sense_data(&ccb->csio.sense_data,
343 				/*sense_format*/ SSD_TYPE_FIXED,
344 				/*current_error*/ 1,
345 				/*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
346 				/*asc*/ 0x26,
347 				/*ascq*/ 0x00,
348 				/*extra args*/ SSD_ELEM_NONE);
349 			ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
350 			ccb->ccb_h.status =
351 			    CAM_SCSI_STATUS_ERROR |
352 			    CAM_AUTOSNS_VALID;
353 			xpt_done(ccb);
354 			return false;
355 		}
356 
357 		if (param_list_len > (block_desc_len + 8)) {
358 			mpi3mr_print_cdb(ccb);
359 			mpi3mr_dprint(sc, MPI3MR_INFO,
360 			    "%s: Truncating param_list_len(%d) to block_desc_len+8(%d)\n",
361 			    __func__, param_list_len, (block_desc_len + 8));
362 			param_list_len = block_desc_len + 8;
363 			scsiio_cdb_ptr(csio)[7] = (param_list_len >> 8) | 0xff;
364 			scsiio_cdb_ptr(csio)[8] = param_list_len | 0xff;
365 			mpi3mr_print_cdb(ccb);
366 		}
367 		break;
368 
369 	case SAS4116_CHIP_REV_B0:
370 		if ((param_list_len > 24) && ((param_list_len - 8) & 0xF)) {
371 			trunc_param_len -= (param_list_len - 8) & 0xF;
372 			mpi3mr_print_cdb(ccb);
373 			mpi3mr_dprint(sc, MPI3MR_INFO,
374 			    "%s: Truncating param_list_len from (%d) to (%d)\n",
375 			    __func__, param_list_len, trunc_param_len);
376 			scsiio_cdb_ptr(csio)[7] = (param_list_len >> 8) | 0xff;
377 			scsiio_cdb_ptr(csio)[8] = param_list_len | 0xff;
378 			mpi3mr_print_cdb(ccb);
379 		}
380 		break;
381 	}
382 
383 	return true;
384 }
385 
386 /**
387  * mpi3mr_tm_response_name -  get TM response as a string
388  * @resp_code: TM response code
389  *
390  * Convert known task management response code as a readable
391  * string.
392  *
393  * Return: response code string.
394  */
395 static const char* mpi3mr_tm_response_name(U8 resp_code)
396 {
397 	char *desc;
398 
399 	switch (resp_code) {
400 	case MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE:
401 		desc = "task management request completed";
402 		break;
403 	case MPI3_SCSITASKMGMT_RSPCODE_INVALID_FRAME:
404 		desc = "invalid frame";
405 		break;
406 	case MPI3_SCSITASKMGMT_RSPCODE_TM_FUNCTION_NOT_SUPPORTED:
407 		desc = "task management request not supported";
408 		break;
409 	case MPI3_SCSITASKMGMT_RSPCODE_TM_FAILED:
410 		desc = "task management request failed";
411 		break;
412 	case MPI3_SCSITASKMGMT_RSPCODE_TM_SUCCEEDED:
413 		desc = "task management request succeeded";
414 		break;
415 	case MPI3_SCSITASKMGMT_RSPCODE_TM_INVALID_LUN:
416 		desc = "invalid LUN";
417 		break;
418 	case MPI3_SCSITASKMGMT_RSPCODE_TM_OVERLAPPED_TAG:
419 		desc = "overlapped tag attempted";
420 		break;
421 	case MPI3_SCSITASKMGMT_RSPCODE_IO_QUEUED_ON_IOC:
422 		desc = "task queued, however not sent to target";
423 		break;
424 	case MPI3_SCSITASKMGMT_RSPCODE_TM_NVME_DENIED:
425 		desc = "task management request denied by NVMe device";
426 		break;
427 	default:
428 		desc = "unknown";
429 		break;
430 	}
431 
432 	return desc;
433 }
434 
435 void mpi3mr_poll_pend_io_completions(struct mpi3mr_softc *sc)
436 {
437 	int i;
438 	int num_of_reply_queues = sc->num_queues;
439 	struct mpi3mr_irq_context *irq_ctx;
440 
441 	for (i = 0; i < num_of_reply_queues; i++) {
442 		irq_ctx = &sc->irq_ctx[i];
443 		mpi3mr_complete_io_cmd(sc, irq_ctx);
444 	}
445 }
446 
447 void
448 trigger_reset_from_watchdog(struct mpi3mr_softc *sc, U8 reset_type, U32 reset_reason)
449 {
450 	if (sc->reset_in_progress) {
451 		mpi3mr_dprint(sc, MPI3MR_INFO, "Another reset is in progress, no need to trigger the reset\n");
452 		return;
453 	}
454 	sc->reset.type = reset_type;
455 	sc->reset.reason = reset_reason;
456 
457 	return;
458 }
459 
460 /**
461  * mpi3mr_issue_tm - Issue Task Management request
462  * @sc: Adapter instance reference
463  * @tm_type: Task Management type
464  * @handle: Device handle
465  * @lun: lun ID
466  * @htag: Host tag of the TM request
467  * @timeout: TM timeout value
468  * @drv_cmd: Internal command tracker
469  * @resp_code: Response code place holder
470  * @cmd: Timed out command reference
471  *
472  * Issues a Task Management Request to the controller for a
473  * specified target, lun and command and wait for its completion
474  * and check TM response. Recover the TM if it timed out by
475  * issuing controller reset.
476  *
477  * Return: 0 on success, non-zero on errors
478  */
static int
mpi3mr_issue_tm(struct mpi3mr_softc *sc, struct mpi3mr_cmd *cmd,
		U8 tm_type, unsigned long timeout)
{
	int retval = 0;
	MPI3_SCSI_TASK_MGMT_REQUEST tm_req;
	MPI3_SCSI_TASK_MGMT_REPLY *tm_reply = NULL;
	struct mpi3mr_drvr_cmd *drv_cmd = NULL;
	struct mpi3mr_target *tgtdev = NULL;
	struct mpi3mr_op_req_queue *op_req_q = NULL;
	union ccb *ccb;
	U8 resp_code;


	/* TM is pointless while the controller is unrecoverable or already
	 * being reset; the reset path cleans up outstanding I/Os. */
	if (sc->unrecoverable) {
		mpi3mr_dprint(sc, MPI3MR_INFO,
			"Controller is in unrecoverable state!! TM not required\n");
		return retval;
	}
	if (sc->reset_in_progress) {
		mpi3mr_dprint(sc, MPI3MR_INFO,
			"controller reset in progress!! TM not required\n");
		return retval;
	}

	if (!cmd->ccb) {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "SCSIIO command timed-out with NULL ccb\n");
		return retval;
	}
	ccb = cmd->ccb;

	/* Nothing to do when the target is gone or already removed. */
	tgtdev = cmd->targ;
	if (tgtdev == NULL)  {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "Device does not exist target ID:0x%x,"
			      "TM is not required\n", ccb->ccb_h.target_id);
		return retval;
	}
	if (tgtdev->dev_removed == 1)  {
		mpi3mr_dprint(sc, MPI3MR_ERROR, "Device(0x%x) is removed, TM is not required\n",
			      ccb->ccb_h.target_id);
		return retval;
	}

	/* Single shared TM tracker; its lock serializes TM issuance. */
	drv_cmd = &sc->host_tm_cmds;
	mtx_lock(&drv_cmd->lock);

	memset(&tm_req, 0, sizeof(tm_req));
	tm_req.DevHandle = htole16(tgtdev->dev_handle);
	tm_req.TaskType = tm_type;
	tm_req.HostTag = htole16(MPI3MR_HOSTTAG_TMS);
	int_to_lun(ccb->ccb_h.target_lun, tm_req.LUN);
	tm_req.Function = MPI3_FUNCTION_SCSI_TASK_MGMT;
	drv_cmd->state = MPI3MR_CMD_PENDING;
	drv_cmd->is_waiting = 1;
	drv_cmd->callback = NULL;

	if (ccb) {
		/* ABORT_TASK needs the host tag and request queue of the
		 * I/O being aborted so firmware can locate it. */
		if (tm_type == MPI3_SCSITASKMGMT_TASKTYPE_ABORT_TASK) {
			op_req_q = &sc->op_req_q[cmd->req_qidx];
			tm_req.TaskHostTag = htole16(cmd->hosttag);
			tm_req.TaskRequestQueueID = htole16(op_req_q->qid);
		}
	}

	/* Block new I/Os to the target while the TM is outstanding;
	 * undone at out_unlock. */
	if (tgtdev)
		mpi3mr_atomic_inc(&tgtdev->block_io);

	/* NVMe devices may report their own abort/reset timeouts; prefer
	 * those over the caller-supplied default when non-zero. */
	if (tgtdev && (tgtdev->dev_type == MPI3_DEVICE_DEVFORM_PCIE)) {
		if ((tm_type == MPI3_SCSITASKMGMT_TASKTYPE_ABORT_TASK)
		     && tgtdev->dev_spec.pcie_inf.abort_to)
 			timeout = tgtdev->dev_spec.pcie_inf.abort_to;
		else if ((tm_type == MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET)
			 && tgtdev->dev_spec.pcie_inf.reset_to)
			 timeout = tgtdev->dev_spec.pcie_inf.reset_to;
	}

	/* NOTE(review): this stores the address of the local pointer
	 * variable 'drv_cmd' (a stack slot), not the tracker it points
	 * to — verify the wakeup side uses the same channel address. */
	sc->tm_chan = (void *)&drv_cmd;

	mpi3mr_dprint(sc, MPI3MR_DEBUG_TM,
		      "posting task management request: type(%d), handle(0x%04x)\n",
		       tm_type, tgtdev->dev_handle);

	init_completion(&drv_cmd->completion);
	retval = mpi3mr_submit_admin_cmd(sc, &tm_req, sizeof(tm_req));
	if (retval) {
		mpi3mr_dprint(sc, MPI3MR_ERROR,
			      "posting task management request is failed\n");
		retval = -1;
		goto out_unlock;
	}
	wait_for_completion_timeout_tm(&drv_cmd->completion, timeout, sc);

	/* TM timed out: escalate to a soft reset unless a reset already
	 * terminated the tracker (MPI3MR_CMD_RESET). */
	if (!(drv_cmd->state & MPI3MR_CMD_COMPLETE)) {
		drv_cmd->is_waiting = 0;
		retval = -1;
		if (!(drv_cmd->state & MPI3MR_CMD_RESET)) {
			mpi3mr_dprint(sc, MPI3MR_ERROR,
				      "task management request timed out after %ld seconds\n", timeout);
			if (sc->mpi3mr_debug & MPI3MR_DEBUG_TM) {
				mpi3mr_dprint(sc, MPI3MR_INFO, "tm_request dump\n");
				mpi3mr_hexdump(&tm_req, sizeof(tm_req), 8);
			}
			trigger_reset_from_watchdog(sc, MPI3MR_TRIGGER_SOFT_RESET, MPI3MR_RESET_FROM_TM_TIMEOUT);
			retval = ETIMEDOUT;
		}
		goto out_unlock;
	}

	if (!(drv_cmd->state & MPI3MR_CMD_REPLYVALID)) {
		mpi3mr_dprint(sc, MPI3MR_ERROR,
			      "invalid task management reply message\n");
		retval = -1;
		goto out_unlock;
	}
	tm_reply = (MPI3_SCSI_TASK_MGMT_REPLY *)drv_cmd->reply;

	/* Map the IOC status to a TM response code; any other status is
	 * a hard failure (resp_code is only read on the two cases below). */
	switch (drv_cmd->ioc_status) {
	case MPI3_IOCSTATUS_SUCCESS:
		resp_code = tm_reply->ResponseData & MPI3MR_RI_MASK_RESPCODE;
		break;
	case MPI3_IOCSTATUS_SCSI_IOC_TERMINATED:
		resp_code = MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE;
		break;
	default:
		mpi3mr_dprint(sc, MPI3MR_ERROR,
			      "task management request to handle(0x%04x) is failed with ioc_status(0x%04x) log_info(0x%08x)\n",
			       tgtdev->dev_handle, drv_cmd->ioc_status, drv_cmd->ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}

	switch (resp_code) {
	case MPI3_SCSITASKMGMT_RSPCODE_TM_SUCCEEDED:
	case MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE:
		break;
	case MPI3_SCSITASKMGMT_RSPCODE_IO_QUEUED_ON_IOC:
		/* "Queued on IOC" only counts as success for QUERY_TASK. */
		if (tm_type != MPI3_SCSITASKMGMT_TASKTYPE_QUERY_TASK)
			retval = -1;
		break;
	default:
		retval = -1;
		break;
	}

	mpi3mr_dprint(sc, MPI3MR_DEBUG_TM,
		      "task management request type(%d) completed for handle(0x%04x) with ioc_status(0x%04x), log_info(0x%08x)"
		      "termination_count(%u), response:%s(0x%x)\n", tm_type, tgtdev->dev_handle, drv_cmd->ioc_status, drv_cmd->ioc_loginfo,
		      tm_reply->TerminationCount, mpi3mr_tm_response_name(resp_code), resp_code);

	if (retval)
		goto out_unlock;

	/* Drain the reply queues so completions terminated by the TM are
	 * processed before the driver-side verification below. */
	mpi3mr_disable_interrupts(sc);
	mpi3mr_poll_pend_io_completions(sc);
	mpi3mr_enable_interrupts(sc);
	mpi3mr_poll_pend_io_completions(sc);

	/* Firmware reported success; cross-check from the driver side. */
	switch (tm_type) {
	case MPI3_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
		if (cmd->state == MPI3MR_CMD_STATE_IN_TM) {
			mpi3mr_dprint(sc, MPI3MR_ERROR,
				      "%s: task abort returned success from firmware but corresponding CCB (%p) was not terminated"
				      "marking task abort failed!\n", sc->name, cmd->ccb);
			retval = -1;
		}
		break;
	case MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
		if (mpi3mr_atomic_read(&tgtdev->outstanding)) {
			mpi3mr_dprint(sc, MPI3MR_ERROR,
				      "%s: target reset returned success from firmware but IOs are still pending on the target (%p)"
				      "marking target reset failed!\n",
				      sc->name, tgtdev);
			retval = -1;
		}
		break;
	default:
		break;
	}

out_unlock:
	drv_cmd->state = MPI3MR_CMD_NOTUSED;
	mtx_unlock(&drv_cmd->lock);
	if (tgtdev && mpi3mr_atomic_read(&tgtdev->block_io) > 0)
		mpi3mr_atomic_dec(&tgtdev->block_io);

	return retval;
}
666 
667 /**
668  * mpi3mr_task_abort- Abort error handling callback
669  * @cmd: Timed out command reference
670  *
671  * Issue Abort Task Management if the command is in LLD scope
672  * and verify if it is aborted successfully and return status
673  * accordingly.
674  *
675  * Return: SUCCESS of successful abort the SCSI command else FAILED
676  */
677 static int mpi3mr_task_abort(struct mpi3mr_cmd *cmd)
678 {
679 	int retval = 0;
680 	struct mpi3mr_softc *sc;
681 	union ccb *ccb;
682 
683 	sc = cmd->sc;
684 
685 	if (!cmd->ccb) {
686 		mpi3mr_dprint(sc, MPI3MR_ERROR, "SCSIIO command timed-out with NULL ccb\n");
687 		return retval;
688 	}
689 	ccb = cmd->ccb;
690 
691 	mpi3mr_dprint(sc, MPI3MR_INFO,
692 		      "attempting abort task for ccb(%p)\n", ccb);
693 
694 	mpi3mr_print_cdb(ccb);
695 
696 	if (cmd->state != MPI3MR_CMD_STATE_BUSY) {
697 		mpi3mr_dprint(sc, MPI3MR_INFO,
698 			      "%s: ccb is not in driver scope, abort task is not required\n",
699 			      sc->name);
700 		return retval;
701 	}
702 	cmd->state = MPI3MR_CMD_STATE_IN_TM;
703 
704 	retval = mpi3mr_issue_tm(sc, cmd, MPI3_SCSITASKMGMT_TASKTYPE_ABORT_TASK, MPI3MR_ABORTTM_TIMEOUT);
705 
706 	mpi3mr_dprint(sc, MPI3MR_INFO,
707 		      "abort task is %s for ccb(%p)\n", ((retval == 0) ? "SUCCESS" : "FAILED"), ccb);
708 
709 	return retval;
710 }
711 
712 /**
713  * mpi3mr_target_reset - Target reset error handling callback
714  * @cmd: Timed out command reference
715  *
716  * Issue Target reset Task Management and verify the SCSI commands are
717  * terminated successfully and return status accordingly.
718  *
719  * Return: SUCCESS of successful termination of the SCSI commands else
720  *         FAILED
721  */
722 static int mpi3mr_target_reset(struct mpi3mr_cmd *cmd)
723 {
724 	int retval = 0;
725 	struct mpi3mr_softc *sc;
726 	struct mpi3mr_target *target;
727 
728 	sc = cmd->sc;
729 
730 	target = cmd->targ;
731 	if (target == NULL)  {
732 		mpi3mr_dprint(sc, MPI3MR_XINFO, "Device does not exist for target:0x%p,"
733 			      "target reset is not required\n", target);
734 		return retval;
735 	}
736 
737 	mpi3mr_dprint(sc, MPI3MR_INFO,
738 		      "attempting target reset on target(%d)\n", target->per_id);
739 
740 
741 	if (mpi3mr_atomic_read(&target->outstanding)) {
742 		mpi3mr_dprint(sc, MPI3MR_INFO,
743 			      "no outstanding IOs on the target(%d),"
744 			      " target reset not required.\n", target->per_id);
745 		return retval;
746 	}
747 
748 	retval = mpi3mr_issue_tm(sc, cmd, MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET, MPI3MR_RESETTM_TIMEOUT);
749 
750 	mpi3mr_dprint(sc, MPI3MR_INFO,
751 		      "target reset is %s for target(%d)\n", ((retval == 0) ? "SUCCESS" : "FAILED"),
752 		      target->per_id);
753 
754 	return retval;
755 }
756 
757 /**
758  * mpi3mr_get_fw_pending_ios - Calculate pending I/O count
759  * @sc: Adapter instance reference
760  *
761  * Calculate the pending I/Os for the controller and return.
762  *
763  * Return: Number of pending I/Os
764  */
765 static inline int mpi3mr_get_fw_pending_ios(struct mpi3mr_softc *sc)
766 {
767 	U16 i, pend_ios = 0;
768 
769 	for (i = 0; i < sc->num_queues; i++)
770 		pend_ios += mpi3mr_atomic_read(&sc->op_reply_q[i].pend_ios);
771 	return pend_ios;
772 }
773 
774 /**
775  * mpi3mr_wait_for_host_io - block for I/Os to complete
776  * @sc: Adapter instance reference
777  * @timeout: time out in seconds
778  *
779  * Waits for pending I/Os for the given adapter to complete or
780  * to hit the timeout.
781  *
782  * Return: Nothing
783  */
784 static int mpi3mr_wait_for_host_io(struct mpi3mr_softc *sc, U32 timeout)
785 {
786 	enum mpi3mr_iocstate iocstate;
787 
788 	iocstate = mpi3mr_get_iocstate(sc);
789 	if (iocstate != MRIOC_STATE_READY) {
790 		mpi3mr_dprint(sc, MPI3MR_XINFO, "%s :Controller is in NON-READY state! Proceed with Reset\n", __func__);
791 		return -1;
792 	}
793 
794 	if (!mpi3mr_get_fw_pending_ios(sc))
795 		return 0;
796 
797 	mpi3mr_dprint(sc, MPI3MR_INFO,
798 		      "%s :Waiting for %d seconds prior to reset for %d pending I/Os to complete\n",
799 		      __func__, timeout, mpi3mr_get_fw_pending_ios(sc));
800 
801 	int i;
802 	for (i = 0; i < timeout; i++) {
803 		if (!mpi3mr_get_fw_pending_ios(sc)) {
804 			mpi3mr_dprint(sc, MPI3MR_INFO, "%s :All pending I/Os got completed while waiting! Reset not required\n", __func__);
805 			return 0;
806 
807 		}
808 		iocstate = mpi3mr_get_iocstate(sc);
809 		if (iocstate != MRIOC_STATE_READY) {
810 			mpi3mr_dprint(sc, MPI3MR_XINFO, "%s :Controller state becomes NON-READY while waiting! dont wait further"
811 				      "Proceed with Reset\n", __func__);
812 			return -1;
813 		}
814 		DELAY(1000 * 1000);
815 	}
816 
817 	mpi3mr_dprint(sc, MPI3MR_INFO, "%s :Pending I/Os after wait exaust is %d! Proceed with Reset\n", __func__,
818 		      mpi3mr_get_fw_pending_ios(sc));
819 
820 	return -1;
821 }
822 
823 static void
824 mpi3mr_scsiio_timeout(void *data)
825 {
826 	int retval = 0;
827 	struct mpi3mr_softc *sc;
828 	struct mpi3mr_cmd *cmd;
829 	struct mpi3mr_target *targ_dev = NULL;
830 
831 	if (!data)
832 		return;
833 
834 	cmd = (struct mpi3mr_cmd *)data;
835 	sc = cmd->sc;
836 
837 	if (cmd->ccb == NULL) {
838 		mpi3mr_dprint(sc, MPI3MR_ERROR, "SCSIIO command timed-out with NULL ccb\n");
839 		return;
840 	}
841 
842 	/*
843 	 * TMs are not supported for IO timeouts on VD/LD, so directly issue controller reset
844 	 * with max timeout for outstanding IOs to complete is 180sec.
845 	 */
846 	targ_dev = cmd->targ;
847 	if (targ_dev && (targ_dev->dev_type == MPI3_DEVICE_DEVFORM_VD)) {
848 		if (mpi3mr_wait_for_host_io(sc, MPI3MR_RAID_ERRREC_RESET_TIMEOUT))
849 			trigger_reset_from_watchdog(sc, MPI3MR_TRIGGER_SOFT_RESET, MPI3MR_RESET_FROM_SCSIIO_TIMEOUT);
850 		return;
851  	}
852 
853 	/* Issue task abort to recover the timed out IO */
854 	retval = mpi3mr_task_abort(cmd);
855 	if (!retval || (retval == ETIMEDOUT))
856 		return;
857 
858 	/*
859 	 * task abort has failed to recover the timed out IO,
860 	 * try with the target reset
861 	 */
862 	retval = mpi3mr_target_reset(cmd);
863 	if (!retval || (retval == ETIMEDOUT))
864 		return;
865 
866 	/*
867 	 * task abort and target reset has failed. So issue Controller reset(soft reset)
868 	 * through OCR thread context
869 	 */
870 	trigger_reset_from_watchdog(sc, MPI3MR_TRIGGER_SOFT_RESET, MPI3MR_RESET_FROM_SCSIIO_TIMEOUT);
871 
872 	return;
873 }
874 
875 void int_to_lun(unsigned int lun, U8 *req_lun)
876 {
877 	int i;
878 
879 	memset(req_lun, 0, sizeof(*req_lun));
880 
881 	for (i = 0; i < sizeof(lun); i += 2) {
882 		req_lun[i] = (lun >> 8) & 0xFF;
883 		req_lun[i+1] = lun & 0xFF;
884 		lun = lun >> 16;
885 	}
886 
887 }
888 
889 static U16 get_req_queue_index(struct mpi3mr_softc *sc)
890 {
891 	U16 i = 0, reply_q_index = 0, reply_q_pend_ios = 0;
892 
893 	reply_q_pend_ios = mpi3mr_atomic_read(&sc->op_reply_q[0].pend_ios);
894 	for (i = 0; i < sc->num_queues; i++) {
895 		if (reply_q_pend_ios > mpi3mr_atomic_read(&sc->op_reply_q[i].pend_ios)) {
896 			reply_q_pend_ios = mpi3mr_atomic_read(&sc->op_reply_q[i].pend_ios);
897 			reply_q_index = i;
898 		}
899 	}
900 
901 	return reply_q_index;
902 }
903 
904 static void
905 mpi3mr_action_scsiio(struct mpi3mr_cam_softc *cam_sc, union ccb *ccb)
906 {
907 	Mpi3SCSIIORequest_t *req = NULL;
908 	struct ccb_scsiio *csio;
909 	struct mpi3mr_softc *sc;
910 	struct mpi3mr_target *targ;
911 	struct mpi3mr_cmd *cm;
912 	uint8_t scsi_opcode, queue_idx;
913 	uint32_t mpi_control;
914 	struct mpi3mr_op_req_queue *opreqq = NULL;
915 	U32 data_len_blks = 0;
916 	U32 tracked_io_sz = 0;
917 	U32 ioc_pend_data_len = 0, tg_pend_data_len = 0;
918 	struct mpi3mr_throttle_group_info *tg = NULL;
919 	static int ratelimit;
920 
921 	sc = cam_sc->sc;
922 	mtx_assert(&sc->mpi3mr_mtx, MA_OWNED);
923 
924 	if (sc->unrecoverable) {
925 		mpi3mr_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
926 		xpt_done(ccb);
927 		return;
928 	}
929 
930 	csio = &ccb->csio;
931 	KASSERT(csio->ccb_h.target_id < cam_sc->maxtargets,
932 	    ("Target %d out of bounds in XPT_SCSI_IO\n",
933 	     csio->ccb_h.target_id));
934 
935 	scsi_opcode = scsiio_cdb_ptr(csio)[0];
936 
937 	if ((sc->mpi3mr_flags & MPI3MR_FLAGS_SHUTDOWN) &&
938 	    !((scsi_opcode == SYNCHRONIZE_CACHE) ||
939 	      (scsi_opcode == START_STOP_UNIT))) {
940 		mpi3mr_set_ccbstatus(ccb, CAM_REQ_CMP);
941 		xpt_done(ccb);
942 		return;
943 	}
944 
945 	targ = mpi3mr_find_target_by_per_id(cam_sc, csio->ccb_h.target_id);
946 	if (targ == NULL)  {
947 		mpi3mr_dprint(sc, MPI3MR_XINFO, "Device with target ID: 0x%x does not exist\n",
948 			      csio->ccb_h.target_id);
949 		mpi3mr_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
950 		xpt_done(ccb);
951 		return;
952 	}
953 
954 	if (targ && targ->is_hidden)  {
955 		mpi3mr_dprint(sc, MPI3MR_XINFO, "Device with target ID: 0x%x is hidden\n",
956 			      csio->ccb_h.target_id);
957 		mpi3mr_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
958 		xpt_done(ccb);
959 		return;
960 	}
961 
962 	if (targ->dev_removed == 1)  {
963 		mpi3mr_dprint(sc, MPI3MR_XINFO, "Device with target ID: 0x%x is removed\n", csio->ccb_h.target_id);
964 		mpi3mr_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
965 		xpt_done(ccb);
966 		return;
967 	}
968 
969 	if (targ->dev_handle == 0x0) {
970 		mpi3mr_dprint(sc, MPI3MR_ERROR, "%s NULL handle for target 0x%x\n",
971 		    __func__, csio->ccb_h.target_id);
972 		mpi3mr_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
973 		xpt_done(ccb);
974 		return;
975 	}
976 
977 	if (mpi3mr_atomic_read(&targ->block_io) ||
978 		(sc->reset_in_progress == 1) || (sc->prepare_for_reset == 1)) {
979 		mpi3mr_dprint(sc, MPI3MR_TRACE, "%s target is busy target_id: 0x%x\n",
980 		    __func__, csio->ccb_h.target_id);
981 		mpi3mr_set_ccbstatus(ccb, CAM_REQUEUE_REQ);
982 		xpt_done(ccb);
983 		return;
984 	}
985 
986 	/*
987 	 * Sometimes, it is possible to get a command that is not "In
988 	 * Progress" and was actually aborted by the upper layer.  Check for
989 	 * this here and complete the command without error.
990 	 */
991 	if (mpi3mr_get_ccbstatus(ccb) != CAM_REQ_INPROG) {
992 		mpi3mr_dprint(sc, MPI3MR_TRACE, "%s Command is not in progress for "
993 		    "target %u\n", __func__, csio->ccb_h.target_id);
994 		xpt_done(ccb);
995 		return;
996 	}
997 	/*
998 	 * If devinfo is 0 this will be a volume.  In that case don't tell CAM
999 	 * that the volume has timed out.  We want volumes to be enumerated
1000 	 * until they are deleted/removed, not just failed.
1001 	 */
1002 	if (targ->flags & MPI3MRSAS_TARGET_INREMOVAL) {
1003 		if (targ->devinfo == 0)
1004 			mpi3mr_set_ccbstatus(ccb, CAM_REQ_CMP);
1005 		else
1006 			mpi3mr_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
1007 		xpt_done(ccb);
1008 		return;
1009 	}
1010 
1011 	if ((scsi_opcode == UNMAP) &&
1012 		(pci_get_device(sc->mpi3mr_dev) == MPI3_MFGPAGE_DEVID_SAS4116) &&
1013 		(targ->dev_type == MPI3_DEVICE_DEVFORM_PCIE) &&
1014 		(mpi3mr_allow_unmap_to_fw(sc, ccb) == false))
1015 		return;
1016 
1017 	cm = mpi3mr_get_command(sc);
1018 	if (cm == NULL || (sc->mpi3mr_flags & MPI3MR_FLAGS_DIAGRESET)) {
1019 		if (cm != NULL) {
1020 			mpi3mr_release_command(cm);
1021 		}
1022 		if ((cam_sc->flags & MPI3MRSAS_QUEUE_FROZEN) == 0) {
1023 			xpt_freeze_simq(cam_sc->sim, 1);
1024 			cam_sc->flags |= MPI3MRSAS_QUEUE_FROZEN;
1025 		}
1026 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1027 		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
1028 		xpt_done(ccb);
1029 		return;
1030 	}
1031 
1032 	switch (csio->ccb_h.flags & CAM_DIR_MASK) {
1033 	case CAM_DIR_IN:
1034 		mpi_control = MPI3_SCSIIO_FLAGS_DATADIRECTION_READ;
1035 		cm->data_dir = MPI3MR_READ;
1036 		break;
1037 	case CAM_DIR_OUT:
1038 		mpi_control = MPI3_SCSIIO_FLAGS_DATADIRECTION_WRITE;
1039 		cm->data_dir = MPI3MR_WRITE;
1040 		break;
1041 	case CAM_DIR_NONE:
1042 	default:
1043 		mpi_control = MPI3_SCSIIO_FLAGS_DATADIRECTION_NO_DATA_TRANSFER;
1044 		break;
1045 	}
1046 
1047 	if (csio->cdb_len > 16)
1048 		mpi_control |= MPI3_SCSIIO_FLAGS_CDB_GREATER_THAN_16;
1049 
1050 	req = (Mpi3SCSIIORequest_t *)&cm->io_request;
1051 	bzero(req, sizeof(*req));
1052 	req->Function = MPI3_FUNCTION_SCSI_IO;
1053 	req->HostTag = cm->hosttag;
1054 	req->DataLength = htole32(csio->dxfer_len);
1055 	req->DevHandle = htole16(targ->dev_handle);
1056 
1057 	/*
1058 	 * It looks like the hardware doesn't require an explicit tag
1059 	 * number for each transaction.  SAM Task Management not supported
1060 	 * at the moment.
1061 	 */
1062 	switch (csio->tag_action) {
1063 	case MSG_HEAD_OF_Q_TAG:
1064 		mpi_control |= MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_HEADOFQ;
1065 		break;
1066 	case MSG_ORDERED_Q_TAG:
1067 		mpi_control |= MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_ORDEREDQ;
1068 		break;
1069 	case MSG_ACA_TASK:
1070 		mpi_control |= MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_ACAQ;
1071 		break;
1072 	case CAM_TAG_ACTION_NONE:
1073 	case MSG_SIMPLE_Q_TAG:
1074 	default:
1075 		mpi_control |= MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_SIMPLEQ;
1076 		break;
1077 	}
1078 
1079 	req->Flags = htole32(mpi_control);
1080 
1081 	if (csio->ccb_h.flags & CAM_CDB_POINTER)
1082 		bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
1083 	else {
1084 		KASSERT(csio->cdb_len <= IOCDBLEN,
1085 		    ("cdb_len %d is greater than IOCDBLEN but CAM_CDB_POINTER "
1086 		    "is not set", csio->cdb_len));
1087 		bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
1088 	}
1089 
1090 	cm->length = csio->dxfer_len;
1091 	cm->targ = targ;
1092 	int_to_lun(csio->ccb_h.target_lun, req->LUN);
1093 	cm->ccb = ccb;
1094 	csio->ccb_h.qos.sim_data = sbinuptime();
1095 	queue_idx = get_req_queue_index(sc);
1096 	cm->req_qidx = queue_idx;
1097 
1098 	mpi3mr_dprint(sc, MPI3MR_TRACE, "[QID:%d]: func: %s line:%d CDB: 0x%x targetid: %x SMID: 0x%x\n",
1099 		(queue_idx + 1), __func__, __LINE__, scsi_opcode, csio->ccb_h.target_id, cm->hosttag);
1100 
1101 	ccb->ccb_h.status |= CAM_SIM_QUEUED;
1102 
1103 	switch ((ccb->ccb_h.flags & CAM_DATA_MASK)) {
1104 	case CAM_DATA_PADDR:
1105 	case CAM_DATA_SG_PADDR:
1106 		device_printf(sc->mpi3mr_dev, "%s: physical addresses not supported\n",
1107 		    __func__);
1108 		mpi3mr_release_command(cm);
1109 		ccb->ccb_h.status = CAM_REQ_INVALID;
1110 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1111 		xpt_done(ccb);
1112 		return;
1113 	case CAM_DATA_SG:
1114 		device_printf(sc->mpi3mr_dev, "%s: scatter gather is not supported\n",
1115 		    __func__);
1116 		mpi3mr_release_command(cm);
1117 		ccb->ccb_h.status = CAM_REQ_INVALID;
1118 		xpt_done(ccb);
1119 		return;
1120 	case CAM_DATA_VADDR:
1121 	case CAM_DATA_BIO:
1122 		if (csio->dxfer_len > (MPI3MR_SG_DEPTH * MPI3MR_4K_PGSZ)) {
1123 			mpi3mr_release_command(cm);
1124 			ccb->ccb_h.status = CAM_REQ_TOO_BIG;
1125 			xpt_done(ccb);
1126 			return;
1127 		}
1128 		cm->length = csio->dxfer_len;
1129 		if (cm->length)
1130 			cm->data = csio->data_ptr;
1131 		break;
1132 	default:
1133 		ccb->ccb_h.status = CAM_REQ_INVALID;
1134 		xpt_done(ccb);
1135 		return;
1136 	}
1137 
1138 	/* Prepare SGEs */
1139 	if (mpi3mr_map_request(sc, cm)) {
1140 		mpi3mr_release_command(cm);
1141 		xpt_done(ccb);
1142 		printf("func: %s line: %d Build SGLs failed\n", __func__, __LINE__);
1143 		return;
1144 	}
1145 
1146 	opreqq = &sc->op_req_q[queue_idx];
1147 
1148 	if (sc->iot_enable) {
1149 		data_len_blks = csio->dxfer_len >> 9;
1150 
1151 		if ((data_len_blks >= sc->io_throttle_data_length) &&
1152 		    targ->io_throttle_enabled) {
1153 			tracked_io_sz = data_len_blks;
1154 			tg = targ->throttle_group;
1155 			if (tg) {
1156 				mpi3mr_atomic_add(&sc->pend_large_data_sz, data_len_blks);
1157 				mpi3mr_atomic_add(&tg->pend_large_data_sz, data_len_blks);
1158 
1159 				ioc_pend_data_len = mpi3mr_atomic_read(&sc->pend_large_data_sz);
1160 				tg_pend_data_len = mpi3mr_atomic_read(&tg->pend_large_data_sz);
1161 
1162 				if (ratelimit % 1000) {
1163 					mpi3mr_dprint(sc, MPI3MR_IOT,
1164 						"large vd_io persist_id(%d), handle(0x%04x), data_len(%d),"
1165 						"ioc_pending(%d), tg_pending(%d), ioc_high(%d), tg_high(%d)\n",
1166 						targ->per_id, targ->dev_handle,
1167 						data_len_blks, ioc_pend_data_len,
1168 						tg_pend_data_len, sc->io_throttle_high,
1169 						tg->high);
1170 					ratelimit++;
1171 				}
1172 
1173 				if (!tg->io_divert  && ((ioc_pend_data_len >=
1174 				    sc->io_throttle_high) ||
1175 				    (tg_pend_data_len >= tg->high))) {
1176 					tg->io_divert = 1;
1177 					mpi3mr_dprint(sc, MPI3MR_IOT,
1178 						"VD: Setting divert flag for tg_id(%d), persist_id(%d)\n",
1179 						tg->id, targ->per_id);
1180 					if (sc->mpi3mr_debug | MPI3MR_IOT)
1181 						mpi3mr_print_cdb(ccb);
1182 					mpi3mr_set_io_divert_for_all_vd_in_tg(sc,
1183 					    tg, 1);
1184 				}
1185 			} else {
1186 				mpi3mr_atomic_add(&sc->pend_large_data_sz, data_len_blks);
1187 				ioc_pend_data_len = mpi3mr_atomic_read(&sc->pend_large_data_sz);
1188 				if (ratelimit % 1000) {
1189 					mpi3mr_dprint(sc, MPI3MR_IOT,
1190 					    "large pd_io persist_id(%d), handle(0x%04x), data_len(%d), ioc_pending(%d), ioc_high(%d)\n",
1191 					    targ->per_id, targ->dev_handle,
1192 					    data_len_blks, ioc_pend_data_len,
1193 					    sc->io_throttle_high);
1194 					ratelimit++;
1195 				}
1196 
1197 				if (ioc_pend_data_len >= sc->io_throttle_high) {
1198 					targ->io_divert = 1;
1199 					mpi3mr_dprint(sc, MPI3MR_IOT,
1200 						"PD: Setting divert flag for persist_id(%d)\n",
1201 						targ->per_id);
1202 					if (sc->mpi3mr_debug | MPI3MR_IOT)
1203 						mpi3mr_print_cdb(ccb);
1204 				}
1205 			}
1206 		}
1207 
1208 		if (targ->io_divert) {
1209 			req->MsgFlags |= MPI3_SCSIIO_MSGFLAGS_DIVERT_TO_FIRMWARE;
1210 			mpi_control |= MPI3_SCSIIO_FLAGS_DIVERT_REASON_IO_THROTTLING;
1211 		}
1212 	}
1213 	req->Flags = htole32(mpi_control);
1214 
1215 	if (mpi3mr_submit_io(sc, opreqq,
1216 	    	(U8 *)&cm->io_request)) {
1217 		mpi3mr_release_command(cm);
1218 		if (tracked_io_sz) {
1219 			mpi3mr_atomic_sub(&sc->pend_large_data_sz, tracked_io_sz);
1220 			if (tg)
1221 				mpi3mr_atomic_sub(&tg->pend_large_data_sz, tracked_io_sz);
1222 		}
1223 		mpi3mr_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
1224 		xpt_done(ccb);
1225 	} else {
1226 		callout_reset_sbt(&cm->callout, SBT_1S * 90 , 0,
1227 				  mpi3mr_scsiio_timeout, cm, 0);
1228 		mpi3mr_atomic_inc(&sc->fw_outstanding);
1229 		mpi3mr_atomic_inc(&targ->outstanding);
1230 		if (mpi3mr_atomic_read(&sc->fw_outstanding) > sc->io_cmds_highwater)
1231 			sc->io_cmds_highwater++;
1232 	}
1233 
1234 	cm->callout_owner = true;
1235 	return;
1236 }
1237 
1238 static void
1239 mpi3mr_cam_poll(struct cam_sim *sim)
1240 {
1241 	struct mpi3mr_cam_softc *cam_sc;
1242 	struct mpi3mr_irq_context *irq_ctx;
1243 	struct mpi3mr_softc *sc;
1244 	int i;
1245 
1246 	cam_sc = cam_sim_softc(sim);
1247 	sc = cam_sc->sc;
1248 
1249 	mpi3mr_dprint(cam_sc->sc, MPI3MR_TRACE, "func: %s line: %d is called\n",
1250 		__func__, __LINE__);
1251 
1252 	for (i = 0; i < sc->num_queues; i++) {
1253 		irq_ctx = sc->irq_ctx + i;
1254 		if (irq_ctx->op_reply_q->qid) {
1255 			mpi3mr_complete_io_cmd(sc, irq_ctx);
1256 		}
1257 	}
1258 }
1259 
/*
 * mpi3mr_cam_action - CAM SIM action entry point
 * @sim: CAM SIM registered for this adapter
 * @ccb: CCB describing the requested transport operation
 *
 * Dispatches on ccb_h.func_code. Most cases complete the CCB via the
 * shared xpt_done() at the bottom; XPT_RESET_DEV and XPT_SCSI_IO return
 * early instead (XPT_SCSI_IO completion is handled inside
 * mpi3mr_action_scsiio). Called with the adapter mutex held.
 */
static void
mpi3mr_cam_action(struct cam_sim *sim, union ccb *ccb)
{
	struct mpi3mr_cam_softc *cam_sc;
	struct mpi3mr_target *targ;

	cam_sc = cam_sim_softc(sim);

	mpi3mr_dprint(cam_sc->sc, MPI3MR_TRACE, "ccb func_code 0x%x target id: 0x%x\n",
	    ccb->ccb_h.func_code, ccb->ccb_h.target_id);

	/* All SIM actions run under the per-adapter mutex. */
	mtx_assert(&cam_sc->sc->mpi3mr_mtx, MA_OWNED);

	switch (ccb->ccb_h.func_code) {
	case XPT_PATH_INQ:
	{
		/* Report HBA capabilities and limits to CAM. */
		struct ccb_pathinq *cpi = &ccb->cpi;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
		cpi->target_sprt = 0;
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = cam_sc->maxtargets - 1;
		cpi->max_lun = 0;

		/*
		 * initiator_id is set here to an ID outside the set of valid
		 * target IDs (including volumes).
		 */
		cpi->initiator_id = cam_sc->maxtargets;
		strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strlcpy(cpi->hba_vid, "Broadcom", HBA_IDLEN);
		strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->bus_id = cam_sim_bus(sim);
		/*
		 * XXXSLM-I think this needs to change based on config page or
		 * something instead of hardcoded to 150000.
		 */
		cpi->base_transfer_speed = 150000;
		cpi->transport = XPORT_SAS;
		cpi->transport_version = 0;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_SPC;

		targ = mpi3mr_find_target_by_per_id(cam_sc, ccb->ccb_h.target_id);

		/*
		 * For NVMe devices behind the controller, cap maxio to the
		 * device's MDTS; otherwise use the SGL-depth-derived limit.
		 */
		if (targ && (targ->dev_type == MPI3_DEVICE_DEVFORM_PCIE) &&
		    ((targ->dev_spec.pcie_inf.dev_info &
		    MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) ==
		    MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE)) {
			cpi->maxio = targ->dev_spec.pcie_inf.mdts;
			mpi3mr_dprint(cam_sc->sc, MPI3MR_XINFO,
				"PCI device target_id: %u max io size: %u\n",
				ccb->ccb_h.target_id, cpi->maxio);
		} else {
			cpi->maxio = PAGE_SIZE * (MPI3MR_SG_DEPTH - 1);
		}
		mpi3mr_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	{
		/* Report per-target transport (SAS link rate) settings. */
		struct ccb_trans_settings	*cts;
		struct ccb_trans_settings_sas	*sas;
		struct ccb_trans_settings_scsi	*scsi;

		cts = &ccb->cts;
		sas = &cts->xport_specific.sas;
		scsi = &cts->proto_specific.scsi;

		KASSERT(cts->ccb_h.target_id < cam_sc->maxtargets,
		    ("Target %d out of bounds in XPT_GET_TRAN_SETTINGS\n",
		    cts->ccb_h.target_id));
		targ = mpi3mr_find_target_by_per_id(cam_sc, cts->ccb_h.target_id);

		if (targ == NULL) {
			mpi3mr_dprint(cam_sc->sc, MPI3MR_TRACE, "Device with target ID: 0x%x does not exist\n",
			cts->ccb_h.target_id);
			mpi3mr_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			break;
		}

		/* Target that is gone or being removed has no settings. */
		if ((targ->dev_handle == 0x0) || (targ->dev_removed == 1))  {
			mpi3mr_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			break;
		}

		cts->protocol_version = SCSI_REV_SPC2;
		cts->transport = XPORT_SAS;
		cts->transport_version = 0;

		sas->valid = CTS_SAS_VALID_SPEED;

		/* Map the firmware link-rate code to a bitrate in Kb/s. */
		switch (targ->link_rate) {
		case 0x08:
			sas->bitrate = 150000;
			break;
		case 0x09:
			sas->bitrate = 300000;
			break;
		case 0x0a:
			sas->bitrate = 600000;
			break;
		case 0x0b:
			sas->bitrate = 1200000;
			break;
		default:
			sas->valid = 0;
		}

		cts->protocol = PROTO_SCSI;
		scsi->valid = CTS_SCSI_VALID_TQ;
		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;

		mpi3mr_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_CALC_GEOMETRY:
		cam_calc_geometry(&ccb->ccg, /*extended*/1);
		mpi3mr_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_RESET_DEV:
		/*
		 * NOTE(review): returns without xpt_done() and without
		 * setting a CCB status — the CCB appears never to be
		 * completed here; confirm this is intentional.
		 */
		mpi3mr_dprint(cam_sc->sc, MPI3MR_INFO, "mpi3mr_action "
		    "XPT_RESET_DEV\n");
		return;
	case XPT_RESET_BUS:
	case XPT_ABORT:
	case XPT_TERM_IO:
		/* No real abort/reset support; report success to CAM. */
		mpi3mr_dprint(cam_sc->sc, MPI3MR_INFO, "mpi3mr_action faking success "
		    "for abort or reset\n");
		mpi3mr_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_SCSI_IO:
		/* mpi3mr_action_scsiio completes the CCB itself. */
		mpi3mr_action_scsiio(cam_sc, ccb);
		return;
	default:
		mpi3mr_set_ccbstatus(ccb, CAM_FUNC_NOTAVAIL);
		break;
	}
	xpt_done(ccb);
}
1403 
1404 void
1405 mpi3mr_startup_increment(struct mpi3mr_cam_softc *cam_sc)
1406 {
1407 	if ((cam_sc->flags & MPI3MRSAS_IN_STARTUP) != 0) {
1408 		if (cam_sc->startup_refcount++ == 0) {
1409 			/* just starting, freeze the simq */
1410 			mpi3mr_dprint(cam_sc->sc, MPI3MR_XINFO,
1411 			    "%s freezing simq\n", __func__);
1412 			xpt_hold_boot();
1413 		}
1414 		mpi3mr_dprint(cam_sc->sc, MPI3MR_XINFO, "%s refcount %u\n", __func__,
1415 		    cam_sc->startup_refcount);
1416 	}
1417 }
1418 
1419 void
1420 mpi3mr_release_simq_reinit(struct mpi3mr_cam_softc *cam_sc)
1421 {
1422 	if (cam_sc->flags & MPI3MRSAS_QUEUE_FROZEN) {
1423 		cam_sc->flags &= ~MPI3MRSAS_QUEUE_FROZEN;
1424 		xpt_release_simq(cam_sc->sim, 1);
1425 		mpi3mr_dprint(cam_sc->sc, MPI3MR_INFO, "Unfreezing SIM queue\n");
1426 	}
1427 }
1428 
1429 void
1430 mpi3mr_rescan_target(struct mpi3mr_softc *sc, struct mpi3mr_target *targ)
1431 {
1432 	struct mpi3mr_cam_softc *cam_sc = sc->cam_sc;
1433 	path_id_t pathid;
1434 	target_id_t targetid;
1435 	union ccb *ccb;
1436 
1437 	pathid = cam_sim_path(cam_sc->sim);
1438 	if (targ == NULL)
1439 		targetid = CAM_TARGET_WILDCARD;
1440 	else
1441 		targetid = targ->per_id;
1442 
1443 	/*
1444 	 * Allocate a CCB and schedule a rescan.
1445 	 */
1446 	ccb = xpt_alloc_ccb_nowait();
1447 	if (ccb == NULL) {
1448 		mpi3mr_dprint(sc, MPI3MR_ERROR, "unable to alloc CCB for rescan\n");
1449 		return;
1450 	}
1451 
1452 	if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid, targetid,
1453 	    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
1454 		mpi3mr_dprint(sc, MPI3MR_ERROR, "unable to create path for rescan\n");
1455 		xpt_free_ccb(ccb);
1456 		return;
1457 	}
1458 
1459 	if (targetid == CAM_TARGET_WILDCARD)
1460 		ccb->ccb_h.func_code = XPT_SCAN_BUS;
1461 	else
1462 		ccb->ccb_h.func_code = XPT_SCAN_TGT;
1463 
1464 	mpi3mr_dprint(sc, MPI3MR_EVENT, "%s target id 0x%x\n", __func__, targetid);
1465 	xpt_rescan(ccb);
1466 }
1467 
1468 void
1469 mpi3mr_startup_decrement(struct mpi3mr_cam_softc *cam_sc)
1470 {
1471 	if ((cam_sc->flags & MPI3MRSAS_IN_STARTUP) != 0) {
1472 		if (--cam_sc->startup_refcount == 0) {
1473 			/* finished all discovery-related actions, release
1474 			 * the simq and rescan for the latest topology.
1475 			 */
1476 			mpi3mr_dprint(cam_sc->sc, MPI3MR_XINFO,
1477 			    "%s releasing simq\n", __func__);
1478 			cam_sc->flags &= ~MPI3MRSAS_IN_STARTUP;
1479 			xpt_release_simq(cam_sc->sim, 1);
1480 			xpt_release_boot();
1481 		}
1482 		mpi3mr_dprint(cam_sc->sc, MPI3MR_XINFO, "%s refcount %u\n", __func__,
1483 		    cam_sc->startup_refcount);
1484 	}
1485 }
1486 
1487 static void
1488 mpi3mr_fw_event_free(struct mpi3mr_softc *sc, struct mpi3mr_fw_event_work *fw_event)
1489 {
1490 	if (!fw_event)
1491 		return;
1492 
1493 	if (fw_event->event_data != NULL) {
1494 		free(fw_event->event_data, M_MPI3MR);
1495 		fw_event->event_data = NULL;
1496 	}
1497 
1498 	free(fw_event, M_MPI3MR);
1499 	fw_event = NULL;
1500 }
1501 
1502 static void
1503 mpi3mr_freeup_events(struct mpi3mr_softc *sc)
1504 {
1505 	struct mpi3mr_fw_event_work *fw_event = NULL;
1506 	mtx_lock(&sc->mpi3mr_mtx);
1507 	while ((fw_event = TAILQ_FIRST(&sc->cam_sc->ev_queue)) != NULL) {
1508 		TAILQ_REMOVE(&sc->cam_sc->ev_queue, fw_event, ev_link);
1509 		mpi3mr_fw_event_free(sc, fw_event);
1510 	}
1511 	mtx_unlock(&sc->mpi3mr_mtx);
1512 }
1513 
1514 static void
1515 mpi3mr_sastopochg_evt_debug(struct mpi3mr_softc *sc,
1516 	Mpi3EventDataSasTopologyChangeList_t *event_data)
1517 {
1518 	int i;
1519 	U16 handle;
1520 	U8 reason_code, phy_number;
1521 	char *status_str = NULL;
1522 	U8 link_rate, prev_link_rate;
1523 
1524 	switch (event_data->ExpStatus) {
1525 	case MPI3_EVENT_SAS_TOPO_ES_NOT_RESPONDING:
1526 		status_str = "remove";
1527 		break;
1528 	case MPI3_EVENT_SAS_TOPO_ES_RESPONDING:
1529 		status_str =  "responding";
1530 		break;
1531 	case MPI3_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING:
1532 		status_str = "remove delay";
1533 		break;
1534 	case MPI3_EVENT_SAS_TOPO_ES_NO_EXPANDER:
1535 		status_str = "direct attached";
1536 		break;
1537 	default:
1538 		status_str = "unknown status";
1539 		break;
1540 	}
1541 
1542 	mpi3mr_dprint(sc, MPI3MR_INFO, "%s :sas topology change: (%s)\n",
1543 	    __func__, status_str);
1544 	mpi3mr_dprint(sc, MPI3MR_INFO,
1545 		"%s :\texpander_handle(0x%04x), enclosure_handle(0x%04x) "
1546 	    "start_phy(%02d), num_entries(%d)\n", __func__,
1547 	    (event_data->ExpanderDevHandle),
1548 	    (event_data->EnclosureHandle),
1549 	    event_data->StartPhyNum, event_data->NumEntries);
1550 	for (i = 0; i < event_data->NumEntries; i++) {
1551 		handle = (event_data->PhyEntry[i].AttachedDevHandle);
1552 		if (!handle)
1553 			continue;
1554 		phy_number = event_data->StartPhyNum + i;
1555 		reason_code = event_data->PhyEntry[i].Status &
1556 		    MPI3_EVENT_SAS_TOPO_PHY_RC_MASK;
1557 		switch (reason_code) {
1558 		case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING:
1559 			status_str = "target remove";
1560 			break;
1561 		case MPI3_EVENT_SAS_TOPO_PHY_RC_DELAY_NOT_RESPONDING:
1562 			status_str = "delay target remove";
1563 			break;
1564 		case MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED:
1565 			status_str = "link rate change";
1566 			break;
1567 		case MPI3_EVENT_SAS_TOPO_PHY_RC_NO_CHANGE:
1568 			status_str = "target responding";
1569 			break;
1570 		default:
1571 			status_str = "unknown";
1572 			break;
1573 		}
1574 		link_rate = event_data->PhyEntry[i].LinkRate >> 4;
1575 		prev_link_rate = event_data->PhyEntry[i].LinkRate & 0xF;
1576 		mpi3mr_dprint(sc, MPI3MR_INFO, "%s :\tphy(%02d), attached_handle(0x%04x): %s:"
1577 		    " link rate: new(0x%02x), old(0x%02x)\n", __func__,
1578 		    phy_number, handle, status_str, link_rate, prev_link_rate);
1579 	}
1580 }
1581 
1582 static void
1583 mpi3mr_process_sastopochg_evt(struct mpi3mr_softc *sc, struct mpi3mr_fw_event_work *fwevt)
1584 {
1585 
1586 	Mpi3EventDataSasTopologyChangeList_t *event_data =
1587 		    (Mpi3EventDataSasTopologyChangeList_t *)fwevt->event_data;
1588 	int i;
1589 	U16 handle;
1590 	U8 reason_code, link_rate;
1591 	struct mpi3mr_target *target = NULL;
1592 
1593 
1594 	mpi3mr_sastopochg_evt_debug(sc, event_data);
1595 
1596 	for (i = 0; i < event_data->NumEntries; i++) {
1597 		handle = le16toh(event_data->PhyEntry[i].AttachedDevHandle);
1598 		link_rate = event_data->PhyEntry[i].LinkRate >> 4;
1599 
1600 		if (!handle)
1601 			continue;
1602 		target = mpi3mr_find_target_by_dev_handle(sc->cam_sc, handle);
1603 
1604 		if (!target)
1605 			continue;
1606 
1607 		target->link_rate = link_rate;
1608 		reason_code = event_data->PhyEntry[i].Status &
1609 			MPI3_EVENT_SAS_TOPO_PHY_RC_MASK;
1610 
1611 		switch (reason_code) {
1612 		case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING:
1613 			if (target->exposed_to_os)
1614 				mpi3mr_remove_device_from_os(sc, target->dev_handle);
1615 			mpi3mr_remove_device_from_list(sc, target, false);
1616 			break;
1617 		case MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED:
1618 			break;
1619 		default:
1620 			break;
1621 		}
1622 	}
1623 
1624 	/*
1625 	 * refcount was incremented for this event in
1626 	 * mpi3mr_evt_handler. Decrement it here because the event has
1627 	 * been processed.
1628 	 */
1629 	mpi3mr_startup_decrement(sc->cam_sc);
1630 	return;
1631 }
1632 
/*
 * mpi3mr_logdata_evt_bh - Log Data event bottom-half
 * @sc: Adapter instance reference
 * @fwevt: Firmware event work item carrying the raw log-data payload
 *
 * Hands the log-data payload off to the application layer for caching.
 */
static inline void
mpi3mr_logdata_evt_bh(struct mpi3mr_softc *sc,
		      struct mpi3mr_fw_event_work *fwevt)
{
	mpi3mr_app_save_logdata(sc, fwevt->event_data,
				fwevt->event_data_size);
}
1640 
1641 static void
1642 mpi3mr_pcietopochg_evt_debug(struct mpi3mr_softc *sc,
1643 	Mpi3EventDataPcieTopologyChangeList_t *event_data)
1644 {
1645 	int i;
1646 	U16 handle;
1647 	U16 reason_code;
1648 	U8 port_number;
1649 	char *status_str = NULL;
1650 	U8 link_rate, prev_link_rate;
1651 
1652 	switch (event_data->SwitchStatus) {
1653 	case MPI3_EVENT_PCIE_TOPO_SS_NOT_RESPONDING:
1654 		status_str = "remove";
1655 		break;
1656 	case MPI3_EVENT_PCIE_TOPO_SS_RESPONDING:
1657 		status_str =  "responding";
1658 		break;
1659 	case MPI3_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING:
1660 		status_str = "remove delay";
1661 		break;
1662 	case MPI3_EVENT_PCIE_TOPO_SS_NO_PCIE_SWITCH:
1663 		status_str = "direct attached";
1664 		break;
1665 	default:
1666 		status_str = "unknown status";
1667 		break;
1668 	}
1669 	mpi3mr_dprint(sc, MPI3MR_INFO, "%s :pcie topology change: (%s)\n",
1670 		__func__, status_str);
1671 	mpi3mr_dprint(sc, MPI3MR_INFO,
1672 		"%s :\tswitch_handle(0x%04x), enclosure_handle(0x%04x)"
1673 		"start_port(%02d), num_entries(%d)\n", __func__,
1674 		le16toh(event_data->SwitchDevHandle),
1675 		le16toh(event_data->EnclosureHandle),
1676 		event_data->StartPortNum, event_data->NumEntries);
1677 	for (i = 0; i < event_data->NumEntries; i++) {
1678 		handle =
1679 			le16toh(event_data->PortEntry[i].AttachedDevHandle);
1680 		if (!handle)
1681 			continue;
1682 		port_number = event_data->StartPortNum + i;
1683 		reason_code = event_data->PortEntry[i].PortStatus;
1684 		switch (reason_code) {
1685 		case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
1686 			status_str = "target remove";
1687 			break;
1688 		case MPI3_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING:
1689 			status_str = "delay target remove";
1690 			break;
1691 		case MPI3_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
1692 			status_str = "link rate change";
1693 			break;
1694 		case MPI3_EVENT_PCIE_TOPO_PS_NO_CHANGE:
1695 			status_str = "target responding";
1696 			break;
1697 		default:
1698 			status_str = "unknown";
1699 			break;
1700 		}
1701 		link_rate = event_data->PortEntry[i].CurrentPortInfo &
1702 			MPI3_EVENT_PCIE_TOPO_PI_RATE_MASK;
1703 		prev_link_rate = event_data->PortEntry[i].PreviousPortInfo &
1704 			MPI3_EVENT_PCIE_TOPO_PI_RATE_MASK;
1705 		mpi3mr_dprint(sc, MPI3MR_INFO, "%s :\tport(%02d), attached_handle(0x%04x): %s:"
1706 		    " link rate: new(0x%02x), old(0x%02x)\n", __func__,
1707 		    port_number, handle, status_str, link_rate, prev_link_rate);
1708 	}
1709 }
1710 
1711 static void mpi3mr_process_pcietopochg_evt(struct mpi3mr_softc *sc,
1712     struct mpi3mr_fw_event_work *fwevt)
1713 {
1714 	Mpi3EventDataPcieTopologyChangeList_t *event_data =
1715 		    (Mpi3EventDataPcieTopologyChangeList_t *)fwevt->event_data;
1716 	int i;
1717 	U16 handle;
1718 	U8 reason_code, link_rate;
1719 	struct mpi3mr_target *target = NULL;
1720 
1721 
1722 	mpi3mr_pcietopochg_evt_debug(sc, event_data);
1723 
1724 	for (i = 0; i < event_data->NumEntries; i++) {
1725 		handle =
1726 			le16toh(event_data->PortEntry[i].AttachedDevHandle);
1727 		if (!handle)
1728 			continue;
1729 		target = mpi3mr_find_target_by_dev_handle(sc->cam_sc, handle);
1730 		if (!target)
1731 			continue;
1732 
1733 		link_rate = event_data->PortEntry[i].CurrentPortInfo &
1734 			MPI3_EVENT_PCIE_TOPO_PI_RATE_MASK;
1735 		target->link_rate = link_rate;
1736 
1737 		reason_code = event_data->PortEntry[i].PortStatus;
1738 
1739 		switch (reason_code) {
1740 		case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
1741 			if (target->exposed_to_os)
1742 				mpi3mr_remove_device_from_os(sc, target->dev_handle);
1743 			mpi3mr_remove_device_from_list(sc, target, false);
1744 			break;
1745 		case MPI3_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
1746 			break;
1747 		default:
1748 			break;
1749 		}
1750 	}
1751 
1752 	/*
1753 	 * refcount was incremented for this event in
1754 	 * mpi3mr_evt_handler. Decrement it here because the event has
1755 	 * been processed.
1756 	 */
1757 	mpi3mr_startup_decrement(sc->cam_sc);
1758 	return;
1759 }
1760 
1761 void mpi3mr_add_device(struct mpi3mr_softc *sc, U16 per_id)
1762 {
1763 	struct mpi3mr_target *target;
1764 
1765 	mpi3mr_dprint(sc, MPI3MR_EVENT,
1766 		"Adding device(persistent id: 0x%x)\n", per_id);
1767 
1768 	mpi3mr_startup_increment(sc->cam_sc);
1769 	target = mpi3mr_find_target_by_per_id(sc->cam_sc, per_id);
1770 
1771 	if (!target) {
1772 		mpi3mr_dprint(sc, MPI3MR_INFO, "Not available in driver's"
1773 		    "internal target list, persistent_id: %d\n",
1774 		    per_id);
1775 		goto out;
1776 	}
1777 
1778 	if (target->is_hidden) {
1779 		mpi3mr_dprint(sc, MPI3MR_EVENT, "Target is hidden, persistent_id: %d\n",
1780 			per_id);
1781 		goto out;
1782 	}
1783 
1784 	if (!target->exposed_to_os && !sc->reset_in_progress) {
1785 		mpi3mr_rescan_target(sc, target);
1786 		mpi3mr_dprint(sc, MPI3MR_INFO,
1787 			"Added device persistent_id: %d dev_handle: %d\n", per_id, target->dev_handle);
1788 		target->exposed_to_os = 1;
1789 	}
1790 
1791 out:
1792 	mpi3mr_startup_decrement(sc->cam_sc);
1793 }
1794 
1795 int mpi3mr_remove_device_from_os(struct mpi3mr_softc *sc, U16 handle)
1796 {
1797 	U32 i = 0;
1798 	int retval = 0;
1799 	struct mpi3mr_target *target;
1800 
1801 	mpi3mr_dprint(sc, MPI3MR_EVENT,
1802 		"Removing Device (dev_handle: %d)\n", handle);
1803 
1804 	target = mpi3mr_find_target_by_dev_handle(sc->cam_sc, handle);
1805 
1806 	if (!target) {
1807 		mpi3mr_dprint(sc, MPI3MR_INFO,
1808 			"Device (persistent_id: %d dev_handle: %d) is already removed from driver's list\n",
1809 			target->per_id, handle);
1810 		mpi3mr_rescan_target(sc, NULL);
1811 		retval = -1;
1812 		goto out;
1813 	}
1814 
1815 	target->flags |= MPI3MRSAS_TARGET_INREMOVAL;
1816 
1817 	while (mpi3mr_atomic_read(&target->outstanding) && (i < 30)) {
1818 		i++;
1819 		if (!(i % 2)) {
1820 			mpi3mr_dprint(sc, MPI3MR_INFO,
1821 			    "[%2d]waiting for "
1822 			    "waiting for outstanding commands to complete on target: %d\n",
1823 			    i, target->per_id);
1824 		}
1825 		DELAY(1000 * 1000);
1826 	}
1827 
1828 	if (target->exposed_to_os && !sc->reset_in_progress) {
1829 		mpi3mr_rescan_target(sc, target);
1830 		mpi3mr_dprint(sc, MPI3MR_INFO,
1831 			"Removed device(persistent_id: %d dev_handle: %d)\n", target->per_id, handle);
1832 		target->exposed_to_os = 0;
1833 	}
1834 
1835 	target->flags &= ~MPI3MRSAS_TARGET_INREMOVAL;
1836 out:
1837 	return retval;
1838 }
1839 
1840 void mpi3mr_remove_device_from_list(struct mpi3mr_softc *sc,
1841 	struct mpi3mr_target *target, bool must_delete)
1842 {
1843 	mtx_lock_spin(&sc->target_lock);
1844 	if ((target->state == MPI3MR_DEV_REMOVE_HS_STARTED) ||
1845 	    (must_delete == true)) {
1846 		TAILQ_REMOVE(&sc->cam_sc->tgt_list, target, tgt_next);
1847 		target->state = MPI3MR_DEV_DELETED;
1848 	}
1849 	mtx_unlock_spin(&sc->target_lock);
1850 
1851 	if (target->state == MPI3MR_DEV_DELETED) {
1852  		free(target, M_MPI3MR);
1853  		target = NULL;
1854  	}
1855 
1856 	return;
1857 }
1858 
1859 /**
1860  * mpi3mr_devstatuschg_evt_bh - DevStatusChange evt bottomhalf
1861  * @sc: Adapter instance reference
1862  * @fwevt: Firmware event
1863  *
1864  * Process Device Status Change event and based on device's new
1865  * information, either expose the device to the upper layers, or
1866  * remove the device from upper layers.
1867  *
1868  * Return: Nothing.
1869  */
1870 static void mpi3mr_devstatuschg_evt_bh(struct mpi3mr_softc *sc,
1871 	struct mpi3mr_fw_event_work *fwevt)
1872 {
1873 	U16 dev_handle = 0;
1874 	U8 uhide = 0, delete = 0, cleanup = 0;
1875 	struct mpi3mr_target *tgtdev = NULL;
1876 	Mpi3EventDataDeviceStatusChange_t *evtdata =
1877 	    (Mpi3EventDataDeviceStatusChange_t *)fwevt->event_data;
1878 
1879 
1880 
1881 	dev_handle = le16toh(evtdata->DevHandle);
1882 	mpi3mr_dprint(sc, MPI3MR_INFO,
1883 	    "%s :device status change: handle(0x%04x): reason code(0x%x)\n",
1884 	    __func__, dev_handle, evtdata->ReasonCode);
1885 	switch (evtdata->ReasonCode) {
1886 	case MPI3_EVENT_DEV_STAT_RC_HIDDEN:
1887 		delete = 1;
1888 		break;
1889 	case MPI3_EVENT_DEV_STAT_RC_NOT_HIDDEN:
1890 		uhide = 1;
1891 		break;
1892 	case MPI3_EVENT_DEV_STAT_RC_VD_NOT_RESPONDING:
1893 		delete = 1;
1894 		cleanup = 1;
1895 		break;
1896 	default:
1897 		mpi3mr_dprint(sc, MPI3MR_INFO, "%s :Unhandled reason code(0x%x)\n", __func__,
1898 		    evtdata->ReasonCode);
1899 		break;
1900 	}
1901 
1902 	tgtdev = mpi3mr_find_target_by_dev_handle(sc->cam_sc, dev_handle);
1903 	if (!tgtdev)
1904 		return;
1905 
1906 	if (uhide) {
1907 		if (!tgtdev->exposed_to_os)
1908 			mpi3mr_add_device(sc, tgtdev->per_id);
1909 	}
1910 
1911 	if (delete)
1912 		mpi3mr_remove_device_from_os(sc, dev_handle);
1913 
1914 	if (cleanup)
1915 		mpi3mr_remove_device_from_list(sc, tgtdev, false);
1916 }
1917 
1918 /**
1919  * mpi3mr_devinfochg_evt_bh - DeviceInfoChange evt bottomhalf
1920  * @sc: Adapter instance reference
1921  * @dev_pg0: New device page0
1922  *
1923  * Process Device Info Change event and based on device's new
1924  * information, either expose the device to the upper layers, or
1925  * remove the device from upper layers or update the details of
1926  * the device.
1927  *
1928  * Return: Nothing.
1929  */
1930 static void mpi3mr_devinfochg_evt_bh(struct mpi3mr_softc *sc,
1931 	Mpi3DevicePage0_t *dev_pg0)
1932 {
1933 	struct mpi3mr_target *tgtdev = NULL;
1934 	U16 dev_handle = 0, perst_id = 0;
1935 
1936 	perst_id = le16toh(dev_pg0->PersistentID);
1937 	dev_handle = le16toh(dev_pg0->DevHandle);
1938 	mpi3mr_dprint(sc, MPI3MR_INFO,
1939 	    "%s :Device info change: handle(0x%04x): persist_id(0x%x)\n",
1940 	    __func__, dev_handle, perst_id);
1941 	tgtdev = mpi3mr_find_target_by_dev_handle(sc->cam_sc, dev_handle);
1942 	if (!tgtdev)
1943 		return;
1944 
1945 	mpi3mr_update_device(sc, tgtdev, dev_pg0, false);
1946 	if (!tgtdev->is_hidden && !tgtdev->exposed_to_os)
1947 		mpi3mr_add_device(sc, perst_id);
1948 
1949 	if (tgtdev->is_hidden && tgtdev->exposed_to_os)
1950 		mpi3mr_remove_device_from_os(sc, tgtdev->dev_handle);
1951 }
1952 
1953 static void
1954 mpi3mr_fw_work(struct mpi3mr_softc *sc, struct mpi3mr_fw_event_work *fw_event)
1955 {
1956 	if (sc->mpi3mr_flags & MPI3MR_FLAGS_SHUTDOWN)
1957 		goto out;
1958 
1959 	if (!fw_event->process_event)
1960 		goto evt_ack;
1961 
1962 	mpi3mr_dprint(sc, MPI3MR_EVENT, "(%d)->(%s) Working on  Event: [%x]\n",
1963 	    event_count++, __func__, fw_event->event);
1964 
1965 	switch (fw_event->event) {
1966 	case MPI3_EVENT_DEVICE_ADDED:
1967 	{
1968 		Mpi3DevicePage0_t *dev_pg0 =
1969 			(Mpi3DevicePage0_t *) fw_event->event_data;
1970 		mpi3mr_add_device(sc, dev_pg0->PersistentID);
1971 		break;
1972 	}
1973 	case MPI3_EVENT_DEVICE_INFO_CHANGED:
1974 	{
1975 		mpi3mr_devinfochg_evt_bh(sc,
1976 		    (Mpi3DevicePage0_t *) fw_event->event_data);
1977 		break;
1978 	}
1979 	case MPI3_EVENT_DEVICE_STATUS_CHANGE:
1980 	{
1981 		mpi3mr_devstatuschg_evt_bh(sc, fw_event);
1982 		break;
1983 	}
1984 	case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
1985 	{
1986 		mpi3mr_process_sastopochg_evt(sc, fw_event);
1987 		break;
1988 	}
1989 	case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
1990 	{
1991 		mpi3mr_process_pcietopochg_evt(sc, fw_event);
1992 		break;
1993 	}
1994 	case MPI3_EVENT_LOG_DATA:
1995 	{
1996 		mpi3mr_logdata_evt_bh(sc, fw_event);
1997 		break;
1998 	}
1999 	default:
2000 		mpi3mr_dprint(sc, MPI3MR_TRACE,"Unhandled event 0x%0X\n",
2001 		    fw_event->event);
2002 		break;
2003 
2004 	}
2005 
2006 evt_ack:
2007 	if (fw_event->send_ack) {
2008 		mpi3mr_dprint(sc, MPI3MR_EVENT,"Process event ACK for event 0x%0X\n",
2009 		    fw_event->event);
2010 		mpi3mr_process_event_ack(sc, fw_event->event,
2011 		    fw_event->event_context);
2012 	}
2013 
2014 out:
2015 	mpi3mr_dprint(sc, MPI3MR_EVENT, "(%d)->(%s) Event Free: [%x]\n", event_count,
2016 	    __func__, fw_event->event);
2017 
2018 	mpi3mr_fw_event_free(sc, fw_event);
2019 }
2020 
2021 void
2022 mpi3mr_firmware_event_work(void *arg, int pending)
2023 {
2024 	struct mpi3mr_fw_event_work *fw_event;
2025 	struct mpi3mr_softc *sc;
2026 
2027 	sc = (struct mpi3mr_softc *)arg;
2028 
2029 	mtx_lock(&sc->fwevt_lock);
2030 	while ((fw_event = TAILQ_FIRST(&sc->cam_sc->ev_queue)) != NULL) {
2031 		TAILQ_REMOVE(&sc->cam_sc->ev_queue, fw_event, ev_link);
2032 		mtx_unlock(&sc->fwevt_lock);
2033 		mpi3mr_fw_work(sc, fw_event);
2034 		mtx_lock(&sc->fwevt_lock);
2035 	}
2036 	mtx_unlock(&sc->fwevt_lock);
2037 }
2038 
2039 
2040 /*
2041  * mpi3mr_cam_attach - CAM layer registration
2042  * @sc: Adapter reference
2043  *
2044  * This function does simq allocation, cam registration, xpt_bus registration,
2045  * event taskqueue initialization and async event handler registration.
2046  *
2047  * Return: 0 on success and proper error codes on failure
2048  */
2049 int
2050 mpi3mr_cam_attach(struct mpi3mr_softc *sc)
2051 {
2052 	struct mpi3mr_cam_softc *cam_sc;
2053 	cam_status status;
2054 	int unit, error = 0, reqs;
2055 
2056 	mpi3mr_dprint(sc, MPI3MR_XINFO, "Starting CAM Attach\n");
2057 
2058 	cam_sc = malloc(sizeof(struct mpi3mr_cam_softc), M_MPI3MR, M_WAITOK|M_ZERO);
2059 	if (!cam_sc) {
2060 		mpi3mr_dprint(sc, MPI3MR_ERROR,
2061 		    "Failed to allocate memory for controller CAM instance\n");
2062 		return (ENOMEM);
2063 	}
2064 
2065 	cam_sc->maxtargets = sc->facts.max_perids + 1;
2066 
2067 	TAILQ_INIT(&cam_sc->tgt_list);
2068 
2069 	sc->cam_sc = cam_sc;
2070 	cam_sc->sc = sc;
2071 
2072 	reqs = sc->max_host_ios;
2073 
2074 	if ((cam_sc->devq = cam_simq_alloc(reqs)) == NULL) {
2075 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to allocate SIMQ\n");
2076 		error = ENOMEM;
2077 		goto out;
2078 	}
2079 
2080 	unit = device_get_unit(sc->mpi3mr_dev);
2081 	cam_sc->sim = cam_sim_alloc(mpi3mr_cam_action, mpi3mr_cam_poll, "mpi3mr", cam_sc,
2082 	    unit, &sc->mpi3mr_mtx, reqs, reqs, cam_sc->devq);
2083 	if (cam_sc->sim == NULL) {
2084 		mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to allocate SIM\n");
2085 		error = EINVAL;
2086 		goto out;
2087 	}
2088 
2089 	TAILQ_INIT(&cam_sc->ev_queue);
2090 
2091 	/* Initialize taskqueue for Event Handling */
2092 	TASK_INIT(&cam_sc->ev_task, 0, mpi3mr_firmware_event_work, sc);
2093 	cam_sc->ev_tq = taskqueue_create("mpi3mr_taskq", M_NOWAIT | M_ZERO,
2094 	    taskqueue_thread_enqueue, &cam_sc->ev_tq);
2095 	taskqueue_start_threads(&cam_sc->ev_tq, 1, PRIBIO, "%s taskq",
2096 	    device_get_nameunit(sc->mpi3mr_dev));
2097 
2098 	mtx_lock(&sc->mpi3mr_mtx);
2099 
2100 	/*
2101 	 * XXX There should be a bus for every port on the adapter, but since
2102 	 * we're just going to fake the topology for now, we'll pretend that
2103 	 * everything is just a target on a single bus.
2104 	 */
2105 	if ((error = xpt_bus_register(cam_sc->sim, sc->mpi3mr_dev, 0)) != 0) {
2106 		mpi3mr_dprint(sc, MPI3MR_ERROR,
2107 		    "Error 0x%x registering SCSI bus\n", error);
2108 		mtx_unlock(&sc->mpi3mr_mtx);
2109 		goto out;
2110 	}
2111 
2112 	/*
2113 	 * Assume that discovery events will start right away.
2114 	 *
2115 	 * Hold off boot until discovery is complete.
2116 	 */
2117 	cam_sc->flags |= MPI3MRSAS_IN_STARTUP | MPI3MRSAS_IN_DISCOVERY;
2118 	sc->cam_sc->startup_refcount = 0;
2119 	mpi3mr_startup_increment(cam_sc);
2120 
2121 	callout_init(&cam_sc->discovery_callout, 1 /*mpsafe*/);
2122 
2123 	/*
2124 	 * Register for async events so we can determine the EEDP
2125 	 * capabilities of devices.
2126 	 */
2127 	status = xpt_create_path(&cam_sc->path, /*periph*/NULL,
2128 	    cam_sim_path(sc->cam_sc->sim), CAM_TARGET_WILDCARD,
2129 	    CAM_LUN_WILDCARD);
2130 	if (status != CAM_REQ_CMP) {
2131 		mpi3mr_dprint(sc, MPI3MR_ERROR,
2132 		    "Error 0x%x creating sim path\n", status);
2133 		cam_sc->path = NULL;
2134 	}
2135 
2136 	if (status != CAM_REQ_CMP) {
2137 		/*
2138 		 * EEDP use is the exception, not the rule.
2139 		 * Warn the user, but do not fail to attach.
2140 		 */
2141 		mpi3mr_dprint(sc, MPI3MR_INFO, "EEDP capabilities disabled.\n");
2142 	}
2143 
2144 	mtx_unlock(&sc->mpi3mr_mtx);
2145 
2146 	error = mpi3mr_register_events(sc);
2147 
2148 out:
2149 	mpi3mr_dprint(sc, MPI3MR_XINFO, "%s Exiting CAM attach, error: 0x%x n", __func__, error);
2150 	return (error);
2151 }
2152 
2153 int
2154 mpi3mr_cam_detach(struct mpi3mr_softc *sc)
2155 {
2156 	struct mpi3mr_cam_softc *cam_sc;
2157 	struct mpi3mr_target *target;
2158 
2159 	mpi3mr_dprint(sc, MPI3MR_XINFO, "%s, Starting CAM detach\n", __func__);
2160 	if (sc->cam_sc == NULL)
2161 		return (0);
2162 
2163 	cam_sc = sc->cam_sc;
2164 
2165 	mpi3mr_freeup_events(sc);
2166 
2167 	/*
2168 	 * Drain and free the event handling taskqueue with the lock
2169 	 * unheld so that any parallel processing tasks drain properly
2170 	 * without deadlocking.
2171 	 */
2172 	if (cam_sc->ev_tq != NULL)
2173 		taskqueue_free(cam_sc->ev_tq);
2174 
2175 	mtx_lock(&sc->mpi3mr_mtx);
2176 
2177 	while (cam_sc->startup_refcount != 0)
2178 		mpi3mr_startup_decrement(cam_sc);
2179 
2180 	/* Deregister our async handler */
2181 	if (cam_sc->path != NULL) {
2182 		xpt_free_path(cam_sc->path);
2183 		cam_sc->path = NULL;
2184 	}
2185 
2186 	if (cam_sc->flags & MPI3MRSAS_IN_STARTUP)
2187 		xpt_release_simq(cam_sc->sim, 1);
2188 
2189 	if (cam_sc->sim != NULL) {
2190 		xpt_bus_deregister(cam_sim_path(cam_sc->sim));
2191 		cam_sim_free(cam_sc->sim, FALSE);
2192 	}
2193 
2194 	mtx_unlock(&sc->mpi3mr_mtx);
2195 
2196 	if (cam_sc->devq != NULL)
2197 		cam_simq_free(cam_sc->devq);
2198 
2199 get_target:
2200 	mtx_lock_spin(&sc->target_lock);
2201  	TAILQ_FOREACH(target, &cam_sc->tgt_list, tgt_next) {
2202  		TAILQ_REMOVE(&sc->cam_sc->tgt_list, target, tgt_next);
2203 		mtx_unlock_spin(&sc->target_lock);
2204 		goto out_tgt_free;
2205 	}
2206 	mtx_unlock_spin(&sc->target_lock);
2207 out_tgt_free:
2208 	if (target) {
2209 		free(target, M_MPI3MR);
2210 		target = NULL;
2211 		goto get_target;
2212  	}
2213 
2214 	free(cam_sc, M_MPI3MR);
2215 	sc->cam_sc = NULL;
2216 
2217 	mpi3mr_dprint(sc, MPI3MR_XINFO, "%s, Exiting CAM detach\n", __func__);
2218 	return (0);
2219 }
2220