/*
 * Copyright (c) 2015, AVAGO Tech. All rights reserved. Author: Marian Choy
 * Copyright (c) 2014, LSI Corp. All rights reserved. Author: Marian Choy
 * Support: freebsdraid@avagotech.com
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer. 2. Redistributions
 * in binary form must reproduce the above copyright notice, this list of
 * conditions and the following disclaimer in the documentation and/or other
 * materials provided with the distribution. 3. Neither the name of the
 * <ORGANIZATION> nor the names of its contributors may be used to endorse or
 * promote products derived from this software without specific prior written
 * permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "dev/mrsas/mrsas.h"

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_debug.h>
#include <cam/cam_periph.h>
#include <cam/cam_xpt_periph.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>
#include <sys/taskqueue.h>
#include <sys/kernel.h>

#include <sys/time.h>			/* XXX for pcpu.h */
#include <sys/pcpu.h>			/* XXX for PCPU_GET */

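/*
 * Linux-style helper: on FreeBSD the current CPU id comes from
 * PCPU_GET(cpuid).  The IO-build paths below use it to spread
 * completion MSI-x vectors across CPUs.
 */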
#define	smp_processor_id()  PCPU_GET(cpuid)

/*
 * Function prototypes
 */
int	mrsas_cam_attach(struct mrsas_softc *sc);
int	mrsas_find_io_type(struct cam_sim *sim, union ccb *ccb);
int	mrsas_bus_scan(struct mrsas_softc *sc);
int	mrsas_bus_scan_sim(struct mrsas_softc *sc, struct cam_sim *sim);
int
mrsas_map_request(struct mrsas_softc *sc,
    struct mrsas_mpt_cmd *cmd, union ccb *ccb);
int
mrsas_build_ldio_rw(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
    union ccb *ccb);
int
mrsas_build_ldio_nonrw(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
    union ccb *ccb);
int
mrsas_build_syspdio(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
    union ccb *ccb, struct cam_sim *sim, u_int8_t fp_possible);
int
mrsas_setup_io(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
    union ccb *ccb, u_int32_t device_id,
    MRSAS_RAID_SCSI_IO_REQUEST * io_request);
void	mrsas_xpt_freeze(struct mrsas_softc *sc);
void	mrsas_xpt_release(struct mrsas_softc *sc);
void	mrsas_cam_detach(struct mrsas_softc *sc);
void	mrsas_release_mpt_cmd(struct mrsas_mpt_cmd *cmd);
void	mrsas_unmap_request(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd);
void	mrsas_cmd_done(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd);
void
mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo,
    u_int32_t req_desc_hi);
void
mrsas_set_pd_lba(MRSAS_RAID_SCSI_IO_REQUEST * io_request,
    u_int8_t cdb_len, struct IO_REQUEST_INFO *io_info, union ccb *ccb,
    MR_DRV_RAID_MAP_ALL * local_map_ptr, u_int32_t ref_tag,
    u_int32_t ld_block_size);
static void mrsas_freeze_simq(struct mrsas_mpt_cmd *cmd, struct cam_sim *sim);
static void mrsas_cam_poll(struct cam_sim *sim);
static void mrsas_action(struct cam_sim *sim, union ccb *ccb);
static void mrsas_scsiio_timeout(void *data);
static int mrsas_track_scsiio(struct mrsas_softc *sc, target_id_t id, u_int32_t bus_id);
static void mrsas_tm_response_code(struct mrsas_softc *sc,
    MPI2_SCSI_TASK_MANAGE_REPLY *mpi_reply);
static int mrsas_issue_tm(struct mrsas_softc *sc,
    MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc);
static void
mrsas_data_load_cb(void *arg, bus_dma_segment_t *segs,
    int nseg, int error);
static int32_t
mrsas_startio(struct mrsas_softc *sc, struct cam_sim *sim,
    union ccb *ccb);

static boolean_t mrsas_is_prp_possible(struct mrsas_mpt_cmd *cmd,
	bus_dma_segment_t *segs, int nsegs);
static void mrsas_build_ieee_sgl(struct mrsas_mpt_cmd *cmd,
	bus_dma_segment_t *segs, int nseg);
static void mrsas_build_prp_nvme(struct mrsas_mpt_cmd *cmd,
	bus_dma_segment_t *segs, int nseg);

struct mrsas_mpt_cmd *mrsas_get_mpt_cmd(struct mrsas_softc *sc);
MRSAS_REQUEST_DESCRIPTOR_UNION *
	mrsas_get_request_desc(struct mrsas_softc *sc, u_int16_t index);

extern int mrsas_reset_targets(struct mrsas_softc *sc);
extern u_int16_t MR_TargetIdToLdGet(u_int32_t ldTgtId, MR_DRV_RAID_MAP_ALL * map);
extern u_int32_t
MR_LdBlockSizeGet(u_int32_t ldTgtId, MR_DRV_RAID_MAP_ALL * map);
extern void mrsas_isr(void *arg);
extern void mrsas_aen_handler(struct mrsas_softc *sc);
extern u_int8_t
MR_BuildRaidContext(struct mrsas_softc *sc,
    struct IO_REQUEST_INFO *io_info, RAID_CONTEXT * pRAID_Context,
    MR_DRV_RAID_MAP_ALL * map);
extern u_int16_t
MR_LdSpanArrayGet(u_int32_t ld, u_int32_t span,
    MR_DRV_RAID_MAP_ALL * map);
extern u_int16_t
mrsas_get_updated_dev_handle(struct mrsas_softc *sc,
    PLD_LOAD_BALANCE_INFO lbInfo, struct IO_REQUEST_INFO *io_info);
extern int mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex);
extern MR_LD_RAID *MR_LdRaidGet(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map);
extern void mrsas_disable_intr(struct mrsas_softc *sc);
extern void mrsas_enable_intr(struct mrsas_softc *sc);
void mrsas_prepare_secondRaid1_IO(struct mrsas_softc *sc,
    struct mrsas_mpt_cmd *cmd);

/*
 * mrsas_cam_attach:	Main entry to CAM subsystem
 * input:				Adapter instance soft state
 *
 * This function is called from mrsas_attach() during initialization to perform
 * SIM allocations and XPT bus registration.  On FreeBSD 7.4 or earlier it
 * also initiates a bus scan.
 */
int
mrsas_cam_attach(struct mrsas_softc *sc)
{
	struct cam_devq *devq;
	int mrsas_cam_depth;

	mrsas_cam_depth = sc->max_scsi_cmds;

	if ((devq = cam_simq_alloc(mrsas_cam_depth)) == NULL) {
		device_printf(sc->mrsas_dev, "Cannot allocate SIM queue\n");
		return (ENOMEM);
	}
	/*
	 * Create SIM for bus 0 and register, also create path
	 */
	sc->sim_0 = cam_sim_alloc(mrsas_action, mrsas_cam_poll, "mrsas", sc,
	    device_get_unit(sc->mrsas_dev), &sc->sim_lock, mrsas_cam_depth,
	    mrsas_cam_depth, devq);
	if (sc->sim_0 == NULL) {
		cam_simq_free(devq);
		device_printf(sc->mrsas_dev, "Cannot register SIM\n");
		return (ENXIO);
	}
	/* Initialize taskqueue for Event Handling */
	TASK_INIT(&sc->ev_task, 0, (void *)mrsas_aen_handler, sc);
	sc->ev_tq = taskqueue_create("mrsas_taskq", M_NOWAIT | M_ZERO,
	    taskqueue_thread_enqueue, &sc->ev_tq);

	/* Run the task queue with lowest priority */
	taskqueue_start_threads(&sc->ev_tq, 1, 255, "%s taskq",
	    device_get_nameunit(sc->mrsas_dev));
	mtx_lock(&sc->sim_lock);
	if (xpt_bus_register(sc->sim_0, sc->mrsas_dev, 0) != CAM_SUCCESS) {
		cam_sim_free(sc->sim_0, TRUE);	/* passing true frees the devq */
		mtx_unlock(&sc->sim_lock);
		return (ENXIO);
	}
	if (xpt_create_path(&sc->path_0, NULL, cam_sim_path(sc->sim_0),
	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		xpt_bus_deregister(cam_sim_path(sc->sim_0));
		cam_sim_free(sc->sim_0, TRUE);	/* passing true will free the
						 * devq */
		mtx_unlock(&sc->sim_lock);
		return (ENXIO);
	}
	mtx_unlock(&sc->sim_lock);

	/*
	 * Create SIM for bus 1 and register, also create path
	 */
	sc->sim_1 = cam_sim_alloc(mrsas_action, mrsas_cam_poll, "mrsas", sc,
	    device_get_unit(sc->mrsas_dev), &sc->sim_lock, mrsas_cam_depth,
	    mrsas_cam_depth, devq);
	if (sc->sim_1 == NULL) {
		cam_simq_free(devq);
		device_printf(sc->mrsas_dev, "Cannot register SIM\n");
		return (ENXIO);
	}
	mtx_lock(&sc->sim_lock);
	if (xpt_bus_register(sc->sim_1, sc->mrsas_dev, 1) != CAM_SUCCESS) {
		cam_sim_free(sc->sim_1, TRUE);	/* passing true frees the devq */
		mtx_unlock(&sc->sim_lock);
		return (ENXIO);
	}
	if (xpt_create_path(&sc->path_1, NULL, cam_sim_path(sc->sim_1),
	    CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		xpt_bus_deregister(cam_sim_path(sc->sim_1));
		cam_sim_free(sc->sim_1, TRUE);
		mtx_unlock(&sc->sim_lock);
		return (ENXIO);
	}
	mtx_unlock(&sc->sim_lock);

#if (__FreeBSD_version <= 704000)
	if (mrsas_bus_scan(sc)) {
		device_printf(sc->mrsas_dev, "Error in bus scan.\n");
		return (1);
	}
#endif
	return (0);
}

/*
 * mrsas_cam_detach:	Deallocates and tears down CAM
 * input:				Adapter instance soft state
 *
 * De-registers and frees the paths and SIMs.
 */
void
mrsas_cam_detach(struct mrsas_softc *sc)
{
	if (sc->ev_tq != NULL)
		taskqueue_free(sc->ev_tq);
	mtx_lock(&sc->sim_lock);
	if (sc->path_0)
		xpt_free_path(sc->path_0);
	if (sc->sim_0) {
		xpt_bus_deregister(cam_sim_path(sc->sim_0));
		cam_sim_free(sc->sim_0, FALSE);
	}
	if (sc->path_1)
		xpt_free_path(sc->path_1);
	if (sc->sim_1) {
		xpt_bus_deregister(cam_sim_path(sc->sim_1));
		cam_sim_free(sc->sim_1, TRUE);
	}
	mtx_unlock(&sc->sim_lock);
}

/*
 * mrsas_action:	SIM callback entry point
 * input:			pointer to SIM, pointer to CAM Control Block
 *
 * This function processes CAM subsystem requests. The type of request is stored
 * in ccb->ccb_h.func_code.  The preprocessor #ifdef is necessary because
 * ccb->cpi.maxio is not supported on FreeBSD 7.4 or earlier.
 */
static void
mrsas_action(struct cam_sim *sim, union ccb *ccb)
{
	struct mrsas_softc *sc = (struct mrsas_softc *)cam_sim_softc(sim);
	struct ccb_hdr *ccb_h = &(ccb->ccb_h);
	u_int32_t device_id;

	/*
	 * Check if the system is going down or the adapter is in an
	 * unrecoverable critical error state.
	 */
	if (sc->remove_in_progress ||
	    (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)) {
		ccb->ccb_h.status |= CAM_DEV_NOT_THERE;
		xpt_done(ccb);
		return;
	}

	switch (ccb->ccb_h.func_code) {
	case XPT_SCSI_IO:
		{
			device_id = ccb_h->target_id;

			/*
			 * bus 0 is LD, bus 1 is for system-PD
			 */
			if (cam_sim_bus(sim) == 1 &&
			    sc->pd_list[device_id].driveState != MR_PD_STATE_SYSTEM) {
				ccb->ccb_h.status |= CAM_DEV_NOT_THERE;
				xpt_done(ccb);
			} else {
				if (mrsas_startio(sc, sim, ccb)) {
					ccb->ccb_h.status |= CAM_REQ_INVALID;
					xpt_done(ccb);
				}
			}
			break;
		}
	case XPT_ABORT:
		{
			ccb->ccb_h.status = CAM_UA_ABORT;
			xpt_done(ccb);
			break;
		}
	case XPT_RESET_BUS:
		{
			xpt_done(ccb);
			break;
		}
	case XPT_GET_TRAN_SETTINGS:
		{
			ccb->cts.protocol = PROTO_SCSI;
			ccb->cts.protocol_version = SCSI_REV_2;
			ccb->cts.transport = XPORT_SPI;
			ccb->cts.transport_version = 2;
			ccb->cts.xport_specific.spi.valid = CTS_SPI_VALID_DISC;
			ccb->cts.xport_specific.spi.flags = CTS_SPI_FLAGS_DISC_ENB;
			ccb->cts.proto_specific.scsi.valid = CTS_SCSI_VALID_TQ;
			ccb->cts.proto_specific.scsi.flags = CTS_SCSI_FLAGS_TAG_ENB;
			ccb->ccb_h.status = CAM_REQ_CMP;
			xpt_done(ccb);
			break;
		}
	case XPT_SET_TRAN_SETTINGS:
		{
			ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
			xpt_done(ccb);
			break;
		}
	case XPT_CALC_GEOMETRY:
		{
			cam_calc_geometry(&ccb->ccg, 1);
			xpt_done(ccb);
			break;
		}
	case XPT_PATH_INQ:
		{
			ccb->cpi.version_num = 1;
			ccb->cpi.hba_inquiry = 0;
			ccb->cpi.target_sprt = 0;
#if (__FreeBSD_version >= 902001)
			ccb->cpi.hba_misc = PIM_UNMAPPED;
#else
			ccb->cpi.hba_misc = 0;
#endif
			ccb->cpi.hba_eng_cnt = 0;
			ccb->cpi.max_lun = MRSAS_SCSI_MAX_LUNS;
			ccb->cpi.unit_number = cam_sim_unit(sim);
			ccb->cpi.bus_id = cam_sim_bus(sim);
			ccb->cpi.initiator_id = MRSAS_SCSI_INITIATOR_ID;
			ccb->cpi.base_transfer_speed = 150000;
			strlcpy(ccb->cpi.sim_vid, "FreeBSD", SIM_IDLEN);
			strlcpy(ccb->cpi.hba_vid, "AVAGO", HBA_IDLEN);
			strlcpy(ccb->cpi.dev_name, cam_sim_name(sim), DEV_IDLEN);
			ccb->cpi.transport = XPORT_SPI;
			ccb->cpi.transport_version = 2;
			ccb->cpi.protocol = PROTO_SCSI;
			ccb->cpi.protocol_version = SCSI_REV_2;
			if (ccb->cpi.bus_id == 0)
				ccb->cpi.max_target = MRSAS_MAX_PD - 1;
			else
				ccb->cpi.max_target = MRSAS_MAX_LD_IDS - 1;
#if (__FreeBSD_version > 704000)
			ccb->cpi.maxio = sc->max_sectors_per_req * 512;
#endif
			ccb->ccb_h.status = CAM_REQ_CMP;
			xpt_done(ccb);
			break;
		}
	default:
		{
			ccb->ccb_h.status = CAM_REQ_INVALID;
			xpt_done(ccb);
			break;
		}
	}
}

/*
 * mrsas_scsiio_timeout:	Callback function for timed-out IO
 * input:					mpt command context
 *
 * This function executes after the timeout value provided by the ccb header
 * from the CAM layer expires.  The driver runs this timer for all DCDB and
 * LDIO commands coming from the CAM layer.  It is the callback for IO timeout
 * and runs in no-sleep context.  It sets do_timedout_reset in the adapter
 * context so that OCR/kill-adapter is executed from the ocr_thread context.
 */
static void
mrsas_scsiio_timeout(void *data)
{
	struct mrsas_mpt_cmd *cmd;
	struct mrsas_softc *sc;
	u_int32_t target_id;

	if (!data)
		return;

	cmd = (struct mrsas_mpt_cmd *)data;
	sc = cmd->sc;

	if (cmd->ccb_ptr == NULL) {
		printf("command timeout with NULL ccb\n");
		return;
	}

	/*
	 * The callout below is a dummy entry so that it can be cancelled
	 * from mrsas_cmd_done().  The controller will then go to OCR/kill
	 * adapter, based on the controller's OCR enable/disable property,
	 * from the ocr_thread context.
	 */
#if (__FreeBSD_version >= 1000510)
	callout_reset_sbt(&cmd->cm_callout, SBT_1S * 180, 0,
	    mrsas_scsiio_timeout, cmd, 0);
#else
	callout_reset(&cmd->cm_callout, (180000 * hz) / 1000,
	    mrsas_scsiio_timeout, cmd);
#endif

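	/*
	 * Layout note (inferred from the indexing below): bus 0 target
	 * ids index target_reset_pool directly, while bus 1 (system PD)
	 * target ids are placed past the bus 0 range by adding
	 * MRSAS_MAX_PD - 1.
	 */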
	if (cmd->ccb_ptr->cpi.bus_id == 0)
		target_id = cmd->ccb_ptr->ccb_h.target_id;
	else
		target_id = (cmd->ccb_ptr->ccb_h.target_id + (MRSAS_MAX_PD - 1));

	/* Save the cmd to be processed for TM, if it is not there in the array */
	if (sc->target_reset_pool[target_id] == NULL) {
		sc->target_reset_pool[target_id] = cmd;
		mrsas_atomic_inc(&sc->target_reset_outstanding);
	}

	return;
}

/*
 * mrsas_startio:	SCSI IO entry point
 * input:			Adapter instance soft state
 * 					pointer to CAM Control Block
 *
 * This function is the SCSI IO entry point and it initiates IO processing. It
 * copies the IO and, depending on whether the IO is read/write or not and on
 * its target, calls mrsas_build_ldio_rw(), mrsas_build_ldio_nonrw(), or
 * mrsas_build_syspdio().  It returns 0 if the command is sent to firmware
 * successfully, otherwise it returns 1.
 */
static int32_t
mrsas_startio(struct mrsas_softc *sc, struct cam_sim *sim,
    union ccb *ccb)
{
	struct mrsas_mpt_cmd *cmd, *r1_cmd = NULL;
	struct ccb_hdr *ccb_h = &(ccb->ccb_h);
	struct ccb_scsiio *csio = &(ccb->csio);
	MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
	u_int8_t cmd_type;

	if ((csio->cdb_io.cdb_bytes[0]) == SYNCHRONIZE_CACHE &&
		(!sc->fw_sync_cache_support)) {
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		return (0);
	}
	ccb_h->status |= CAM_SIM_QUEUED;

	if (mrsas_atomic_inc_return(&sc->fw_outstanding) > sc->max_scsi_cmds) {
		ccb_h->status |= CAM_REQUEUE_REQ;
		xpt_done(ccb);
		mrsas_atomic_dec(&sc->fw_outstanding);
		return (0);
	}

	cmd = mrsas_get_mpt_cmd(sc);

	if (!cmd) {
		ccb_h->status |= CAM_REQUEUE_REQ;
		xpt_done(ccb);
		mrsas_atomic_dec(&sc->fw_outstanding);
		return (0);
	}

	if ((ccb_h->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
		if (ccb_h->flags & CAM_DIR_IN)
			cmd->flags |= MRSAS_DIR_IN;
		if (ccb_h->flags & CAM_DIR_OUT)
			cmd->flags |= MRSAS_DIR_OUT;
	} else
		cmd->flags = MRSAS_DIR_NONE;	/* no data */

/* For FreeBSD 9.2 and higher */
#if (__FreeBSD_version >= 902001)
	/*
	 * XXX We don't yet support physical addresses here.
	 */
	switch ((ccb->ccb_h.flags & CAM_DATA_MASK)) {
	case CAM_DATA_PADDR:
	case CAM_DATA_SG_PADDR:
		device_printf(sc->mrsas_dev, "%s: physical addresses not supported\n",
		    __func__);
		mrsas_release_mpt_cmd(cmd);
		ccb_h->status = CAM_REQ_INVALID;
		ccb_h->status &= ~CAM_SIM_QUEUED;
		goto done;
	case CAM_DATA_SG:
		device_printf(sc->mrsas_dev, "%s: scatter gather is not supported\n",
		    __func__);
		mrsas_release_mpt_cmd(cmd);
		ccb_h->status = CAM_REQ_INVALID;
		goto done;
	case CAM_DATA_VADDR:
		cmd->length = csio->dxfer_len;
		if (cmd->length)
			cmd->data = csio->data_ptr;
		break;
	case CAM_DATA_BIO:
		cmd->length = csio->dxfer_len;
		if (cmd->length)
			cmd->data = csio->data_ptr;
		break;
	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		goto done;
	}
#else
	if (!(ccb_h->flags & CAM_DATA_PHYS)) {	/* Virtual data address */
		if (!(ccb_h->flags & CAM_SCATTER_VALID)) {
			cmd->length = csio->dxfer_len;
			if (cmd->length)
				cmd->data = csio->data_ptr;
		} else {
			mrsas_release_mpt_cmd(cmd);
			ccb_h->status = CAM_REQ_INVALID;
			goto done;
		}
	} else {			/* Data addresses are physical. */
		mrsas_release_mpt_cmd(cmd);
		ccb_h->status = CAM_REQ_INVALID;
		ccb_h->status &= ~CAM_SIM_QUEUED;
		goto done;
	}
#endif
	/* save ccb ptr */
	cmd->ccb_ptr = ccb;

	req_desc = mrsas_get_request_desc(sc, (cmd->index) - 1);
	if (!req_desc) {
		device_printf(sc->mrsas_dev, "Cannot get request_descriptor.\n");
		return (FAIL);
	}
	memset(req_desc, 0, sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION));
	cmd->request_desc = req_desc;

	if (ccb_h->flags & CAM_CDB_POINTER)
		bcopy(csio->cdb_io.cdb_ptr, cmd->io_request->CDB.CDB32, csio->cdb_len);
	else
		bcopy(csio->cdb_io.cdb_bytes, cmd->io_request->CDB.CDB32, csio->cdb_len);
	mtx_lock(&sc->raidmap_lock);

	/* Check for IO type READ-WRITE targeted for Logical Volume */
	cmd_type = mrsas_find_io_type(sim, ccb);
	switch (cmd_type) {
	case READ_WRITE_LDIO:
		/* Build READ-WRITE IO for Logical Volume  */
		if (mrsas_build_ldio_rw(sc, cmd, ccb)) {
			device_printf(sc->mrsas_dev, "Build RW LDIO failed.\n");
			mtx_unlock(&sc->raidmap_lock);
			mrsas_release_mpt_cmd(cmd);
			return (1);
		}
		break;
	case NON_READ_WRITE_LDIO:
		/* Build NON READ-WRITE IO for Logical Volume  */
		if (mrsas_build_ldio_nonrw(sc, cmd, ccb)) {
			device_printf(sc->mrsas_dev, "Build NON-RW LDIO failed.\n");
			mtx_unlock(&sc->raidmap_lock);
			mrsas_release_mpt_cmd(cmd);
			return (1);
		}
		break;
	case READ_WRITE_SYSPDIO:
	case NON_READ_WRITE_SYSPDIO:
		if (sc->secure_jbod_support &&
		    (cmd_type == NON_READ_WRITE_SYSPDIO)) {
			/* Build NON-RW IO for JBOD */
			if (mrsas_build_syspdio(sc, cmd, ccb, sim, 0)) {
				device_printf(sc->mrsas_dev,
				    "Build SYSPDIO failed.\n");
				mtx_unlock(&sc->raidmap_lock);
				mrsas_release_mpt_cmd(cmd);
				return (1);
			}
		} else {
			/* Build RW IO for JBOD */
			if (mrsas_build_syspdio(sc, cmd, ccb, sim, 1)) {
				device_printf(sc->mrsas_dev,
				    "Build SYSPDIO failed.\n");
				mtx_unlock(&sc->raidmap_lock);
				mrsas_release_mpt_cmd(cmd);
				return (1);
			}
		}
	}
	mtx_unlock(&sc->raidmap_lock);

	if (cmd->flags == MRSAS_DIR_IN)	/* from device */
		cmd->io_request->Control |= htole32(MPI2_SCSIIO_CONTROL_READ);
	else if (cmd->flags == MRSAS_DIR_OUT)	/* to device */
		cmd->io_request->Control |= htole32(MPI2_SCSIIO_CONTROL_WRITE);

	cmd->io_request->SGLFlags = htole16(MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
	cmd->io_request->SGLOffset0 = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 4;
	cmd->io_request->SenseBufferLowAddress = htole32(cmd->sense_phys_addr & 0xFFFFFFFF);
	cmd->io_request->SenseBufferLength = MRSAS_SCSI_SENSE_BUFFERSIZE;

	req_desc = cmd->request_desc;
	req_desc->SCSIIO.SMID = htole16(cmd->index);

	/*
	 * Start timer for IO timeout. Default timeout value is 180 seconds.
	 */
	cmd->callout_owner = true;
#if (__FreeBSD_version >= 1000510)
	callout_reset_sbt(&cmd->cm_callout, SBT_1S * 180, 0,
	    mrsas_scsiio_timeout, cmd, 0);
#else
	callout_reset(&cmd->cm_callout, (180000 * hz) / 1000,
	    mrsas_scsiio_timeout, cmd);
#endif

	if (mrsas_atomic_read(&sc->fw_outstanding) > sc->io_cmds_highwater)
		sc->io_cmds_highwater++;

	/*
	 * If the IO is a RAID 1/10 fast-path-write-capable command, try to
	 * get a second command from the pool and construct it.  FW has
	 * confirmed that the LBA values of the two PDs corresponding to a
	 * single R1/10 LD are always the same.
	 */
	/*
	 * The driver-side count should always be less than max_fw_cmds to
	 * get a new command.
	 */
	if (cmd->r1_alt_dev_handle != MR_DEVHANDLE_INVALID) {
		mrsas_prepare_secondRaid1_IO(sc, cmd);
		mrsas_fire_cmd(sc, req_desc->addr.u.low,
			req_desc->addr.u.high);
		r1_cmd = cmd->peer_cmd;
		mrsas_fire_cmd(sc, r1_cmd->request_desc->addr.u.low,
				r1_cmd->request_desc->addr.u.high);
	} else {
		mrsas_fire_cmd(sc, req_desc->addr.u.low,
			req_desc->addr.u.high);
	}

	return (0);

done:
	xpt_done(ccb);
	mrsas_atomic_dec(&sc->fw_outstanding);
	return (0);
}

/*
 * mrsas_find_io_type:	Determines if IO is read/write or not
 * input:			pointer to CAM Control Block
 *
 * This function determines whether the IO is read/write or not.  It returns
 * READ_WRITE_LDIO or READ_WRITE_SYSPDIO for read/write commands, and
 * NON_READ_WRITE_LDIO or NON_READ_WRITE_SYSPDIO otherwise, depending on
 * whether the target is an LD (bus 0) or a system PD (bus 1).
 */
int
mrsas_find_io_type(struct cam_sim *sim, union ccb *ccb)
{
	struct ccb_scsiio *csio = &(ccb->csio);

	switch (csio->cdb_io.cdb_bytes[0]) {
	case READ_10:
	case WRITE_10:
	case READ_12:
	case WRITE_12:
	case READ_6:
	case WRITE_6:
	case READ_16:
	case WRITE_16:
		return (cam_sim_bus(sim) ?
		    READ_WRITE_SYSPDIO : READ_WRITE_LDIO);
	default:
		return (cam_sim_bus(sim) ?
		    NON_READ_WRITE_SYSPDIO : NON_READ_WRITE_LDIO);
	}
}

/*
 * mrsas_get_mpt_cmd:	Get a cmd from free command pool
 * input:				Adapter instance soft state
 *
 * This function removes an MPT command from the command free list and
 * initializes it.
 */
struct mrsas_mpt_cmd *
mrsas_get_mpt_cmd(struct mrsas_softc *sc)
{
	struct mrsas_mpt_cmd *cmd = NULL;

	mtx_lock(&sc->mpt_cmd_pool_lock);
	if (!TAILQ_EMPTY(&sc->mrsas_mpt_cmd_list_head)) {
		cmd = TAILQ_FIRST(&sc->mrsas_mpt_cmd_list_head);
		TAILQ_REMOVE(&sc->mrsas_mpt_cmd_list_head, cmd, next);
	} else {
		goto out;
	}

	memset((uint8_t *)cmd->io_request, 0, MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE);
	cmd->data = NULL;
	cmd->length = 0;
	cmd->flags = 0;
	cmd->error_code = 0;
	cmd->load_balance = 0;
	cmd->ccb_ptr = NULL;
out:
	mtx_unlock(&sc->mpt_cmd_pool_lock);
	return cmd;
}

/*
 * mrsas_release_mpt_cmd:	Return a cmd to free command pool
 * input:					Command packet for return to free command pool
 *
 * This function returns an MPT command to the free command list.
 */
void
mrsas_release_mpt_cmd(struct mrsas_mpt_cmd *cmd)
{
	struct mrsas_softc *sc = cmd->sc;

	mtx_lock(&sc->mpt_cmd_pool_lock);
	cmd->r1_alt_dev_handle = MR_DEVHANDLE_INVALID;
	cmd->sync_cmd_idx = (u_int32_t)MRSAS_ULONG_MAX;
	cmd->peer_cmd = NULL;
	cmd->cmd_completed = 0;
	memset((uint8_t *)cmd->io_request, 0,
		sizeof(MRSAS_RAID_SCSI_IO_REQUEST));
	TAILQ_INSERT_HEAD(&(sc->mrsas_mpt_cmd_list_head), cmd, next);
	mtx_unlock(&sc->mpt_cmd_pool_lock);

	return;
}

/*
 * mrsas_get_request_desc:	Get request descriptor from array
 * input:					Adapter instance soft state
 * 							SMID index
 *
 * This function returns a pointer to the request descriptor.
 */
MRSAS_REQUEST_DESCRIPTOR_UNION *
mrsas_get_request_desc(struct mrsas_softc *sc, u_int16_t index)
{
	u_int8_t *p;

	KASSERT(index < sc->max_fw_cmds, ("req_desc is out of range"));
	p = sc->req_desc + sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION) * index;

	return (MRSAS_REQUEST_DESCRIPTOR_UNION *) p;
}
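
/*
 * The request descriptors live in one contiguous array (sc->req_desc),
 * addressed by SMID - 1; callers such as mrsas_startio() therefore pass
 * (cmd->index - 1) as the index.
 */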

/*
 * mrsas_prepare_secondRaid1_IO:	Prepares the second IO of a RAID 1
 * fast-path write pair
 */
void
mrsas_prepare_secondRaid1_IO(struct mrsas_softc *sc,
    struct mrsas_mpt_cmd *cmd)
{
	MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc, *req_desc2 = NULL;
	struct mrsas_mpt_cmd *r1_cmd;

	r1_cmd = cmd->peer_cmd;
	req_desc = cmd->request_desc;

	/*
	 * copy the io request frame as well as 8 SGEs data for r1
	 * command
	 */
	memcpy(r1_cmd->io_request, cmd->io_request,
	    (sizeof(MRSAS_RAID_SCSI_IO_REQUEST)));
	memcpy(&r1_cmd->io_request->SGL, &cmd->io_request->SGL,
	    (sc->max_sge_in_main_msg * sizeof(MPI2_SGE_IO_UNION)));

	/* sense buffer is different for r1 command */
	r1_cmd->io_request->SenseBufferLowAddress = htole32(r1_cmd->sense_phys_addr & 0xFFFFFFFF);
	r1_cmd->ccb_ptr = cmd->ccb_ptr;

	req_desc2 = mrsas_get_request_desc(sc, r1_cmd->index - 1);
	req_desc2->addr.Words = 0;
	r1_cmd->request_desc = req_desc2;
	req_desc2->SCSIIO.SMID = r1_cmd->index;
	req_desc2->SCSIIO.RequestFlags = req_desc->SCSIIO.RequestFlags;
	r1_cmd->request_desc->SCSIIO.DevHandle = cmd->r1_alt_dev_handle;
	r1_cmd->r1_alt_dev_handle = cmd->io_request->DevHandle;
	r1_cmd->io_request->DevHandle = cmd->r1_alt_dev_handle;
	cmd->io_request->RaidContext.raid_context_g35.smid.peerSMID =
	    r1_cmd->index;
	r1_cmd->io_request->RaidContext.raid_context_g35.smid.peerSMID =
		cmd->index;
	/*
	 * MSIxIndex of both commands request descriptors
	 * should be same
	 */
	r1_cmd->request_desc->SCSIIO.MSIxIndex = cmd->request_desc->SCSIIO.MSIxIndex;
	/* span arm is different for r1 cmd */
	r1_cmd->io_request->RaidContext.raid_context_g35.spanArm =
	    cmd->io_request->RaidContext.raid_context_g35.spanArm + 1;

}

/*
 * mrsas_build_ldio_rw:	Builds an LDIO command
 * input:				Adapter instance soft state
 * 						Pointer to command packet
 * 						Pointer to CCB
 *
 * This function builds the LDIO command packet.  It returns 0 if the command is
 * built successfully, otherwise it returns a 1.
 */
int
mrsas_build_ldio_rw(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
    union ccb *ccb)
{
	struct ccb_hdr *ccb_h = &(ccb->ccb_h);
	struct ccb_scsiio *csio = &(ccb->csio);
	u_int32_t device_id;
	MRSAS_RAID_SCSI_IO_REQUEST *io_request;

	device_id = ccb_h->target_id;

	io_request = cmd->io_request;
	io_request->RaidContext.raid_context.VirtualDiskTgtId = htole16(device_id);
	io_request->RaidContext.raid_context.status = 0;
	io_request->RaidContext.raid_context.exStatus = 0;

	/* just the cdb len, other flags zero, and ORed-in later for FP */
	io_request->IoFlags = htole16(csio->cdb_len);

	if (mrsas_setup_io(sc, cmd, ccb, device_id, io_request) != SUCCESS)
		device_printf(sc->mrsas_dev, "Build ldio or fpio error\n");

	io_request->DataLength = htole32(cmd->length);

	if (mrsas_map_request(sc, cmd, ccb) == SUCCESS) {
		if (sc->is_ventura || sc->is_aero)
			io_request->RaidContext.raid_context_g35.numSGE = cmd->sge_count;
		else {
			/*
			 * numSGE stores the lower 8 bits of sge_count and
			 * numSGEExt stores the higher 8 bits.
			 */
			io_request->RaidContext.raid_context.numSGE = cmd->sge_count;
			io_request->RaidContext.raid_context.numSGEExt = (uint8_t)(cmd->sge_count >> 8);
		}

	} else {
		device_printf(sc->mrsas_dev, "Data map/load failed.\n");
		return (FAIL);
	}
	return (0);
}

/* stream detection on read and write IOs */
static void
mrsas_stream_detect(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
    struct IO_REQUEST_INFO *io_info)
{
	u_int32_t device_id = io_info->ldTgtId;
	LD_STREAM_DETECT *current_ld_SD = sc->streamDetectByLD[device_id];
	u_int32_t *track_stream = &current_ld_SD->mruBitMap;
	u_int32_t streamNum, shiftedValues, unshiftedValues;
	u_int32_t indexValueMask, shiftedValuesMask;
	int i;
	boolean_t isReadAhead = false;
	STREAM_DETECT *current_SD;

	/* find possible stream */
	for (i = 0; i < MAX_STREAMS_TRACKED; ++i) {
		streamNum = (*track_stream >> (i * BITS_PER_INDEX_STREAM)) &
				STREAM_MASK;
		current_SD = &current_ld_SD->streamTrack[streamNum];
		/*
		 * if we found a stream, update the raid context and
		 * also update the mruBitMap
		 */
		if (current_SD->nextSeqLBA &&
		    io_info->ldStartBlock >= current_SD->nextSeqLBA &&
		    (io_info->ldStartBlock <= (current_SD->nextSeqLBA+32)) &&
		    (current_SD->isRead == io_info->isRead)) {
			if (io_info->ldStartBlock != current_SD->nextSeqLBA &&
			    (!io_info->isRead || !isReadAhead)) {
				/*
				 * Once the API is available we need to change this.
				 * At this point we are not allowing any gap.
				 */
				continue;
			}
			cmd->io_request->RaidContext.raid_context_g35.streamDetected = TRUE;
			current_SD->nextSeqLBA = io_info->ldStartBlock + io_info->numBlocks;
			/*
			 * update the mruBitMap LRU
			 */
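			/*
			 * Each tracked stream occupies BITS_PER_INDEX_STREAM
			 * bits of *track_stream.  To promote slot i to
			 * most-recently-used, the slots below i shift up one
			 * position and streamNum is written into slot 0; the
			 * slots above i are left untouched.
			 */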
			shiftedValuesMask = (1 << i * BITS_PER_INDEX_STREAM) - 1;
			shiftedValues = ((*track_stream & shiftedValuesMask) <<
			    BITS_PER_INDEX_STREAM);
			indexValueMask = STREAM_MASK << i * BITS_PER_INDEX_STREAM;
			unshiftedValues = (*track_stream) &
			    (~(shiftedValuesMask | indexValueMask));
			*track_stream =
			    (unshiftedValues | shiftedValues | streamNum);
			return;
		}
	}
	/*
	 * if we did not find any stream, create a new one from the least recently used
	 */
	streamNum = (*track_stream >>
	    ((MAX_STREAMS_TRACKED - 1) * BITS_PER_INDEX_STREAM)) & STREAM_MASK;
	current_SD = &current_ld_SD->streamTrack[streamNum];
	current_SD->isRead = io_info->isRead;
	current_SD->nextSeqLBA = io_info->ldStartBlock + io_info->numBlocks;
	*track_stream = (((*track_stream & ZERO_LAST_STREAM) << 4) | streamNum);
	return;
}

/*
 * mrsas_setup_io:	Set up data including Fast Path I/O
 * input:			Adapter instance soft state
 * 					Pointer to command packet
 * 					Pointer to CCB
 *
 * This function sets up the IO request: it decodes the CDB, builds the RAID
 * context, and determines whether the IO can go Fast Path.  It returns 0 if
 * the command is built successfully, otherwise it returns a 1.
 */
int
mrsas_setup_io(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
    union ccb *ccb, u_int32_t device_id,
    MRSAS_RAID_SCSI_IO_REQUEST * io_request)
{
	struct ccb_hdr *ccb_h = &(ccb->ccb_h);
	struct ccb_scsiio *csio = &(ccb->csio);
	struct IO_REQUEST_INFO io_info;
	MR_DRV_RAID_MAP_ALL *map_ptr;
	struct mrsas_mpt_cmd *r1_cmd = NULL;

	MR_LD_RAID *raid;
	u_int8_t fp_possible;
	u_int32_t start_lba_hi, start_lba_lo, ld_block_size, ld;
	u_int32_t datalength = 0;

	io_request->RaidContext.raid_context.VirtualDiskTgtId = htole16(device_id);

	start_lba_lo = 0;
	start_lba_hi = 0;
	fp_possible = 0;

	/*
	 * READ_6 (0x08) or WRITE_6 (0x0A) cdb
	 */
	if (csio->cdb_len == 6) {
		datalength = (u_int32_t)csio->cdb_io.cdb_bytes[4];
		start_lba_lo = ((u_int32_t)csio->cdb_io.cdb_bytes[1] << 16) |
		    ((u_int32_t)csio->cdb_io.cdb_bytes[2] << 8) |
		    (u_int32_t)csio->cdb_io.cdb_bytes[3];
		start_lba_lo &= 0x1FFFFF;
	}
	/*
	 * READ_10 (0x28) or WRITE_10 (0x2A) cdb
	 */
	else if (csio->cdb_len == 10) {
		datalength = (u_int32_t)csio->cdb_io.cdb_bytes[8] |
		    ((u_int32_t)csio->cdb_io.cdb_bytes[7] << 8);
		start_lba_lo = ((u_int32_t)csio->cdb_io.cdb_bytes[2] << 24) |
		    ((u_int32_t)csio->cdb_io.cdb_bytes[3] << 16) |
		    (u_int32_t)csio->cdb_io.cdb_bytes[4] << 8 |
		    ((u_int32_t)csio->cdb_io.cdb_bytes[5]);
	}
	/*
	 * READ_12 (0xA8) or WRITE_12 (0xAA) cdb
	 */
	else if (csio->cdb_len == 12) {
		datalength = (u_int32_t)csio->cdb_io.cdb_bytes[6] << 24 |
		    ((u_int32_t)csio->cdb_io.cdb_bytes[7] << 16) |
		    ((u_int32_t)csio->cdb_io.cdb_bytes[8] << 8) |
		    ((u_int32_t)csio->cdb_io.cdb_bytes[9]);
		start_lba_lo = ((u_int32_t)csio->cdb_io.cdb_bytes[2] << 24) |
		    ((u_int32_t)csio->cdb_io.cdb_bytes[3] << 16) |
		    (u_int32_t)csio->cdb_io.cdb_bytes[4] << 8 |
		    ((u_int32_t)csio->cdb_io.cdb_bytes[5]);
	}
	/*
	 * READ_16 (0x88) or WRITE_16 (0x8A) cdb
	 */
	else if (csio->cdb_len == 16) {
		datalength = (u_int32_t)csio->cdb_io.cdb_bytes[10] << 24 |
		    ((u_int32_t)csio->cdb_io.cdb_bytes[11] << 16) |
		    ((u_int32_t)csio->cdb_io.cdb_bytes[12] << 8) |
		    ((u_int32_t)csio->cdb_io.cdb_bytes[13]);
		start_lba_lo = ((u_int32_t)csio->cdb_io.cdb_bytes[6] << 24) |
		    ((u_int32_t)csio->cdb_io.cdb_bytes[7] << 16) |
		    (u_int32_t)csio->cdb_io.cdb_bytes[8] << 8 |
		    ((u_int32_t)csio->cdb_io.cdb_bytes[9]);
		start_lba_hi = ((u_int32_t)csio->cdb_io.cdb_bytes[2] << 24) |
		    ((u_int32_t)csio->cdb_io.cdb_bytes[3] << 16) |
		    (u_int32_t)csio->cdb_io.cdb_bytes[4] << 8 |
		    ((u_int32_t)csio->cdb_io.cdb_bytes[5]);
	}
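	/*
	 * Worked example (sketch): a READ_10 whose CDB bytes are
	 * 28 00 00 00 12 00 00 00 08 00 decodes above to
	 * start_lba_lo = 0x1200 and datalength = 8 blocks.
	 */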
	memset(&io_info, 0, sizeof(struct IO_REQUEST_INFO));
	io_info.ldStartBlock = ((u_int64_t)start_lba_hi << 32) | start_lba_lo;
	io_info.numBlocks = datalength;
	io_info.ldTgtId = device_id;
	io_info.r1_alt_dev_handle = MR_DEVHANDLE_INVALID;

	io_request->DataLength = htole32(cmd->length);

	switch (ccb_h->flags & CAM_DIR_MASK) {
	case CAM_DIR_IN:
		io_info.isRead = 1;
		break;
	case CAM_DIR_OUT:
		io_info.isRead = 0;
		break;
	case CAM_DIR_NONE:
	default:
		mrsas_dprint(sc, MRSAS_TRACE, "From %s : DMA Flag is %d \n", __func__, ccb_h->flags & CAM_DIR_MASK);
		break;
	}

	map_ptr = sc->ld_drv_map[(sc->map_id & 1)];
	ld_block_size = MR_LdBlockSizeGet(device_id, map_ptr);

	ld = MR_TargetIdToLdGet(device_id, map_ptr);
	if ((ld >= MAX_LOGICAL_DRIVES_EXT) || (!sc->fast_path_io)) {
		io_request->RaidContext.raid_context.regLockFlags = 0;
		fp_possible = 0;
	} else {
		if (MR_BuildRaidContext(sc, &io_info, &io_request->RaidContext.raid_context, map_ptr))
			fp_possible = io_info.fpOkForIo;
	}

	raid = MR_LdRaidGet(ld, map_ptr);
	/* Store the TM capability value in cmd */
	cmd->tmCapable = raid->capability.tmCapable;

	cmd->request_desc->SCSIIO.MSIxIndex =
	    sc->msix_vectors ? smp_processor_id() % sc->msix_vectors : 0;

	if (sc->is_ventura || sc->is_aero) {
		if (sc->streamDetectByLD) {
			mtx_lock(&sc->stream_lock);
			mrsas_stream_detect(sc, cmd, &io_info);
			mtx_unlock(&sc->stream_lock);
			/* In ventura if stream detected for a read and
			 * it is read ahead capable make this IO as LDIO */
			if (io_request->RaidContext.raid_context_g35.streamDetected &&
					io_info.isRead && io_info.raCapable)
				fp_possible = FALSE;
		}

		/* Set raid 1/10 fast path write capable bit in io_info.
		 * Note - reset peer_cmd and r1_alt_dev_handle if fp_possible
		 * disabled after this point. Try not to add more check for
		 * fp_possible toggle after this.
		 */
		if (fp_possible &&
				(io_info.r1_alt_dev_handle != MR_DEVHANDLE_INVALID) &&
				(raid->level == 1) && !io_info.isRead) {
			if (mrsas_atomic_inc_return(&sc->fw_outstanding) > sc->max_scsi_cmds) {
				fp_possible = FALSE;
				mrsas_atomic_dec(&sc->fw_outstanding);
			} else {
				r1_cmd = mrsas_get_mpt_cmd(sc);
				if (!r1_cmd) {
					fp_possible = FALSE;
					mrsas_atomic_dec(&sc->fw_outstanding);
				} else {
					cmd->peer_cmd = r1_cmd;
					r1_cmd->peer_cmd = cmd;
				}
			}
		}
	}

	if (fp_possible) {
		mrsas_set_pd_lba(io_request, csio->cdb_len, &io_info, ccb, map_ptr,
		    start_lba_lo, ld_block_size);
		io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
		cmd->request_desc->SCSIIO.RequestFlags =
		    (MPI2_REQ_DESCRIPT_FLAGS_FP_IO <<
		    MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
		if (sc->mrsas_gen3_ctrl) {
			if (io_request->RaidContext.raid_context.regLockFlags == REGION_TYPE_UNUSED)
				cmd->request_desc->SCSIIO.RequestFlags =
				    (MRSAS_REQ_DESCRIPT_FLAGS_NO_LOCK <<
				    MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
			io_request->RaidContext.raid_context.Type = MPI2_TYPE_CUDA;
			io_request->RaidContext.raid_context.nseg = 0x1;
			io_request->IoFlags |= htole16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH);
			io_request->RaidContext.raid_context.regLockFlags |=
			    (MR_RL_FLAGS_GRANT_DESTINATION_CUDA |
			    MR_RL_FLAGS_SEQ_NUM_ENABLE);
		} else if (sc->is_ventura || sc->is_aero) {
			io_request->RaidContext.raid_context_g35.Type = MPI2_TYPE_CUDA;
			io_request->RaidContext.raid_context_g35.nseg = 0x1;
			io_request->RaidContext.raid_context_g35.routingFlags.bits.sqn = 1;
			io_request->IoFlags |= htole16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH);
			if (io_request->RaidContext.raid_context_g35.routingFlags.bits.sld) {
					io_request->RaidContext.raid_context_g35.RAIDFlags =
					(MR_RAID_FLAGS_IO_SUB_TYPE_CACHE_BYPASS
					<< MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT);
			}
		}
		if ((sc->load_balance_info[device_id].loadBalanceFlag) &&
		    (io_info.isRead)) {
			io_info.devHandle =
			    mrsas_get_updated_dev_handle(sc,
			    &sc->load_balance_info[device_id], &io_info);
			cmd->load_balance = MRSAS_LOAD_BALANCE_FLAG;
			cmd->pd_r1_lb = io_info.pd_after_lb;
			if (sc->is_ventura || sc->is_aero)
				io_request->RaidContext.raid_context_g35.spanArm = io_info.span_arm;
			else
				io_request->RaidContext.raid_context.spanArm = io_info.span_arm;
		} else
			cmd->load_balance = 0;

		if (sc->is_ventura || sc->is_aero)
				cmd->r1_alt_dev_handle = io_info.r1_alt_dev_handle;
		else
				cmd->r1_alt_dev_handle = MR_DEVHANDLE_INVALID;

		cmd->request_desc->SCSIIO.DevHandle = io_info.devHandle;
		io_request->DevHandle = io_info.devHandle;
		cmd->pdInterface = io_info.pdInterface;
	} else {
		/* Not FP IO */
		io_request->RaidContext.raid_context.timeoutValue = htole16(map_ptr->raidMap.fpPdIoTimeoutSec);
		cmd->request_desc->SCSIIO.RequestFlags =
		    (MRSAS_REQ_DESCRIPT_FLAGS_LD_IO <<
		    MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
		if (sc->mrsas_gen3_ctrl) {
			if (io_request->RaidContext.raid_context.regLockFlags == REGION_TYPE_UNUSED)
				cmd->request_desc->SCSIIO.RequestFlags =
				    (MRSAS_REQ_DESCRIPT_FLAGS_NO_LOCK <<
				    MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
			io_request->RaidContext.raid_context.Type = MPI2_TYPE_CUDA;
			io_request->RaidContext.raid_context.regLockFlags |=
			    (MR_RL_FLAGS_GRANT_DESTINATION_CPU0 |
			    MR_RL_FLAGS_SEQ_NUM_ENABLE);
			io_request->RaidContext.raid_context.nseg = 0x1;
		} else if (sc->is_ventura || sc->is_aero) {
			io_request->RaidContext.raid_context_g35.Type = MPI2_TYPE_CUDA;
			io_request->RaidContext.raid_context_g35.routingFlags.bits.sqn = 1;
			io_request->RaidContext.raid_context_g35.nseg = 0x1;
		}
		io_request->Function = MRSAS_MPI2_FUNCTION_LD_IO_REQUEST;
		io_request->DevHandle = htole16(device_id);
	}
	return (0);
}

/*
 * mrsas_build_ldio_nonrw:	Builds a non-read/write LDIO command
 * input:				Adapter instance soft state
 * 						Pointer to command packet
 * 						Pointer to CCB
 *
 * This function builds the LDIO command packet for SCSI management commands
 * to an LD.  It returns 0 if the command is built successfully, otherwise it
 * returns a 1.
 */
int
mrsas_build_ldio_nonrw(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
    union ccb *ccb)
{
	struct ccb_hdr *ccb_h = &(ccb->ccb_h);
	u_int32_t device_id, ld;
	MR_DRV_RAID_MAP_ALL *map_ptr;
	MR_LD_RAID *raid;
	MRSAS_RAID_SCSI_IO_REQUEST *io_request;

	io_request = cmd->io_request;
	device_id = ccb_h->target_id;

	map_ptr = sc->ld_drv_map[(sc->map_id & 1)];
	ld = MR_TargetIdToLdGet(device_id, map_ptr);
	raid = MR_LdRaidGet(ld, map_ptr);
	/* Store the TM capability value in cmd */
	cmd->tmCapable = raid->capability.tmCapable;

	/* FW path for LD Non-RW (SCSI management commands) */
	io_request->Function = MRSAS_MPI2_FUNCTION_LD_IO_REQUEST;
	io_request->DevHandle = device_id;
	cmd->request_desc->SCSIIO.RequestFlags =
	    (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
	    MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);

	io_request->RaidContext.raid_context.VirtualDiskTgtId = device_id;
	io_request->LUN[1] = ccb_h->target_lun & 0xF;
	io_request->DataLength = cmd->length;

	if (mrsas_map_request(sc, cmd, ccb) == SUCCESS) {
		if (sc->is_ventura || sc->is_aero)
			io_request->RaidContext.raid_context_g35.numSGE = cmd->sge_count;
		else {
			/*
			 * numSGE stores the lower 8 bits of sge_count and
			 * numSGEExt stores the higher 8 bits.
			 */
			io_request->RaidContext.raid_context.numSGE = cmd->sge_count;
			io_request->RaidContext.raid_context.numSGEExt = (uint8_t)(cmd->sge_count >> 8);
		}
	} else {
		device_printf(sc->mrsas_dev, "Data map/load failed.\n");
		return (1);
	}
	return (0);
}

/*
 * mrsas_build_syspdio:	Builds a DCDB command
 * input:				Adapter instance soft state
 * 						Pointer to command packet
 * 						Pointer to CCB
 *
 * This function builds the DCDB command for a system PD.  It returns 0 if the
 * command is built successfully, otherwise it returns a 1.
 */
int
mrsas_build_syspdio(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
    union ccb *ccb, struct cam_sim *sim, u_int8_t fp_possible)
{
	struct ccb_hdr *ccb_h = &(ccb->ccb_h);
	u_int32_t device_id;
	MR_DRV_RAID_MAP_ALL *local_map_ptr;
	MRSAS_RAID_SCSI_IO_REQUEST *io_request;
	struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;

	io_request = cmd->io_request;
	device_id = ccb_h->target_id;
	local_map_ptr = sc->ld_drv_map[(sc->map_id & 1)];
	io_request->RaidContext.raid_context.RAIDFlags = MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD
	    << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT;
	io_request->RaidContext.raid_context.regLockFlags = 0;
	io_request->RaidContext.raid_context.regLockRowLBA = 0;
	io_request->RaidContext.raid_context.regLockLength = 0;

	cmd->pdInterface = sc->target_list[device_id].interface_type;

	/* If FW supports PD sequence number */
	if (sc->use_seqnum_jbod_fp &&
	    sc->pd_list[device_id].driveType == 0x00) {
		//printf("Using Drv seq num\n");
		pd_sync = (void *)sc->jbodmap_mem[(sc->pd_seq_map_id - 1) & 1];
		cmd->tmCapable = pd_sync->seq[device_id].capability.tmCapable;
		/* More than 256 PD/JBOD support for Ventura */
		if (sc->support_morethan256jbod)
			io_request->RaidContext.raid_context.VirtualDiskTgtId =
				pd_sync->seq[device_id].pdTargetId;
		else
			io_request->RaidContext.raid_context.VirtualDiskTgtId =
				htole16(device_id + 255);
		io_request->RaidContext.raid_context.configSeqNum = pd_sync->seq[device_id].seqNum;
		io_request->DevHandle = pd_sync->seq[device_id].devHandle;
		if (sc->is_ventura || sc->is_aero)
			io_request->RaidContext.raid_context_g35.routingFlags.bits.sqn = 1;
		else
			io_request->RaidContext.raid_context.regLockFlags |=
			    (MR_RL_FLAGS_SEQ_NUM_ENABLE | MR_RL_FLAGS_GRANT_DESTINATION_CUDA);
		/* raid_context.Type = MPI2_TYPE_CUDA is valid only,
		 * if FW support Jbod Sequence number
		 */
		io_request->RaidContext.raid_context.Type = MPI2_TYPE_CUDA;
		io_request->RaidContext.raid_context.nseg = 0x1;
	} else if (sc->fast_path_io) {
		//printf("Using LD RAID map\n");
		io_request->RaidContext.raid_context.VirtualDiskTgtId = htole16(device_id);
		io_request->RaidContext.raid_context.configSeqNum = 0;
		local_map_ptr = sc->ld_drv_map[(sc->map_id & 1)];
		io_request->DevHandle =
		    local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl;
	} else {
		//printf("Using FW PATH\n");
		/* Want to send all IO via FW path */
		io_request->RaidContext.raid_context.VirtualDiskTgtId = htole16(device_id);
		io_request->RaidContext.raid_context.configSeqNum = 0;
		io_request->DevHandle = MR_DEVHANDLE_INVALID;
	}

	cmd->request_desc->SCSIIO.DevHandle = io_request->DevHandle;
	cmd->request_desc->SCSIIO.MSIxIndex =
	    sc->msix_vectors ? smp_processor_id() % sc->msix_vectors : 0;

	if (!fp_possible) {
		/* system pd firmware path */
		io_request->Function = MRSAS_MPI2_FUNCTION_LD_IO_REQUEST;
		cmd->request_desc->SCSIIO.RequestFlags =
		    (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
		    MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
		io_request->RaidContext.raid_context.timeoutValue =
		    htole16(local_map_ptr->raidMap.fpPdIoTimeoutSec);
		io_request->RaidContext.raid_context.VirtualDiskTgtId = htole16(device_id);
	} else {
		/* system pd fast path */
		io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
		io_request->RaidContext.raid_context.timeoutValue = htole16(local_map_ptr->raidMap.fpPdIoTimeoutSec);

		/*
		 * NOTE - For system pd RW cmds only IoFlags will be FAST_PATH
		 * Because the NON RW cmds will now go via FW Queue
		 * and not the Exception queue
		 */
		if (sc->mrsas_gen3_ctrl || sc->is_ventura || sc->is_aero)
			io_request->IoFlags |= htole16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH);

		cmd->request_desc->SCSIIO.RequestFlags =
		    (MPI2_REQ_DESCRIPT_FLAGS_FP_IO <<
		    MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
	}

	io_request->LUN[1] = ccb_h->target_lun & 0xF;
	io_request->DataLength = htole32(cmd->length);

	if (mrsas_map_request(sc, cmd, ccb) == SUCCESS) {
		if (sc->is_ventura || sc->is_aero)
			io_request->RaidContext.raid_context_g35.numSGE = cmd->sge_count;
		else {
			/*
			 * numSGE stores the lower 8 bits of sge_count and
			 * numSGEExt stores the higher 8 bits.
			 */
			io_request->RaidContext.raid_context.numSGE = cmd->sge_count;
			io_request->RaidContext.raid_context.numSGEExt = (uint8_t)(cmd->sge_count >> 8);
		}
	} else {
		device_printf(sc->mrsas_dev, "Data map/load failed.\n");
		return (1);
	}
	return (0);
}

/*
 * mrsas_is_prp_possible:	Tells whether PRPs should be built or not
 * cmd:						MPT command frame pointer
 * segs:					OS SG list
 * nsegs:					Number of OS SGEs
 *
 * This function checks whether the IO qualifies to be built as PRPs.
 * return:				true: if PRPs should be built
 *						false: if IEEE SGLs should be built
 */
static boolean_t mrsas_is_prp_possible(struct mrsas_mpt_cmd *cmd,
	bus_dma_segment_t *segs, int nsegs)
{
	struct mrsas_softc *sc = cmd->sc;
	int i;
	u_int32_t data_length = 0;
	bool build_prp = false;
	u_int32_t mr_nvme_pg_size;

	mr_nvme_pg_size = max(sc->nvme_page_size, MR_DEFAULT_NVME_PAGE_SIZE);
	data_length = cmd->length;

	if (data_length > (mr_nvme_pg_size * 5))
		build_prp = true;
	else if ((data_length > (mr_nvme_pg_size * 4)) &&
		(data_length <= (mr_nvme_pg_size * 5)))  {
		/* check if 1st SG entry size is < residual beyond 4 pages */
		if ((segs[0].ds_len) < (data_length - (mr_nvme_pg_size * 4)))
			build_prp = true;
	}

	/*check for SGE holes here*/
	for (i = 0; i < nsegs; i++) {
		/* check for mid SGEs */
		if ((i != 0) && (i != (nsegs - 1))) {
				if ((segs[i].ds_addr % mr_nvme_pg_size) ||
					(segs[i].ds_len % mr_nvme_pg_size)) {
					build_prp = false;
					mrsas_atomic_inc(&sc->sge_holes);
					break;
				}
		}

		/* check for first SGE*/
		if ((nsegs > 1) && (i == 0)) {
				if ((segs[i].ds_addr + segs[i].ds_len) % mr_nvme_pg_size) {
					build_prp = false;
					mrsas_atomic_inc(&sc->sge_holes);
					break;
				}
		}

		/* check for Last SGE*/
		if ((nsegs > 1) && (i == (nsegs - 1))) {
				if (segs[i].ds_addr % mr_nvme_pg_size) {
					build_prp = false;
					mrsas_atomic_inc(&sc->sge_holes);
					break;
				}
		}
	}

	return build_prp;
}
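
/*
 * Worked example (a sketch, assuming the default 4 KiB NVMe page): a
 * 24 KiB transfer exceeds five pages, so PRPs are preferred; but if any
 * middle SGE is not page-aligned or not a whole multiple of the page
 * size, the IO falls back to IEEE SGLs and sge_holes is bumped.
 */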

/*
 * mrsas_map_request:	Map and load data
 * input:				Adapter instance soft state
 * 						Pointer to command packet
 *
 * For data from OS, map and load the data buffer into bus space.  The SG list
 * is built in the callback.  If the bus dmamap load is not successful,
 * cmd->error_code will contain the error code and a 1 is returned.
 */
int
mrsas_map_request(struct mrsas_softc *sc,
    struct mrsas_mpt_cmd *cmd, union ccb *ccb)
{
	u_int32_t retcode = 0;
	struct cam_sim *sim;

	sim = xpt_path_sim(cmd->ccb_ptr->ccb_h.path);

	if (cmd->data != NULL) {
		/* Map data buffer into bus space */
		mtx_lock(&sc->io_lock);
#if (__FreeBSD_version >= 902001)
		retcode = bus_dmamap_load_ccb(sc->data_tag, cmd->data_dmamap, ccb,
		    mrsas_data_load_cb, cmd, 0);
#else
		retcode = bus_dmamap_load(sc->data_tag, cmd->data_dmamap, cmd->data,
		    cmd->length, mrsas_data_load_cb, cmd, BUS_DMA_NOWAIT);
#endif
		mtx_unlock(&sc->io_lock);
		if (retcode)
			device_printf(sc->mrsas_dev, "bus_dmamap_load(): retcode = %d\n", retcode);
		if (retcode == EINPROGRESS) {
			device_printf(sc->mrsas_dev, "request load in progress\n");
			mrsas_freeze_simq(cmd, sim);
		}
	}
	if (cmd->error_code)
		return (1);
	return (retcode);
}

/*
 * mrsas_unmap_request:	Unmap and unload data
 * input:				Adapter instance soft state
 * 						Pointer to command packet
 *
 * This function unmaps and unloads data from OS.
 */
void
mrsas_unmap_request(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd)
{
	if (cmd->data != NULL) {
		if (cmd->flags & MRSAS_DIR_IN)
			bus_dmamap_sync(sc->data_tag, cmd->data_dmamap, BUS_DMASYNC_POSTREAD);
		if (cmd->flags & MRSAS_DIR_OUT)
			bus_dmamap_sync(sc->data_tag, cmd->data_dmamap, BUS_DMASYNC_POSTWRITE);
		mtx_lock(&sc->io_lock);
		bus_dmamap_unload(sc->data_tag, cmd->data_dmamap);
		mtx_unlock(&sc->io_lock);
	}
}

/**
 * mrsas_build_ieee_sgl -	Prepare IEEE SGLs
 * @cmd:					Fusion command frame
 * @segs:					OS SGEs pointers
 * @nseg:					Number of OS SGEs
 * return:					void
 */
static void mrsas_build_ieee_sgl(struct mrsas_mpt_cmd *cmd, bus_dma_segment_t *segs, int nseg)
{
	struct mrsas_softc *sc = cmd->sc;
	MRSAS_RAID_SCSI_IO_REQUEST *io_request;
	pMpi25IeeeSgeChain64_t sgl_ptr;
	int i = 0, sg_processed = 0;

	io_request = cmd->io_request;
	sgl_ptr = (pMpi25IeeeSgeChain64_t)&io_request->SGL;

	if (sc->mrsas_gen3_ctrl || sc->is_ventura || sc->is_aero) {
		pMpi25IeeeSgeChain64_t sgl_ptr_end = sgl_ptr;

		sgl_ptr_end += sc->max_sge_in_main_msg - 1;
		sgl_ptr_end->Flags = 0;
	}
	if (nseg != 0) {
		for (i = 0; i < nseg; i++) {
			sgl_ptr->Address = htole64(segs[i].ds_addr);
			sgl_ptr->Length = htole32(segs[i].ds_len);
			sgl_ptr->Flags = 0;
			if (sc->mrsas_gen3_ctrl || sc->is_ventura || sc->is_aero) {
				if (i == nseg - 1)
					sgl_ptr->Flags = IEEE_SGE_FLAGS_END_OF_LIST;
			}
			sgl_ptr++;
			sg_processed = i + 1;
			if ((sg_processed == (sc->max_sge_in_main_msg - 1)) &&
				(nseg > sc->max_sge_in_main_msg)) {
				pMpi25IeeeSgeChain64_t sg_chain;

				if (sc->mrsas_gen3_ctrl || sc->is_ventura || sc->is_aero) {
					if ((cmd->io_request->IoFlags & MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH)
						!= MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH)
						cmd->io_request->ChainOffset = sc->chain_offset_io_request;
					else
						cmd->io_request->ChainOffset = 0;
				} else
					cmd->io_request->ChainOffset = sc->chain_offset_io_request;
				sg_chain = sgl_ptr;
				if (sc->mrsas_gen3_ctrl || sc->is_ventura || sc->is_aero)
					sg_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT;
				else
					sg_chain->Flags = (IEEE_SGE_FLAGS_CHAIN_ELEMENT | MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR);
				sg_chain->Length = htole32((sizeof(MPI2_SGE_IO_UNION) * (nseg - sg_processed)));
				sg_chain->Address = htole64(cmd->chain_frame_phys_addr);
				sgl_ptr = (pMpi25IeeeSgeChain64_t)cmd->chain_frame;
			}
		}
	}
}
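
/*
 * Resulting layout (a sketch, for the case where nseg exceeds
 * max_sge_in_main_msg):
 *
 *   main frame SGL:  SGE 0 .. SGE (max_sge_in_main_msg - 2), chain element
 *   chain frame:     the remaining SGEs, with the last one flagged
 *                    END_OF_LIST on gen3/Ventura/Aero controllers
 */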

/**
 * mrsas_build_prp_nvme - Prepare PRPs (Physical Region Pages), the SGL format specific to NVMe drives
 * @cmd:					Fusion command frame
 * @segs:					OS SGEs pointers
 * @nseg:					Number of OS SGEs
 * return:					void
 */
1548 static void mrsas_build_prp_nvme(struct mrsas_mpt_cmd *cmd, bus_dma_segment_t *segs, int nseg)
1549 {
	struct mrsas_softc *sc = cmd->sc;
	int sge_len, offset, num_prp_in_chain = 0;
	pMpi25IeeeSgeChain64_t main_chain_element, ptr_first_sgl, sgl_ptr;
	u_int64_t *ptr_sgl;
	bus_addr_t ptr_sgl_phys;
	u_int64_t sge_addr;
	u_int32_t page_mask, page_mask_result, i = 0;
	u_int32_t first_prp_len;
	int data_len = cmd->length;
	u_int32_t mr_nvme_pg_size = max(sc->nvme_page_size,
					MR_DEFAULT_NVME_PAGE_SIZE);

	sgl_ptr = (pMpi25IeeeSgeChain64_t) &cmd->io_request->SGL;
	/*
	 * NVMe has a very convoluted PRP format.  One PRP is required
	 * for each page or partial page.  We need to split up OS SG
	 * entries if they are longer than one page or cross a page
	 * boundary.  We also have to insert a PRP list pointer entry as
	 * the last entry in each physical page of the PRP list.
	 *
	 * NOTE: The first PRP "entry" is actually placed in the first
	 * SGL entry in the main message in IEEE 64 format.  The 2nd
	 * entry in the main message is the chain element, and the rest
	 * of the PRP entries are built in the contiguous PCIe buffer.
	 */
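	/*
	 * Worked example (hypothetical values, assuming a 4KB NVMe page):
	 * a single 12KB OS SGE at physical address 0x10200 produces a
	 * first PRP of address 0x10200 and length 0xE00 (up to the page
	 * boundary), then chained PRP entries 0x11000, 0x12000 and
	 * 0x13000 covering the remaining 0x2200 bytes.
	 */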
	page_mask = mr_nvme_pg_size - 1;
	ptr_sgl = (u_int64_t *) cmd->chain_frame;
	ptr_sgl_phys = cmd->chain_frame_phys_addr;
	memset(ptr_sgl, 0, sc->max_chain_frame_sz);

	/* Build the chain frame element, which holds all PRPs except the first */
	main_chain_element = (pMpi25IeeeSgeChain64_t)
	    ((u_int8_t *)sgl_ptr + sizeof(MPI25_IEEE_SGE_CHAIN64));

	main_chain_element->Address = cmd->chain_frame_phys_addr;
	main_chain_element->NextChainOffset = 0;
	main_chain_element->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT |
					IEEE_SGE_FLAGS_SYSTEM_ADDR |
					MPI26_IEEE_SGE_FLAGS_NSF_NVME_PRP;

	/* Build the first PRP; the SGE need not be page aligned */
	ptr_first_sgl = sgl_ptr;
	sge_addr = segs[i].ds_addr;
	sge_len = segs[i].ds_len;
	i++;

	offset = (u_int32_t) (sge_addr & page_mask);
	first_prp_len = mr_nvme_pg_size - offset;

	ptr_first_sgl->Address = sge_addr;
	ptr_first_sgl->Length = first_prp_len;

	data_len -= first_prp_len;

	if (sge_len > first_prp_len) {
		sge_addr += first_prp_len;
		sge_len -= first_prp_len;
	} else if (sge_len == first_prp_len) {
		sge_addr = segs[i].ds_addr;
		sge_len = segs[i].ds_len;
		i++;
	}

	for (;;) {
		offset = (u_int32_t) (sge_addr & page_mask);

		/* Insert a PRP list pointer when we reach a page boundary */
		page_mask_result = (uintptr_t)(ptr_sgl + 1) & page_mask;
		if (!page_mask_result) {
			device_printf(sc->mrsas_dev, "BRCM: put PRP list pointer at page boundary,"
					" ptr_sgl: 0x%p\n", ptr_sgl);
			/* Each PRP entry occupies 8 bytes of the list */
			ptr_sgl_phys += sizeof(u_int64_t);
			*ptr_sgl = (u_int64_t)ptr_sgl_phys;
			ptr_sgl++;
			num_prp_in_chain++;
		}

		*ptr_sgl = sge_addr;
		ptr_sgl++;
		ptr_sgl_phys += sizeof(u_int64_t);
		num_prp_in_chain++;

		sge_addr += mr_nvme_pg_size;
		sge_len -= mr_nvme_pg_size;
		data_len -= mr_nvme_pg_size;

		if (data_len <= 0)
			break;

		if (sge_len > 0)
			continue;

		sge_addr = segs[i].ds_addr;
		sge_len = segs[i].ds_len;
		i++;
	}

	main_chain_element->Length = num_prp_in_chain * sizeof(u_int64_t);
	mrsas_atomic_inc(&sc->prp_count);
}

/*
 * mrsas_data_load_cb:	Callback entry point to build SGLs
 * input:				Pointer to command packet as argument
 *						Pointer to segments
 *						Number of segments
 *						Error code
 *
 * This is the callback function of the bus dma map load.  It builds the SG list.
 */
static void
mrsas_data_load_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct mrsas_mpt_cmd *cmd = (struct mrsas_mpt_cmd *)arg;
	struct mrsas_softc *sc = cmd->sc;
	boolean_t build_prp = false;

	if (error) {
		cmd->error_code = error;
		device_printf(sc->mrsas_dev, "mrsas_data_load_cb: error=%d\n", error);
		if (error == EFBIG) {
			cmd->ccb_ptr->ccb_h.status = CAM_REQ_TOO_BIG;
			return;
		}
	}
	if (cmd->flags & MRSAS_DIR_IN)
		bus_dmamap_sync(cmd->sc->data_tag, cmd->data_dmamap,
		    BUS_DMASYNC_PREREAD);
	if (cmd->flags & MRSAS_DIR_OUT)
		bus_dmamap_sync(cmd->sc->data_tag, cmd->data_dmamap,
		    BUS_DMASYNC_PREWRITE);

	/* Decide whether to build PRPs or IEEE SGLs */
	if ((cmd->io_request->IoFlags & MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) &&
			(cmd->pdInterface == NVME_PD))
		build_prp = mrsas_is_prp_possible(cmd, segs, nseg);

	if (build_prp == true)
		mrsas_build_prp_nvme(cmd, segs, nseg);
	else
		mrsas_build_ieee_sgl(cmd, segs, nseg);

	cmd->sge_count = nseg;
}

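/*
 * A minimal sketch (not the driver's actual mapping routine) of how a
 * busdma callback such as mrsas_data_load_cb is typically wired up.
 * The function name example_map_data is hypothetical; sc->data_tag and
 * cmd->data_dmamap are the tag and map used by the callback above.
 */
static int
example_map_data(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
    union ccb *ccb)
{
	int error;

	/* busdma builds the S/G list and invokes mrsas_data_load_cb */
	error = bus_dmamap_load_ccb(sc->data_tag, cmd->data_dmamap, ccb,
	    mrsas_data_load_cb, cmd, BUS_DMA_NOWAIT);
	if (error == EINPROGRESS) {
		/* Deferred load: the callback runs once resources free up */
		error = 0;
	}
	return (error);
}
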
/*
 * mrsas_freeze_simq:	Freeze SIM queue
 * input:				Pointer to command packet
 * 						Pointer to SIM
 *
 * This function freezes the sim queue.
 */
static void
mrsas_freeze_simq(struct mrsas_mpt_cmd *cmd, struct cam_sim *sim)
{
	union ccb *ccb = (union ccb *)(cmd->ccb_ptr);

	xpt_freeze_simq(sim, 1);
	ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
	ccb->ccb_h.status |= CAM_REQUEUE_REQ;
}

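/*
 * Usage sketch (hypothetical caller): a resource-shortage path in a
 * start-I/O routine would typically do
 *
 *	mrsas_freeze_simq(cmd, sim);
 *
 * so that CAM requeues the CCB and releases the SIM queue once it is
 * thawed again.
 */
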
void
mrsas_xpt_freeze(struct mrsas_softc *sc)
{
	xpt_freeze_simq(sc->sim_0, 1);
	xpt_freeze_simq(sc->sim_1, 1);
}

void
mrsas_xpt_release(struct mrsas_softc *sc)
{
	xpt_release_simq(sc->sim_0, 1);
	xpt_release_simq(sc->sim_1, 1);
}

/*
 * mrsas_cmd_done:	Perform remaining command completion
 * input:			Adapter instance soft state
 * 					Pointer to command packet
 *
 * This function calls unmap request and releases the MPT command.
 */
void
mrsas_cmd_done(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd)
{
	mrsas_unmap_request(sc, cmd);

	mtx_lock(&sc->sim_lock);
	if (cmd->callout_owner) {
		callout_stop(&cmd->cm_callout);
		cmd->callout_owner = false;
	}
	xpt_done(cmd->ccb_ptr);
	cmd->ccb_ptr = NULL;
	mtx_unlock(&sc->sim_lock);
	mrsas_release_mpt_cmd(cmd);
}

/*
 * mrsas_cam_poll:	Polling entry point
 * input:			Pointer to SIM
 *
 * This function drains completions on every MSI-x completion queue,
 * or on the single queue when MSI-x is not in use.
 */
static void
mrsas_cam_poll(struct cam_sim *sim)
{
	int i;
	struct mrsas_softc *sc = (struct mrsas_softc *)cam_sim_softc(sim);

	if (sc->msix_vectors != 0) {
		for (i = 0; i < sc->msix_vectors; i++) {
			mrsas_complete_cmd(sc, i);
		}
	} else {
		mrsas_complete_cmd(sc, 0);
	}
}

/*
 * mrsas_bus_scan:	Perform bus scan
 * input:			Adapter instance soft state
 *
 * This function is needed for FreeBSD 7.x.  It should not be called on
 * FreeBSD 8.x and later versions, where the bus scan is automatic.
 */
int
mrsas_bus_scan(struct mrsas_softc *sc)
{
	union ccb *ccb_0;
	union ccb *ccb_1;

	if ((ccb_0 = xpt_alloc_ccb()) == NULL) {
		return (ENOMEM);
	}
	if ((ccb_1 = xpt_alloc_ccb()) == NULL) {
		xpt_free_ccb(ccb_0);
		return (ENOMEM);
	}
	mtx_lock(&sc->sim_lock);
	if (xpt_create_path(&ccb_0->ccb_h.path, xpt_periph, cam_sim_path(sc->sim_0),
	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		xpt_free_ccb(ccb_0);
		xpt_free_ccb(ccb_1);
		mtx_unlock(&sc->sim_lock);
		return (EIO);
	}
	if (xpt_create_path(&ccb_1->ccb_h.path, xpt_periph, cam_sim_path(sc->sim_1),
	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		xpt_free_ccb(ccb_0);
		xpt_free_ccb(ccb_1);
		mtx_unlock(&sc->sim_lock);
		return (EIO);
	}
	mtx_unlock(&sc->sim_lock);
	xpt_rescan(ccb_0);
	xpt_rescan(ccb_1);

	return (0);
}

/*
 * mrsas_bus_scan_sim:	Perform bus scan per SIM
 * input:				Adapter instance soft state
 * 						Pointer to SIM
 *
 * This function is called from the event handler on LD creation/deletion
 * and JBOD on/off.
 */
int
mrsas_bus_scan_sim(struct mrsas_softc *sc, struct cam_sim *sim)
{
	union ccb *ccb;

	if ((ccb = xpt_alloc_ccb()) == NULL) {
		return (ENOMEM);
	}
	mtx_lock(&sc->sim_lock);
	if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, cam_sim_path(sim),
	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		xpt_free_ccb(ccb);
		mtx_unlock(&sc->sim_lock);
		return (EIO);
	}
	mtx_unlock(&sc->sim_lock);
	xpt_rescan(ccb);

	return (0);
}

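/*
 * Usage sketch (hypothetical call site): on an LD-created event the
 * handler would rescan only the VD SIM, e.g.
 *
 *	mrsas_bus_scan_sim(sc, sc->sim_1);
 *
 * while a JBOD state change would rescan sc->sim_0 instead.
 */
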
/*
 * mrsas_track_scsiio:  Track IOs for a given target in the mpt_cmd_list
 * input:           Adapter instance soft state
 *                  Target ID of target
 *                  Bus ID of the target
 *
 * This function checks for any pending IO in the whole mpt_cmd_list pool
 * with the bus_id and target_id passed in arguments. If any IO is found,
 * the target reset has not completed successfully.
 *
 * Returns FAIL if IOs are pending for the target device, else returns SUCCESS
 */
static int
mrsas_track_scsiio(struct mrsas_softc *sc, target_id_t tgt_id, u_int32_t bus_id)
{
	int i;
	struct mrsas_mpt_cmd *mpt_cmd = NULL;

	/*
	 * bus_id = 1 denotes a VD; the target_reset_pool index passed in
	 * is offset from the CAM target id by (MRSAS_MAX_PD - 1), so
	 * normalize it once before scanning the command pool.
	 */
	if (bus_id == 1)
		tgt_id -= (MRSAS_MAX_PD - 1);

	for (i = 0; i < sc->max_fw_cmds; i++) {
		mpt_cmd = sc->mpt_cmd_list[i];

		/*
		 * Check if the target_id and bus_id match the timed-out IO
		 */
		if (mpt_cmd->ccb_ptr) {
			if (mpt_cmd->ccb_ptr->cpi.bus_id == bus_id &&
			    mpt_cmd->ccb_ptr->ccb_h.target_id == tgt_id) {
				device_printf(sc->mrsas_dev,
				    "IO commands pending to target id %d\n", tgt_id);
				return FAIL;
			}
		}
	}

	return SUCCESS;
}

#if TM_DEBUG
/*
 * mrsas_tm_response_code: Prints TM response code received from FW
 * input:           Adapter instance soft state
 *                  MPI reply returned from firmware
 *
 * Returns nothing.
 */
static void
mrsas_tm_response_code(struct mrsas_softc *sc,
	MPI2_SCSI_TASK_MANAGE_REPLY *mpi_reply)
{
	char *desc;

	switch (mpi_reply->ResponseCode) {
	case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
		desc = "task management request completed";
		break;
	case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
		desc = "invalid frame";
		break;
	case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
		desc = "task management request not supported";
		break;
	case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
		desc = "task management request failed";
		break;
	case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
		desc = "task management request succeeded";
		break;
	case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
		desc = "invalid lun";
		break;
	case 0xA:
		desc = "overlapped tag attempted";
		break;
	case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
		desc = "task queued, however not sent to target";
		break;
	default:
		desc = "unknown";
		break;
	}
	device_printf(sc->mrsas_dev, "response_code(%01x): %s\n",
	    mpi_reply->ResponseCode, desc);
	device_printf(sc->mrsas_dev,
	    "TerminationCount/DevHandle/Function/TaskType/IOCStat/IOCLoginfo\n"
	    "0x%x/0x%x/0x%x/0x%x/0x%x/0x%x\n",
	    mpi_reply->TerminationCount, mpi_reply->DevHandle,
	    mpi_reply->Function, mpi_reply->TaskType,
	    mpi_reply->IOCStatus, mpi_reply->IOCLogInfo);
}
#endif

/*
 * mrsas_issue_tm:  Fires the TM command to FW and waits for completion
 * input:           Adapter instance soft state
 *                  Request descriptor compiled by mrsas_reset_targets
 *
 * Returns FAIL if the TM command timed out in FW, else SUCCESS.
 */
static int
mrsas_issue_tm(struct mrsas_softc *sc,
	MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc)
{
	int sleep_stat;

	mrsas_fire_cmd(sc, req_desc->addr.u.low, req_desc->addr.u.high);
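	/* Wait up to 50 seconds (50 * hz ticks) for the TM completion wakeup */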
	sleep_stat = msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO, "tm_sleep", 50*hz);

	if (sleep_stat == EWOULDBLOCK) {
		device_printf(sc->mrsas_dev, "tm cmd TIMEDOUT\n");
		return FAIL;
	}

	return SUCCESS;
}

/*
 * mrsas_reset_targets : Gathers info to fire a target reset command
 * input:           Adapter instance soft state
 *
 * This function compiles data for a target reset command to be fired to
 * the FW and then traverses the target_reset_pool to issue a TM command
 * for every target with timed-out IOs.
 *
 * Returns SUCCESS or FAIL
 */
int
mrsas_reset_targets(struct mrsas_softc *sc)
{
	struct mrsas_mpt_cmd *tm_mpt_cmd = NULL;
	struct mrsas_mpt_cmd *tgt_mpt_cmd = NULL;
	MR_TASK_MANAGE_REQUEST *mr_request;
	MPI2_SCSI_TASK_MANAGE_REQUEST *tm_mpi_request;
	MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
	int retCode = FAIL, count, i, outstanding;
	u_int32_t MSIxIndex, bus_id;
	target_id_t tgt_id;
#if TM_DEBUG
	MPI2_SCSI_TASK_MANAGE_REPLY *mpi_reply;
#endif

	outstanding = mrsas_atomic_read(&sc->fw_outstanding);

	if (!outstanding) {
		device_printf(sc->mrsas_dev, "NO IOs pending...\n");
		mrsas_atomic_set(&sc->target_reset_outstanding, 0);
		retCode = SUCCESS;
		goto return_status;
	} else if (sc->adprecovery != MRSAS_HBA_OPERATIONAL) {
		device_printf(sc->mrsas_dev, "Controller is not operational\n");
		goto return_status;
	} else {
		/* More error checks may be added in the future */
	}

	/* Get an mpt frame and an index to fire the TM cmd */
	tm_mpt_cmd = mrsas_get_mpt_cmd(sc);
	if (!tm_mpt_cmd) {
		retCode = FAIL;
		goto return_status;
	}

	req_desc = mrsas_get_request_desc(sc, (tm_mpt_cmd->index) - 1);
	if (!req_desc) {
		device_printf(sc->mrsas_dev, "Cannot get request_descriptor for tm.\n");
		retCode = FAIL;
		goto release_mpt;
	}
	memset(req_desc, 0, sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION));

	req_desc->HighPriority.SMID = tm_mpt_cmd->index;
	req_desc->HighPriority.RequestFlags =
	    (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
	    MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
	req_desc->HighPriority.MSIxIndex = 0;
	req_desc->HighPriority.LMID = 0;
	req_desc->HighPriority.Reserved1 = 0;
	tm_mpt_cmd->request_desc = req_desc;

	mr_request = (MR_TASK_MANAGE_REQUEST *) tm_mpt_cmd->io_request;
	memset(mr_request, 0, sizeof(MR_TASK_MANAGE_REQUEST));

	tm_mpi_request = (MPI2_SCSI_TASK_MANAGE_REQUEST *) &mr_request->TmRequest;
	tm_mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	tm_mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
	tm_mpi_request->TaskMID = 0;	/* smid task */
	tm_mpi_request->LUN[1] = 0;

	/* Traverse the tm_mpt pool to get valid entries */
	for (i = 0; i < MRSAS_MAX_TM_TARGETS; i++) {
		if (!sc->target_reset_pool[i])
			continue;
		tgt_mpt_cmd = sc->target_reset_pool[i];

		tgt_id = i;

		/* See if the target is TM capable or NOT */
		if (!tgt_mpt_cmd->tmCapable) {
			device_printf(sc->mrsas_dev, "Task management NOT SUPPORTED for "
			    "CAM target:%d\n", tgt_id);

			retCode = FAIL;
			goto release_mpt;
		}

		tm_mpi_request->DevHandle = tgt_mpt_cmd->io_request->DevHandle;

		if (i < (MRSAS_MAX_PD - 1)) {
			mr_request->uTmReqReply.tmReqFlags.isTMForPD = 1;
			bus_id = 0;
		} else {
			mr_request->uTmReqReply.tmReqFlags.isTMForLD = 1;
			bus_id = 1;
		}

		device_printf(sc->mrsas_dev, "TM will be fired for "
		    "CAM target:%d and bus_id %d\n", tgt_id, bus_id);

		sc->ocr_chan = (void *)&tm_mpt_cmd;
		retCode = mrsas_issue_tm(sc, req_desc);
		if (retCode == FAIL)
			goto release_mpt;

#if TM_DEBUG
		mpi_reply =
		    (MPI2_SCSI_TASK_MANAGE_REPLY *) &mr_request->uTmReqReply.TMReply;
		mrsas_tm_response_code(sc, mpi_reply);
#endif
		mrsas_atomic_dec(&sc->target_reset_outstanding);
		sc->target_reset_pool[i] = NULL;

		/* Check for pending cmds in the mpt_cmd_pool with the tgt_id */
		mrsas_disable_intr(sc);
		/*
		 * Wait for one second so that any ISR running in parallel
		 * through the same mrsas_complete_cmd() can finish.
		 */
		msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO, "mrsas_reset_wakeup",
		    1 * hz);
		count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
		mtx_unlock(&sc->sim_lock);
		for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++)
			mrsas_complete_cmd(sc, MSIxIndex);
		mtx_lock(&sc->sim_lock);
		retCode = mrsas_track_scsiio(sc, tgt_id, bus_id);
		mrsas_enable_intr(sc);

		if (retCode == FAIL)
			goto release_mpt;
	}

	device_printf(sc->mrsas_dev, "Number of targets outstanding "
	    "after reset: %d\n", mrsas_atomic_read(&sc->target_reset_outstanding));

release_mpt:
	mrsas_release_mpt_cmd(tm_mpt_cmd);
return_status:
	device_printf(sc->mrsas_dev, "target reset %s!!\n",
		(retCode == SUCCESS) ? "SUCCESS" : "FAIL");

	return retCode;
}