xref: /freebsd/sys/dev/mrsas/mrsas_cam.c (revision ff0ba87247820afbdfdc1b307c803f7923d0e4d3)
1 /*
2  * Copyright (c) 2014, LSI Corp. All rights reserved. Author: Marian Choy
3  * Support: freebsdraid@lsi.com
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions are
7  * met:
8  *
9  * 1. Redistributions of source code must retain the above copyright notice,
10  * this list of conditions and the following disclaimer. 2. Redistributions
11  * in binary form must reproduce the above copyright notice, this list of
12  * conditions and the following disclaimer in the documentation and/or other
13  * materials provided with the distribution. 3. Neither the name of the
14  * <ORGANIZATION> nor the names of its contributors may be used to endorse or
15  * promote products derived from this software without specific prior written
16  * permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
22  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28  * POSSIBILITY OF SUCH DAMAGE.
29  *
30  */
31 
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
34 
35 #include "dev/mrsas/mrsas.h"
36 
37 #include <cam/cam.h>
38 #include <cam/cam_ccb.h>
39 #include <cam/cam_sim.h>
40 #include <cam/cam_xpt_sim.h>
41 #include <cam/cam_debug.h>
42 #include <cam/cam_periph.h>
43 #include <cam/cam_xpt_periph.h>
44 
45 #include <cam/scsi/scsi_all.h>
46 #include <cam/scsi/scsi_message.h>
47 #include <sys/taskqueue.h>
48 #include <sys/kernel.h>
49 
50 
51 #include <sys/time.h>			/* XXX for pcpu.h */
52 #include <sys/pcpu.h>			/* XXX for PCPU_GET */
53 
54 #define	smp_processor_id()  PCPU_GET(cpuid)
55 
56 /*
57  * Function prototypes
58  */
59 int	mrsas_cam_attach(struct mrsas_softc *sc);
60 int	mrsas_ldio_inq(struct cam_sim *sim, union ccb *ccb);
61 int	mrsas_bus_scan(struct mrsas_softc *sc);
62 int	mrsas_bus_scan_sim(struct mrsas_softc *sc, struct cam_sim *sim);
63 int	mrsas_map_request(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd);
64 int
65 mrsas_build_ldio(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
66     union ccb *ccb);
67 int
68 mrsas_build_dcdb(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
69     union ccb *ccb, struct cam_sim *sim);
70 int
71 mrsas_setup_io(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
72     union ccb *ccb, u_int32_t device_id,
73     MRSAS_RAID_SCSI_IO_REQUEST * io_request);
74 void	mrsas_xpt_freeze(struct mrsas_softc *sc);
75 void	mrsas_xpt_release(struct mrsas_softc *sc);
76 void	mrsas_cam_detach(struct mrsas_softc *sc);
77 void	mrsas_release_mpt_cmd(struct mrsas_mpt_cmd *cmd);
78 void	mrsas_unmap_request(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd);
79 void	mrsas_cmd_done(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd);
80 void
81 mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo,
82     u_int32_t req_desc_hi);
83 void
84 mrsas_set_pd_lba(MRSAS_RAID_SCSI_IO_REQUEST * io_request,
85     u_int8_t cdb_len, struct IO_REQUEST_INFO *io_info, union ccb *ccb,
86     MR_DRV_RAID_MAP_ALL * local_map_ptr, u_int32_t ref_tag,
87     u_int32_t ld_block_size);
88 static void mrsas_freeze_simq(struct mrsas_mpt_cmd *cmd, struct cam_sim *sim);
89 static void mrsas_cam_poll(struct cam_sim *sim);
90 static void mrsas_action(struct cam_sim *sim, union ccb *ccb);
91 static void mrsas_scsiio_timeout(void *data);
92 static void
93 mrsas_data_load_cb(void *arg, bus_dma_segment_t *segs,
94     int nseg, int error);
95 static int32_t
96 mrsas_startio(struct mrsas_softc *sc, struct cam_sim *sim,
97     union ccb *ccb);
98 struct mrsas_mpt_cmd *mrsas_get_mpt_cmd(struct mrsas_softc *sc);
99 MRSAS_REQUEST_DESCRIPTOR_UNION *
100 	mrsas_get_request_desc(struct mrsas_softc *sc, u_int16_t index);
101 
102 extern u_int16_t MR_TargetIdToLdGet(u_int32_t ldTgtId, MR_DRV_RAID_MAP_ALL * map);
103 extern u_int32_t
104 MR_LdBlockSizeGet(u_int32_t ldTgtId, MR_DRV_RAID_MAP_ALL * map,
105     struct mrsas_softc *sc);
106 extern void mrsas_isr(void *arg);
107 extern void mrsas_aen_handler(struct mrsas_softc *sc);
108 extern u_int8_t
109 MR_BuildRaidContext(struct mrsas_softc *sc,
110     struct IO_REQUEST_INFO *io_info, RAID_CONTEXT * pRAID_Context,
111     MR_DRV_RAID_MAP_ALL * map);
112 extern u_int16_t
113 MR_LdSpanArrayGet(u_int32_t ld, u_int32_t span,
114     MR_DRV_RAID_MAP_ALL * map);
115 extern u_int16_t
116 mrsas_get_updated_dev_handle(PLD_LOAD_BALANCE_INFO lbInfo,
117     struct IO_REQUEST_INFO *io_info);
118 extern u_int8_t
119 megasas_get_best_arm(PLD_LOAD_BALANCE_INFO lbInfo, u_int8_t arm,
120     u_int64_t block, u_int32_t count);
121 
122 
123 /*
124  * mrsas_cam_attach:	Main entry to CAM subsystem
125  * input:				Adapter instance soft state
126  *
127  * This function is called from mrsas_attach() during initialization to perform
128  * SIM allocations and XPT bus registration.  If the kernel version is 7.4 or
129  * earlier, it would also initiate a bus scan.
130  */
131 int
132 mrsas_cam_attach(struct mrsas_softc *sc)
133 {
134 	struct cam_devq *devq;
135 	int mrsas_cam_depth;
136 
137 	mrsas_cam_depth = sc->max_fw_cmds - MRSAS_INTERNAL_CMDS;
138 
139 	if ((devq = cam_simq_alloc(mrsas_cam_depth)) == NULL) {
140 		device_printf(sc->mrsas_dev, "Cannot allocate SIM queue\n");
141 		return (ENOMEM);
142 	}
143 	/*
144 	 * Create SIM for bus 0 and register, also create path
145 	 */
146 	sc->sim_0 = cam_sim_alloc(mrsas_action, mrsas_cam_poll, "mrsas", sc,
147 	    device_get_unit(sc->mrsas_dev), &sc->sim_lock, mrsas_cam_depth,
148 	    mrsas_cam_depth, devq);
149 	if (sc->sim_0 == NULL) {
150 		cam_simq_free(devq);
151 		device_printf(sc->mrsas_dev, "Cannot register SIM\n");
152 		return (ENXIO);
153 	}
154 	/* Initialize taskqueue for Event Handling */
155 	TASK_INIT(&sc->ev_task, 0, (void *)mrsas_aen_handler, sc);
156 	sc->ev_tq = taskqueue_create("mrsas_taskq", M_NOWAIT | M_ZERO,
157 	    taskqueue_thread_enqueue, &sc->ev_tq);
158 
159 	/* Run the task queue with lowest priority */
160 	taskqueue_start_threads(&sc->ev_tq, 1, 255, "%s taskq",
161 	    device_get_nameunit(sc->mrsas_dev));
162 	mtx_lock(&sc->sim_lock);
163 	if (xpt_bus_register(sc->sim_0, sc->mrsas_dev, 0) != CAM_SUCCESS) {
164 		cam_sim_free(sc->sim_0, TRUE);	/* passing true frees the devq */
165 		mtx_unlock(&sc->sim_lock);
166 		return (ENXIO);
167 	}
168 	if (xpt_create_path(&sc->path_0, NULL, cam_sim_path(sc->sim_0),
169 	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
170 		xpt_bus_deregister(cam_sim_path(sc->sim_0));
171 		cam_sim_free(sc->sim_0, TRUE);	/* passing true will free the
172 						 * devq */
173 		mtx_unlock(&sc->sim_lock);
174 		return (ENXIO);
175 	}
176 	mtx_unlock(&sc->sim_lock);
177 
178 	/*
179 	 * Create SIM for bus 1 and register, also create path
180 	 */
181 	sc->sim_1 = cam_sim_alloc(mrsas_action, mrsas_cam_poll, "mrsas", sc,
182 	    device_get_unit(sc->mrsas_dev), &sc->sim_lock, mrsas_cam_depth,
183 	    mrsas_cam_depth, devq);
184 	if (sc->sim_1 == NULL) {
185 		cam_simq_free(devq);
186 		device_printf(sc->mrsas_dev, "Cannot register SIM\n");
187 		return (ENXIO);
188 	}
189 	mtx_lock(&sc->sim_lock);
190 	if (xpt_bus_register(sc->sim_1, sc->mrsas_dev, 1) != CAM_SUCCESS) {
191 		cam_sim_free(sc->sim_1, TRUE);	/* passing true frees the devq */
192 		mtx_unlock(&sc->sim_lock);
193 		return (ENXIO);
194 	}
195 	if (xpt_create_path(&sc->path_1, NULL, cam_sim_path(sc->sim_1),
196 	    CAM_TARGET_WILDCARD,
197 	    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
198 		xpt_bus_deregister(cam_sim_path(sc->sim_1));
199 		cam_sim_free(sc->sim_1, TRUE);
200 		mtx_unlock(&sc->sim_lock);
201 		return (ENXIO);
202 	}
203 	mtx_unlock(&sc->sim_lock);
204 
205 #if (__FreeBSD_version <= 704000)
206 	if (mrsas_bus_scan(sc)) {
207 		device_printf(sc->mrsas_dev, "Error in bus scan.\n");
208 		return (1);
209 	}
210 #endif
211 	return (0);
212 }
213 
/*
 * mrsas_cam_detach:	De-allocates and teardown CAM
 * input:				Adapter instance soft state
 *
 * De-registers and frees the paths and SIMs.
 */
void
mrsas_cam_detach(struct mrsas_softc *sc)
{
	/* Drain/free the AEN taskqueue before tearing CAM state down. */
	if (sc->ev_tq != NULL)
		taskqueue_free(sc->ev_tq);
	mtx_lock(&sc->sim_lock);
	if (sc->path_0)
		xpt_free_path(sc->path_0);
	if (sc->sim_0) {
		xpt_bus_deregister(cam_sim_path(sc->sim_0));
		/*
		 * FALSE: the devq is shared with sim_1 (both SIMs are
		 * allocated with the same devq in mrsas_cam_attach), so it
		 * must not be freed here.
		 */
		cam_sim_free(sc->sim_0, FALSE);
	}
	if (sc->path_1)
		xpt_free_path(sc->path_1);
	if (sc->sim_1) {
		xpt_bus_deregister(cam_sim_path(sc->sim_1));
		/* TRUE frees the shared devq exactly once, here. */
		cam_sim_free(sc->sim_1, TRUE);
	}
	mtx_unlock(&sc->sim_lock);
}
240 
/*
 * mrsas_action:	SIM callback entry point
 * input:			pointer to SIM pointer to CAM Control Block
 *
 * This function processes CAM subsystem requests. The type of request is stored
 * in ccb->ccb_h.func_code.  The preprocessor #ifdef is necessary because
 * ccb->cpi.maxio is not supported for FreeBSD version 7.4 or earlier.
 */
static void
mrsas_action(struct cam_sim *sim, union ccb *ccb)
{
	struct mrsas_softc *sc = (struct mrsas_softc *)cam_sim_softc(sim);
	struct ccb_hdr *ccb_h = &(ccb->ccb_h);
	u_int32_t device_id;

	switch (ccb->ccb_h.func_code) {
	case XPT_SCSI_IO:
		{
			device_id = ccb_h->target_id;

			/*
			 * bus 0 is LD, bus 1 is for system-PD
			 */
			if (cam_sim_bus(sim) == 1 &&
			    sc->pd_list[device_id].driveState != MR_PD_STATE_SYSTEM) {
				/* PD target not exposed to the OS: fail fast. */
				ccb->ccb_h.status |= CAM_DEV_NOT_THERE;
				xpt_done(ccb);
			} else {
				/*
				 * mrsas_startio() completes or queues the IO;
				 * a non-zero return means the request could
				 * not be built.
				 */
				if (mrsas_startio(sc, sim, ccb)) {
					ccb->ccb_h.status |= CAM_REQ_INVALID;
					xpt_done(ccb);
				}
			}
			break;
		}
	case XPT_ABORT:
		{
			/* Individual command abort is not supported. */
			ccb->ccb_h.status = CAM_UA_ABORT;
			xpt_done(ccb);
			break;
		}
	case XPT_RESET_BUS:
		{
			/* Bus reset is a no-op; report completion as-is. */
			xpt_done(ccb);
			break;
		}
	case XPT_GET_TRAN_SETTINGS:
		{
			/* Report fixed SPI-style settings with tagged queueing. */
			ccb->cts.protocol = PROTO_SCSI;
			ccb->cts.protocol_version = SCSI_REV_2;
			ccb->cts.transport = XPORT_SPI;
			ccb->cts.transport_version = 2;
			ccb->cts.xport_specific.spi.valid = CTS_SPI_VALID_DISC;
			ccb->cts.xport_specific.spi.flags = CTS_SPI_FLAGS_DISC_ENB;
			ccb->cts.proto_specific.scsi.valid = CTS_SCSI_VALID_TQ;
			ccb->cts.proto_specific.scsi.flags = CTS_SCSI_FLAGS_TAG_ENB;
			ccb->ccb_h.status = CAM_REQ_CMP;
			xpt_done(ccb);
			break;
		}
	case XPT_SET_TRAN_SETTINGS:
		{
			ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
			xpt_done(ccb);
			break;
		}
	case XPT_CALC_GEOMETRY:
		{
			cam_calc_geometry(&ccb->ccg, 1);
			xpt_done(ccb);
			break;
		}
	case XPT_PATH_INQ:
		{
			ccb->cpi.version_num = 1;
			ccb->cpi.hba_inquiry = 0;
			ccb->cpi.target_sprt = 0;
			ccb->cpi.hba_misc = 0;
			ccb->cpi.hba_eng_cnt = 0;
			ccb->cpi.max_lun = MRSAS_SCSI_MAX_LUNS;
			ccb->cpi.unit_number = cam_sim_unit(sim);
			ccb->cpi.bus_id = cam_sim_bus(sim);
			ccb->cpi.initiator_id = MRSAS_SCSI_INITIATOR_ID;
			ccb->cpi.base_transfer_speed = 150000;
			strncpy(ccb->cpi.sim_vid, "FreeBSD", SIM_IDLEN);
			strncpy(ccb->cpi.hba_vid, "LSI", HBA_IDLEN);
			strncpy(ccb->cpi.dev_name, cam_sim_name(sim), DEV_IDLEN);
			ccb->cpi.transport = XPORT_SPI;
			ccb->cpi.transport_version = 2;
			ccb->cpi.protocol = PROTO_SCSI;
			ccb->cpi.protocol_version = SCSI_REV_2;
			/*
			 * NOTE(review): XPT_SCSI_IO above states bus 0 is LD
			 * and bus 1 is system-PD, yet bus 0 is sized by
			 * MRSAS_MAX_PD and bus 1 by MRSAS_MAX_LD_IDS here —
			 * confirm the mapping is intended and that both
			 * constants are >= the per-bus target count.
			 */
			if (ccb->cpi.bus_id == 0)
				ccb->cpi.max_target = MRSAS_MAX_PD - 1;
			else
				ccb->cpi.max_target = MRSAS_MAX_LD_IDS - 1;
#if (__FreeBSD_version > 704000)
			ccb->cpi.maxio = MRSAS_MAX_IO_SIZE;
#endif
			ccb->ccb_h.status = CAM_REQ_CMP;
			xpt_done(ccb);
			break;
		}
	default:
		{
			ccb->ccb_h.status = CAM_REQ_INVALID;
			xpt_done(ccb);
			break;
		}
	}
}
351 
/*
 * mrsas_scsiio_timeout:	Callback function for IO timed out
 * input:					mpt command context
 *
 * This function will execute after timeout value provided by ccb header from
 * CAM layer, if timer expires. Driver will run timer for all DCDB and LDIO
 * coming from CAM layer. This function is callback function for IO timeout
 * and it runs in no-sleep context. Set do_timedout_reset in Adapter context
 * so that it will execute OCR/Kill adapter from ocr_thread context.
 */
static void
mrsas_scsiio_timeout(void *data)
{
	struct mrsas_mpt_cmd *cmd;
	struct mrsas_softc *sc;

	cmd = (struct mrsas_mpt_cmd *)data;
	sc = cmd->sc;

	/* The command may already have completed and dropped its CCB. */
	if (cmd->ccb_ptr == NULL) {
		printf("command timeout with NULL ccb\n");
		return;
	}
	/*
	 * Below callout is dummy entry so that it will be cancelled from
	 * mrsas_cmd_done(). Now Controller will go to OCR/Kill Adapter based
	 * on OCR enable/disable property of Controller from ocr_thread
	 * context.  (600000 ms = 600 s placeholder re-arm.)
	 */
	callout_reset(&cmd->cm_callout, (600000 * hz) / 1000,
	    mrsas_scsiio_timeout, cmd);
	/* Flag the recovery thread; it performs the actual OCR/kill. */
	sc->do_timedout_reset = 1;
	if (sc->ocr_thread_active)
		wakeup(&sc->ocr_chan);
}
387 
/*
 * mrsas_startio:	SCSI IO entry point
 * input:			Adapter instance soft state
 * 					pointer to CAM Control Block
 *
 * This function is the SCSI IO entry point and it initiates IO processing. It
 * copies the IO and depending if the IO is read/write or inquiry, it would
 * call mrsas_build_ldio() or mrsas_build_dcdb(), respectively.  It returns 0
 * if the command is sent to firmware successfully, otherwise it returns 1.
 */
static int32_t
mrsas_startio(struct mrsas_softc *sc, struct cam_sim *sim,
    union ccb *ccb)
{
	struct mrsas_mpt_cmd *cmd;
	struct ccb_hdr *ccb_h = &(ccb->ccb_h);
	struct ccb_scsiio *csio = &(ccb->csio);
	MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;

	/*
	 * SYNCHRONIZE CACHE is completed inline without going to firmware.
	 * NOTE(review): the opcode is peeked from cdb_bytes even though the
	 * CDB may live behind cdb_ptr when CAM_CDB_POINTER is set (handled
	 * further below for the copy) — confirm this is safe for all callers.
	 */
	if ((csio->cdb_io.cdb_bytes[0]) == SYNCHRONIZE_CACHE) {
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		return (0);
	}
	ccb_h->status |= CAM_SIM_QUEUED;
	cmd = mrsas_get_mpt_cmd(sc);

	/* Command pool exhausted: ask CAM to requeue the CCB later. */
	if (!cmd) {
		ccb_h->status |= CAM_REQUEUE_REQ;
		xpt_done(ccb);
		return (0);
	}
	/* Record the transfer direction for the MPI control flags below. */
	if ((ccb_h->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
		if (ccb_h->flags & CAM_DIR_IN)
			cmd->flags |= MRSAS_DIR_IN;
		if (ccb_h->flags & CAM_DIR_OUT)
			cmd->flags |= MRSAS_DIR_OUT;
	} else
		cmd->flags = MRSAS_DIR_NONE;	/* no data */

	/* For FreeBSD 10.0 and higher */
#if (__FreeBSD_version >= 1000000)
	/*
	 * XXX We don't yet support physical addresses here.
	 */
	switch ((ccb->ccb_h.flags & CAM_DATA_MASK)) {
	case CAM_DATA_PADDR:
	case CAM_DATA_SG_PADDR:
		device_printf(sc->mrsas_dev, "%s: physical addresses not supported\n",
		    __func__);
		mrsas_release_mpt_cmd(cmd);
		ccb_h->status = CAM_REQ_INVALID;
		ccb_h->status &= ~CAM_SIM_QUEUED;
		goto done;
	case CAM_DATA_SG:
		device_printf(sc->mrsas_dev, "%s: scatter gather is not supported\n",
		    __func__);
		mrsas_release_mpt_cmd(cmd);
		ccb_h->status = CAM_REQ_INVALID;
		goto done;
	case CAM_DATA_VADDR:
		/* Single virtually-addressed buffer: the common case. */
		if (csio->dxfer_len > MRSAS_MAX_IO_SIZE) {
			mrsas_release_mpt_cmd(cmd);
			ccb_h->status = CAM_REQ_TOO_BIG;
			goto done;
		}
		cmd->length = csio->dxfer_len;
		if (cmd->length)
			cmd->data = csio->data_ptr;
		break;
	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		goto done;
	}
#else
	if (!(ccb_h->flags & CAM_DATA_PHYS)) {	/* Virtual data address */
		if (!(ccb_h->flags & CAM_SCATTER_VALID)) {
			if (csio->dxfer_len > MRSAS_MAX_IO_SIZE) {
				mrsas_release_mpt_cmd(cmd);
				ccb_h->status = CAM_REQ_TOO_BIG;
				goto done;
			}
			cmd->length = csio->dxfer_len;
			if (cmd->length)
				cmd->data = csio->data_ptr;
		} else {
			mrsas_release_mpt_cmd(cmd);
			ccb_h->status = CAM_REQ_INVALID;
			goto done;
		}
	} else {			/* Data addresses are physical. */
		mrsas_release_mpt_cmd(cmd);
		ccb_h->status = CAM_REQ_INVALID;
		ccb_h->status &= ~CAM_SIM_QUEUED;
		goto done;
	}
#endif
	/* save ccb ptr */
	cmd->ccb_ptr = ccb;

	/* SMIDs are 1-based; the descriptor array is 0-based. */
	req_desc = mrsas_get_request_desc(sc, (cmd->index) - 1);
	if (!req_desc) {
		/*
		 * NOTE(review): this path returns FAIL without releasing
		 * cmd back to the pool or completing the CCB — confirm
		 * whether the leak is intentional (e.g. recovered by OCR).
		 */
		device_printf(sc->mrsas_dev, "Cannot get request_descriptor.\n");
		return (FAIL);
	}
	memset(req_desc, 0, sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION));
	cmd->request_desc = req_desc;

	/* Copy the CDB from wherever CAM placed it (pointer or inline). */
	if (ccb_h->flags & CAM_CDB_POINTER)
		bcopy(csio->cdb_io.cdb_ptr, cmd->io_request->CDB.CDB32, csio->cdb_len);
	else
		bcopy(csio->cdb_io.cdb_bytes, cmd->io_request->CDB.CDB32, csio->cdb_len);
	/* raidmap_lock serializes against RAID map updates during the build. */
	mtx_lock(&sc->raidmap_lock);

	if (mrsas_ldio_inq(sim, ccb)) {
		if (mrsas_build_ldio(sc, cmd, ccb)) {
			device_printf(sc->mrsas_dev, "Build LDIO failed.\n");
			mtx_unlock(&sc->raidmap_lock);
			return (1);
		}
	} else {
		if (mrsas_build_dcdb(sc, cmd, ccb, sim)) {
			device_printf(sc->mrsas_dev, "Build DCDB failed.\n");
			mtx_unlock(&sc->raidmap_lock);
			return (1);
		}
	}
	mtx_unlock(&sc->raidmap_lock);

	if (cmd->flags == MRSAS_DIR_IN)	/* from device */
		cmd->io_request->Control |= MPI2_SCSIIO_CONTROL_READ;
	else if (cmd->flags == MRSAS_DIR_OUT)	/* to device */
		cmd->io_request->Control |= MPI2_SCSIIO_CONTROL_WRITE;

	cmd->io_request->SGLFlags = MPI2_SGE_FLAGS_64_BIT_ADDRESSING;
	cmd->io_request->SGLOffset0 = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 4;
	cmd->io_request->SenseBufferLowAddress = cmd->sense_phys_addr;
	cmd->io_request->SenseBufferLength = MRSAS_SCSI_SENSE_BUFFERSIZE;

	req_desc = cmd->request_desc;
	req_desc->SCSIIO.SMID = cmd->index;

	/*
	 * Start timer for IO timeout. Default timeout value is 90 second.
	 * (sc->mrsas_io_timeout is in milliseconds.)
	 */
	callout_reset(&cmd->cm_callout, (sc->mrsas_io_timeout * hz) / 1000,
	    mrsas_scsiio_timeout, cmd);
	mrsas_atomic_inc(&sc->fw_outstanding);

	if (mrsas_atomic_read(&sc->fw_outstanding) > sc->io_cmds_highwater)
		sc->io_cmds_highwater++;

	/* Hand the request descriptor to firmware. */
	mrsas_fire_cmd(sc, req_desc->addr.u.low, req_desc->addr.u.high);
	return (0);

done:
	xpt_done(ccb);
	return (0);
}
547 
548 /*
549  * mrsas_ldio_inq:	Determines if IO is read/write or inquiry
550  * input:			pointer to CAM Control Block
551  *
552  * This function determines if the IO is read/write or inquiry.  It returns a 1
553  * if the IO is read/write and 0 if it is inquiry.
554  */
555 int
556 mrsas_ldio_inq(struct cam_sim *sim, union ccb *ccb)
557 {
558 	struct ccb_scsiio *csio = &(ccb->csio);
559 
560 	if (cam_sim_bus(sim) == 1)
561 		return (0);
562 
563 	switch (csio->cdb_io.cdb_bytes[0]) {
564 	case READ_10:
565 	case WRITE_10:
566 	case READ_12:
567 	case WRITE_12:
568 	case READ_6:
569 	case WRITE_6:
570 	case READ_16:
571 	case WRITE_16:
572 		return 1;
573 	default:
574 		return 0;
575 	}
576 }
577 
578 /*
579  * mrsas_get_mpt_cmd:	Get a cmd from free command pool
580  * input:				Adapter instance soft state
581  *
582  * This function removes an MPT command from the command free list and
583  * initializes it.
584  */
585 struct mrsas_mpt_cmd *
586 mrsas_get_mpt_cmd(struct mrsas_softc *sc)
587 {
588 	struct mrsas_mpt_cmd *cmd = NULL;
589 
590 	mtx_lock(&sc->mpt_cmd_pool_lock);
591 	if (!TAILQ_EMPTY(&sc->mrsas_mpt_cmd_list_head)) {
592 		cmd = TAILQ_FIRST(&sc->mrsas_mpt_cmd_list_head);
593 		TAILQ_REMOVE(&sc->mrsas_mpt_cmd_list_head, cmd, next);
594 	}
595 	memset((uint8_t *)cmd->io_request, 0, MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE);
596 	cmd->data = NULL;
597 	cmd->length = 0;
598 	cmd->flags = 0;
599 	cmd->error_code = 0;
600 	cmd->load_balance = 0;
601 	cmd->ccb_ptr = NULL;
602 	mtx_unlock(&sc->mpt_cmd_pool_lock);
603 
604 	return cmd;
605 }
606 
607 /*
608  * mrsas_release_mpt_cmd:	Return a cmd to free command pool
609  * input:					Command packet for return to free command pool
610  *
611  * This function returns an MPT command to the free command list.
612  */
613 void
614 mrsas_release_mpt_cmd(struct mrsas_mpt_cmd *cmd)
615 {
616 	struct mrsas_softc *sc = cmd->sc;
617 
618 	mtx_lock(&sc->mpt_cmd_pool_lock);
619 	cmd->sync_cmd_idx = (u_int32_t)MRSAS_ULONG_MAX;
620 	TAILQ_INSERT_TAIL(&(sc->mrsas_mpt_cmd_list_head), cmd, next);
621 	mtx_unlock(&sc->mpt_cmd_pool_lock);
622 
623 	return;
624 }
625 
626 /*
627  * mrsas_get_request_desc:	Get request descriptor from array
628  * input:					Adapter instance soft state
629  * 							SMID index
630  *
631  * This function returns a pointer to the request descriptor.
632  */
633 MRSAS_REQUEST_DESCRIPTOR_UNION *
634 mrsas_get_request_desc(struct mrsas_softc *sc, u_int16_t index)
635 {
636 	u_int8_t *p;
637 
638 	if (index >= sc->max_fw_cmds) {
639 		device_printf(sc->mrsas_dev, "Invalid SMID (0x%x)request for desc\n", index);
640 		return NULL;
641 	}
642 	p = sc->req_desc + sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION) * index;
643 
644 	return (MRSAS_REQUEST_DESCRIPTOR_UNION *) p;
645 }
646 
647 /*
648  * mrsas_build_ldio:	Builds an LDIO command
649  * input:				Adapter instance soft state
650  * 						Pointer to command packet
651  * 						Pointer to CCB
652  *
653  * This function builds the LDIO command packet.  It returns 0 if the command is
654  * built successfully, otherwise it returns a 1.
655  */
656 int
657 mrsas_build_ldio(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
658     union ccb *ccb)
659 {
660 	struct ccb_hdr *ccb_h = &(ccb->ccb_h);
661 	struct ccb_scsiio *csio = &(ccb->csio);
662 	u_int32_t device_id;
663 	MRSAS_RAID_SCSI_IO_REQUEST *io_request;
664 
665 	device_id = ccb_h->target_id;
666 
667 	io_request = cmd->io_request;
668 	io_request->RaidContext.VirtualDiskTgtId = device_id;
669 	io_request->RaidContext.status = 0;
670 	io_request->RaidContext.exStatus = 0;
671 
672 	/* just the cdb len, other flags zero, and ORed-in later for FP */
673 	io_request->IoFlags = csio->cdb_len;
674 
675 	if (mrsas_setup_io(sc, cmd, ccb, device_id, io_request) != SUCCESS)
676 		device_printf(sc->mrsas_dev, "Build ldio or fpio error\n");
677 
678 	io_request->DataLength = cmd->length;
679 
680 	if (mrsas_map_request(sc, cmd) == SUCCESS) {
681 		if (cmd->sge_count > MRSAS_MAX_SGL) {
682 			device_printf(sc->mrsas_dev, "Error: sge_count (0x%x) exceeds"
683 			    "max (0x%x) allowed\n", cmd->sge_count, sc->max_num_sge);
684 			return (FAIL);
685 		}
686 		io_request->RaidContext.numSGE = cmd->sge_count;
687 	} else {
688 		device_printf(sc->mrsas_dev, "Data map/load failed.\n");
689 		return (FAIL);
690 	}
691 	return (0);
692 }
693 
/*
 * mrsas_setup_io:	Set up data including Fast Path I/O
 * input:			Adapter instance soft state
 * 					Pointer to command packet
 * 					Pointer to CCB
 *
 * Decodes the CDB into LBA/length, consults the RAID map to decide whether
 * the IO can take the fast path (direct to the drive) or must go through
 * firmware LD IO, and fills the request/descriptor fields accordingly.
 * Returns 0 on success.
 */
int
mrsas_setup_io(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
    union ccb *ccb, u_int32_t device_id,
    MRSAS_RAID_SCSI_IO_REQUEST * io_request)
{
	struct ccb_hdr *ccb_h = &(ccb->ccb_h);
	struct ccb_scsiio *csio = &(ccb->csio);
	struct IO_REQUEST_INFO io_info;
	MR_DRV_RAID_MAP_ALL *map_ptr;
	u_int8_t fp_possible;
	u_int32_t start_lba_hi, start_lba_lo, ld_block_size;
	u_int32_t datalength = 0;

	start_lba_lo = 0;
	start_lba_hi = 0;
	fp_possible = 0;

	/*
	 * READ_6 (0x08) or WRITE_6 (0x0A) cdb.  CDB fields are big-endian.
	 * NOTE(review): per SCSI, a READ(6)/WRITE(6) transfer length of 0
	 * means 256 blocks; here 0 is used literally — confirm intended.
	 */
	if (csio->cdb_len == 6) {
		datalength = (u_int32_t)csio->cdb_io.cdb_bytes[4];
		start_lba_lo = ((u_int32_t)csio->cdb_io.cdb_bytes[1] << 16) |
		    ((u_int32_t)csio->cdb_io.cdb_bytes[2] << 8) |
		    (u_int32_t)csio->cdb_io.cdb_bytes[3];
		start_lba_lo &= 0x1FFFFF;
	}
	/*
	 * READ_10 (0x28) or WRITE_10 (0x2A) cdb
	 */
	else if (csio->cdb_len == 10) {
		datalength = (u_int32_t)csio->cdb_io.cdb_bytes[8] |
		    ((u_int32_t)csio->cdb_io.cdb_bytes[7] << 8);
		start_lba_lo = ((u_int32_t)csio->cdb_io.cdb_bytes[2] << 24) |
		    ((u_int32_t)csio->cdb_io.cdb_bytes[3] << 16) |
		    (u_int32_t)csio->cdb_io.cdb_bytes[4] << 8 |
		    ((u_int32_t)csio->cdb_io.cdb_bytes[5]);
	}
	/*
	 * READ_12 (0xA8) or WRITE_12 (0xAA) cdb
	 */
	else if (csio->cdb_len == 12) {
		datalength = (u_int32_t)csio->cdb_io.cdb_bytes[6] << 24 |
		    ((u_int32_t)csio->cdb_io.cdb_bytes[7] << 16) |
		    ((u_int32_t)csio->cdb_io.cdb_bytes[8] << 8) |
		    ((u_int32_t)csio->cdb_io.cdb_bytes[9]);
		start_lba_lo = ((u_int32_t)csio->cdb_io.cdb_bytes[2] << 24) |
		    ((u_int32_t)csio->cdb_io.cdb_bytes[3] << 16) |
		    (u_int32_t)csio->cdb_io.cdb_bytes[4] << 8 |
		    ((u_int32_t)csio->cdb_io.cdb_bytes[5]);
	}
	/*
	 * READ_16 (0x88) or WRITE_16 (0x8A) cdb: 64-bit LBA split into
	 * hi/lo 32-bit halves.
	 */
	else if (csio->cdb_len == 16) {
		datalength = (u_int32_t)csio->cdb_io.cdb_bytes[10] << 24 |
		    ((u_int32_t)csio->cdb_io.cdb_bytes[11] << 16) |
		    ((u_int32_t)csio->cdb_io.cdb_bytes[12] << 8) |
		    ((u_int32_t)csio->cdb_io.cdb_bytes[13]);
		start_lba_lo = ((u_int32_t)csio->cdb_io.cdb_bytes[6] << 24) |
		    ((u_int32_t)csio->cdb_io.cdb_bytes[7] << 16) |
		    (u_int32_t)csio->cdb_io.cdb_bytes[8] << 8 |
		    ((u_int32_t)csio->cdb_io.cdb_bytes[9]);
		start_lba_hi = ((u_int32_t)csio->cdb_io.cdb_bytes[2] << 24) |
		    ((u_int32_t)csio->cdb_io.cdb_bytes[3] << 16) |
		    (u_int32_t)csio->cdb_io.cdb_bytes[4] << 8 |
		    ((u_int32_t)csio->cdb_io.cdb_bytes[5]);
	}
	memset(&io_info, 0, sizeof(struct IO_REQUEST_INFO));
	io_info.ldStartBlock = ((u_int64_t)start_lba_hi << 32) | start_lba_lo;
	io_info.numBlocks = datalength;
	io_info.ldTgtId = device_id;

	switch (ccb_h->flags & CAM_DIR_MASK) {
	case CAM_DIR_IN:
		io_info.isRead = 1;
		break;
	case CAM_DIR_OUT:
		io_info.isRead = 0;
		break;
	case CAM_DIR_NONE:
	default:
		mrsas_dprint(sc, MRSAS_TRACE, "From %s : DMA Flag is %d \n", __func__, ccb_h->flags & CAM_DIR_MASK);
		break;
	}

	/* Use the currently active copy of the double-buffered RAID map. */
	map_ptr = sc->ld_drv_map[(sc->map_id & 1)];
	ld_block_size = MR_LdBlockSizeGet(device_id, map_ptr, sc);

	if ((MR_TargetIdToLdGet(device_id, map_ptr) >= MAX_LOGICAL_DRIVES_EXT) ||
	    (!sc->fast_path_io)) {
		/* Target not in the map or FP disabled: firmware path. */
		io_request->RaidContext.regLockFlags = 0;
		fp_possible = 0;
	} else {
		/* RAID context build decides whether FP is OK for this IO. */
		if (MR_BuildRaidContext(sc, &io_info, &io_request->RaidContext, map_ptr))
			fp_possible = io_info.fpOkForIo;
	}

	/* Spread completions across MSI-X vectors by current CPU. */
	cmd->request_desc->SCSIIO.MSIxIndex =
	    sc->msix_vectors ? smp_processor_id() % sc->msix_vectors : 0;


	if (fp_possible) {
		mrsas_set_pd_lba(io_request, csio->cdb_len, &io_info, ccb, map_ptr,
		    start_lba_lo, ld_block_size);
		io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
		cmd->request_desc->SCSIIO.RequestFlags =
		    (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
		    MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
		/* Invader/Fury (MPI2.5) need extra CUDA/lock hints. */
		if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY)) {
			if (io_request->RaidContext.regLockFlags == REGION_TYPE_UNUSED)
				cmd->request_desc->SCSIIO.RequestFlags =
				    (MRSAS_REQ_DESCRIPT_FLAGS_NO_LOCK <<
				    MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
			io_request->RaidContext.Type = MPI2_TYPE_CUDA;
			io_request->RaidContext.nseg = 0x1;
			io_request->IoFlags |= MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH;
			io_request->RaidContext.regLockFlags |=
			    (MR_RL_FLAGS_GRANT_DESTINATION_CUDA |
			    MR_RL_FLAGS_SEQ_NUM_ENABLE);
		}
		/* Read load balancing may redirect to the mirror arm. */
		if ((sc->load_balance_info[device_id].loadBalanceFlag) &&
		    (io_info.isRead)) {
			io_info.devHandle =
			    mrsas_get_updated_dev_handle(&sc->load_balance_info[device_id],
			    &io_info);
			cmd->load_balance = MRSAS_LOAD_BALANCE_FLAG;
		} else
			cmd->load_balance = 0;
		cmd->request_desc->SCSIIO.DevHandle = io_info.devHandle;
		io_request->DevHandle = io_info.devHandle;
	} else {
		/* Not FP IO */
		io_request->RaidContext.timeoutValue = map_ptr->raidMap.fpPdIoTimeoutSec;
		cmd->request_desc->SCSIIO.RequestFlags =
		    (MRSAS_REQ_DESCRIPT_FLAGS_LD_IO <<
		    MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
		if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY)) {
			if (io_request->RaidContext.regLockFlags == REGION_TYPE_UNUSED)
				cmd->request_desc->SCSIIO.RequestFlags =
				    (MRSAS_REQ_DESCRIPT_FLAGS_NO_LOCK <<
				    MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
			io_request->RaidContext.Type = MPI2_TYPE_CUDA;
			io_request->RaidContext.regLockFlags |=
			    (MR_RL_FLAGS_GRANT_DESTINATION_CPU0 |
			    MR_RL_FLAGS_SEQ_NUM_ENABLE);
			io_request->RaidContext.nseg = 0x1;
		}
		io_request->Function = MRSAS_MPI2_FUNCTION_LD_IO_REQUEST;
		io_request->DevHandle = device_id;
	}
	return (0);
}
856 
857 /*
858  * mrsas_build_dcdb:	Builds an DCDB command
859  * input:				Adapter instance soft state
860  * 						Pointer to command packet
861  * 						Pointer to CCB
862  *
863  * This function builds the DCDB inquiry command.  It returns 0 if the command
864  * is built successfully, otherwise it returns a 1.
865  */
int
mrsas_build_dcdb(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
    union ccb *ccb, struct cam_sim *sim)
{
	struct ccb_hdr *ccb_h = &(ccb->ccb_h);
	u_int32_t device_id;
	MR_DRV_RAID_MAP_ALL *map_ptr;
	MRSAS_RAID_SCSI_IO_REQUEST *io_request;

	io_request = cmd->io_request;
	device_id = ccb_h->target_id;
	/* Pick the currently active copy of the driver RAID map. */
	map_ptr = sc->ld_drv_map[(sc->map_id & 1)];

	/* Check if this is for system PD */
	if (cam_sim_bus(sim) == 1 &&
	    sc->pd_list[device_id].driveState == MR_PD_STATE_SYSTEM) {
		/*
		 * System PD on bus 1: issue the command directly to the
		 * device handle from the RAID map (fast-path style), with a
		 * PD I/O timeout taken from the map.
		 */
		io_request->Function = 0;
		io_request->DevHandle = map_ptr->raidMap.devHndlInfo[device_id].
		    curDevHdl;
		io_request->RaidContext.timeoutValue = map_ptr->raidMap.fpPdIoTimeoutSec;
		/* No region locking is needed for direct PD access. */
		io_request->RaidContext.regLockFlags = 0;
		io_request->RaidContext.regLockRowLBA = 0;
		io_request->RaidContext.regLockLength = 0;

		io_request->RaidContext.RAIDFlags = MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD
		    << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT;
		/* Invader/Fury controllers want the fast-path IO flag set. */
		if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY))
			io_request->IoFlags |= MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH;
		cmd->request_desc->SCSIIO.RequestFlags =
		    (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
		    MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
		cmd->request_desc->SCSIIO.DevHandle =
		    map_ptr->raidMap.devHndlInfo[device_id].curDevHdl;
		/* Spread completions across MSI-X vectors by current CPU. */
		cmd->request_desc->SCSIIO.MSIxIndex =
		    sc->msix_vectors ? smp_processor_id() % sc->msix_vectors : 0;

	} else {
		/* Default: route the request to firmware as an LD I/O. */
		io_request->Function = MRSAS_MPI2_FUNCTION_LD_IO_REQUEST;
		io_request->DevHandle = device_id;
		cmd->request_desc->SCSIIO.RequestFlags =
		    (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
		    MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
	}

	io_request->RaidContext.VirtualDiskTgtId = device_id;
	/* Low nibble of the LUN goes into byte 1 of the LUN field. */
	io_request->LUN[1] = ccb_h->target_lun & 0xF;
	io_request->DataLength = cmd->length;

	/* Map the data buffer; the SG list is built in the load callback. */
	if (mrsas_map_request(sc, cmd) == SUCCESS) {
		if (cmd->sge_count > sc->max_num_sge) {
			device_printf(sc->mrsas_dev, "Error: sge_count (0x%x) exceeds"
			    "max (0x%x) allowed\n", cmd->sge_count, sc->max_num_sge);
			return (1);
		}
		io_request->RaidContext.numSGE = cmd->sge_count;
	} else {
		device_printf(sc->mrsas_dev, "Data map/load failed.\n");
		return (1);
	}
	return (0);
}
927 
928 /*
929  * mrsas_map_request:	Map and load data
930  * input:				Adapter instance soft state
931  * 						Pointer to command packet
932  *
933  * For data from OS, map and load the data buffer into bus space.  The SG list
934  * is built in the callback.  If the  bus dmamap load is not successful,
935  * cmd->error_code will contain the  error code and a 1 is returned.
936  */
int
mrsas_map_request(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd)
{
	u_int32_t retcode = 0;
	struct cam_sim *sim;
	int flag = BUS_DMA_NOWAIT;

	sim = xpt_path_sim(cmd->ccb_ptr->ccb_h.path);

	if (cmd->data != NULL) {
		mtx_lock(&sc->io_lock);
		/* Map data buffer into bus space */
		/*
		 * With BUS_DMA_NOWAIT the callback (mrsas_data_load_cb) runs
		 * synchronously and builds the SG list before this returns.
		 */
		retcode = bus_dmamap_load(sc->data_tag, cmd->data_dmamap, cmd->data,
		    cmd->length, mrsas_data_load_cb, cmd, flag);
		mtx_unlock(&sc->io_lock);
		if (retcode)
			device_printf(sc->mrsas_dev, "bus_dmamap_load(): retcode = %d\n", retcode);
		if (retcode == EINPROGRESS) {
			/*
			 * Deferred load: freeze the SIM queue and have CAM
			 * requeue this request.
			 */
			device_printf(sc->mrsas_dev, "request load in progress\n");
			mrsas_freeze_simq(cmd, sim);
		}
	}
	/* The load callback records mapping errors in cmd->error_code. */
	if (cmd->error_code)
		return (1);
	return (retcode);
}
963 
964 /*
965  * mrsas_unmap_request:	Unmap and unload data
966  * input:				Adapter instance soft state
967  * 						Pointer to command packet
968  *
969  * This function unmaps and unloads data from OS.
970  */
971 void
972 mrsas_unmap_request(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd)
973 {
974 	if (cmd->data != NULL) {
975 		if (cmd->flags & MRSAS_DIR_IN)
976 			bus_dmamap_sync(sc->data_tag, cmd->data_dmamap, BUS_DMASYNC_POSTREAD);
977 		if (cmd->flags & MRSAS_DIR_OUT)
978 			bus_dmamap_sync(sc->data_tag, cmd->data_dmamap, BUS_DMASYNC_POSTWRITE);
979 		mtx_lock(&sc->io_lock);
980 		bus_dmamap_unload(sc->data_tag, cmd->data_dmamap);
981 		mtx_unlock(&sc->io_lock);
982 	}
983 }
984 
985 /*
986  * mrsas_data_load_cb:	Callback entry point
987  * input:				Pointer to command packet as argument
988  * 						Pointer to segment
 * 						Number of segments
 * 						Error code from the DMA load
990  *
991  * This is the callback function of the bus dma map load.  It builds the SG
992  * list.
993  */
/*
 * Callback for bus_dmamap_load(): builds the IEEE SG list inside the MPT
 * request frame, chaining into cmd->chain_frame when the segment count
 * exceeds what fits in the main message.  On error it records the code in
 * cmd->error_code.  On success cmd->sge_count is set to nseg.
 */
static void
mrsas_data_load_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct mrsas_mpt_cmd *cmd = (struct mrsas_mpt_cmd *)arg;
	struct mrsas_softc *sc = cmd->sc;
	MRSAS_RAID_SCSI_IO_REQUEST *io_request;
	pMpi25IeeeSgeChain64_t sgl_ptr;
	int i = 0, sg_processed = 0;

	if (error) {
		cmd->error_code = error;
		device_printf(sc->mrsas_dev, "mrsas_data_load_cb: error=%d\n", error);
		if (error == EFBIG) {
			/* Too many segments: fail the CCB outright. */
			cmd->ccb_ptr->ccb_h.status = CAM_REQ_TOO_BIG;
			return;
		}
		/*
		 * NOTE(review): for errors other than EFBIG execution falls
		 * through and still builds the SG list; the caller detects
		 * the failure via cmd->error_code.
		 */
	}
	/* Pre-transfer DMA sync for the direction(s) of this I/O. */
	if (cmd->flags & MRSAS_DIR_IN)
		bus_dmamap_sync(cmd->sc->data_tag, cmd->data_dmamap,
		    BUS_DMASYNC_PREREAD);
	if (cmd->flags & MRSAS_DIR_OUT)
		bus_dmamap_sync(cmd->sc->data_tag, cmd->data_dmamap,
		    BUS_DMASYNC_PREWRITE);
	if (nseg > sc->max_num_sge) {
		/* Returns without setting cmd->sge_count for this request. */
		device_printf(sc->mrsas_dev, "SGE count is too large or 0.\n");
		return;
	}
	io_request = cmd->io_request;
	sgl_ptr = (pMpi25IeeeSgeChain64_t)&io_request->SGL;

	if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY)) {
		/*
		 * Invader/Fury: clear the flags of the last possible SGE in
		 * the main message up front.
		 */
		pMpi25IeeeSgeChain64_t sgl_ptr_end = sgl_ptr;

		sgl_ptr_end += sc->max_sge_in_main_msg - 1;
		sgl_ptr_end->Flags = 0;
	}
	if (nseg != 0) {
		for (i = 0; i < nseg; i++) {
			sgl_ptr->Address = segs[i].ds_addr;
			sgl_ptr->Length = segs[i].ds_len;
			sgl_ptr->Flags = 0;
			if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY)) {
				/* Mark the final SGE on Invader/Fury. */
				if (i == nseg - 1)
					sgl_ptr->Flags = IEEE_SGE_FLAGS_END_OF_LIST;
			}
			sgl_ptr++;
			sg_processed = i + 1;
			/*
			 * If the main message is about to run out of SGE
			 * slots and more segments remain, insert a chain
			 * element pointing at the chain frame and continue
			 * building there.
			 */
			if ((sg_processed == (sc->max_sge_in_main_msg - 1)) &&
			    (nseg > sc->max_sge_in_main_msg)) {
				pMpi25IeeeSgeChain64_t sg_chain;

				if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY)) {
					/* Fast-path I/O carries no chain offset on these chips. */
					if ((cmd->io_request->IoFlags & MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH)
					    != MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH)
						cmd->io_request->ChainOffset = sc->chain_offset_io_request;
					else
						cmd->io_request->ChainOffset = 0;
				} else
					cmd->io_request->ChainOffset = sc->chain_offset_io_request;
				sg_chain = sgl_ptr;
				if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY))
					sg_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT;
				else
					sg_chain->Flags = (IEEE_SGE_FLAGS_CHAIN_ELEMENT | MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR);
				/* Length covers all SGEs that spill into the chain frame. */
				sg_chain->Length = (sizeof(MPI2_SGE_IO_UNION) * (nseg - sg_processed));
				sg_chain->Address = cmd->chain_frame_phys_addr;
				sgl_ptr = (pMpi25IeeeSgeChain64_t)cmd->chain_frame;
			}
		}
	}
	cmd->sge_count = nseg;
}
1066 
1067 /*
1068  * mrsas_freeze_simq:	Freeze SIM queue
1069  * input:				Pointer to command packet
1070  * 						Pointer to SIM
1071  *
1072  * This function freezes the sim queue.
1073  */
1074 static void
1075 mrsas_freeze_simq(struct mrsas_mpt_cmd *cmd, struct cam_sim *sim)
1076 {
1077 	union ccb *ccb = (union ccb *)(cmd->ccb_ptr);
1078 
1079 	xpt_freeze_simq(sim, 1);
1080 	ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
1081 	ccb->ccb_h.status |= CAM_REQUEUE_REQ;
1082 }
1083 
/*
 * mrsas_xpt_freeze:	Freeze both SIM queues
 * input:				Adapter instance soft state
 *
 * Freezes the SIM queues of both buses (LD and system PD).
 */
void
mrsas_xpt_freeze(struct mrsas_softc *sc)
{
	xpt_freeze_simq(sc->sim_0, 1);
	xpt_freeze_simq(sc->sim_1, 1);
}
1090 
/*
 * mrsas_xpt_release:	Release both SIM queues
 * input:				Adapter instance soft state
 *
 * Releases the SIM queues of both buses; counterpart of mrsas_xpt_freeze().
 */
void
mrsas_xpt_release(struct mrsas_softc *sc)
{
	xpt_release_simq(sc->sim_0, 1);
	xpt_release_simq(sc->sim_1, 1);
}
1097 
1098 /*
1099  * mrsas_cmd_done:	Perform remaining command completion
1100  * input:			Adapter instance soft state  Pointer to command packet
1101  *
 * This function calls unmap request and releases the MPT command.
1103  */
void
mrsas_cmd_done(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd)
{
	/* Cancel the per-command timeout before completing. */
	callout_stop(&cmd->cm_callout);
	mrsas_unmap_request(sc, cmd);
	/* Complete the CCB to CAM under the SIM lock, then detach it. */
	mtx_lock(&sc->sim_lock);
	xpt_done(cmd->ccb_ptr);
	cmd->ccb_ptr = NULL;
	mtx_unlock(&sc->sim_lock);
	/* Return the MPT command to the free pool. */
	mrsas_release_mpt_cmd(cmd);
}
1115 
1116 /*
1117  * mrsas_cam_poll:	Polling entry point
1118  * input:			Pointer to SIM
1119  *
1120  * This is currently a stub function.
1121  */
static void
mrsas_cam_poll(struct cam_sim *sim)
{
	/* Poll by invoking the interrupt handler on this SIM's softc. */
	mrsas_isr((void *)(struct mrsas_softc *)cam_sim_softc(sim));
}
1129 
1130 /*
1131  * mrsas_bus_scan:	Perform bus scan
1132  * input:			Adapter instance soft state
1133  *
1134  * This mrsas_bus_scan function is needed for FreeBSD 7.x.  Also, it should not
1135  * be called in FreeBSD 8.x and later versions, where the bus scan is
1136  * automatic.
1137  */
1138 int
1139 mrsas_bus_scan(struct mrsas_softc *sc)
1140 {
1141 	union ccb *ccb_0;
1142 	union ccb *ccb_1;
1143 
1144 	if ((ccb_0 = xpt_alloc_ccb()) == NULL) {
1145 		return (ENOMEM);
1146 	}
1147 	if ((ccb_1 = xpt_alloc_ccb()) == NULL) {
1148 		xpt_free_ccb(ccb_0);
1149 		return (ENOMEM);
1150 	}
1151 	mtx_lock(&sc->sim_lock);
1152 	if (xpt_create_path(&ccb_0->ccb_h.path, xpt_periph, cam_sim_path(sc->sim_0),
1153 	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
1154 		xpt_free_ccb(ccb_0);
1155 		xpt_free_ccb(ccb_1);
1156 		mtx_unlock(&sc->sim_lock);
1157 		return (EIO);
1158 	}
1159 	if (xpt_create_path(&ccb_1->ccb_h.path, xpt_periph, cam_sim_path(sc->sim_1),
1160 	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
1161 		xpt_free_ccb(ccb_0);
1162 		xpt_free_ccb(ccb_1);
1163 		mtx_unlock(&sc->sim_lock);
1164 		return (EIO);
1165 	}
1166 	mtx_unlock(&sc->sim_lock);
1167 	xpt_rescan(ccb_0);
1168 	xpt_rescan(ccb_1);
1169 
1170 	return (0);
1171 }
1172 
1173 /*
1174  * mrsas_bus_scan_sim:	Perform bus scan per SIM
1175  * input:				adapter instance soft state
1176  *
1177  * This function will be called from Event handler on LD creation/deletion,
1178  * JBOD on/off.
1179  */
1180 int
1181 mrsas_bus_scan_sim(struct mrsas_softc *sc, struct cam_sim *sim)
1182 {
1183 	union ccb *ccb;
1184 
1185 	if ((ccb = xpt_alloc_ccb()) == NULL) {
1186 		return (ENOMEM);
1187 	}
1188 	mtx_lock(&sc->sim_lock);
1189 	if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, cam_sim_path(sim),
1190 	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
1191 		xpt_free_ccb(ccb);
1192 		mtx_unlock(&sc->sim_lock);
1193 		return (EIO);
1194 	}
1195 	mtx_unlock(&sc->sim_lock);
1196 	xpt_rescan(ccb);
1197 
1198 	return (0);
1199 }
1200