xref: /freebsd/sys/dev/smartpqi/smartpqi_cam.c (revision dba7640e44c5ec148a84b0d58c6c9a3c9e5147f3)
/*-
 * Copyright (c) 2018 Microsemi Corporation.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* $FreeBSD$ */
/*
 * CAM interface for smartpqi driver
 */

#include "smartpqi_includes.h"

/*
 * Set the CAM SIM properties of the smartpqi adapter.
 */
static void update_sim_properties(struct cam_sim *sim, struct ccb_pathinq *cpi)
{
	pqisrc_softstate_t *softs = (struct pqisrc_softstate *)
					cam_sim_softc(sim);

	DBG_FUNC("IN\n");

	cpi->version_num = 1;
	cpi->hba_inquiry = PI_SDTR_ABLE | PI_TAG_ABLE | PI_WIDE_16;
	cpi->target_sprt = 0;
	cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
	cpi->hba_eng_cnt = 0;
	cpi->max_lun = PQI_MAX_MULTILUN;
	cpi->max_target = 1088;
	cpi->maxio = (softs->pqi_cap.max_sg_elem - 1) * PAGE_SIZE;
	cpi->initiator_id = 255;
	strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
	strncpy(cpi->hba_vid, "Microsemi", HBA_IDLEN);
	strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
	cpi->unit_number = cam_sim_unit(sim);
	cpi->bus_id = cam_sim_bus(sim);
	cpi->base_transfer_speed = 1200000; /* Base bus speed in KB/sec */
	cpi->protocol = PROTO_SCSI;
	cpi->protocol_version = SCSI_REV_SPC4;
	cpi->transport = XPORT_SPI;
	cpi->transport_version = 2;
	cpi->ccb_h.status = CAM_REQ_CMP;

	DBG_FUNC("OUT\n");
}
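
/*
 * Worked example of the maxio computation above (illustrative numbers, not
 * taken from the firmware spec): one scatter/gather element is reserved, so
 * with a hypothetical max_sg_elem of 65 and 4 KiB pages the largest I/O CAM
 * will build is (65 - 1) * 4096 = 256 KiB.
 */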

/*
 * Get the transport settings of the smartpqi adapter.
 */
static void get_transport_settings(struct pqisrc_softstate *softs,
		struct ccb_trans_settings *cts)
{
	struct ccb_trans_settings_scsi	*scsi = &cts->proto_specific.scsi;
	struct ccb_trans_settings_sas	*sas = &cts->xport_specific.sas;
	struct ccb_trans_settings_spi	*spi = &cts->xport_specific.spi;

	DBG_FUNC("IN\n");

	cts->protocol = PROTO_SCSI;
	cts->protocol_version = SCSI_REV_SPC4;
	cts->transport = XPORT_SPI;
	cts->transport_version = 2;
	spi->valid = CTS_SPI_VALID_DISC;
	spi->flags = CTS_SPI_FLAGS_DISC_ENB;
	scsi->valid = CTS_SCSI_VALID_TQ;
	scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
	sas->valid = CTS_SAS_VALID_SPEED;
	cts->ccb_h.status = CAM_REQ_CMP;

	DBG_FUNC("OUT\n");
}

/*
 * Add the target to the CAM layer and trigger a rescan when a new device
 * is found.
 */
void os_add_device(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
{
	union ccb			*ccb;

	DBG_FUNC("IN\n");

	if (softs->os_specific.sim_registered) {
		if ((ccb = xpt_alloc_ccb_nowait()) == NULL) {
			DBG_ERR("rescan failed (can't allocate CCB)\n");
			return;
		}

		if (xpt_create_path(&ccb->ccb_h.path, NULL,
			cam_sim_path(softs->os_specific.sim),
			device->target, device->lun) != CAM_REQ_CMP) {
			DBG_ERR("rescan failed (can't create path)\n");
			xpt_free_ccb(ccb);
			return;
		}
		xpt_rescan(ccb);
	}

	DBG_FUNC("OUT\n");
}

/*
 * Remove the device from the CAM layer when it is deleted or hot removed.
 */
void os_remove_device(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
{
	struct cam_path *tmppath;

	DBG_FUNC("IN\n");

	if (softs->os_specific.sim_registered) {
		if (xpt_create_path(&tmppath, NULL,
			cam_sim_path(softs->os_specific.sim),
			device->target, device->lun) != CAM_REQ_CMP) {
			DBG_ERR("unable to create path for async event\n");
			return;
		}
		xpt_async(AC_LOST_DEVICE, tmppath, NULL);
		xpt_free_path(tmppath);
		pqisrc_free_device(softs, device);
	}

	DBG_FUNC("OUT\n");
}

/*
 * Function to release the frozen simq.
 */
static void pqi_release_camq(rcb_t *rcb)
{
	pqisrc_softstate_t *softs;
	struct ccb_scsiio *csio;

	csio = (struct ccb_scsiio *)&rcb->cm_ccb->csio;
	softs = rcb->softs;

	DBG_FUNC("IN\n");

	if (softs->os_specific.pqi_flags & PQI_FLAG_BUSY) {
		softs->os_specific.pqi_flags &= ~PQI_FLAG_BUSY;
		if (csio->ccb_h.status & CAM_RELEASE_SIMQ)
			xpt_release_simq(xpt_path_sim(csio->ccb_h.path), 0);
		else
			csio->ccb_h.status |= CAM_RELEASE_SIMQ;
	}

	DBG_FUNC("OUT\n");
}
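
/*
 * The freeze side of the handshake above, as done in pqisrc_io_start() when
 * the driver runs out of tags (reproduced here compiled out, for reference):
 * pqi_release_camq() then either releases the SIMQ directly or sets
 * CAM_RELEASE_SIMQ so the XPT releases it when the CCB completes.
 */
#if 0
	xpt_freeze_simq(softs->os_specific.sim, 1);
	softs->os_specific.pqi_flags |= PQI_FLAG_BUSY;
	ccb->ccb_h.status |= (CAM_REQUEUE_REQ | CAM_RELEASE_SIMQ);
#endif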

/*
 * Function to dma-unmap the completed request.
 */
static void pqi_unmap_request(void *arg)
{
	pqisrc_softstate_t *softs;
	rcb_t *rcb;

	DBG_IO("IN rcb = %p\n", arg);

	rcb = (rcb_t *)arg;
	softs = rcb->softs;

	if (!(rcb->cm_flags & PQI_CMD_MAPPED))
		return;

	if (rcb->bcount != 0) {
		if (rcb->data_dir == SOP_DATA_DIR_FROM_DEVICE)
			bus_dmamap_sync(softs->os_specific.pqi_buffer_dmat,
					rcb->cm_datamap,
					BUS_DMASYNC_POSTREAD);
		if (rcb->data_dir == SOP_DATA_DIR_TO_DEVICE)
			bus_dmamap_sync(softs->os_specific.pqi_buffer_dmat,
					rcb->cm_datamap,
					BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(softs->os_specific.pqi_buffer_dmat,
					rcb->cm_datamap);
	}
	rcb->cm_flags &= ~PQI_CMD_MAPPED;

	if (rcb->sgt && rcb->nseg)
		os_mem_free(rcb->softs, (void *)rcb->sgt,
			rcb->nseg * sizeof(sgt_t));

	pqisrc_put_tag(&softs->taglist, rcb->tag);

	DBG_IO("OUT\n");
}
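
/*
 * Note on the busdma lifecycle used here: the buffer is loaded with
 * bus_dmamap_load_ccb() and synced with the BUS_DMASYNC_PRE* ops before the
 * hardware touches it (see pqi_request_map_helper()); on completion, the
 * matching BUS_DMASYNC_POST* sync and bus_dmamap_unload() above make the
 * data visible to the CPU again, which matters on platforms that use
 * bounce buffers or non-coherent DMA.
 */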

/*
 * Construct a meaningful LD name for the volume here.
 */
static void
smartpqi_fix_ld_inquiry(pqisrc_softstate_t *softs, struct ccb_scsiio *csio)
{
	struct scsi_inquiry_data *inq = NULL;
	uint8_t *cdb = NULL;
	pqi_scsi_dev_t *device = NULL;

	DBG_FUNC("IN\n");

	cdb = (csio->ccb_h.flags & CAM_CDB_POINTER) ?
		(uint8_t *)csio->cdb_io.cdb_ptr : csio->cdb_io.cdb_bytes;
	if (cdb[0] == INQUIRY &&
		(cdb[1] & SI_EVPD) == 0 &&
		(csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN &&
		csio->dxfer_len >= SHORT_INQUIRY_LENGTH) {
		inq = (struct scsi_inquiry_data *)csio->data_ptr;

		device = softs->device_list[csio->ccb_h.target_id][csio->ccb_h.target_lun];

		/*
		 * Let physical disks be probed and dealt with via CAM.
		 * Only logical volumes fall through so their inquiry data
		 * can be tweaked.
		 */
		if (!device || !pqisrc_is_logical_device(device) ||
				(device->devtype != DISK_DEVICE) ||
				pqisrc_is_external_raid_device(device)) {
			return;
		}

		strncpy(inq->vendor, "MSCC", SID_VENDOR_SIZE);
		strncpy(inq->product,
			pqisrc_raidlevel_to_string(device->raid_level),
			SID_PRODUCT_SIZE);
		strncpy(inq->revision, device->volume_offline ? "OFF" : "OK",
			SID_REVISION_SIZE);
	}

	DBG_FUNC("OUT\n");
}
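
/*
 * For example, assuming pqisrc_raidlevel_to_string() returns strings such
 * as "RAID 5", an online logical volume would subsequently report roughly:
 *
 *	Vendor: MSCC     Product: RAID 5     Revision: OK
 *
 * while an offline volume reports "OFF" in the revision field.
 */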

/*
 * Handle completion of a command - pass results back through the CCB.
 */
void
os_io_response_success(rcb_t *rcb)
{
	struct ccb_scsiio		*csio;

	DBG_IO("IN rcb = %p\n", rcb);

	if (rcb == NULL)
		panic("rcb is null");

	csio = (struct ccb_scsiio *)&rcb->cm_ccb->csio;

	if (csio == NULL)
		panic("csio is null");

	rcb->status = REQUEST_SUCCESS;
	csio->ccb_h.status = CAM_REQ_CMP;

	smartpqi_fix_ld_inquiry(rcb->softs, csio);
	pqi_release_camq(rcb);
	pqi_unmap_request(rcb);
	xpt_done((union ccb *)csio);

	DBG_IO("OUT\n");
}

/*
 * Error response handling for RAID IO.
 */
void os_raid_response_error(rcb_t *rcb, raid_path_error_info_elem_t *err_info)
{
	struct ccb_scsiio *csio;
	pqisrc_softstate_t *softs;

	DBG_IO("IN\n");

	csio = (struct ccb_scsiio *)&rcb->cm_ccb->csio;

	if (csio == NULL)
		panic("csio is null");

	softs = rcb->softs;

	ASSERT(err_info != NULL);
	csio->scsi_status = err_info->status;
	csio->ccb_h.status = CAM_REQ_CMP_ERR;

	if (csio->ccb_h.func_code == XPT_SCSI_IO) {
		/*
		 * Handle specific SCSI status values.
		 */
		switch (csio->scsi_status) {
			case PQI_RAID_STATUS_QUEUE_FULL:
				csio->ccb_h.status = CAM_REQ_CMP;
				DBG_ERR("Queue Full error\n");
				break;
			/* check condition, sense data included */
			case PQI_RAID_STATUS_CHECK_CONDITION:
			{
				uint16_t sense_data_len =
					LE_16(err_info->sense_data_len);
				uint8_t *sense_data = NULL;

				if (sense_data_len)
					sense_data = err_info->data;
				memset(&csio->sense_data, 0, csio->sense_len);
				sense_data_len = (sense_data_len >
						csio->sense_len) ?
						csio->sense_len :
						sense_data_len;
				if (sense_data)
					memcpy(&csio->sense_data, sense_data,
						sense_data_len);
				if (csio->sense_len > sense_data_len)
					csio->sense_resid = csio->sense_len -
							sense_data_len;
				else
					csio->sense_resid = 0;
				csio->ccb_h.status = CAM_SCSI_STATUS_ERROR |
							CAM_AUTOSNS_VALID |
							CAM_REQ_CMP_ERR;
				break;
			}
			case PQI_RAID_DATA_IN_OUT_UNDERFLOW:
			{
				uint32_t resid =
					rcb->bcount - err_info->data_out_transferred;

				csio->resid = resid;
				csio->ccb_h.status = CAM_REQ_CMP;
				break;
			}
			default:
				csio->ccb_h.status = CAM_REQ_CMP;
				break;
		}
	}

	if (softs->os_specific.pqi_flags & PQI_FLAG_BUSY) {
		softs->os_specific.pqi_flags &= ~PQI_FLAG_BUSY;
		if (csio->ccb_h.status & CAM_RELEASE_SIMQ)
			xpt_release_simq(xpt_path_sim(csio->ccb_h.path), 0);
		else
			csio->ccb_h.status |= CAM_RELEASE_SIMQ;
	}

	pqi_unmap_request(rcb);
	xpt_done((union ccb *)csio);

	DBG_IO("OUT\n");
}
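
/*
 * Underflow example: if a 65536-byte request moved only 61440 bytes,
 * data_out_transferred is 61440 and csio->resid is set to 4096, telling
 * the upper layers that the tail of the buffer holds no valid data.
 */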

/*
 * Error response handling for AIO.
 */
void os_aio_response_error(rcb_t *rcb, aio_path_error_info_elem_t *err_info)
{
	struct ccb_scsiio *csio;
	pqisrc_softstate_t *softs;

	DBG_IO("IN\n");

	if (rcb == NULL)
		panic("rcb is null");

	rcb->status = REQUEST_SUCCESS;
	csio = (struct ccb_scsiio *)&rcb->cm_ccb->csio;
	if (csio == NULL)
		panic("csio is null");

	softs = rcb->softs;

	switch (err_info->service_resp) {
		case PQI_AIO_SERV_RESPONSE_COMPLETE:
			csio->ccb_h.status = err_info->status;
			break;
		case PQI_AIO_SERV_RESPONSE_FAILURE:
			switch (err_info->status) {
				case PQI_AIO_STATUS_IO_ABORTED:
					csio->ccb_h.status = CAM_REQ_ABORTED;
					DBG_WARN_BTL(rcb->dvp, "IO aborted\n");
					break;
				case PQI_AIO_STATUS_UNDERRUN:
					csio->ccb_h.status = CAM_REQ_CMP;
					csio->resid =
						LE_32(err_info->resd_count);
					break;
				case PQI_AIO_STATUS_OVERRUN:
					csio->ccb_h.status = CAM_REQ_CMP;
					break;
				case PQI_AIO_STATUS_AIO_PATH_DISABLED:
					DBG_WARN_BTL(rcb->dvp, "AIO Path Disabled\n");
					rcb->dvp->offload_enabled = false;
					csio->ccb_h.status |= CAM_REQUEUE_REQ;
					break;
				case PQI_AIO_STATUS_IO_ERROR:
				case PQI_AIO_STATUS_IO_NO_DEVICE:
				case PQI_AIO_STATUS_INVALID_DEVICE:
				default:
					DBG_WARN_BTL(rcb->dvp, "IO Error/Invalid/No device\n");
					csio->ccb_h.status |=
						CAM_SCSI_STATUS_ERROR;
					break;
			}
			break;
		case PQI_AIO_SERV_RESPONSE_TMF_COMPLETE:
		case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED:
			csio->ccb_h.status = CAM_REQ_CMP;
			break;
		case PQI_AIO_SERV_RESPONSE_TMF_REJECTED:
		case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN:
			DBG_WARN_BTL(rcb->dvp, "TMF rejected/Incorrect Lun\n");
			csio->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
			break;
		default:
			DBG_WARN_BTL(rcb->dvp, "Scsi Status Error\n");
			csio->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
			break;
	}

	if (err_info->data_pres == DATA_PRESENT_SENSE_DATA) {
		uint8_t *sense_data = NULL;
		unsigned sense_data_len = LE_16(err_info->data_len);

		csio->scsi_status = PQI_AIO_STATUS_CHECK_CONDITION;
		if (sense_data_len)
			sense_data = err_info->data;
		DBG_ERR_BTL(rcb->dvp, "SCSI_STATUS_CHECK_COND sense size %u\n",
			sense_data_len);
		memset(&csio->sense_data, 0, csio->sense_len);
		if (sense_data)
			memcpy(&csio->sense_data, sense_data,
				((sense_data_len > csio->sense_len) ?
				csio->sense_len : sense_data_len));
		if (csio->sense_len > sense_data_len)
			csio->sense_resid = csio->sense_len - sense_data_len;
		else
			csio->sense_resid = 0;
		csio->ccb_h.status = CAM_SCSI_STATUS_ERROR | CAM_AUTOSNS_VALID;
	}

	smartpqi_fix_ld_inquiry(softs, csio);
	pqi_release_camq(rcb);
	pqi_unmap_request(rcb);
	xpt_done((union ccb *)csio);

	DBG_IO("OUT\n");
}

static void
pqi_freeze_ccb(union ccb *ccb)
{
	if ((ccb->ccb_h.status & CAM_DEV_QFRZN) == 0) {
		ccb->ccb_h.status |= CAM_DEV_QFRZN;
		xpt_freeze_devq(ccb->ccb_h.path, 1);
	}
}
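
/*
 * CAM_DEV_QFRZN records that the device queue was frozen on behalf of this
 * CCB; the peripheral driver that sees the flag on completion is expected
 * to thaw the queue (e.g. via cam_release_devq()), and checking the flag
 * first keeps the freeze count from being bumped twice for one CCB.
 */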

/*
 * Command-mapping helper function - populate this command's s/g table.
 */
static void
pqi_request_map_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	pqisrc_softstate_t *softs;
	rcb_t *rcb;

	rcb = (rcb_t *)arg;
	softs = rcb->softs;

	if (error || nseg > softs->pqi_cap.max_sg_elem) {
		rcb->cm_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
		pqi_freeze_ccb(rcb->cm_ccb);
		DBG_ERR_BTL(rcb->dvp, "map failed err = %d or nseg(%d) > sgelem(%d)\n",
			error, nseg, softs->pqi_cap.max_sg_elem);
		pqi_unmap_request(rcb);
		xpt_done((union ccb *)rcb->cm_ccb);
		return;
	}

	/* One sgt_t per segment; freed in pqi_unmap_request(). */
	rcb->sgt = os_mem_alloc(softs, nseg * sizeof(sgt_t));
	if (rcb->sgt == NULL) {
		rcb->cm_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
		pqi_freeze_ccb(rcb->cm_ccb);
		DBG_ERR_BTL(rcb->dvp, "os_mem_alloc() failed; nseg = %d\n", nseg);
		pqi_unmap_request(rcb);
		xpt_done((union ccb *)rcb->cm_ccb);
		return;
	}

	rcb->nseg = nseg;
	for (int i = 0; i < nseg; i++) {
		rcb->sgt[i].addr = segs[i].ds_addr;
		rcb->sgt[i].len = segs[i].ds_len;
		rcb->sgt[i].flags = 0;
	}

	if (rcb->data_dir == SOP_DATA_DIR_FROM_DEVICE)
		bus_dmamap_sync(softs->os_specific.pqi_buffer_dmat,
			rcb->cm_datamap, BUS_DMASYNC_PREREAD);
	if (rcb->data_dir == SOP_DATA_DIR_TO_DEVICE)
		bus_dmamap_sync(softs->os_specific.pqi_buffer_dmat,
			rcb->cm_datamap, BUS_DMASYNC_PREWRITE);

	/* Call IO functions depending on pd or ld */
	rcb->status = REQUEST_PENDING;

	error = pqisrc_build_send_io(softs, rcb);

	if (error) {
		rcb->req_pending = false;
		rcb->cm_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
		pqi_freeze_ccb(rcb->cm_ccb);
		DBG_ERR_BTL(rcb->dvp, "Build IO failed, error = %d\n", error);
		pqi_unmap_request(rcb);
		xpt_done((union ccb *)rcb->cm_ccb);
		return;
	}
}

/*
 * Function to dma-map the request buffer.
 */
static int pqi_map_request(rcb_t *rcb)
{
	pqisrc_softstate_t *softs = rcb->softs;
	int error = PQI_STATUS_SUCCESS;
	union ccb *ccb = rcb->cm_ccb;

	DBG_FUNC("IN\n");

	/* check that mapping is necessary */
	if (rcb->cm_flags & PQI_CMD_MAPPED)
		return(0);
	rcb->cm_flags |= PQI_CMD_MAPPED;

	if (rcb->bcount) {
		error = bus_dmamap_load_ccb(softs->os_specific.pqi_buffer_dmat,
			rcb->cm_datamap, ccb, pqi_request_map_helper, rcb, 0);
		if (error != 0) {
			DBG_ERR_BTL(rcb->dvp, "bus_dmamap_load_ccb failed = %d count = %d\n",
					error, rcb->bcount);
			return error;
		}
	} else {
		/*
		 * Set up the command to go to the controller.  If there are no
		 * data buffers associated with the command then it can bypass
		 * busdma.
		 */
		/* Call IO functions depending on pd or ld */
		rcb->status = REQUEST_PENDING;

		error = pqisrc_build_send_io(softs, rcb);
	}

	DBG_FUNC("OUT error = %d\n", error);

	return error;
}
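
/*
 * Sketch (compiled out) of the deferred-load contract relied on above:
 * bus_dmamap_load_ccb() may queue the request and return EINPROGRESS, in
 * which case the callback (pqi_request_map_helper()) runs later once
 * mapping resources free up.  The arguments below are placeholders; see
 * the EINPROGRESS handling in pqisrc_io_start() for the real caller.
 */
#if 0
	error = bus_dmamap_load_ccb(dmat, map, ccb, callback, arg, 0);
	if (error == EINPROGRESS)
		error = 0;	/* the callback will finish the submission */
#endif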

/*
 * Function to clear the request control block.
 */
void os_reset_rcb(rcb_t *rcb)
{
	rcb->error_info = NULL;
	rcb->req = NULL;
	rcb->status = -1;
	rcb->tag = INVALID_ELEM;
	rcb->dvp = NULL;
	rcb->cdbp = NULL;
	rcb->softs = NULL;
	rcb->cm_flags = 0;
	rcb->cm_data = NULL;
	rcb->bcount = 0;
	rcb->nseg = 0;
	rcb->sgt = NULL;
	rcb->cm_ccb = NULL;
	rcb->encrypt_enable = false;
	rcb->ioaccel_handle = 0;
	rcb->resp_qid = 0;
	rcb->req_pending = false;
}

/*
 * Callback function for the lun rescan.
 */
static void smartpqi_lunrescan_cb(struct cam_periph *periph, union ccb *ccb)
{
	xpt_free_path(ccb->ccb_h.path);
	xpt_free_ccb(ccb);
}

/*
 * Function to rescan the lun.
 */
static void smartpqi_lun_rescan(struct pqisrc_softstate *softs, int target,
			int lun)
{
	union ccb		*ccb = NULL;
	cam_status		status = 0;
	struct cam_path		*path = NULL;

	DBG_FUNC("IN\n");

	ccb = xpt_alloc_ccb_nowait();
	if (ccb == NULL) {
		DBG_ERR("Unable to allocate CCB for lun rescan\n");
		return;
	}

	status = xpt_create_path(&path, NULL,
				cam_sim_path(softs->os_specific.sim), target, lun);
	if (status != CAM_REQ_CMP) {
		DBG_ERR("xpt_create_path status(%d) != CAM_REQ_CMP\n",
				 status);
		xpt_free_ccb(ccb);
		return;
	}

	xpt_setup_ccb(&ccb->ccb_h, path, 5);
	ccb->ccb_h.func_code = XPT_SCAN_LUN;
	ccb->ccb_h.cbfcnp = smartpqi_lunrescan_cb;
	ccb->crcn.flags = CAM_FLAG_NONE;

	xpt_action(ccb);

	DBG_FUNC("OUT\n");
}

/*
 * Function to rescan the lun under each target.
 */
void smartpqi_target_rescan(struct pqisrc_softstate *softs)
{
	int target = 0, lun = 0;

	DBG_FUNC("IN\n");

	for (target = 0; target < PQI_MAX_DEVICES; target++) {
		for (lun = 0; lun < PQI_MAX_MULTILUN; lun++) {
			if (softs->device_list[target][lun]) {
				smartpqi_lun_rescan(softs, target, lun);
			}
		}
	}

	DBG_FUNC("OUT\n");
}

/*
 * Map the CCB tag action to the SOP task attribute for the current task.
 */
uint8_t os_get_task_attr(rcb_t *rcb)
{
	union ccb *ccb = rcb->cm_ccb;
	uint8_t tag_action = SOP_TASK_ATTRIBUTE_SIMPLE;

	switch (ccb->csio.tag_action) {
	case MSG_HEAD_OF_Q_TAG:
		tag_action = SOP_TASK_ATTRIBUTE_HEAD_OF_QUEUE;
		break;
	case MSG_ORDERED_Q_TAG:
		tag_action = SOP_TASK_ATTRIBUTE_ORDERED;
		break;
	case MSG_SIMPLE_Q_TAG:
	default:
		tag_action = SOP_TASK_ATTRIBUTE_SIMPLE;
		break;
	}
	return tag_action;
}

/*
 * Complete all outstanding commands.
 */
void os_complete_outstanding_cmds_nodevice(pqisrc_softstate_t *softs)
{
	int tag = 0;

	DBG_FUNC("IN\n");

	for (tag = 1; tag < softs->max_outstanding_io; tag++) {
		rcb_t *prcb = &softs->rcb[tag];

		if (prcb->req_pending && prcb->cm_ccb) {
			prcb->req_pending = false;
			prcb->cm_ccb->ccb_h.status = CAM_REQ_ABORTED | CAM_REQ_CMP;
			xpt_done((union ccb *)prcb->cm_ccb);
			prcb->cm_ccb = NULL;
		}
	}

	DBG_FUNC("OUT\n");
}

/*
 * IO handling functionality entry point.
 */
static int pqisrc_io_start(struct cam_sim *sim, union ccb *ccb)
{
	rcb_t *rcb;
	uint32_t tag, no_transfer = 0;
	pqisrc_softstate_t *softs = (struct pqisrc_softstate *)
					cam_sim_softc(sim);
	int32_t error = PQI_STATUS_FAILURE;
	pqi_scsi_dev_t *dvp;

	DBG_FUNC("IN\n");

	if (softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun] == NULL) {
		ccb->ccb_h.status = CAM_DEV_NOT_THERE;
		DBG_INFO("Device = %d not there\n", ccb->ccb_h.target_id);
		return PQI_STATUS_FAILURE;
	}

	dvp = softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun];
	/* Check controller state */
	if (IN_PQI_RESET(softs)) {
		ccb->ccb_h.status = CAM_SCSI_BUS_RESET
					| CAM_BUSY | CAM_REQ_INPROG;
		DBG_WARN("Device = %d BUSY/IN_RESET\n", ccb->ccb_h.target_id);
		return error;
	}
	/* Check device state */
	if (pqisrc_ctrl_offline(softs) || DEV_GONE(dvp)) {
		ccb->ccb_h.status = CAM_DEV_NOT_THERE | CAM_REQ_CMP;
		DBG_WARN("Device = %d GONE/OFFLINE\n", ccb->ccb_h.target_id);
		return error;
	}
	/* Check device reset */
	if (dvp->reset_in_progress) {
		ccb->ccb_h.status = CAM_SCSI_BUSY | CAM_REQ_INPROG | CAM_BUSY;
		DBG_WARN("Device %d reset returned busy\n", ccb->ccb_h.target_id);
		return error;
	}

	if (dvp->expose_device == false) {
		ccb->ccb_h.status = CAM_DEV_NOT_THERE;
		DBG_INFO("Device = %d not exposed\n", ccb->ccb_h.target_id);
		return error;
	}

	tag = pqisrc_get_tag(&softs->taglist);
	if (tag == INVALID_ELEM) {
		DBG_ERR("Get Tag failed\n");
		xpt_freeze_simq(softs->os_specific.sim, 1);
		softs->os_specific.pqi_flags |= PQI_FLAG_BUSY;
		ccb->ccb_h.status |= (CAM_REQUEUE_REQ | CAM_RELEASE_SIMQ);
		return PQI_STATUS_FAILURE;
	}

	DBG_IO("tag = %d &softs->taglist : %p\n", tag, &softs->taglist);

	rcb = &softs->rcb[tag];
	os_reset_rcb(rcb);
	rcb->tag = tag;
	rcb->softs = softs;
	rcb->cmdlen = ccb->csio.cdb_len;
	ccb->ccb_h.sim_priv.entries[0].ptr = rcb;

	switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
		case CAM_DIR_IN:
			rcb->data_dir = SOP_DATA_DIR_FROM_DEVICE;
			break;
		case CAM_DIR_OUT:
			rcb->data_dir = SOP_DATA_DIR_TO_DEVICE;
			break;
		case CAM_DIR_NONE:
			no_transfer = 1;
			break;
		default:
			DBG_ERR("Unknown Dir\n");
			break;
	}
	rcb->cm_ccb = ccb;
	rcb->dvp = softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun];

	if (!no_transfer) {
		rcb->cm_data = (void *)ccb->csio.data_ptr;
		rcb->bcount = ccb->csio.dxfer_len;
	} else {
		rcb->cm_data = NULL;
		rcb->bcount = 0;
	}
	/*
	 * Submit the request to the adapter.
	 *
	 * Note that this may fail if we're unable to map the request (and
	 * if we ever learn a transport layer other than simple, may fail
	 * if the adapter rejects the command).
	 */
	if ((error = pqi_map_request(rcb)) != 0) {
		rcb->req_pending = false;
		xpt_freeze_simq(softs->os_specific.sim, 1);
		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
		if (error == EINPROGRESS) {
			DBG_WARN("In Progress on %d\n", ccb->ccb_h.target_id);
			error = 0;
		} else {
			ccb->ccb_h.status |= CAM_REQUEUE_REQ;
			DBG_WARN("Requeue req error = %d target = %d\n", error,
				ccb->ccb_h.target_id);
			pqi_unmap_request(rcb);
		}
	}

	DBG_FUNC("OUT error = %d\n", error);
	return error;
}

/*
 * Abort a task, task management functionality.
 */
static int
pqisrc_scsi_abort_task(pqisrc_softstate_t *softs, union ccb *ccb)
{
	rcb_t *rcb = ccb->ccb_h.sim_priv.entries[0].ptr;
	uint32_t abort_tag = rcb->tag;
	uint32_t tag = 0;
	int rval = PQI_STATUS_SUCCESS;
	uint16_t qid;

	DBG_FUNC("IN\n");

	qid = (uint16_t)rcb->resp_qid;

	tag = pqisrc_get_tag(&softs->taglist);
	rcb = &softs->rcb[tag];
	rcb->tag = tag;
	rcb->resp_qid = qid;

	rval = pqisrc_send_tmf(softs, rcb->dvp, rcb, abort_tag,
		SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK);

	if (PQI_STATUS_SUCCESS == rval) {
		rval = rcb->status;
		if (REQUEST_SUCCESS == rval) {
			ccb->ccb_h.status = CAM_REQ_ABORTED;
		}
	}
	pqisrc_put_tag(&softs->taglist, abort_tag);
	pqisrc_put_tag(&softs->taglist, rcb->tag);

	DBG_FUNC("OUT rval = %d\n", rval);

	return rval;
}

/*
 * Abort a task set, task management functionality.
 */
static int
pqisrc_scsi_abort_task_set(pqisrc_softstate_t *softs, union ccb *ccb)
{
	rcb_t *rcb = NULL;
	uint32_t tag = 0;
	int rval = PQI_STATUS_SUCCESS;

	DBG_FUNC("IN\n");

	tag = pqisrc_get_tag(&softs->taglist);
	rcb = &softs->rcb[tag];
	rcb->tag = tag;

	rval = pqisrc_send_tmf(softs, rcb->dvp, rcb, 0,
			SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK_SET);

	if (rval == PQI_STATUS_SUCCESS) {
		rval = rcb->status;
	}

	pqisrc_put_tag(&softs->taglist, rcb->tag);

	DBG_FUNC("OUT rval = %d\n", rval);

	return rval;
}

/*
 * Target reset task management functionality.
 */
static int
pqisrc_target_reset(pqisrc_softstate_t *softs, union ccb *ccb)
{
	pqi_scsi_dev_t *devp = softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun];
	rcb_t *rcb = NULL;
	uint32_t tag = 0;
	int rval = PQI_STATUS_SUCCESS;

	DBG_FUNC("IN\n");

	if (devp == NULL) {
		DBG_ERR("bad target t%d\n", ccb->ccb_h.target_id);
		return (-1);
	}

	tag = pqisrc_get_tag(&softs->taglist);
	rcb = &softs->rcb[tag];
	rcb->tag = tag;

	devp->reset_in_progress = true;
	rval = pqisrc_send_tmf(softs, devp, rcb, 0,
		SOP_TASK_MANAGEMENT_LUN_RESET);
	if (PQI_STATUS_SUCCESS == rval) {
		rval = rcb->status;
	}
	devp->reset_in_progress = false;
	pqisrc_put_tag(&softs->taglist, rcb->tag);

	DBG_FUNC("OUT rval = %d\n", rval);

	return ((rval == REQUEST_SUCCESS) ?
		PQI_STATUS_SUCCESS : PQI_STATUS_FAILURE);
}

/*
 * CAM entry point of the smartpqi module.
 */
static void smartpqi_cam_action(struct cam_sim *sim, union ccb *ccb)
{
	struct pqisrc_softstate *softs = cam_sim_softc(sim);
	struct ccb_hdr *ccb_h = &ccb->ccb_h;

	DBG_FUNC("IN\n");

	switch (ccb_h->func_code) {
		case XPT_SCSI_IO:
		{
			if (!pqisrc_io_start(sim, ccb)) {
				return;
			}
			break;
		}
		case XPT_CALC_GEOMETRY:
		{
			struct ccb_calc_geometry *ccg;

			ccg = &ccb->ccg;
			if (ccg->block_size == 0) {
				ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
				ccb->ccb_h.status = CAM_REQ_INVALID;
				break;
			}
			cam_calc_geometry(ccg, /* extended */ 1);
			ccb->ccb_h.status = CAM_REQ_CMP;
			break;
		}
		case XPT_PATH_INQ:
		{
			update_sim_properties(sim, &ccb->cpi);
			ccb->ccb_h.status = CAM_REQ_CMP;
			break;
		}
		case XPT_GET_TRAN_SETTINGS:
			get_transport_settings(softs, &ccb->cts);
			ccb->ccb_h.status = CAM_REQ_CMP;
			break;
		case XPT_ABORT:
			if (pqisrc_scsi_abort_task(softs, ccb)) {
				ccb->ccb_h.status = CAM_REQ_CMP_ERR;
				xpt_done(ccb);
				DBG_ERR("Abort task failed on %d\n",
					ccb->ccb_h.target_id);
				return;
			}
			break;
		case XPT_TERM_IO:
			if (pqisrc_scsi_abort_task_set(softs, ccb)) {
				ccb->ccb_h.status = CAM_REQ_CMP_ERR;
				DBG_ERR("Abort task set failed on %d\n",
					ccb->ccb_h.target_id);
				xpt_done(ccb);
				return;
			}
			break;
		case XPT_RESET_DEV:
			if (pqisrc_target_reset(softs, ccb)) {
				ccb->ccb_h.status = CAM_REQ_CMP_ERR;
				DBG_ERR("Target reset failed on %d\n",
					ccb->ccb_h.target_id);
				xpt_done(ccb);
				return;
			} else {
				ccb->ccb_h.status = CAM_REQ_CMP;
			}
			break;
		case XPT_RESET_BUS:
			ccb->ccb_h.status = CAM_REQ_CMP;
			break;
		case XPT_SET_TRAN_SETTINGS:
			ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
			return;
		default:
			DBG_WARN("UNSUPPORTED FUNC CODE\n");
			ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
			break;
	}
	xpt_done(ccb);

	DBG_FUNC("OUT\n");
}

/*
 * Function to poll for responses when interrupts are unavailable.
 * This also supports crash dumps.
 */
static void smartpqi_poll(struct cam_sim *sim)
{
	struct pqisrc_softstate *softs = cam_sim_softc(sim);
	int i;

	for (i = 1; i < softs->intr_count; i++)
		pqisrc_process_response_queue(softs, i);
}

/*
 * Function to adjust the queue depth of a device.
 */
void smartpqi_adjust_queue_depth(struct cam_path *path, uint32_t queue_depth)
{
	struct ccb_relsim crs;

	DBG_INFO("IN\n");

	xpt_setup_ccb(&crs.ccb_h, path, 5);
	crs.ccb_h.func_code = XPT_REL_SIMQ;
	crs.ccb_h.flags = CAM_DEV_QFREEZE;
	crs.release_flags = RELSIM_ADJUST_OPENINGS;
	crs.openings = queue_depth;
	xpt_action((union ccb *)&crs);
	if (crs.ccb_h.status != CAM_REQ_CMP) {
		printf("XPT_REL_SIMQ failed stat=%d\n", crs.ccb_h.status);
	}

	DBG_INFO("OUT\n");
}
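
/*
 * This is invoked from the AC_FOUND_DEVICE async callback below with the
 * firmware-derived dvp->queue_depth, so each target's openings track what
 * the controller reported rather than the CAM default.
 */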

/*
 * Function to register an async callback for setting the queue depth.
 */
static void
smartpqi_async(void *callback_arg, u_int32_t code,
		struct cam_path *path, void *arg)
{
	struct pqisrc_softstate *softs;

	softs = (struct pqisrc_softstate *)callback_arg;

	DBG_FUNC("IN\n");

	switch (code) {
		case AC_FOUND_DEVICE:
		{
			struct ccb_getdev *cgd;
			uint32_t t_id;

			cgd = (struct ccb_getdev *)arg;
			if (cgd == NULL) {
				break;
			}
			t_id = cgd->ccb_h.target_id;

			if (t_id <= (PQI_CTLR_INDEX - 1)) {
				if (softs != NULL) {
					pqi_scsi_dev_t *dvp =
					    softs->device_list[t_id][cgd->ccb_h.target_lun];

					smartpqi_adjust_queue_depth(path,
							dvp->queue_depth);
				}
			}
			break;
		}
		default:
			break;
	}

	DBG_FUNC("OUT\n");
}

/*
 * Function to register the SIM with the CAM layer for the smartpqi driver.
 */
int register_sim(struct pqisrc_softstate *softs, int card_index)
{
	int error = 0;
	int max_transactions;
	union ccb *ccb = NULL;
	cam_status status = 0;
	struct ccb_setasync csa;
	struct cam_sim *sim;

	DBG_FUNC("IN\n");

	max_transactions = softs->max_io_for_scsi_ml;
	softs->os_specific.devq = cam_simq_alloc(max_transactions);
	if (softs->os_specific.devq == NULL) {
		DBG_ERR("cam_simq_alloc failed txns = %d\n",
			max_transactions);
		return PQI_STATUS_FAILURE;
	}

	sim = cam_sim_alloc(smartpqi_cam_action,
				smartpqi_poll, "smartpqi", softs,
				card_index, &softs->os_specific.cam_lock,
				1, max_transactions, softs->os_specific.devq);
	if (sim == NULL) {
		DBG_ERR("cam_sim_alloc failed txns = %d\n",
			max_transactions);
		cam_simq_free(softs->os_specific.devq);
		return PQI_STATUS_FAILURE;
	}

	softs->os_specific.sim = sim;
	mtx_lock(&softs->os_specific.cam_lock);
	status = xpt_bus_register(sim, softs->os_specific.pqi_dev, 0);
	if (status != CAM_SUCCESS) {
		DBG_ERR("xpt_bus_register failed status=%d\n", status);
		cam_sim_free(softs->os_specific.sim, FALSE);
		cam_simq_free(softs->os_specific.devq);
		mtx_unlock(&softs->os_specific.cam_lock);
		return PQI_STATUS_FAILURE;
	}

	softs->os_specific.sim_registered = TRUE;
	ccb = xpt_alloc_ccb_nowait();
	if (ccb == NULL) {
		DBG_ERR("xpt_alloc_ccb_nowait failed\n");
		mtx_unlock(&softs->os_specific.cam_lock);
		return PQI_STATUS_FAILURE;
	}

	if (xpt_create_path(&ccb->ccb_h.path, NULL,
			cam_sim_path(softs->os_specific.sim),
			CAM_TARGET_WILDCARD,
			CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		DBG_ERR("xpt_create_path failed\n");
		xpt_free_ccb(ccb);
		xpt_bus_deregister(cam_sim_path(softs->os_specific.sim));
		cam_sim_free(softs->os_specific.sim, TRUE);
		mtx_unlock(&softs->os_specific.cam_lock);
		return PQI_STATUS_FAILURE;
	}
	/*
	 * Callback to set the queue depth per target which is
	 * derived from the FW.
	 */
	softs->os_specific.path = ccb->ccb_h.path;
	xpt_setup_ccb(&csa.ccb_h, softs->os_specific.path, 5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = AC_FOUND_DEVICE;
	csa.callback = smartpqi_async;
	csa.callback_arg = softs;
	xpt_action((union ccb *)&csa);
	if (csa.ccb_h.status != CAM_REQ_CMP) {
		DBG_ERR("Unable to register smartpqi_async handler: %d!\n",
			csa.ccb_h.status);
	}

	mtx_unlock(&softs->os_specific.cam_lock);
	DBG_INFO("OUT\n");
	return error;
}

/*
 * Function to deregister the smartpqi SIM from the CAM layer.
 */
void deregister_sim(struct pqisrc_softstate *softs)
{
	struct ccb_setasync csa;

	DBG_FUNC("IN\n");

	if (softs->os_specific.mtx_init) {
		mtx_lock(&softs->os_specific.cam_lock);
	}

	xpt_setup_ccb(&csa.ccb_h, softs->os_specific.path, 5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = 0;
	csa.callback = smartpqi_async;
	csa.callback_arg = softs;
	xpt_action((union ccb *)&csa);
	xpt_free_path(softs->os_specific.path);

	xpt_release_simq(softs->os_specific.sim, 0);

	xpt_bus_deregister(cam_sim_path(softs->os_specific.sim));
	softs->os_specific.sim_registered = FALSE;

	if (softs->os_specific.sim) {
		cam_sim_free(softs->os_specific.sim, FALSE);
		softs->os_specific.sim = NULL;
	}
	if (softs->os_specific.mtx_init) {
		mtx_unlock(&softs->os_specific.cam_lock);
	}
	if (softs->os_specific.devq != NULL) {
		cam_simq_free(softs->os_specific.devq);
	}
	if (softs->os_specific.mtx_init) {
		mtx_destroy(&softs->os_specific.cam_lock);
		softs->os_specific.mtx_init = FALSE;
	}

	mtx_destroy(&softs->os_specific.map_lock);

	DBG_FUNC("OUT\n");
}
1208