/*-
 * Copyright 2016-2023 Microchip Technology, Inc. and/or its subsidiaries.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * CAM interface for smartpqi driver
 */

#include "smartpqi_includes.h"

/*
 * Set CAM SIM properties of the smartpqi adapter.
 */
static void
update_sim_properties(struct cam_sim *sim, struct ccb_pathinq *cpi)
{
	pqisrc_softstate_t *softs = (struct pqisrc_softstate *)
					cam_sim_softc(sim);

	device_t dev = softs->os_specific.pqi_dev;

	DBG_FUNC("IN\n");

	cpi->version_num = 1;
	cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
	cpi->target_sprt = 0;
	cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
	cpi->hba_eng_cnt = 0;
	cpi->max_lun = PQI_MAX_MULTILUN;
	cpi->max_target = MAX_TARGET_DEVICES;
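	/*
	 * maxio: each scatter/gather element can map at most one PAGE_SIZE
	 * chunk, and one element is presumably held back so that transfers
	 * which do not start on a page boundary still fit the limit.
	 */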
	cpi->maxio = (softs->pqi_cap.max_sg_elem - 1) * PAGE_SIZE;
	cpi->initiator_id = 255;
	strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN-1);
	cpi->sim_vid[sizeof(cpi->sim_vid)-1] = '\0';
	strncpy(cpi->hba_vid, "Microsemi", HBA_IDLEN-1);
	cpi->hba_vid[sizeof(cpi->hba_vid)-1] = '\0';
	strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN-1);
	cpi->dev_name[sizeof(cpi->dev_name)-1] = '\0';
	cpi->unit_number = cam_sim_unit(sim);
	cpi->bus_id = cam_sim_bus(sim);
	cpi->base_transfer_speed = 1200000; /* Base bus speed in KB/sec */
	cpi->protocol = PROTO_SCSI;
	cpi->protocol_version = SCSI_REV_SPC4;
	cpi->transport = XPORT_SPI;
	cpi->transport_version = 2;
	cpi->ccb_h.status = CAM_REQ_CMP;
	cpi->hba_vendor = pci_get_vendor(dev);
	cpi->hba_device = pci_get_device(dev);
	cpi->hba_subvendor = pci_get_subvendor(dev);
	cpi->hba_subdevice = pci_get_subdevice(dev);

	DBG_FUNC("OUT\n");
}

/*
 * Get transport settings of the smartpqi adapter.
 */
static void
get_transport_settings(struct pqisrc_softstate *softs,
		struct ccb_trans_settings *cts)
{
	struct ccb_trans_settings_scsi	*scsi = &cts->proto_specific.scsi;
	struct ccb_trans_settings_sas	*sas = &cts->xport_specific.sas;
	struct ccb_trans_settings_spi	*spi = &cts->xport_specific.spi;

	DBG_FUNC("IN\n");

	cts->protocol = PROTO_SCSI;
	cts->protocol_version = SCSI_REV_SPC4;
	cts->transport = XPORT_SPI;
	cts->transport_version = 2;
	spi->valid = CTS_SPI_VALID_DISC;
	spi->flags = CTS_SPI_FLAGS_DISC_ENB;
	scsi->valid = CTS_SCSI_VALID_TQ;
	scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
	sas->valid = CTS_SAS_VALID_SPEED;
	cts->ccb_h.status = CAM_REQ_CMP;

	DBG_FUNC("OUT\n");
}

/*
 * Add the target to the CAM layer and rescan when a new device is found
 */
void
os_add_device(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
{
	union ccb *ccb;
	uint64_t lun;

	DBG_FUNC("IN\n");

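	/*
	 * For a multi-LUN device rescan every LUN under the target via the
	 * wildcard; otherwise rescan only the specific LUN.
	 */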
	lun = (device->is_multi_lun) ? CAM_LUN_WILDCARD : device->lun;
	if (softs->os_specific.sim_registered) {
		if ((ccb = xpt_alloc_ccb_nowait()) == NULL) {
			DBG_ERR("rescan failed (can't allocate CCB)\n");
			return;
		}

		if (xpt_create_path(&ccb->ccb_h.path, NULL,
			cam_sim_path(softs->os_specific.sim),
			device->target, lun) != CAM_REQ_CMP) {
			DBG_ERR("rescan failed (can't create path)\n");
			xpt_free_ccb(ccb);
			return;
		}
		xpt_rescan(ccb);
	}

	DBG_FUNC("OUT\n");
}

/*
 * Remove the device from the CAM layer when deleted or hot removed
 */
void
os_remove_device(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
{
	struct cam_path *tmppath = NULL;
	uint64_t lun;

	DBG_FUNC("IN\n");

	lun = (device->is_multi_lun) ? CAM_LUN_WILDCARD : device->lun;
	if (softs->os_specific.sim_registered) {
		if (xpt_create_path(&tmppath, NULL,
			cam_sim_path(softs->os_specific.sim),
			device->target, lun) != CAM_REQ_CMP) {
			DBG_ERR("unable to create path for async event\n");
			return;
		}
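		/*
		 * AC_LOST_DEVICE tells CAM to tear down any periphs attached
		 * to this path before we drop our reference to the device.
		 */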
		xpt_async(AC_LOST_DEVICE, tmppath, NULL);
		xpt_free_path(tmppath);
		/* softs->device_list[device->target][device->lun] = NULL; */
		int index = pqisrc_find_device_list_index(softs, device);
		if (index >= 0 && index < PQI_MAX_DEVICES)
			softs->dev_list[index] = NULL;
		pqisrc_free_device(softs, device);
	}

	DBG_FUNC("OUT\n");
}

/*
 * Function to release the frozen simq
 */
static void
pqi_release_camq(rcb_t *rcb)
{
	pqisrc_softstate_t *softs;
	struct ccb_scsiio *csio;

	csio = (struct ccb_scsiio *)&rcb->cm_ccb->csio;
	softs = rcb->softs;

	DBG_FUNC("IN\n");

	if (softs->os_specific.pqi_flags & PQI_FLAG_BUSY) {
		softs->os_specific.pqi_flags &= ~PQI_FLAG_BUSY;
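		/*
		 * If the CCB already carries CAM_RELEASE_SIMQ, release the
		 * simq here; otherwise set the flag so CAM releases it when
		 * the CCB completes.
		 */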
		if (csio->ccb_h.status & CAM_RELEASE_SIMQ)
			xpt_release_simq(xpt_path_sim(csio->ccb_h.path), 0);
		else
			csio->ccb_h.status |= CAM_RELEASE_SIMQ;
	}

	DBG_FUNC("OUT\n");
}

static void
pqi_synch_request(rcb_t *rcb)
{
	pqisrc_softstate_t *softs = rcb->softs;

	DBG_IO("IN rcb = %p\n", rcb);

	if (!(rcb->cm_flags & PQI_CMD_MAPPED))
		return;

	if (rcb->bcount != 0) {
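		/*
		 * Complete the DMA: POSTREAD makes device-written data
		 * visible to the CPU, POSTWRITE finishes a host-to-device
		 * transfer; then unload the map so it can be reused.
		 */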
		if ((rcb->cm_ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
			bus_dmamap_sync(softs->os_specific.pqi_buffer_dmat,
					rcb->cm_datamap, BUS_DMASYNC_POSTREAD);
		if ((rcb->cm_ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
			bus_dmamap_sync(softs->os_specific.pqi_buffer_dmat,
					rcb->cm_datamap, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(softs->os_specific.pqi_buffer_dmat,
			rcb->cm_datamap);
	}
	rcb->cm_flags &= ~PQI_CMD_MAPPED;

	if (rcb->sgt && rcb->nseg)
		os_mem_free(rcb->softs, (void *)rcb->sgt,
			rcb->nseg * sizeof(sgt_t));

	DBG_IO("OUT\n");
}

/*
 * Function to dma-unmap the completed request
 */
static inline void
pqi_unmap_request(rcb_t *rcb)
{
	DBG_IO("IN rcb = %p\n", rcb);

	pqi_synch_request(rcb);
	pqisrc_put_tag(&rcb->softs->taglist, rcb->tag);

	DBG_IO("OUT\n");
}

/*
 * Construct a meaningful LD name for the volume here.
 */
static void
smartpqi_fix_ld_inquiry(pqisrc_softstate_t *softs, struct ccb_scsiio *csio)
{
	struct scsi_inquiry_data *inq = NULL;
	uint8_t *cdb = NULL;
	pqi_scsi_dev_t *device = NULL;

	DBG_FUNC("IN\n");

	if (pqisrc_ctrl_offline(softs))
		return;

	cdb = (csio->ccb_h.flags & CAM_CDB_POINTER) ?
		(uint8_t *)csio->cdb_io.cdb_ptr : csio->cdb_io.cdb_bytes;

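	/*
	 * Only rewrite the response for a standard INQUIRY (EVPD bit clear)
	 * returning data, with a buffer large enough for the fields we touch.
	 */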
	if (cdb[0] == INQUIRY &&
		(cdb[1] & SI_EVPD) == 0 &&
		(csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN &&
		csio->dxfer_len >= SHORT_INQUIRY_LENGTH) {

		inq = (struct scsi_inquiry_data *)csio->data_ptr;

		/* device = softs->device_list[csio->ccb_h.target_id][csio->ccb_h.target_lun]; */
		int target = csio->ccb_h.target_id;
		int lun = csio->ccb_h.target_lun;
		int index = pqisrc_find_btl_list_index(softs, softs->bus_id, target, lun);
		if (index != INVALID_ELEM)
			device = softs->dev_list[index];

		/* Let the disks be probed and dealt with via CAM. Only for
		   LDs let it fall through and the inquiry be tweaked. */
		if (!device || !pqisrc_is_logical_device(device) ||
				(device->devtype != DISK_DEVICE) ||
				pqisrc_is_external_raid_device(device)) {
			return;
		}

		strncpy(inq->vendor, device->vendor,
				SID_VENDOR_SIZE-1);
		inq->vendor[sizeof(inq->vendor)-1] = '\0';
		strncpy(inq->product,
				pqisrc_raidlevel_to_string(device->raid_level),
				SID_PRODUCT_SIZE-1);
		inq->product[sizeof(inq->product)-1] = '\0';
		strncpy(inq->revision, device->volume_offline ? "OFF" : "OK",
				SID_REVISION_SIZE-1);
		inq->revision[sizeof(inq->revision)-1] = '\0';
	}

	DBG_FUNC("OUT\n");
}

290 
291 static void
292 pqi_complete_scsi_io(struct ccb_scsiio *csio, rcb_t *rcb)
293 {
294 	uint32_t release_tag;
295 	pqisrc_softstate_t *softs = rcb->softs;
296 
297 	DBG_IO("IN scsi io = %p\n", csio);
298 
299 	pqi_synch_request(rcb);
300 	smartpqi_fix_ld_inquiry(rcb->softs, csio);
301 	pqi_release_camq(rcb);
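	/*
	 * Save the tag before os_reset_rcb() clears the rcb, and return it
	 * to the free list only once the rcb can no longer be touched.
	 */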
	release_tag = rcb->tag;
	os_reset_rcb(rcb);
	pqisrc_put_tag(&softs->taglist, release_tag);
	xpt_done((union ccb *)csio);

	DBG_IO("OUT\n");
}

/*
 * Handle completion of a command - pass results back through the CCB
 */
void
os_io_response_success(rcb_t *rcb)
{
	struct ccb_scsiio *csio;

	DBG_IO("IN rcb = %p\n", rcb);

	if (rcb == NULL)
		panic("rcb is null");

	csio = (struct ccb_scsiio *)&rcb->cm_ccb->csio;

	if (csio == NULL)
		panic("csio is null");

	rcb->status = PQI_STATUS_SUCCESS;
	csio->ccb_h.status = CAM_REQ_CMP;

	pqi_complete_scsi_io(csio, rcb);

	DBG_IO("OUT\n");
}

static void
copy_sense_data_to_csio(struct ccb_scsiio *csio,
		uint8_t *sense_data, uint16_t sense_data_len)
{
	DBG_IO("IN csio = %p\n", csio);

	memset(&csio->sense_data, 0, csio->sense_len);

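	/* Clamp to the CCB's sense buffer size so the memcpy cannot overrun. */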
	sense_data_len = (sense_data_len > csio->sense_len) ?
		csio->sense_len : sense_data_len;

	if (sense_data)
		memcpy(&csio->sense_data, sense_data, sense_data_len);

	if (csio->sense_len > sense_data_len)
		csio->sense_resid = csio->sense_len - sense_data_len;
	else
		csio->sense_resid = 0;

	DBG_IO("OUT\n");
}

/*
 * Error response handling for raid IO
 */
void
os_raid_response_error(rcb_t *rcb, raid_path_error_info_elem_t *err_info)
{
	struct ccb_scsiio *csio;
	pqisrc_softstate_t *softs;

	DBG_IO("IN\n");

	csio = (struct ccb_scsiio *)&rcb->cm_ccb->csio;

	if (csio == NULL)
		panic("csio is null");

	softs = rcb->softs;

	csio->ccb_h.status = CAM_REQ_CMP_ERR;

	if (!err_info || !rcb->dvp) {
		DBG_ERR("error info or device could not be accessed! err_info = %p, rcb->dvp = %p\n",
				err_info, rcb->dvp);
		goto error_out;
	}

	csio->scsi_status = err_info->status;

	if (csio->ccb_h.func_code == XPT_SCSI_IO) {
		/*
		 * Handle specific SCSI status values.
		 */
		switch (csio->scsi_status) {
			case PQI_RAID_STATUS_QUEUE_FULL:
				csio->ccb_h.status = CAM_REQ_CMP;
				DBG_ERR("Queue Full error\n");
				break;
			/* check condition, sense data included */
			case PQI_RAID_STATUS_CHECK_CONDITION:
				{
					uint16_t sense_data_len =
						LE_16(err_info->sense_data_len);
					uint8_t *sense_data = NULL;
					if (sense_data_len)
						sense_data = err_info->data;

					copy_sense_data_to_csio(csio, sense_data, sense_data_len);
					csio->ccb_h.status = CAM_SCSI_STATUS_ERROR
						| CAM_AUTOSNS_VALID
						| CAM_REQ_CMP_ERR;
				}
				break;

			case PQI_RAID_DATA_IN_OUT_UNDERFLOW:
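				/*
				 * Short transfer: report the residual and
				 * complete the request successfully; upper
				 * layers decide what to do with it.
				 */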
				{
					uint32_t resid = 0;
					resid = rcb->bcount - err_info->data_out_transferred;
					csio->resid = resid;
					csio->ccb_h.status = CAM_REQ_CMP;
				}
				break;
			default:
				csio->ccb_h.status = CAM_REQ_CMP;
				break;
		}
	}

error_out:
	pqi_complete_scsi_io(csio, rcb);

	DBG_IO("OUT\n");
}

/*
 * Error response handling for aio.
 */
void
os_aio_response_error(rcb_t *rcb, aio_path_error_info_elem_t *err_info)
{
	struct ccb_scsiio *csio;
	pqisrc_softstate_t *softs;

	DBG_IO("IN\n");

	if (rcb == NULL)
		panic("rcb is null");

	rcb->status = PQI_STATUS_SUCCESS;
	csio = (struct ccb_scsiio *)&rcb->cm_ccb->csio;
	if (csio == NULL)
		panic("csio is null");

	softs = rcb->softs;

	if (!err_info || !rcb->dvp) {
		csio->ccb_h.status = CAM_REQ_CMP_ERR;
		DBG_ERR("error info or device could not be accessed! err_info = %p, rcb->dvp = %p\n",
				err_info, rcb->dvp);
		goto error_out;
	}

	switch (err_info->service_resp) {
		case PQI_AIO_SERV_RESPONSE_COMPLETE:
			csio->ccb_h.status = err_info->status;
			break;
		case PQI_AIO_SERV_RESPONSE_FAILURE:
			switch (err_info->status) {
				case PQI_AIO_STATUS_IO_ABORTED:
					csio->ccb_h.status = CAM_REQ_ABORTED;
					DBG_WARN_BTL(rcb->dvp, "IO aborted\n");
					break;
				case PQI_AIO_STATUS_UNDERRUN:
					csio->ccb_h.status = CAM_REQ_CMP;
					csio->resid =
						LE_32(err_info->resd_count);
					break;
				case PQI_AIO_STATUS_OVERRUN:
					csio->ccb_h.status = CAM_REQ_CMP;
					break;
				case PQI_AIO_STATUS_AIO_PATH_DISABLED:
					DBG_WARN_BTL(rcb->dvp, "AIO Path Disabled\n");
					/* Timed out TMF response comes here */
					if (rcb->tm_req) {
						rcb->req_pending = false;
						rcb->status = PQI_STATUS_SUCCESS;
						DBG_ERR("AIO Disabled for TMF\n");
						return;
					}
					rcb->dvp->aio_enabled = false;
					rcb->dvp->offload_enabled = false;
					csio->ccb_h.status |= CAM_REQUEUE_REQ;
					break;
				case PQI_AIO_STATUS_IO_ERROR:
				case PQI_AIO_STATUS_IO_NO_DEVICE:
				case PQI_AIO_STATUS_INVALID_DEVICE:
				default:
					DBG_WARN_BTL(rcb->dvp, "IO Error/Invalid/No device\n");
					csio->ccb_h.status |=
						CAM_SCSI_STATUS_ERROR;
					break;
			}
			break;
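		/*
		 * TMF responses never carry a SCSI CCB to complete; record
		 * the outcome and wake the waiter polling req_pending.
		 */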
		case PQI_AIO_SERV_RESPONSE_TMF_COMPLETE:
		case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED:
			DBG_ERR("PQI_AIO_SERV_RESPONSE_TMF %s\n",
				(err_info->service_resp == PQI_AIO_SERV_RESPONSE_TMF_COMPLETE) ? "COMPLETE" : "SUCCEEDED");
			rcb->status = PQI_STATUS_SUCCESS;
			rcb->req_pending = false;
			return;
		case PQI_AIO_SERV_RESPONSE_TMF_REJECTED:
		case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN:
			DBG_ERR("PQI_AIO_SERV_RESPONSE_TMF %s\n",
				(err_info->service_resp == PQI_AIO_SERV_RESPONSE_TMF_REJECTED) ? "REJECTED" : "INCORRECT LUN");
			rcb->status = PQI_STATUS_TIMEOUT;
			rcb->req_pending = false;
			return;
		default:
			DBG_WARN_BTL(rcb->dvp, "Scsi Status Error\n");
			csio->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
			break;
	}

	if (err_info->data_pres == DATA_PRESENT_SENSE_DATA) {
		csio->scsi_status = PQI_AIO_STATUS_CHECK_CONDITION;
		uint8_t *sense_data = NULL;
		unsigned sense_data_len = LE_16(err_info->data_len);
		if (sense_data_len)
			sense_data = err_info->data;
		DBG_INFO("SCSI_STATUS_CHECK_COND sense size %u\n",
			sense_data_len);
		copy_sense_data_to_csio(csio, sense_data, sense_data_len);
		csio->ccb_h.status = CAM_SCSI_STATUS_ERROR | CAM_AUTOSNS_VALID;
	}

error_out:
	pqi_complete_scsi_io(csio, rcb);
	DBG_IO("OUT\n");
}

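/*
 * Freeze the device queue once per CCB; CAM_DEV_QFRZN tells the peripheral
 * layer a release is needed when the error is processed.
 */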
static void
pqi_freeze_ccb(union ccb *ccb)
{
	if ((ccb->ccb_h.status & CAM_DEV_QFRZN) == 0) {
		ccb->ccb_h.status |= CAM_DEV_QFRZN;
		xpt_freeze_devq(ccb->ccb_h.path, 1);
	}
}

/*
 * Command-mapping helper function - populate this command's s/g table.
 */
static void
pqi_request_map_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	rcb_t *rcb = (rcb_t *)arg;
	pqisrc_softstate_t *softs = rcb->softs;
	union ccb *ccb;

	if (error || nseg > softs->pqi_cap.max_sg_elem) {
		DBG_ERR_BTL(rcb->dvp, "map failed err = %d or nseg(%d) > sgelem(%u)\n",
			error, nseg, softs->pqi_cap.max_sg_elem);
		goto error_io;
	}

	rcb->sgt = os_mem_alloc(softs, nseg * sizeof(sgt_t));

	if (!rcb->sgt) {
		DBG_ERR_BTL(rcb->dvp, "os_mem_alloc() failed; nseg = %d\n", nseg);
		goto error_io;
	}

	rcb->nseg = nseg;
	for (int i = 0; i < nseg; i++) {
		rcb->sgt[i].addr = segs[i].ds_addr;
		rcb->sgt[i].len = segs[i].ds_len;
		rcb->sgt[i].flags = 0;
	}

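	/*
	 * Prepare the buffers for DMA before handing the request to the
	 * controller: PREREAD for device-to-host, PREWRITE for host-to-device.
	 */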
	if ((rcb->cm_ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
		bus_dmamap_sync(softs->os_specific.pqi_buffer_dmat,
			rcb->cm_datamap, BUS_DMASYNC_PREREAD);
	if ((rcb->cm_ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
		bus_dmamap_sync(softs->os_specific.pqi_buffer_dmat,
			rcb->cm_datamap, BUS_DMASYNC_PREWRITE);

	/* Call IO functions depending on pd or ld */
	rcb->status = PQI_STATUS_FAILURE;

	error = pqisrc_build_send_io(softs, rcb);

	if (error) {
		rcb->req_pending = false;
		DBG_ERR_BTL(rcb->dvp, "Build IO failed, error = %d\n", error);
	} else {
		/* The IO was successfully submitted to the device. */
		return;
	}

error_io:
	ccb = rcb->cm_ccb;
	ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
	pqi_freeze_ccb(ccb);
	pqi_unmap_request(rcb);
	xpt_done(ccb);
}

/*
 * Function to dma-map the request buffer
 */
static int
pqi_map_request(rcb_t *rcb)
{
	pqisrc_softstate_t *softs = rcb->softs;
	int bsd_status = BSD_SUCCESS;
	union ccb *ccb = rcb->cm_ccb;

	DBG_FUNC("IN\n");

	/* check that mapping is necessary */
	if (rcb->cm_flags & PQI_CMD_MAPPED)
		return BSD_SUCCESS;

	rcb->cm_flags |= PQI_CMD_MAPPED;

	if (rcb->bcount) {
		bsd_status = bus_dmamap_load_ccb(softs->os_specific.pqi_buffer_dmat,
			rcb->cm_datamap, ccb, pqi_request_map_helper, rcb, 0);
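		/*
		 * EINPROGRESS means busdma will invoke the callback later
		 * (e.g. once bounce buffers are available); treat it as
		 * success and let the callback finish the submission.
		 */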
		if (bsd_status != BSD_SUCCESS && bsd_status != EINPROGRESS) {
			DBG_ERR_BTL(rcb->dvp, "bus_dmamap_load_ccb failed, return status = %d transfer length = %u\n",
					bsd_status, rcb->bcount);
			return bsd_status;
		}
	} else {
		/*
		 * Set up the command to go to the controller.  If there are no
		 * data buffers associated with the command then it can bypass
		 * busdma.
		 */
		/* Call IO functions depending on pd or ld */
		rcb->status = PQI_STATUS_FAILURE;

		if (pqisrc_build_send_io(softs, rcb) != PQI_STATUS_SUCCESS) {
			bsd_status = EIO;
		}
	}

	DBG_FUNC("OUT error = %d\n", bsd_status);

	return bsd_status;
}

/*
 * Function to clear the request control block
 */
void
os_reset_rcb(rcb_t *rcb)
{
	rcb->error_info = NULL;
	rcb->req = NULL;
	rcb->status = -1;
	rcb->tag = INVALID_ELEM;
	rcb->dvp = NULL;
	rcb->cdbp = NULL;
	rcb->softs = NULL;
	rcb->cm_flags = 0;
	rcb->cm_data = NULL;
	rcb->bcount = 0;
	rcb->nseg = 0;
	rcb->sgt = NULL;
	rcb->cm_ccb = NULL;
	rcb->encrypt_enable = false;
	rcb->ioaccel_handle = 0;
	rcb->resp_qid = 0;
	rcb->req_pending = false;
	rcb->tm_req = false;
}

/*
 * Callback function for the lun rescan
 */
static void
smartpqi_lunrescan_cb(struct cam_periph *periph, union ccb *ccb)
{
	xpt_free_path(ccb->ccb_h.path);
	xpt_free_ccb(ccb);
}

/*
 * Function to rescan the lun
 */
static void
smartpqi_lun_rescan(struct pqisrc_softstate *softs, int target,
			int lun)
{
	union ccb *ccb = NULL;
	cam_status status = 0;
	struct cam_path *path = NULL;

	DBG_FUNC("IN\n");

	ccb = xpt_alloc_ccb_nowait();
	if (ccb == NULL) {
		DBG_ERR("Unable to alloc ccb for lun rescan\n");
		return;
	}

	status = xpt_create_path(&path, NULL,
				cam_sim_path(softs->os_specific.sim), target, lun);
	if (status != CAM_REQ_CMP) {
		DBG_ERR("xpt_create_path status(%d) != CAM_REQ_CMP\n",
				 status);
		xpt_free_ccb(ccb);
		return;
	}

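	/*
	 * Issue an XPT_SCAN_LUN on the new path; smartpqi_lunrescan_cb()
	 * frees both the path and the CCB once the scan completes.
	 */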
	memset(ccb, 0, sizeof(union ccb));
	xpt_setup_ccb(&ccb->ccb_h, path, 5);
	ccb->ccb_h.func_code = XPT_SCAN_LUN;
	ccb->ccb_h.cbfcnp = smartpqi_lunrescan_cb;
	ccb->crcn.flags = CAM_FLAG_NONE;

	xpt_action(ccb);

	DBG_FUNC("OUT\n");
}

/*
 * Function to rescan the lun under each target
 */
void
smartpqi_target_rescan(struct pqisrc_softstate *softs)
{
	pqi_scsi_dev_t *device;
	int index;

	DBG_FUNC("IN\n");

	for (index = 0; index < PQI_MAX_DEVICES; index++) {
		/* if(softs->device_list[target][lun]){ */
		if (softs->dev_list[index] != NULL) {
			device = softs->dev_list[index];
			DBG_INFO("calling smartpqi_lun_rescan with TL = %d:%d\n",
				device->target, device->lun);
			smartpqi_lun_rescan(softs, device->target, device->lun);
		}
	}

	DBG_FUNC("OUT\n");
}

/*
 * Set the mode of tagged command queueing for the current task.
 */
uint8_t
os_get_task_attr(rcb_t *rcb)
{
	union ccb *ccb = rcb->cm_ccb;
	uint8_t tag_action = SOP_TASK_ATTRIBUTE_SIMPLE;

	switch (ccb->csio.tag_action) {
	case MSG_HEAD_OF_Q_TAG:
		tag_action = SOP_TASK_ATTRIBUTE_HEAD_OF_QUEUE;
		break;
	case MSG_ORDERED_Q_TAG:
		tag_action = SOP_TASK_ATTRIBUTE_ORDERED;
		break;
	case MSG_SIMPLE_Q_TAG:
	default:
		tag_action = SOP_TASK_ATTRIBUTE_SIMPLE;
		break;
	}
	return tag_action;
}

/*
 * Complete all outstanding commands
 */
void
os_complete_outstanding_cmds_nodevice(pqisrc_softstate_t *softs)
{
	int tag = 0;
	pqi_scsi_dev_t	*dvp = NULL;

	DBG_FUNC("IN\n");

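	/* Walk every possible outstanding tag (1..max_outstanding_io). */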
	for (tag = 1; tag <= softs->max_outstanding_io; tag++) {
		rcb_t *prcb = &softs->rcb[tag];
		dvp = prcb->dvp;
		if (prcb->req_pending && prcb->cm_ccb) {
			prcb->req_pending = false;
			prcb->cm_ccb->ccb_h.status = CAM_REQ_ABORTED | CAM_REQ_CMP;
			pqi_complete_scsi_io(&prcb->cm_ccb->csio, prcb);
			if (dvp)
				pqisrc_decrement_device_active_io(softs, dvp);
		}
	}

	DBG_FUNC("OUT\n");
}

/*
 * IO handling functionality entry point
 */
static int
pqisrc_io_start(struct cam_sim *sim, union ccb *ccb)
{
	rcb_t *rcb;
	uint32_t tag;
	pqisrc_softstate_t *softs = (struct pqisrc_softstate *)
					cam_sim_softc(sim);
	int32_t error;
	pqi_scsi_dev_t *dvp;
	int target, lun, index;

	DBG_FUNC("IN\n");

	/* if( softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun] == NULL ) { */
	target = ccb->ccb_h.target_id;
	lun = ccb->ccb_h.target_lun;
	index = pqisrc_find_btl_list_index(softs, softs->bus_id, target, lun);

	if (index == INVALID_ELEM) {
		ccb->ccb_h.status = CAM_DEV_NOT_THERE;
		DBG_INFO("Invalid index/device, BTL %u:%d:%d\n", softs->bus_id, target, lun);
		return ENXIO;
	}

	if (softs->dev_list[index] == NULL) {
		ccb->ccb_h.status = CAM_DEV_NOT_THERE;
		DBG_INFO("Device = %d not there\n", ccb->ccb_h.target_id);
		return ENXIO;
	}

	/* DBG_INFO("starting IO on BTL = %d:%d:%d index = %d\n",softs->bus_id,target,lun,index); */

	/* dvp = softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun]; */
	dvp = softs->dev_list[index];
	/* Check controller state */
	if (IN_PQI_RESET(softs)) {
		ccb->ccb_h.status = CAM_SCSI_BUS_RESET
					| CAM_BUSY | CAM_REQ_INPROG;
		DBG_WARN("Device = %d BUSY/IN_RESET\n", ccb->ccb_h.target_id);
		return ENXIO;
	}
	/* Check device state */
	if (pqisrc_ctrl_offline(softs) || DEV_GONE(dvp)) {
		ccb->ccb_h.status = CAM_DEV_NOT_THERE | CAM_REQ_CMP;
		DBG_WARN("Device = %d GONE/OFFLINE\n", ccb->ccb_h.target_id);
		return ENXIO;
	}
	/* Check device reset */
	if (DEVICE_RESET(dvp)) {
		ccb->ccb_h.status = CAM_SCSI_BUSY | CAM_REQ_INPROG | CAM_BUSY;
		DBG_WARN("Device %d reset returned busy\n", ccb->ccb_h.target_id);
		return EBUSY;
	}

	if (dvp->expose_device == false) {
		ccb->ccb_h.status = CAM_DEV_NOT_THERE;
		DBG_INFO("Device = %d not exposed\n", ccb->ccb_h.target_id);
		return ENXIO;
	}

	tag = pqisrc_get_tag(&softs->taglist);
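	/*
	 * Running out of tags means the controller queue is full: freeze
	 * the simq and ask CAM to requeue; pqi_release_camq() releases the
	 * simq again once a command completes.
	 */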
	if (tag == INVALID_ELEM) {
		DBG_ERR("Get Tag failed\n");
		xpt_freeze_simq(softs->os_specific.sim, 1);
		softs->os_specific.pqi_flags |= PQI_FLAG_BUSY;
		ccb->ccb_h.status |= (CAM_REQUEUE_REQ | CAM_RELEASE_SIMQ);
		return EIO;
	}

	DBG_IO("tag = %u &softs->taglist : %p\n", tag, &softs->taglist);

	rcb = &softs->rcb[tag];
	os_reset_rcb(rcb);
	rcb->tag = tag;
	rcb->softs = softs;
	rcb->cmdlen = ccb->csio.cdb_len;
	ccb->ccb_h.sim_priv.entries[0].ptr = rcb;

	rcb->cm_ccb = ccb;
	/* rcb->dvp = softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun]; */
	rcb->dvp = softs->dev_list[index];

	rcb->cm_data = (void *)ccb->csio.data_ptr;
	rcb->bcount = ccb->csio.dxfer_len;

	/*
	 * Submit the request to the adapter.
	 *
	 * Note that this may fail if we're unable to map the request (and
	 * if we ever learn a transport layer other than simple, may fail
	 * if the adapter rejects the command).
	 */
	if ((error = pqi_map_request(rcb)) != BSD_SUCCESS) {
		xpt_freeze_simq(softs->os_specific.sim, 1);
		if (error == EINPROGRESS) {
			/* Release simq in the completion */
			softs->os_specific.pqi_flags |= PQI_FLAG_BUSY;
			error = BSD_SUCCESS;
		} else {
			rcb->req_pending = false;
			ccb->ccb_h.status |= CAM_REQUEUE_REQ | CAM_RELEASE_SIMQ;
			DBG_WARN("Requeue req error = %d target = %d\n", error,
				ccb->ccb_h.target_id);
			pqi_unmap_request(rcb);
			error = EIO;
		}
	}

	DBG_FUNC("OUT error = %d\n", error);

	return error;
}

static inline int
pqi_tmf_status_to_bsd_tmf_status(int pqi_status, rcb_t *rcb)
{
	if (PQI_STATUS_SUCCESS == pqi_status &&
			PQI_STATUS_SUCCESS == rcb->status)
		return BSD_SUCCESS;
	else
		return EIO;
}

/*
 * Abort a task, task management functionality
 */
static int
pqisrc_scsi_abort_task(pqisrc_softstate_t *softs, union ccb *ccb)
{
	rcb_t *rcb = NULL;
	struct ccb_hdr *ccb_h = &ccb->ccb_h;
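	/*
	 * The rcb of the request being aborted was stashed in the CCB's
	 * sim_priv by pqisrc_io_start(); a fresh rcb carries the TMF itself.
	 */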
	rcb_t *prcb = ccb->ccb_h.sim_priv.entries[0].ptr;
	uint32_t tag;
	int rval;

	DBG_FUNC("IN\n");

	tag = pqisrc_get_tag(&softs->taglist);
	rcb = &softs->rcb[tag];
	rcb->tag = tag;

	if (rcb->dvp == NULL) {
		DBG_ERR("dvp is null, tmf type : 0x%x\n", ccb_h->func_code);
		rval = ENXIO;
		goto error_tmf;
	}

	rcb->tm_req = true;

	rval = pqisrc_send_tmf(softs, rcb->dvp, rcb, prcb,
		SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK);

	if ((rval = pqi_tmf_status_to_bsd_tmf_status(rval, rcb)) == BSD_SUCCESS)
		ccb->ccb_h.status = CAM_REQ_ABORTED;

error_tmf:
	os_reset_rcb(rcb);
	pqisrc_put_tag(&softs->taglist, tag);

	DBG_FUNC("OUT rval = %d\n", rval);

	return rval;
}

/*
 * Abort a taskset, task management functionality
 */
static int
pqisrc_scsi_abort_task_set(pqisrc_softstate_t *softs, union ccb *ccb)
{
	struct ccb_hdr *ccb_h = &ccb->ccb_h;
	rcb_t *rcb = NULL;
	uint32_t tag;
	int rval;

	DBG_FUNC("IN\n");

	tag = pqisrc_get_tag(&softs->taglist);
	rcb = &softs->rcb[tag];
	rcb->tag = tag;
	rcb->cm_ccb = ccb;

	if (rcb->dvp == NULL) {
		DBG_ERR("dvp is null, tmf type : 0x%x\n", ccb_h->func_code);
		rval = ENXIO;
		goto error_tmf;
	}

	rcb->tm_req = true;

	rval = pqisrc_send_tmf(softs, rcb->dvp, rcb, NULL,
			SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK_SET);

	rval = pqi_tmf_status_to_bsd_tmf_status(rval, rcb);

error_tmf:
	os_reset_rcb(rcb);
	pqisrc_put_tag(&softs->taglist, tag);

	DBG_FUNC("OUT rval = %d\n", rval);

	return rval;
}

/*
 * Target reset task management functionality
 */
static int
pqisrc_target_reset(pqisrc_softstate_t *softs, union ccb *ccb)
{
	/* pqi_scsi_dev_t *devp = softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun]; */
	struct ccb_hdr *ccb_h = &ccb->ccb_h;
	rcb_t *rcb = NULL;
	uint32_t tag;
	int rval;

	int bus, target, lun;
	int index;

	DBG_FUNC("IN\n");

	bus = softs->bus_id;
	target = ccb->ccb_h.target_id;
	lun = ccb->ccb_h.target_lun;

	index = pqisrc_find_btl_list_index(softs, bus, target, lun);
	if (index == INVALID_ELEM) {
		DBG_ERR("device not found at BTL %d:%d:%d\n", bus, target, lun);
		return (-1);
	}

	pqi_scsi_dev_t *devp = softs->dev_list[index];
	if (devp == NULL) {
		DBG_ERR("bad target %d, tmf type : 0x%x\n", ccb_h->target_id, ccb_h->func_code);
		return (-1);
	}

	tag = pqisrc_get_tag(&softs->taglist);
	rcb = &softs->rcb[tag];
	rcb->tag = tag;
	rcb->cm_ccb = ccb;

	rcb->tm_req = true;

	rval = pqisrc_send_tmf(softs, devp, rcb, NULL,
		SOP_TASK_MANAGEMENT_LUN_RESET);

	rval = pqi_tmf_status_to_bsd_tmf_status(rval, rcb);

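	/* The reset attempt is over either way; let IO reach the device again. */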
	devp->reset_in_progress = false;

	os_reset_rcb(rcb);
	pqisrc_put_tag(&softs->taglist, tag);

	DBG_FUNC("OUT rval = %d\n", rval);

	return rval;
}

/*
 * CAM entry point of the smartpqi module.
 */
static void
smartpqi_cam_action(struct cam_sim *sim, union ccb *ccb)
{
	struct pqisrc_softstate *softs = cam_sim_softc(sim);
	struct ccb_hdr *ccb_h = &ccb->ccb_h;

	DBG_FUNC("IN\n");

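	/*
	 * Requests that complete asynchronously (or that were already
	 * completed with xpt_done()) return early; everything else falls
	 * through to the final xpt_done() below.
	 */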
	switch (ccb_h->func_code) {
		case XPT_SCSI_IO:
		{
			if (!pqisrc_io_start(sim, ccb)) {
				return;
			}
			break;
		}
		case XPT_CALC_GEOMETRY:
		{
			struct ccb_calc_geometry *ccg;
			ccg = &ccb->ccg;
			if (ccg->block_size == 0) {
				ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
				ccb->ccb_h.status |= CAM_REQ_INVALID;
				break;
			}
			cam_calc_geometry(ccg, /* extended */ 1);
			ccb->ccb_h.status = CAM_REQ_CMP;
			break;
		}
		case XPT_PATH_INQ:
		{
			update_sim_properties(sim, &ccb->cpi);
			ccb->ccb_h.status = CAM_REQ_CMP;
			break;
		}
		case XPT_GET_TRAN_SETTINGS:
			get_transport_settings(softs, &ccb->cts);
			ccb->ccb_h.status = CAM_REQ_CMP;
			break;
		case XPT_ABORT:
			if (pqisrc_scsi_abort_task(softs, ccb)) {
				ccb->ccb_h.status = CAM_REQ_CMP_ERR;
				xpt_done(ccb);
				DBG_ERR("Abort task failed on %d\n",
					ccb->ccb_h.target_id);
				return;
			}
			break;
		case XPT_TERM_IO:
			if (pqisrc_scsi_abort_task_set(softs, ccb)) {
				ccb->ccb_h.status = CAM_REQ_CMP_ERR;
				DBG_ERR("Abort task set failed on %d\n",
					ccb->ccb_h.target_id);
				xpt_done(ccb);
				return;
			}
			break;
		case XPT_RESET_DEV:
			if (pqisrc_target_reset(softs, ccb)) {
				ccb->ccb_h.status = CAM_REQ_CMP_ERR;
				DBG_ERR("Target reset failed on %d\n",
					ccb->ccb_h.target_id);
				xpt_done(ccb);
				return;
			} else {
				ccb->ccb_h.status = CAM_REQ_CMP;
			}
			break;
		case XPT_RESET_BUS:
			ccb->ccb_h.status = CAM_REQ_CMP;
			break;
		case XPT_SET_TRAN_SETTINGS:
			ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
			return;
		default:
			DBG_WARN("unsupported func code 0x%x\n", ccb_h->func_code);
			ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
			break;
	}
	xpt_done(ccb);

	DBG_FUNC("OUT\n");
}

/*
 * Function to poll for responses when interrupts are unavailable.
 * This also supports crash dumps.
 */
static void
smartpqi_poll(struct cam_sim *sim)
{
	struct pqisrc_softstate *softs = cam_sim_softc(sim);
	int i;

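	/* Queue 0 is presumably the event/admin queue; poll only IO queues. */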
	for (i = 1; i < softs->intr_count; i++)
		pqisrc_process_response_queue(softs, i);
}

/*
 * Function to adjust the queue depth of a device
 */
void
smartpqi_adjust_queue_depth(struct cam_path *path, uint32_t queue_depth)
{
	struct ccb_relsim crs;

	DBG_FUNC("IN\n");

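	/*
	 * XPT_REL_SIMQ with RELSIM_ADJUST_OPENINGS asks CAM to resize the
	 * number of concurrent requests (openings) allowed on this path.
	 */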
	memset(&crs, 0, sizeof(struct ccb_relsim));
	xpt_setup_ccb(&crs.ccb_h, path, 5);
	crs.ccb_h.func_code = XPT_REL_SIMQ;
	crs.ccb_h.flags = CAM_DEV_QFREEZE;
	crs.release_flags = RELSIM_ADJUST_OPENINGS;
	crs.openings = queue_depth;
	xpt_action((union ccb *)&crs);
	if (crs.ccb_h.status != CAM_REQ_CMP) {
		printf("XPT_REL_SIMQ failed stat=%d\n", crs.ccb_h.status);
	}

	DBG_FUNC("OUT\n");
}

/*
 * Function to register async callback for setting queue depth
 */
static void
smartpqi_async(void *callback_arg, u_int32_t code,
		struct cam_path *path, void *arg)
{
	struct pqisrc_softstate *softs;
	softs = (struct pqisrc_softstate *)callback_arg;

	DBG_FUNC("IN\n");

	switch (code) {
		case AC_FOUND_DEVICE:
		{
			struct ccb_getdev *cgd;
			cgd = (struct ccb_getdev *)arg;
			if (cgd == NULL) {
				break;
			}
			uint32_t t_id = cgd->ccb_h.target_id;

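			/*
			 * Skip the controller's own target; for real devices
			 * look up the firmware-derived queue depth and apply it.
			 */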
			/* if (t_id <= (PQI_CTLR_INDEX - 1)) { */
			if (t_id >= PQI_CTLR_INDEX) {
				if (softs != NULL) {
					/* pqi_scsi_dev_t *dvp = softs->device_list[t_id][cgd->ccb_h.target_lun]; */
					int lun = cgd->ccb_h.target_lun;
					int index = pqisrc_find_btl_list_index(softs, softs->bus_id, t_id, lun);
					if (index != INVALID_ELEM) {
						pqi_scsi_dev_t *dvp = softs->dev_list[index];
						if (dvp == NULL) {
							DBG_ERR("Target is null, target id=%u\n", t_id);
							break;
						}
						smartpqi_adjust_queue_depth(path, dvp->queue_depth);
					}
				}
			}
			break;
		}
		default:
			break;
	}

	DBG_FUNC("OUT\n");
}

/*
 * Function to register the sim with the CAM layer for the smartpqi driver
 */
int
register_sim(struct pqisrc_softstate *softs, int card_index)
{
	int max_transactions;
	union ccb *ccb = NULL;
	cam_status status = 0;
	struct ccb_setasync csa;
	struct cam_sim *sim;

	DBG_FUNC("IN\n");

	max_transactions = softs->max_io_for_scsi_ml;
	softs->os_specific.devq = cam_simq_alloc(max_transactions);
	if (softs->os_specific.devq == NULL) {
		DBG_ERR("cam_simq_alloc failed txns = %d\n",
			max_transactions);
		return ENOMEM;
	}

	sim = cam_sim_alloc(smartpqi_cam_action,
				smartpqi_poll, "smartpqi", softs,
				card_index, &softs->os_specific.cam_lock,
				1, max_transactions, softs->os_specific.devq);
	if (sim == NULL) {
		DBG_ERR("cam_sim_alloc failed txns = %d\n",
			max_transactions);
		cam_simq_free(softs->os_specific.devq);
		return ENOMEM;
	}

	softs->os_specific.sim = sim;
	mtx_lock(&softs->os_specific.cam_lock);
	status = xpt_bus_register(sim, softs->os_specific.pqi_dev, 0);
	if (status != CAM_SUCCESS) {
		DBG_ERR("xpt_bus_register failed status=%d\n", status);
		cam_sim_free(softs->os_specific.sim, FALSE);
		cam_simq_free(softs->os_specific.devq);
		mtx_unlock(&softs->os_specific.cam_lock);
		return ENXIO;
	}

	softs->os_specific.sim_registered = TRUE;
	ccb = xpt_alloc_ccb_nowait();
	if (ccb == NULL) {
		DBG_ERR("xpt_alloc_ccb_nowait failed\n");
		mtx_unlock(&softs->os_specific.cam_lock);
		return ENXIO;
	}

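	/*
	 * A wildcard target/LUN path covers the whole bus, so a single
	 * async registration sees AC_FOUND_DEVICE for every device on it.
	 */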
	if (xpt_create_path(&ccb->ccb_h.path, NULL,
			cam_sim_path(softs->os_specific.sim),
			CAM_TARGET_WILDCARD,
			CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		DBG_ERR("xpt_create_path failed\n");
		xpt_free_ccb(ccb);
		xpt_bus_deregister(cam_sim_path(softs->os_specific.sim));
		cam_sim_free(softs->os_specific.sim, TRUE);
		mtx_unlock(&softs->os_specific.cam_lock);
		return ENXIO;
	}
	/*
	 * Callback to set the queue depth per target which is
	 * derived from the FW.
	 */
	softs->os_specific.path = ccb->ccb_h.path;
	memset(&csa, 0, sizeof(struct ccb_setasync));
	xpt_setup_ccb(&csa.ccb_h, softs->os_specific.path, 5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = AC_FOUND_DEVICE;
	csa.callback = smartpqi_async;
	csa.callback_arg = softs;
	xpt_action((union ccb *)&csa);
	if (csa.ccb_h.status != CAM_REQ_CMP) {
		DBG_ERR("Unable to register smartpqi_async handler: %d!\n",
			csa.ccb_h.status);
	}

	mtx_unlock(&softs->os_specific.cam_lock);
	DBG_FUNC("OUT\n");

	return BSD_SUCCESS;
}

/*
 * Function to deregister the smartpqi sim from the CAM layer
 */
void
deregister_sim(struct pqisrc_softstate *softs)
{
	struct ccb_setasync csa;

	DBG_FUNC("IN\n");

	if (softs->os_specific.mtx_init) {
		mtx_lock(&softs->os_specific.cam_lock);
	}

	memset(&csa, 0, sizeof(struct ccb_setasync));
	xpt_setup_ccb(&csa.ccb_h, softs->os_specific.path, 5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = 0;
	csa.callback = smartpqi_async;
	csa.callback_arg = softs;
	xpt_action((union ccb *)&csa);
	xpt_free_path(softs->os_specific.path);

	if (softs->os_specific.sim) {
		xpt_release_simq(softs->os_specific.sim, 0);
		xpt_bus_deregister(cam_sim_path(softs->os_specific.sim));
		softs->os_specific.sim_registered = FALSE;
		cam_sim_free(softs->os_specific.sim, FALSE);
		softs->os_specific.sim = NULL;
	}

	if (softs->os_specific.mtx_init) {
		mtx_unlock(&softs->os_specific.cam_lock);
	}
	if (softs->os_specific.devq != NULL) {
		cam_simq_free(softs->os_specific.devq);
	}
	if (softs->os_specific.mtx_init) {
		mtx_destroy(&softs->os_specific.cam_lock);
		softs->os_specific.mtx_init = FALSE;
	}

	mtx_destroy(&softs->os_specific.map_lock);

	DBG_FUNC("OUT\n");
}

void
os_rescan_target(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
{
	struct cam_path *tmppath = NULL;

	DBG_FUNC("IN\n");

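	/*
	 * AC_INQ_CHANGED prompts CAM to re-issue INQUIRY on the path, picking
	 * up changes to the device's identity without a full bus rescan.
	 */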
	if (softs->os_specific.sim_registered) {
		if (xpt_create_path(&tmppath, NULL,
			cam_sim_path(softs->os_specific.sim),
			device->target, device->lun) != CAM_REQ_CMP) {
			DBG_ERR("unable to create path for async event, Bus: %d Target: %d Lun: %d\n",
				device->bus, device->target, device->lun);
			return;
		}
		xpt_async(AC_INQ_CHANGED, tmppath, NULL);
		xpt_free_path(tmppath);
	}

	device->scsi_rescan = false;

	DBG_FUNC("OUT\n");
}
1392