/*-
 * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * CAM interface for smartpqi driver
 */

#include "smartpqi_includes.h"

/*
 * Set cam sim properties of the smartpqi adapter.
 */
static void
update_sim_properties(struct cam_sim *sim, struct ccb_pathinq *cpi)
{
	pqisrc_softstate_t *softs = (struct pqisrc_softstate *)
					cam_sim_softc(sim);
	device_t dev = softs->os_specific.pqi_dev;

	DBG_FUNC("IN\n");

	cpi->version_num = 1;
	cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
	cpi->target_sprt = 0;
	cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
	cpi->hba_eng_cnt = 0;
	cpi->max_lun = PQI_MAX_MULTILUN;
	cpi->max_target = 1088;
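	/*
	 * Largest transfer we can map: with max_sg_elem scatter/gather
	 * entries available, one entry is held back, presumably so a
	 * buffer that is not page aligned still fits (each remaining
	 * entry maps one PAGE_SIZE run).
	 */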
	cpi->maxio = (softs->pqi_cap.max_sg_elem - 1) * PAGE_SIZE;
	cpi->initiator_id = 255;
	strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
	strlcpy(cpi->hba_vid, "Microsemi", HBA_IDLEN);
	strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
	cpi->unit_number = cam_sim_unit(sim);
	cpi->bus_id = cam_sim_bus(sim);
	cpi->base_transfer_speed = 1200000; /* Base bus speed in KB/sec */
	cpi->protocol = PROTO_SCSI;
	cpi->protocol_version = SCSI_REV_SPC4;
	cpi->transport = XPORT_SPI;
	cpi->transport_version = 2;
	cpi->ccb_h.status = CAM_REQ_CMP;
	cpi->hba_vendor = pci_get_vendor(dev);
	cpi->hba_device = pci_get_device(dev);
	cpi->hba_subvendor = pci_get_subvendor(dev);
	cpi->hba_subdevice = pci_get_subdevice(dev);

	DBG_FUNC("OUT\n");
}

/*
 * Get transport settings of the smartpqi adapter
 */
static void
get_transport_settings(struct pqisrc_softstate *softs,
		struct ccb_trans_settings *cts)
{
	struct ccb_trans_settings_scsi	*scsi = &cts->proto_specific.scsi;
	struct ccb_trans_settings_sas	*sas = &cts->xport_specific.sas;
	struct ccb_trans_settings_spi	*spi = &cts->xport_specific.spi;

	DBG_FUNC("IN\n");

	cts->protocol = PROTO_SCSI;
	cts->protocol_version = SCSI_REV_SPC4;
	cts->transport = XPORT_SPI;
	cts->transport_version = 2;
	spi->valid = CTS_SPI_VALID_DISC;
	spi->flags = CTS_SPI_FLAGS_DISC_ENB;
	scsi->valid = CTS_SCSI_VALID_TQ;
	scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
	sas->valid = CTS_SAS_VALID_SPEED;
	cts->ccb_h.status = CAM_REQ_CMP;

	DBG_FUNC("OUT\n");
}

/*
 * Add the target to CAM layer and rescan, when a new device is found
 */
void
os_add_device(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
{
	union ccb *ccb;

	DBG_FUNC("IN\n");

	if (softs->os_specific.sim_registered) {
		if ((ccb = xpt_alloc_ccb_nowait()) == NULL) {
			DBG_ERR("rescan failed (can't allocate CCB)\n");
			return;
		}

		if (xpt_create_path(&ccb->ccb_h.path, NULL,
			cam_sim_path(softs->os_specific.sim),
			device->target, device->lun) != CAM_REQ_CMP) {
			DBG_ERR("rescan failed (can't create path)\n");
			xpt_free_ccb(ccb);
			return;
		}
		xpt_rescan(ccb);
	}

	DBG_FUNC("OUT\n");
}

/*
 * Remove the device from CAM layer when deleted or hot removed
 */
void
os_remove_device(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
{
	struct cam_path *tmppath;

	DBG_FUNC("IN\n");

	if (softs->os_specific.sim_registered) {
		if (xpt_create_path(&tmppath, NULL,
			cam_sim_path(softs->os_specific.sim),
			device->target, device->lun) != CAM_REQ_CMP) {
			DBG_ERR("unable to create path for async event\n");
			return;
		}
		xpt_async(AC_LOST_DEVICE, tmppath, NULL);
		xpt_free_path(tmppath);
		softs->device_list[device->target][device->lun] = NULL;
		pqisrc_free_device(softs, device);
	}

	DBG_FUNC("OUT\n");
}

/*
 * Function to release the frozen simq
 */
static void
pqi_release_camq(rcb_t *rcb)
{
	pqisrc_softstate_t *softs;
	struct ccb_scsiio *csio;

	csio = (struct ccb_scsiio *)&rcb->cm_ccb->csio;
	softs = rcb->softs;

	DBG_FUNC("IN\n");

	if (softs->os_specific.pqi_flags & PQI_FLAG_BUSY) {
		softs->os_specific.pqi_flags &= ~PQI_FLAG_BUSY;
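		/*
		 * If this CCB was already tagged to release the simq,
		 * do it now; otherwise tag it so the release happens
		 * when the CCB is returned to CAM.
		 */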
		if (csio->ccb_h.status & CAM_RELEASE_SIMQ)
			xpt_release_simq(xpt_path_sim(csio->ccb_h.path), 0);
		else
			csio->ccb_h.status |= CAM_RELEASE_SIMQ;
	}

	DBG_FUNC("OUT\n");
}

static void
pqi_synch_request(rcb_t *rcb)
{
	pqisrc_softstate_t *softs = rcb->softs;

	DBG_IO("IN rcb = %p\n", rcb);

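	/* Nothing to do if this command never went through busdma mapping. */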
	if (!(rcb->cm_flags & PQI_CMD_MAPPED))
		return;

	if (rcb->bcount != 0) {
		if (rcb->data_dir == SOP_DATA_DIR_FROM_DEVICE)
			bus_dmamap_sync(softs->os_specific.pqi_buffer_dmat,
					rcb->cm_datamap,
					BUS_DMASYNC_POSTREAD);
		if (rcb->data_dir == SOP_DATA_DIR_TO_DEVICE)
			bus_dmamap_sync(softs->os_specific.pqi_buffer_dmat,
					rcb->cm_datamap,
					BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(softs->os_specific.pqi_buffer_dmat,
					rcb->cm_datamap);
	}
	rcb->cm_flags &= ~PQI_CMD_MAPPED;

	if (rcb->sgt && rcb->nseg)
		os_mem_free(rcb->softs, (void *)rcb->sgt,
				rcb->nseg * sizeof(sgt_t));

	DBG_IO("OUT\n");
}

/*
 * Function to dma-unmap the completed request
 */
static inline void
pqi_unmap_request(rcb_t *rcb)
{
	DBG_IO("IN rcb = %p\n", rcb);

	pqi_synch_request(rcb);
	pqisrc_put_tag(&rcb->softs->taglist, rcb->tag);

	DBG_IO("OUT\n");
}

/*
 * Construct meaningful LD name for volume here.
 */
static void
smartpqi_fix_ld_inquiry(pqisrc_softstate_t *softs, struct ccb_scsiio *csio)
{
	struct scsi_inquiry_data *inq = NULL;
	uint8_t *cdb = NULL;
	pqi_scsi_dev_t *device = NULL;

	DBG_FUNC("IN\n");

	if (pqisrc_ctrl_offline(softs))
		return;

	cdb = (csio->ccb_h.flags & CAM_CDB_POINTER) ?
		(uint8_t *)csio->cdb_io.cdb_ptr : csio->cdb_io.cdb_bytes;
	if (cdb[0] == INQUIRY &&
		(cdb[1] & SI_EVPD) == 0 &&
		(csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN &&
		csio->dxfer_len >= SHORT_INQUIRY_LENGTH) {

		inq = (struct scsi_inquiry_data *)csio->data_ptr;

		device = softs->device_list[csio->ccb_h.target_id][csio->ccb_h.target_lun];

		/*
		 * Let the disks be probed and dealt with via CAM. Only for
		 * logical volumes let it fall through so the inquiry data
		 * can be tweaked.
		 */
		if (!device || !pqisrc_is_logical_device(device) ||
				(device->devtype != DISK_DEVICE) ||
				pqisrc_is_external_raid_device(device)) {
			return;
		}

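		/*
		 * The inquiry vendor/product/revision fields are fixed
		 * width and need not be NUL-terminated, so strncpy()
		 * without an explicit terminator is intentional here.
		 */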
		strncpy(inq->vendor, device->vendor,
				SID_VENDOR_SIZE);
		strncpy(inq->product,
				pqisrc_raidlevel_to_string(device->raid_level),
				SID_PRODUCT_SIZE);
		strncpy(inq->revision, device->volume_offline ? "OFF" : "OK",
				SID_REVISION_SIZE);
	}

	DBG_FUNC("OUT\n");
}

static void
pqi_complete_scsi_io(struct ccb_scsiio *csio, rcb_t *rcb)
{
	uint32_t release_tag;
	pqisrc_softstate_t *softs = rcb->softs;

	DBG_IO("IN scsi io = %p\n", csio);

	pqi_synch_request(rcb);
	smartpqi_fix_ld_inquiry(rcb->softs, csio);
	pqi_release_camq(rcb);
	release_tag = rcb->tag;
	os_reset_rcb(rcb);
	pqisrc_put_tag(&softs->taglist, release_tag);
	xpt_done((union ccb *)csio);

	DBG_IO("OUT\n");
}

/*
 * Handle completion of a command - pass results back through the CCB
 */
void
os_io_response_success(rcb_t *rcb)
{
	struct ccb_scsiio *csio;

	DBG_IO("IN rcb = %p\n", rcb);

	if (rcb == NULL)
		panic("rcb is null");

	csio = (struct ccb_scsiio *)&rcb->cm_ccb->csio;

	if (csio == NULL)
		panic("csio is null");

	rcb->status = REQUEST_SUCCESS;
	csio->ccb_h.status = CAM_REQ_CMP;

	pqi_complete_scsi_io(csio, rcb);

	DBG_IO("OUT\n");
}

static void
copy_sense_data_to_csio(struct ccb_scsiio *csio,
		uint8_t *sense_data, uint16_t sense_data_len)
{
	DBG_IO("IN csio = %p\n", csio);

	memset(&csio->sense_data, 0, csio->sense_len);

	sense_data_len = (sense_data_len > csio->sense_len) ?
		csio->sense_len : sense_data_len;

	if (sense_data)
		memcpy(&csio->sense_data, sense_data, sense_data_len);

	if (csio->sense_len > sense_data_len)
		csio->sense_resid = csio->sense_len - sense_data_len;
	else
		csio->sense_resid = 0;

	DBG_IO("OUT\n");
}

/*
 * Error response handling for raid IO
 */
void
os_raid_response_error(rcb_t *rcb, raid_path_error_info_elem_t *err_info)
{
	struct ccb_scsiio *csio;
	pqisrc_softstate_t *softs;

	DBG_IO("IN\n");

	csio = (struct ccb_scsiio *)&rcb->cm_ccb->csio;

	if (csio == NULL)
		panic("csio is null");

	softs = rcb->softs;

	csio->ccb_h.status = CAM_REQ_CMP_ERR;

	if (!err_info || !rcb->dvp) {
		DBG_ERR("error info or device missing! err_info = %p, rcb->dvp = %p\n",
				err_info, rcb->dvp);
		goto error_out;
	}

	csio->scsi_status = err_info->status;

	if (csio->ccb_h.func_code == XPT_SCSI_IO) {
		/*
		 * Handle specific SCSI status values.
		 */
		switch (csio->scsi_status) {
			case PQI_RAID_STATUS_QUEUE_FULL:
				csio->ccb_h.status = CAM_REQ_CMP;
				DBG_ERR("Queue Full error\n");
				break;
			/* check condition, sense data included */
			case PQI_RAID_STATUS_CHECK_CONDITION:
				{
					uint16_t sense_data_len =
						LE_16(err_info->sense_data_len);
					uint8_t *sense_data = NULL;
					if (sense_data_len)
						sense_data = err_info->data;
					copy_sense_data_to_csio(csio, sense_data, sense_data_len);
					csio->ccb_h.status = CAM_SCSI_STATUS_ERROR
							| CAM_AUTOSNS_VALID
							| CAM_REQ_CMP_ERR;
				}
				break;

			case PQI_RAID_DATA_IN_OUT_UNDERFLOW:
				{
					uint32_t resid = 0;
					resid = rcb->bcount - err_info->data_out_transferred;
					csio->resid = resid;
					csio->ccb_h.status = CAM_REQ_CMP;
				}
				break;
			default:
				csio->ccb_h.status = CAM_REQ_CMP;
				break;
		}
	}

error_out:
	pqi_complete_scsi_io(csio, rcb);

	DBG_IO("OUT\n");
}

/*
 * Error response handling for aio.
 */
void
os_aio_response_error(rcb_t *rcb, aio_path_error_info_elem_t *err_info)
{
	struct ccb_scsiio *csio;
	pqisrc_softstate_t *softs;

	DBG_IO("IN\n");

	if (rcb == NULL)
		panic("rcb is null");

	rcb->status = REQUEST_SUCCESS;
	csio = (struct ccb_scsiio *)&rcb->cm_ccb->csio;
	if (csio == NULL)
		panic("csio is null");

	softs = rcb->softs;

	if (!err_info || !rcb->dvp) {
		csio->ccb_h.status = CAM_REQ_CMP_ERR;
		DBG_ERR("error info or device missing! err_info = %p, rcb->dvp = %p\n",
				err_info, rcb->dvp);
		goto error_out;
	}

	switch (err_info->service_resp) {
		case PQI_AIO_SERV_RESPONSE_COMPLETE:
			csio->ccb_h.status = err_info->status;
			break;
		case PQI_AIO_SERV_RESPONSE_FAILURE:
			switch (err_info->status) {
				case PQI_AIO_STATUS_IO_ABORTED:
					csio->ccb_h.status = CAM_REQ_ABORTED;
					DBG_WARN_BTL(rcb->dvp, "IO aborted\n");
					break;
				case PQI_AIO_STATUS_UNDERRUN:
					csio->ccb_h.status = CAM_REQ_CMP;
					csio->resid =
						LE_32(err_info->resd_count);
					break;
				case PQI_AIO_STATUS_OVERRUN:
					csio->ccb_h.status = CAM_REQ_CMP;
					break;
				case PQI_AIO_STATUS_AIO_PATH_DISABLED:
					DBG_WARN_BTL(rcb->dvp, "AIO Path Disabled\n");
					/* Timed out TMF response comes here */
					if (rcb->tm_req) {
						rcb->req_pending = false;
						rcb->status = REQUEST_SUCCESS;
						DBG_ERR("AIO Disabled for TMF\n");
						return;
					}
					rcb->dvp->aio_enabled = false;
					rcb->dvp->offload_enabled = false;
					csio->ccb_h.status |= CAM_REQUEUE_REQ;
					break;
				case PQI_AIO_STATUS_IO_ERROR:
				case PQI_AIO_STATUS_IO_NO_DEVICE:
				case PQI_AIO_STATUS_INVALID_DEVICE:
				default:
					DBG_WARN_BTL(rcb->dvp, "IO Error/Invalid/No device\n");
					csio->ccb_h.status |=
						CAM_SCSI_STATUS_ERROR;
					break;
			}
			break;
		case PQI_AIO_SERV_RESPONSE_TMF_COMPLETE:
		case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED:
			DBG_ERR("PQI_AIO_SERV_RESPONSE_TMF %s\n",
				(err_info->service_resp == PQI_AIO_SERV_RESPONSE_TMF_COMPLETE) ? "COMPLETE" : "SUCCEEDED");
			rcb->status = REQUEST_SUCCESS;
			rcb->req_pending = false;
			return;
		case PQI_AIO_SERV_RESPONSE_TMF_REJECTED:
		case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN:
			DBG_ERR("PQI_AIO_SERV_RESPONSE_TMF %s\n",
				(err_info->service_resp == PQI_AIO_SERV_RESPONSE_TMF_REJECTED) ? "REJECTED" : "INCORRECT LUN");
			rcb->status = REQUEST_FAILED;
			rcb->req_pending = false;
			return;
		default:
			DBG_WARN_BTL(rcb->dvp, "Scsi Status Error\n");
			csio->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
			break;
	}

	if (err_info->data_pres == DATA_PRESENT_SENSE_DATA) {
		uint8_t *sense_data = NULL;
		unsigned sense_data_len = LE_16(err_info->data_len);

		csio->scsi_status = PQI_AIO_STATUS_CHECK_CONDITION;
		if (sense_data_len)
			sense_data = err_info->data;
		DBG_INFO("SCSI_STATUS_CHECK_COND sense size %u\n",
			sense_data_len);
		copy_sense_data_to_csio(csio, sense_data, sense_data_len);
		csio->ccb_h.status = CAM_SCSI_STATUS_ERROR | CAM_AUTOSNS_VALID;
	}

error_out:
	pqi_complete_scsi_io(csio, rcb);
	DBG_IO("OUT\n");
}

static void
pqi_freeze_ccb(union ccb *ccb)
{
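	/*
	 * Freeze the device queue at most once per CCB; CAM_DEV_QFRZN
	 * records that a matching release of the device queue is owed
	 * when the CCB is completed.
	 */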
	if ((ccb->ccb_h.status & CAM_DEV_QFRZN) == 0) {
		ccb->ccb_h.status |= CAM_DEV_QFRZN;
		xpt_freeze_devq(ccb->ccb_h.path, 1);
	}
}

/*
 * Command-mapping helper function - populate this command's s/g table.
 */
static void
pqi_request_map_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	rcb_t *rcb = (rcb_t *)arg;
	pqisrc_softstate_t *softs = rcb->softs;
	union ccb *ccb;

	if (error || nseg > softs->pqi_cap.max_sg_elem) {
		DBG_ERR_BTL(rcb->dvp, "map failed err = %d or nseg(%d) > sgelem(%d)\n",
			error, nseg, softs->pqi_cap.max_sg_elem);
		goto error_io;
	}

	rcb->sgt = os_mem_alloc(softs, nseg * sizeof(sgt_t));

	if (!rcb->sgt) {
		DBG_ERR_BTL(rcb->dvp, "os_mem_alloc() failed; nseg = %d\n", nseg);
		goto error_io;
	}

	rcb->nseg = nseg;
	for (int i = 0; i < nseg; i++) {
		rcb->sgt[i].addr = segs[i].ds_addr;
		rcb->sgt[i].len = segs[i].ds_len;
		rcb->sgt[i].flags = 0;
	}

	if (rcb->data_dir == SOP_DATA_DIR_FROM_DEVICE)
		bus_dmamap_sync(softs->os_specific.pqi_buffer_dmat,
			rcb->cm_datamap, BUS_DMASYNC_PREREAD);
	if (rcb->data_dir == SOP_DATA_DIR_TO_DEVICE)
		bus_dmamap_sync(softs->os_specific.pqi_buffer_dmat,
			rcb->cm_datamap, BUS_DMASYNC_PREWRITE);

	/* Call IO functions depending on pd or ld */
	rcb->status = REQUEST_PENDING;

	error = pqisrc_build_send_io(softs, rcb);

	if (error) {
		rcb->req_pending = false;
		DBG_ERR_BTL(rcb->dvp, "Build IO failed, error = %d\n", error);
	} else {
		/* The IO was successfully submitted to the device. */
		return;
	}

error_io:
	ccb = rcb->cm_ccb;
	ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
	pqi_freeze_ccb(ccb);
	pqi_unmap_request(rcb);
	xpt_done(ccb);
	return;
}

/*
 * Function to dma-map the request buffer
 */
static int
pqi_map_request(rcb_t *rcb)
{
	pqisrc_softstate_t *softs = rcb->softs;
	int bsd_status = BSD_SUCCESS;
	union ccb *ccb = rcb->cm_ccb;

	DBG_FUNC("IN\n");

	/* check that mapping is necessary */
	if (rcb->cm_flags & PQI_CMD_MAPPED)
		return BSD_SUCCESS;

	rcb->cm_flags |= PQI_CMD_MAPPED;

	if (rcb->bcount) {
		bsd_status = bus_dmamap_load_ccb(softs->os_specific.pqi_buffer_dmat,
			rcb->cm_datamap, ccb, pqi_request_map_helper, rcb, 0);
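		/*
		 * EINPROGRESS is not a failure: busdma will invoke
		 * pqi_request_map_helper() later, once mapping resources
		 * become available, so the request is still in flight.
		 */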
		if (bsd_status != BSD_SUCCESS && bsd_status != EINPROGRESS) {
			DBG_ERR_BTL(rcb->dvp, "bus_dmamap_load_ccb failed, return status = %d transfer length = %d\n",
					bsd_status, rcb->bcount);
			return bsd_status;
		}
	} else {
		/*
		 * Set up the command to go to the controller.  If there are no
		 * data buffers associated with the command then it can bypass
		 * busdma.
		 */
		/* Call IO functions depending on pd or ld */
		rcb->status = REQUEST_PENDING;

		if (pqisrc_build_send_io(softs, rcb) != PQI_STATUS_SUCCESS) {
			bsd_status = EIO;
		}
	}

	DBG_FUNC("OUT error = %d\n", bsd_status);

	return bsd_status;
}

/*
 * Function to clear the request control block
 */
void
os_reset_rcb(rcb_t *rcb)
{
	rcb->error_info = NULL;
	rcb->req = NULL;
	rcb->status = -1;
	rcb->tag = INVALID_ELEM;
	rcb->dvp = NULL;
	rcb->cdbp = NULL;
	rcb->softs = NULL;
	rcb->cm_flags = 0;
	rcb->cm_data = NULL;
	rcb->bcount = 0;
	rcb->nseg = 0;
	rcb->sgt = NULL;
	rcb->cm_ccb = NULL;
	rcb->encrypt_enable = false;
	rcb->ioaccel_handle = 0;
	rcb->resp_qid = 0;
	rcb->req_pending = false;
	rcb->tm_req = false;
}

/*
 * Callback function for the lun rescan
 */
static void
smartpqi_lunrescan_cb(struct cam_periph *periph, union ccb *ccb)
{
	xpt_free_path(ccb->ccb_h.path);
	xpt_free_ccb(ccb);
}

/*
 * Function to rescan the lun
 */
static void
smartpqi_lun_rescan(struct pqisrc_softstate *softs, int target,
			int lun)
{
	union ccb *ccb = NULL;
	cam_status status = 0;
	struct cam_path *path = NULL;

	DBG_FUNC("IN\n");

	ccb = xpt_alloc_ccb_nowait();
	if (ccb == NULL) {
		DBG_ERR("Unable to alloc ccb for lun rescan\n");
		return;
	}

	status = xpt_create_path(&path, NULL,
				cam_sim_path(softs->os_specific.sim), target, lun);
	if (status != CAM_REQ_CMP) {
		DBG_ERR("xpt_create_path status(%d) != CAM_REQ_CMP\n",
				 status);
		xpt_free_ccb(ccb);
		return;
	}

	bzero(ccb, sizeof(union ccb));
	xpt_setup_ccb(&ccb->ccb_h, path, 5);
	ccb->ccb_h.func_code = XPT_SCAN_LUN;
	ccb->ccb_h.cbfcnp = smartpqi_lunrescan_cb;
	ccb->crcn.flags = CAM_FLAG_NONE;

	xpt_action(ccb);

	DBG_FUNC("OUT\n");
}

/*
 * Function to rescan the lun under each target
 */
void
smartpqi_target_rescan(struct pqisrc_softstate *softs)
{
	int target = 0, lun = 0;

	DBG_FUNC("IN\n");

	for (target = 0; target < PQI_MAX_DEVICES; target++) {
		for (lun = 0; lun < PQI_MAX_MULTILUN; lun++) {
			if (softs->device_list[target][lun]) {
				smartpqi_lun_rescan(softs, target, lun);
			}
		}
	}

	DBG_FUNC("OUT\n");
}

/*
 * Set the mode of tagged command queueing for the current task.
 */
uint8_t
os_get_task_attr(rcb_t *rcb)
{
	union ccb *ccb = rcb->cm_ccb;
	uint8_t tag_action = SOP_TASK_ATTRIBUTE_SIMPLE;

	switch (ccb->csio.tag_action) {
	case MSG_HEAD_OF_Q_TAG:
		tag_action = SOP_TASK_ATTRIBUTE_HEAD_OF_QUEUE;
		break;
	case MSG_ORDERED_Q_TAG:
		tag_action = SOP_TASK_ATTRIBUTE_ORDERED;
		break;
	case MSG_SIMPLE_Q_TAG:
	default:
		tag_action = SOP_TASK_ATTRIBUTE_SIMPLE;
		break;
	}
	return tag_action;
}

/*
 * Complete all outstanding commands
 */
void
os_complete_outstanding_cmds_nodevice(pqisrc_softstate_t *softs)
{
	int tag = 0;
	pqi_scsi_dev_t *dvp = NULL;

	DBG_FUNC("IN\n");

	for (tag = 1; tag <= softs->max_outstanding_io; tag++) {
		rcb_t *prcb = &softs->rcb[tag];
		dvp = prcb->dvp;
		if (prcb->req_pending && prcb->cm_ccb) {
			prcb->req_pending = false;
			prcb->cm_ccb->ccb_h.status = CAM_REQ_ABORTED | CAM_REQ_CMP;
			pqi_complete_scsi_io(&prcb->cm_ccb->csio, prcb);
			if (dvp)
				pqisrc_decrement_device_active_io(softs, dvp);
		}
	}

	DBG_FUNC("OUT\n");
}

/*
 * IO handling functionality entry point
 */
static int
pqisrc_io_start(struct cam_sim *sim, union ccb *ccb)
{
	rcb_t *rcb;
	uint32_t tag, no_transfer = 0;
	pqisrc_softstate_t *softs = (struct pqisrc_softstate *)
					cam_sim_softc(sim);
	int32_t error;
	pqi_scsi_dev_t *dvp;

	DBG_FUNC("IN\n");

	if (softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun] == NULL) {
		ccb->ccb_h.status = CAM_DEV_NOT_THERE;
		DBG_INFO("Device = %d not there\n", ccb->ccb_h.target_id);
		return ENXIO;
	}

	dvp = softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun];
	/* Check controller state */
	if (IN_PQI_RESET(softs)) {
		ccb->ccb_h.status = CAM_SCSI_BUS_RESET
					| CAM_BUSY | CAM_REQ_INPROG;
		DBG_WARN("Device = %d BUSY/IN_RESET\n", ccb->ccb_h.target_id);
		return ENXIO;
	}
	/* Check device state */
	if (pqisrc_ctrl_offline(softs) || DEV_GONE(dvp)) {
		ccb->ccb_h.status = CAM_DEV_NOT_THERE | CAM_REQ_CMP;
		DBG_WARN("Device = %d GONE/OFFLINE\n", ccb->ccb_h.target_id);
		return ENXIO;
	}
	/* Check device reset */
	if (DEVICE_RESET(dvp)) {
		ccb->ccb_h.status = CAM_SCSI_BUSY | CAM_REQ_INPROG | CAM_BUSY;
		DBG_WARN("Device %d reset returned busy\n", ccb->ccb_h.target_id);
		return EBUSY;
	}

	if (dvp->expose_device == false) {
		ccb->ccb_h.status = CAM_DEV_NOT_THERE;
		DBG_INFO("Device = %d not exposed\n", ccb->ccb_h.target_id);
		return ENXIO;
	}

	tag = pqisrc_get_tag(&softs->taglist);
	if (tag == INVALID_ELEM) {
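		/*
		 * Out of command slots: freeze the SIM queue and have CAM
		 * requeue the request; pqi_release_camq() lifts the freeze
		 * once a command completes and frees a tag.
		 */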
		DBG_ERR("Get Tag failed\n");
		xpt_freeze_simq(softs->os_specific.sim, 1);
		softs->os_specific.pqi_flags |= PQI_FLAG_BUSY;
		ccb->ccb_h.status |= (CAM_REQUEUE_REQ | CAM_RELEASE_SIMQ);
		return EIO;
	}

	DBG_IO("tag = %d &softs->taglist : %p\n", tag, &softs->taglist);

	rcb = &softs->rcb[tag];
	os_reset_rcb(rcb);
	rcb->tag = tag;
	rcb->softs = softs;
	rcb->cmdlen = ccb->csio.cdb_len;
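	/*
	 * Stash the rcb in the CCB's SIM-private area so that a later
	 * XPT_ABORT for this CCB can locate the outstanding request
	 * (see pqisrc_scsi_abort_task()).
	 */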
	ccb->ccb_h.sim_priv.entries[0].ptr = rcb;

	switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
		case CAM_DIR_IN:
			rcb->data_dir = SOP_DATA_DIR_FROM_DEVICE;
			break;
		case CAM_DIR_OUT:
			rcb->data_dir = SOP_DATA_DIR_TO_DEVICE;
			break;
		case CAM_DIR_NONE:
			no_transfer = 1;
			break;
		default:
			DBG_ERR("Unknown Dir\n");
			break;
	}
	rcb->cm_ccb = ccb;
	rcb->dvp = softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun];

	if (!no_transfer) {
		rcb->cm_data = (void *)ccb->csio.data_ptr;
		rcb->bcount = ccb->csio.dxfer_len;
	} else {
		rcb->cm_data = NULL;
		rcb->bcount = 0;
	}
	/*
	 * Submit the request to the adapter.
	 *
	 * Note that this may fail if we're unable to map the request (and
	 * if we ever learn a transport layer other than simple, may fail
	 * if the adapter rejects the command).
	 */
	if ((error = pqi_map_request(rcb)) != BSD_SUCCESS) {
		xpt_freeze_simq(softs->os_specific.sim, 1);
		if (error == EINPROGRESS) {
			/* Release simq in the completion */
			softs->os_specific.pqi_flags |= PQI_FLAG_BUSY;
			error = BSD_SUCCESS;
		} else {
			rcb->req_pending = false;
			ccb->ccb_h.status |= CAM_REQUEUE_REQ | CAM_RELEASE_SIMQ;
			DBG_WARN("Requeue req error = %d target = %d\n", error,
				ccb->ccb_h.target_id);
			pqi_unmap_request(rcb);
			error = EIO;
		}
	}

	DBG_FUNC("OUT error = %d\n", error);

	return error;
}

static inline int
pqi_tmf_status_to_bsd_tmf_status(int pqi_status, rcb_t *rcb)
{
	if (PQI_STATUS_SUCCESS == pqi_status &&
			REQUEST_SUCCESS == rcb->status)
		return BSD_SUCCESS;
	else
		return EIO;
}

/*
 * Abort a task, task management functionality
 */
static int
pqisrc_scsi_abort_task(pqisrc_softstate_t *softs, union ccb *ccb)
{
	struct ccb_hdr *ccb_h = &ccb->ccb_h;
	rcb_t *rcb = NULL;
	rcb_t *prcb = ccb->ccb_h.sim_priv.entries[0].ptr;
	uint32_t tag;
	int rval;

	DBG_FUNC("IN\n");

	tag = pqisrc_get_tag(&softs->taglist);
	rcb = &softs->rcb[tag];
	rcb->tag = tag;

	if (!rcb->dvp) {
		DBG_ERR("dvp is null, tmf type : 0x%x\n", ccb_h->func_code);
		rval = ENXIO;
		goto error_tmf;
	}

	rcb->tm_req = true;

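	/*
	 * prcb identifies the outstanding request to be aborted; rcb
	 * carries the task management function itself.
	 */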
	rval = pqisrc_send_tmf(softs, rcb->dvp, rcb, prcb,
		SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK);

	if ((rval = pqi_tmf_status_to_bsd_tmf_status(rval, rcb)) == BSD_SUCCESS)
		ccb->ccb_h.status = CAM_REQ_ABORTED;

error_tmf:
	os_reset_rcb(rcb);
	pqisrc_put_tag(&softs->taglist, tag);

	DBG_FUNC("OUT rval = %d\n", rval);

	return rval;
}

/*
 * Abort a taskset, task management functionality
 */
static int
pqisrc_scsi_abort_task_set(pqisrc_softstate_t *softs, union ccb *ccb)
{
	struct ccb_hdr *ccb_h = &ccb->ccb_h;
	rcb_t *rcb = NULL;
	uint32_t tag;
	int rval;

	DBG_FUNC("IN\n");

	tag = pqisrc_get_tag(&softs->taglist);
	rcb = &softs->rcb[tag];
	rcb->tag = tag;

	if (!rcb->dvp) {
		DBG_ERR("dvp is null, tmf type : 0x%x\n", ccb_h->func_code);
		rval = ENXIO;
		goto error_tmf;
	}

	rcb->tm_req = true;

	rval = pqisrc_send_tmf(softs, rcb->dvp, rcb, NULL,
			SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK_SET);

	rval = pqi_tmf_status_to_bsd_tmf_status(rval, rcb);

error_tmf:
	os_reset_rcb(rcb);
	pqisrc_put_tag(&softs->taglist, tag);

	DBG_FUNC("OUT rval = %d\n", rval);

	return rval;
}

/*
 * Target reset task management functionality
 */
static int
pqisrc_target_reset(pqisrc_softstate_t *softs, union ccb *ccb)
{
	struct ccb_hdr *ccb_h = &ccb->ccb_h;
	pqi_scsi_dev_t *devp = softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun];
	rcb_t *rcb = NULL;
	uint32_t tag;
	int rval;

	DBG_FUNC("IN\n");

	if (devp == NULL) {
		DBG_ERR("bad target %d, tmf type : 0x%x\n", ccb_h->target_id, ccb_h->func_code);
		return ENXIO;
	}

	tag = pqisrc_get_tag(&softs->taglist);
	rcb = &softs->rcb[tag];
	rcb->tag = tag;

	devp->reset_in_progress = true;

	rcb->tm_req = true;

	rval = pqisrc_send_tmf(softs, devp, rcb, NULL,
		SOP_TASK_MANAGEMENT_LUN_RESET);

	rval = pqi_tmf_status_to_bsd_tmf_status(rval, rcb);
	devp->reset_in_progress = false;

	os_reset_rcb(rcb);
	pqisrc_put_tag(&softs->taglist, tag);

	DBG_FUNC("OUT rval = %d\n", rval);

	return rval;
}

/*
 * cam entry point of the smartpqi module.
 */
static void
smartpqi_cam_action(struct cam_sim *sim, union ccb *ccb)
{
	struct pqisrc_softstate *softs = cam_sim_softc(sim);
	struct ccb_hdr *ccb_h = &ccb->ccb_h;

	DBG_FUNC("IN\n");

	switch (ccb_h->func_code) {
		case XPT_SCSI_IO:
		{
			if (!pqisrc_io_start(sim, ccb)) {
				return;
			}
			break;
		}
		case XPT_CALC_GEOMETRY:
		{
			struct ccb_calc_geometry *ccg;
			ccg = &ccb->ccg;
			if (ccg->block_size == 0) {
				ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
				ccb->ccb_h.status |= CAM_REQ_INVALID;
				break;
			}
			cam_calc_geometry(ccg, /* extended */ 1);
			ccb->ccb_h.status = CAM_REQ_CMP;
			break;
		}
		case XPT_PATH_INQ:
		{
			update_sim_properties(sim, &ccb->cpi);
			ccb->ccb_h.status = CAM_REQ_CMP;
			break;
		}
		case XPT_GET_TRAN_SETTINGS:
			get_transport_settings(softs, &ccb->cts);
			ccb->ccb_h.status = CAM_REQ_CMP;
			break;
		case XPT_ABORT:
			if (pqisrc_scsi_abort_task(softs, ccb)) {
				ccb->ccb_h.status = CAM_REQ_CMP_ERR;
				xpt_done(ccb);
				DBG_ERR("Abort task failed on %d\n",
					ccb->ccb_h.target_id);
				return;
			}
			break;
		case XPT_TERM_IO:
			if (pqisrc_scsi_abort_task_set(softs, ccb)) {
				ccb->ccb_h.status = CAM_REQ_CMP_ERR;
				DBG_ERR("Abort task set failed on %d\n",
					ccb->ccb_h.target_id);
				xpt_done(ccb);
				return;
			}
			break;
		case XPT_RESET_DEV:
			if (pqisrc_target_reset(softs, ccb)) {
				ccb->ccb_h.status = CAM_REQ_CMP_ERR;
				DBG_ERR("Target reset failed on %d\n",
					ccb->ccb_h.target_id);
				xpt_done(ccb);
				return;
			} else {
				ccb->ccb_h.status = CAM_REQ_CMP;
			}
			break;
		case XPT_RESET_BUS:
			ccb->ccb_h.status = CAM_REQ_CMP;
			break;
		case XPT_SET_TRAN_SETTINGS:
			ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
			return;
		default:
			DBG_WARN("UNSUPPORTED FUNC CODE\n");
			ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
			break;
	}
	xpt_done(ccb);

	DBG_FUNC("OUT\n");
}

/*
 * Function to poll for responses when interrupts are unavailable.
 * This also supports crash dumps.
 */
static void
smartpqi_poll(struct cam_sim *sim)
{
	struct pqisrc_softstate *softs = cam_sim_softc(sim);
	int i;

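	/*
	 * Response queue 0 appears to be reserved for controller events,
	 * so only the I/O response queues (1..intr_count-1) are polled.
	 */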
	for (i = 1; i < softs->intr_count; i++)
		pqisrc_process_response_queue(softs, i);
}

/*
 * Function to adjust the queue depth of a device
 */
void
smartpqi_adjust_queue_depth(struct cam_path *path, uint32_t queue_depth)
{
	struct ccb_relsim crs;

	DBG_INFO("IN\n");

	memset(&crs, 0, sizeof(crs));
	xpt_setup_ccb(&crs.ccb_h, path, 5);
	crs.ccb_h.func_code = XPT_REL_SIMQ;
	crs.ccb_h.flags = CAM_DEV_QFREEZE;
	crs.release_flags = RELSIM_ADJUST_OPENINGS;
	crs.openings = queue_depth;
	xpt_action((union ccb *)&crs);
	if (crs.ccb_h.status != CAM_REQ_CMP) {
		printf("XPT_REL_SIMQ failed stat=%d\n", crs.ccb_h.status);
	}

	DBG_INFO("OUT\n");
}

/*
 * Async callback; adjusts a device's queue depth when the device
 * is discovered.
 */
static void
smartpqi_async(void *callback_arg, u_int32_t code,
		struct cam_path *path, void *arg)
{
	struct pqisrc_softstate *softs;

	softs = (struct pqisrc_softstate *)callback_arg;

	DBG_FUNC("IN\n");

	switch (code) {
		case AC_FOUND_DEVICE:
		{
			struct ccb_getdev *cgd;
			cgd = (struct ccb_getdev *)arg;
			if (cgd == NULL) {
				break;
			}
			uint32_t t_id = cgd->ccb_h.target_id;

			if (t_id <= (PQI_CTLR_INDEX - 1)) {
				if (softs != NULL) {
					pqi_scsi_dev_t *dvp = softs->device_list[t_id][cgd->ccb_h.target_lun];
					if (dvp == NULL) {
						DBG_ERR("Target is null, target id=%d\n", t_id);
						break;
					}
					smartpqi_adjust_queue_depth(path,
							dvp->queue_depth);
				}
			}
			break;
		}
		default:
			break;
	}

	DBG_FUNC("OUT\n");
}

/*
 * Function to register sim with CAM layer for smartpqi driver
 */
int
register_sim(struct pqisrc_softstate *softs, int card_index)
{
	int max_transactions;
	union ccb *ccb = NULL;
	int error;
	struct ccb_setasync csa;
	struct cam_sim *sim;

	DBG_FUNC("IN\n");

	max_transactions = softs->max_io_for_scsi_ml;
	softs->os_specific.devq = cam_simq_alloc(max_transactions);
	if (softs->os_specific.devq == NULL) {
		DBG_ERR("cam_simq_alloc failed txns = %d\n",
			max_transactions);
		return ENOMEM;
	}

	sim = cam_sim_alloc(smartpqi_cam_action,
				smartpqi_poll, "smartpqi", softs,
				card_index, &softs->os_specific.cam_lock,
				1, max_transactions, softs->os_specific.devq);
	if (sim == NULL) {
		DBG_ERR("cam_sim_alloc failed txns = %d\n",
			max_transactions);
		cam_simq_free(softs->os_specific.devq);
		return ENOMEM;
	}

	softs->os_specific.sim = sim;
	mtx_lock(&softs->os_specific.cam_lock);
	error = xpt_bus_register(sim, softs->os_specific.pqi_dev, 0);
	if (error != CAM_SUCCESS) {
		DBG_ERR("xpt_bus_register failed errno %d\n", error);
		cam_sim_free(softs->os_specific.sim, FALSE);
		cam_simq_free(softs->os_specific.devq);
		mtx_unlock(&softs->os_specific.cam_lock);
		return ENXIO;
	}

	softs->os_specific.sim_registered = TRUE;
	ccb = xpt_alloc_ccb_nowait();
	if (ccb == NULL) {
		DBG_ERR("xpt_alloc_ccb_nowait failed\n");
		mtx_unlock(&softs->os_specific.cam_lock);
		return ENXIO;
	}

	if (xpt_create_path(&ccb->ccb_h.path, NULL,
			cam_sim_path(softs->os_specific.sim),
			CAM_TARGET_WILDCARD,
			CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		DBG_ERR("xpt_create_path failed\n");
		xpt_free_ccb(ccb);
		xpt_bus_deregister(cam_sim_path(softs->os_specific.sim));
		cam_sim_free(softs->os_specific.sim, TRUE);
		mtx_unlock(&softs->os_specific.cam_lock);
		return ENXIO;
	}

	/*
	 * Register a callback so the per-target queue depth, which is
	 * derived from the FW, can be set once the device is found.
	 */
	softs->os_specific.path = ccb->ccb_h.path;
	memset(&csa, 0, sizeof(csa));
	xpt_setup_ccb(&csa.ccb_h, softs->os_specific.path, 5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = AC_FOUND_DEVICE;
	csa.callback = smartpqi_async;
	csa.callback_arg = softs;
	xpt_action((union ccb *)&csa);
	if (csa.ccb_h.status != CAM_REQ_CMP) {
		DBG_ERR("Unable to register smartpqi_async handler: %d!\n",
			csa.ccb_h.status);
	}

	mtx_unlock(&softs->os_specific.cam_lock);
	DBG_INFO("OUT\n");

	return BSD_SUCCESS;
}

/*
 * Function to deregister smartpqi sim from cam layer
 */
void
deregister_sim(struct pqisrc_softstate *softs)
{
	struct ccb_setasync csa;

	DBG_FUNC("IN\n");

	if (softs->os_specific.mtx_init) {
		mtx_lock(&softs->os_specific.cam_lock);
	}

	memset(&csa, 0, sizeof(csa));
	xpt_setup_ccb(&csa.ccb_h, softs->os_specific.path, 5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = 0;
	csa.callback = smartpqi_async;
	csa.callback_arg = softs;
	xpt_action((union ccb *)&csa);
	xpt_free_path(softs->os_specific.path);

	if (softs->os_specific.sim) {
		xpt_release_simq(softs->os_specific.sim, 0);
		xpt_bus_deregister(cam_sim_path(softs->os_specific.sim));
		softs->os_specific.sim_registered = FALSE;
		cam_sim_free(softs->os_specific.sim, FALSE);
		softs->os_specific.sim = NULL;
	}

	if (softs->os_specific.mtx_init) {
		mtx_unlock(&softs->os_specific.cam_lock);
	}
	if (softs->os_specific.devq != NULL) {
		cam_simq_free(softs->os_specific.devq);
	}
	if (softs->os_specific.mtx_init) {
		mtx_destroy(&softs->os_specific.cam_lock);
		softs->os_specific.mtx_init = FALSE;
	}

	mtx_destroy(&softs->os_specific.map_lock);

	DBG_FUNC("OUT\n");
}

void
os_rescan_target(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
{
	struct cam_path *tmppath;

	DBG_FUNC("IN\n");

	if (softs->os_specific.sim_registered) {
		if (xpt_create_path(&tmppath, NULL,
			cam_sim_path(softs->os_specific.sim),
			device->target, device->lun) != CAM_REQ_CMP) {
			DBG_ERR("unable to create path for async event! Bus: %d Target: %d Lun: %d\n",
				device->bus, device->target, device->lun);
			return;
		}
		xpt_async(AC_INQ_CHANGED, tmppath, NULL);
		xpt_free_path(tmppath);
	}

	device->scsi_rescan = false;

	DBG_FUNC("OUT\n");
}
1354