xref: /freebsd/sys/dev/smartpqi/smartpqi_cam.c (revision 5956d97f4b3204318ceb6aa9c77bd0bc6ea87a41)
/*-
 * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* $FreeBSD$ */
/*
 * CAM interface for smartpqi driver
 */

#include "smartpqi_includes.h"

/*
 * Set cam sim properties of the smartpqi adapter.
 */
static void
update_sim_properties(struct cam_sim *sim, struct ccb_pathinq *cpi)
{
	pqisrc_softstate_t *softs = (struct pqisrc_softstate *)
					cam_sim_softc(sim);

	device_t dev = softs->os_specific.pqi_dev;

	DBG_FUNC("IN\n");

	cpi->version_num = 1;
	cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
	cpi->target_sprt = 0;
	cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
	cpi->hba_eng_cnt = 0;
	cpi->max_lun = PQI_MAX_MULTILUN;
	cpi->max_target = 1088;
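	/*
	 * Largest transfer we can map: one PAGE_SIZE segment per S/G
	 * element, less one (presumably reserving a segment for a
	 * page-unaligned first buffer).
	 */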
	cpi->maxio = (softs->pqi_cap.max_sg_elem - 1) * PAGE_SIZE;
	cpi->initiator_id = 255;
	strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
	strlcpy(cpi->hba_vid, "Microsemi", HBA_IDLEN);
	strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
	cpi->unit_number = cam_sim_unit(sim);
	cpi->bus_id = cam_sim_bus(sim);
	cpi->base_transfer_speed = 1200000; /* Base bus speed in KB/sec */
	cpi->protocol = PROTO_SCSI;
	cpi->protocol_version = SCSI_REV_SPC4;
	cpi->transport = XPORT_SPI;
	cpi->transport_version = 2;
	cpi->ccb_h.status = CAM_REQ_CMP;
	cpi->hba_vendor = pci_get_vendor(dev);
	cpi->hba_device = pci_get_device(dev);
	cpi->hba_subvendor = pci_get_subvendor(dev);
	cpi->hba_subdevice = pci_get_subdevice(dev);

	DBG_FUNC("OUT\n");
}

/*
 * Get transport settings of the smartpqi adapter
 */
static void
get_transport_settings(struct pqisrc_softstate *softs,
		struct ccb_trans_settings *cts)
{
	struct ccb_trans_settings_scsi	*scsi = &cts->proto_specific.scsi;
	struct ccb_trans_settings_sas	*sas = &cts->xport_specific.sas;
	struct ccb_trans_settings_spi	*spi = &cts->xport_specific.spi;

	DBG_FUNC("IN\n");

	cts->protocol = PROTO_SCSI;
	cts->protocol_version = SCSI_REV_SPC4;
	cts->transport = XPORT_SPI;
	cts->transport_version = 2;
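	/* Advertise disconnection and tagged command queueing. */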
	spi->valid = CTS_SPI_VALID_DISC;
	spi->flags = CTS_SPI_FLAGS_DISC_ENB;
	scsi->valid = CTS_SCSI_VALID_TQ;
	scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
	sas->valid = CTS_SAS_VALID_SPEED;
	cts->ccb_h.status = CAM_REQ_CMP;

	DBG_FUNC("OUT\n");
}

/*
 * Add the target to the CAM layer and rescan when a new device is found.
 */
void
os_add_device(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
{
	union ccb *ccb;

	DBG_FUNC("IN\n");

	if (softs->os_specific.sim_registered) {
		if ((ccb = xpt_alloc_ccb_nowait()) == NULL) {
			DBG_ERR("rescan failed (can't allocate CCB)\n");
			return;
		}

		if (xpt_create_path(&ccb->ccb_h.path, NULL,
			cam_sim_path(softs->os_specific.sim),
			device->target, device->lun) != CAM_REQ_CMP) {
			DBG_ERR("rescan failed (can't create path)\n");
			xpt_free_ccb(ccb);
			return;
		}
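		/* xpt_rescan() takes ownership of the CCB and frees it. */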
		xpt_rescan(ccb);
	}

	DBG_FUNC("OUT\n");
}

/*
 * Remove the device from the CAM layer when deleted or hot removed.
 */
void
os_remove_device(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
{
	struct cam_path *tmppath;

	DBG_FUNC("IN\n");

	if (softs->os_specific.sim_registered) {
		if (xpt_create_path(&tmppath, NULL,
			cam_sim_path(softs->os_specific.sim),
			device->target, device->lun) != CAM_REQ_CMP) {
			DBG_ERR("unable to create path for async event\n");
			return;
		}
		xpt_async(AC_LOST_DEVICE, tmppath, NULL);
		xpt_free_path(tmppath);
		softs->device_list[device->target][device->lun] = NULL;
		pqisrc_free_device(softs, device);
	}

	DBG_FUNC("OUT\n");
}

/*
 * Function to release the frozen simq
 */
static void
pqi_release_camq(rcb_t *rcb)
{
	pqisrc_softstate_t *softs;
	struct ccb_scsiio *csio;

	csio = (struct ccb_scsiio *)&rcb->cm_ccb->csio;
	softs = rcb->softs;

	DBG_FUNC("IN\n");

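	/*
	 * If the simq was frozen for lack of resources, either release it
	 * now or mark the CCB so CAM releases it when this I/O completes.
	 */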
	if (softs->os_specific.pqi_flags & PQI_FLAG_BUSY) {
		softs->os_specific.pqi_flags &= ~PQI_FLAG_BUSY;
		if (csio->ccb_h.status & CAM_RELEASE_SIMQ)
			xpt_release_simq(xpt_path_sim(csio->ccb_h.path), 0);
		else
			csio->ccb_h.status |= CAM_RELEASE_SIMQ;
	}

	DBG_FUNC("OUT\n");
}

static void
pqi_synch_request(rcb_t *rcb)
{
	pqisrc_softstate_t *softs = rcb->softs;

	DBG_IO("IN rcb = %p\n", rcb);

	if (!(rcb->cm_flags & PQI_CMD_MAPPED))
		return;

	if (rcb->bcount != 0) {
		if (rcb->data_dir == SOP_DATA_DIR_FROM_DEVICE)
			bus_dmamap_sync(softs->os_specific.pqi_buffer_dmat,
					rcb->cm_datamap,
					BUS_DMASYNC_POSTREAD);
		if (rcb->data_dir == SOP_DATA_DIR_TO_DEVICE)
			bus_dmamap_sync(softs->os_specific.pqi_buffer_dmat,
					rcb->cm_datamap,
					BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(softs->os_specific.pqi_buffer_dmat,
					rcb->cm_datamap);
	}
	rcb->cm_flags &= ~PQI_CMD_MAPPED;

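	/* Free the driver's copy of the scatter/gather list. */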
	if (rcb->sgt && rcb->nseg)
		os_mem_free(rcb->softs, (void *)rcb->sgt,
				rcb->nseg * sizeof(sgt_t));

	DBG_IO("OUT\n");
}

/*
 * Function to dma-unmap the completed request
 */
static inline void
pqi_unmap_request(rcb_t *rcb)
{
	DBG_IO("IN rcb = %p\n", rcb);

	pqi_synch_request(rcb);
	pqisrc_put_tag(&rcb->softs->taglist, rcb->tag);

	DBG_IO("OUT\n");
}

/*
 * Construct a meaningful LD name for the volume here.
 */
static void
smartpqi_fix_ld_inquiry(pqisrc_softstate_t *softs, struct ccb_scsiio *csio)
{
	struct scsi_inquiry_data *inq = NULL;
	uint8_t *cdb = NULL;
	pqi_scsi_dev_t *device = NULL;

	DBG_FUNC("IN\n");

	if (pqisrc_ctrl_offline(softs))
		return;

	cdb = (csio->ccb_h.flags & CAM_CDB_POINTER) ?
		(uint8_t *)csio->cdb_io.cdb_ptr : csio->cdb_io.cdb_bytes;
	if (cdb[0] == INQUIRY &&
		(cdb[1] & SI_EVPD) == 0 &&
		(csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN &&
		csio->dxfer_len >= SHORT_INQUIRY_LENGTH) {

		inq = (struct scsi_inquiry_data *)csio->data_ptr;

		device = softs->device_list[csio->ccb_h.target_id][csio->ccb_h.target_lun];

		/*
		 * Let the disks be probed and dealt with via CAM. Only for
		 * logical volumes fall through and tweak the inquiry data.
		 */
		if (!device || !pqisrc_is_logical_device(device) ||
				(device->devtype != DISK_DEVICE) ||
				pqisrc_is_external_raid_device(device)) {
			return;
		}

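		/*
		 * INQUIRY ASCII fields are fixed width and not
		 * NUL-terminated; strncpy() is used deliberately here.
		 */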
		strncpy(inq->vendor, device->vendor,
				SID_VENDOR_SIZE);
		strncpy(inq->product,
				pqisrc_raidlevel_to_string(device->raid_level),
				SID_PRODUCT_SIZE);
		strncpy(inq->revision, device->volume_offline ? "OFF" : "OK",
				SID_REVISION_SIZE);
	}

	DBG_FUNC("OUT\n");
}

static void
pqi_complete_scsi_io(struct ccb_scsiio *csio, rcb_t *rcb)
{
	uint32_t release_tag;
	pqisrc_softstate_t *softs = rcb->softs;

	DBG_IO("IN scsi io = %p\n", csio);

	pqi_synch_request(rcb);
	smartpqi_fix_ld_inquiry(rcb->softs, csio);
	pqi_release_camq(rcb);
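	/* os_reset_rcb() clears rcb->tag, so save the tag first. */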
	release_tag = rcb->tag;
	os_reset_rcb(rcb);
	pqisrc_put_tag(&softs->taglist, release_tag);
	xpt_done((union ccb *)csio);

	DBG_IO("OUT\n");
}

/*
 * Handle completion of a command - pass results back through the CCB
 */
void
os_io_response_success(rcb_t *rcb)
{
	struct ccb_scsiio *csio;

	DBG_IO("IN rcb = %p\n", rcb);

	if (rcb == NULL)
		panic("rcb is null");

	csio = (struct ccb_scsiio *)&rcb->cm_ccb->csio;

	if (csio == NULL)
		panic("csio is null");

	rcb->status = REQUEST_SUCCESS;
	csio->ccb_h.status = CAM_REQ_CMP;

	pqi_complete_scsi_io(csio, rcb);

	DBG_IO("OUT\n");
}

static void
copy_sense_data_to_csio(struct ccb_scsiio *csio,
		uint8_t *sense_data, uint16_t sense_data_len)
{
	DBG_IO("IN csio = %p\n", csio);

	memset(&csio->sense_data, 0, csio->sense_len);

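	/* Never copy more than the CCB's sense buffer can hold. */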
	sense_data_len = (sense_data_len > csio->sense_len) ?
		csio->sense_len : sense_data_len;

	if (sense_data)
		memcpy(&csio->sense_data, sense_data, sense_data_len);

	if (csio->sense_len > sense_data_len)
		csio->sense_resid = csio->sense_len - sense_data_len;
	else
		csio->sense_resid = 0;

	DBG_IO("OUT\n");
}

/*
 * Error response handling for RAID IO
 */
void
os_raid_response_error(rcb_t *rcb, raid_path_error_info_elem_t *err_info)
{
	struct ccb_scsiio *csio;
	pqisrc_softstate_t *softs;

	DBG_IO("IN\n");

	csio = (struct ccb_scsiio *)&rcb->cm_ccb->csio;

	if (csio == NULL)
		panic("csio is null");

	softs = rcb->softs;

	csio->ccb_h.status = CAM_REQ_CMP_ERR;

	if (!err_info || !rcb->dvp) {
		DBG_ERR("error info or device unavailable! err_info = %p, rcb->dvp = %p\n",
				err_info, rcb->dvp);
		goto error_out;
	}

	csio->scsi_status = err_info->status;

	if (csio->ccb_h.func_code == XPT_SCSI_IO) {
		/*
		 * Handle specific SCSI status values.
		 */
		switch (csio->scsi_status) {
			case PQI_RAID_STATUS_QUEUE_FULL:
				csio->ccb_h.status = CAM_REQ_CMP;
				DBG_ERR("Queue Full error\n");
				break;
			/* check condition, sense data included */
			case PQI_RAID_STATUS_CHECK_CONDITION:
				{
					uint16_t sense_data_len =
						LE_16(err_info->sense_data_len);
					uint8_t *sense_data = NULL;
					if (sense_data_len)
						sense_data = err_info->data;
					copy_sense_data_to_csio(csio, sense_data, sense_data_len);
					csio->ccb_h.status = CAM_SCSI_STATUS_ERROR
							| CAM_AUTOSNS_VALID
							| CAM_REQ_CMP_ERR;
				}
				break;

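			/*
			 * The controller transferred less data than
			 * requested; report the residual and complete
			 * the command successfully.
			 */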
			case PQI_RAID_DATA_IN_OUT_UNDERFLOW:
				{
					uint32_t resid = 0;
					resid = rcb->bcount - err_info->data_out_transferred;
					csio->resid = resid;
					csio->ccb_h.status = CAM_REQ_CMP;
				}
				break;
			default:
				csio->ccb_h.status = CAM_REQ_CMP;
				break;
		}
	}

error_out:
	pqi_complete_scsi_io(csio, rcb);

	DBG_IO("OUT\n");
}

/*
 * Error response handling for AIO.
 */
void
os_aio_response_error(rcb_t *rcb, aio_path_error_info_elem_t *err_info)
{
	struct ccb_scsiio *csio;
	pqisrc_softstate_t *softs;

	DBG_IO("IN\n");

	if (rcb == NULL)
		panic("rcb is null");

	rcb->status = REQUEST_SUCCESS;
	csio = (struct ccb_scsiio *)&rcb->cm_ccb->csio;
	if (csio == NULL)
		panic("csio is null");

	softs = rcb->softs;

	if (!err_info || !rcb->dvp) {
		csio->ccb_h.status = CAM_REQ_CMP_ERR;
		DBG_ERR("error info or device unavailable! err_info = %p, rcb->dvp = %p\n",
				err_info, rcb->dvp);
		goto error_out;
	}

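	/*
	 * TMF responses are consumed by the waiting submitter through
	 * rcb->req_pending (the early returns below); only regular I/O
	 * completes the CCB at the bottom of this function.
	 */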
	switch (err_info->service_resp) {
		case PQI_AIO_SERV_RESPONSE_COMPLETE:
			csio->ccb_h.status = err_info->status;
			break;
		case PQI_AIO_SERV_RESPONSE_FAILURE:
			switch (err_info->status) {
				case PQI_AIO_STATUS_IO_ABORTED:
					csio->ccb_h.status = CAM_REQ_ABORTED;
					DBG_WARN_BTL(rcb->dvp, "IO aborted\n");
					break;
				case PQI_AIO_STATUS_UNDERRUN:
					csio->ccb_h.status = CAM_REQ_CMP;
					csio->resid =
						LE_32(err_info->resd_count);
					break;
				case PQI_AIO_STATUS_OVERRUN:
					csio->ccb_h.status = CAM_REQ_CMP;
					break;
				case PQI_AIO_STATUS_AIO_PATH_DISABLED:
					DBG_WARN_BTL(rcb->dvp, "AIO Path Disabled\n");
					/* Timed out TMF response comes here */
					if (rcb->tm_req) {
						rcb->req_pending = false;
						rcb->status = REQUEST_SUCCESS;
						DBG_ERR("AIO Disabled for TMF\n");
						return;
					}
					rcb->dvp->aio_enabled = false;
					rcb->dvp->offload_enabled = false;
					csio->ccb_h.status |= CAM_REQUEUE_REQ;
					break;
				case PQI_AIO_STATUS_IO_ERROR:
				case PQI_AIO_STATUS_IO_NO_DEVICE:
				case PQI_AIO_STATUS_INVALID_DEVICE:
				default:
					DBG_WARN_BTL(rcb->dvp, "IO Error/Invalid/No device\n");
					csio->ccb_h.status |=
						CAM_SCSI_STATUS_ERROR;
					break;
			}
			break;
		case PQI_AIO_SERV_RESPONSE_TMF_COMPLETE:
		case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED:
			DBG_ERR("PQI_AIO_SERV_RESPONSE_TMF %s\n",
				(err_info->service_resp == PQI_AIO_SERV_RESPONSE_TMF_COMPLETE) ? "COMPLETE" : "SUCCEEDED");
			rcb->status = REQUEST_SUCCESS;
			rcb->req_pending = false;
			return;
		case PQI_AIO_SERV_RESPONSE_TMF_REJECTED:
		case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN:
			DBG_ERR("PQI_AIO_SERV_RESPONSE_TMF %s\n",
				(err_info->service_resp == PQI_AIO_SERV_RESPONSE_TMF_REJECTED) ? "REJECTED" : "INCORRECT LUN");
			rcb->status = REQUEST_FAILED;
			rcb->req_pending = false;
			return;
		default:
			DBG_WARN_BTL(rcb->dvp, "Scsi Status Error\n");
			csio->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
			break;
	}

	if (err_info->data_pres == DATA_PRESENT_SENSE_DATA) {
		uint8_t *sense_data = NULL;
		unsigned sense_data_len = LE_16(err_info->data_len);

		csio->scsi_status = PQI_AIO_STATUS_CHECK_CONDITION;
		if (sense_data_len)
			sense_data = err_info->data;
		DBG_INFO("SCSI_STATUS_CHECK_COND  sense size %u\n",
			sense_data_len);
		copy_sense_data_to_csio(csio, sense_data, sense_data_len);
		csio->ccb_h.status = CAM_SCSI_STATUS_ERROR | CAM_AUTOSNS_VALID;
	}

error_out:
	pqi_complete_scsi_io(csio, rcb);
	DBG_IO("OUT\n");
}

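/*
 * Freeze the device queue once for this CCB; the CAM_DEV_QFRZN flag
 * tells the completion path that the queue must be released.
 */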
static void
pqi_freeze_ccb(union ccb *ccb)
{
	if ((ccb->ccb_h.status & CAM_DEV_QFRZN) == 0) {
		ccb->ccb_h.status |= CAM_DEV_QFRZN;
		xpt_freeze_devq(ccb->ccb_h.path, 1);
	}
}

/*
 * Command-mapping helper function - populate this command's s/g table.
 */
static void
pqi_request_map_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	rcb_t *rcb = (rcb_t *)arg;
	pqisrc_softstate_t *softs = rcb->softs;
	union ccb *ccb;

	if (error || nseg > softs->pqi_cap.max_sg_elem) {
		DBG_ERR_BTL(rcb->dvp, "map failed err = %d or nseg(%d) > sgelem(%d)\n",
			error, nseg, softs->pqi_cap.max_sg_elem);
		goto error_io;
	}

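	/* Allocate the driver's S/G table and copy in the busdma segments. */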
	rcb->sgt = os_mem_alloc(softs, nseg * sizeof(sgt_t));

	if (!rcb->sgt) {
		DBG_ERR_BTL(rcb->dvp, "os_mem_alloc() failed; nseg = %d\n", nseg);
		goto error_io;
	}

	rcb->nseg = nseg;
	for (int i = 0; i < nseg; i++) {
		rcb->sgt[i].addr = segs[i].ds_addr;
		rcb->sgt[i].len = segs[i].ds_len;
		rcb->sgt[i].flags = 0;
	}

	if (rcb->data_dir == SOP_DATA_DIR_FROM_DEVICE)
		bus_dmamap_sync(softs->os_specific.pqi_buffer_dmat,
			rcb->cm_datamap, BUS_DMASYNC_PREREAD);
	if (rcb->data_dir == SOP_DATA_DIR_TO_DEVICE)
		bus_dmamap_sync(softs->os_specific.pqi_buffer_dmat,
			rcb->cm_datamap, BUS_DMASYNC_PREWRITE);

	/* Call IO functions depending on pd or ld */
	rcb->status = REQUEST_PENDING;

	error = pqisrc_build_send_io(softs, rcb);

	if (error) {
		rcb->req_pending = false;
		DBG_ERR_BTL(rcb->dvp, "Build IO failed, error = %d\n", error);
	} else {
		/* The I/O was successfully submitted to the device. */
		return;
	}

error_io:
	ccb = rcb->cm_ccb;
	ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
	pqi_freeze_ccb(ccb);
	pqi_unmap_request(rcb);
	xpt_done(ccb);
	return;
}

/*
 * Function to dma-map the request buffer
 */
static int
pqi_map_request(rcb_t *rcb)
{
	pqisrc_softstate_t *softs = rcb->softs;
	int bsd_status = BSD_SUCCESS;
	union ccb *ccb = rcb->cm_ccb;

	DBG_FUNC("IN\n");

	/* check that mapping is necessary */
	if (rcb->cm_flags & PQI_CMD_MAPPED)
		return BSD_SUCCESS;

	rcb->cm_flags |= PQI_CMD_MAPPED;

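	/*
	 * bus_dmamap_load_ccb() may defer the mapping; EINPROGRESS means
	 * pqi_request_map_helper() will submit the I/O once the callback
	 * runs.
	 */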
	if (rcb->bcount) {
		bsd_status = bus_dmamap_load_ccb(softs->os_specific.pqi_buffer_dmat,
			rcb->cm_datamap, ccb, pqi_request_map_helper, rcb, 0);
		if (bsd_status != BSD_SUCCESS && bsd_status != EINPROGRESS) {
			DBG_ERR_BTL(rcb->dvp, "bus_dmamap_load_ccb failed, return status = %d transfer length = %d\n",
					bsd_status, rcb->bcount);
			return bsd_status;
		}
	} else {
		/*
		 * Set up the command to go to the controller.  If there are no
		 * data buffers associated with the command then it can bypass
		 * busdma.
		 */
		/* Call IO functions depending on pd or ld */
		rcb->status = REQUEST_PENDING;

		if (pqisrc_build_send_io(softs, rcb) != PQI_STATUS_SUCCESS) {
			bsd_status = EIO;
		}
	}

	DBG_FUNC("OUT error = %d\n", bsd_status);

	return bsd_status;
}

/*
 * Function to clear the request control block
 */
void
os_reset_rcb(rcb_t *rcb)
{
	rcb->error_info = NULL;
	rcb->req = NULL;
	rcb->status = -1;
	rcb->tag = INVALID_ELEM;
	rcb->dvp = NULL;
	rcb->cdbp = NULL;
	rcb->softs = NULL;
	rcb->cm_flags = 0;
	rcb->cm_data = NULL;
	rcb->bcount = 0;
	rcb->nseg = 0;
	rcb->sgt = NULL;
	rcb->cm_ccb = NULL;
	rcb->encrypt_enable = false;
	rcb->ioaccel_handle = 0;
	rcb->resp_qid = 0;
	rcb->req_pending = false;
	rcb->tm_req = false;
}

/*
 * Callback function for the lun rescan
 */
static void
smartpqi_lunrescan_cb(struct cam_periph *periph, union ccb *ccb)
{
	xpt_free_path(ccb->ccb_h.path);
	xpt_free_ccb(ccb);
}

/*
 * Function to rescan the lun
 */
static void
smartpqi_lun_rescan(struct pqisrc_softstate *softs, int target,
			int lun)
{
	union ccb *ccb = NULL;
	cam_status status = 0;
	struct cam_path *path = NULL;

	DBG_FUNC("IN\n");

	ccb = xpt_alloc_ccb_nowait();
	if (ccb == NULL) {
		DBG_ERR("Unable to alloc ccb for lun rescan\n");
		return;
	}

	status = xpt_create_path(&path, NULL,
				cam_sim_path(softs->os_specific.sim), target, lun);
	if (status != CAM_REQ_CMP) {
		DBG_ERR("xpt_create_path status(%d) != CAM_REQ_CMP\n",
				status);
		xpt_free_ccb(ccb);
		return;
	}

	bzero(ccb, sizeof(union ccb));
	xpt_setup_ccb(&ccb->ccb_h, path, 5);
	ccb->ccb_h.func_code = XPT_SCAN_LUN;
	ccb->ccb_h.cbfcnp = smartpqi_lunrescan_cb;
	ccb->crcn.flags = CAM_FLAG_NONE;

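	/* smartpqi_lunrescan_cb() frees both the path and the CCB. */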
	xpt_action(ccb);

	DBG_FUNC("OUT\n");
}

/*
 * Function to rescan the lun under each target
 */
void
smartpqi_target_rescan(struct pqisrc_softstate *softs)
{
	int target = 0, lun = 0;

	DBG_FUNC("IN\n");

	for (target = 0; target < PQI_MAX_DEVICES; target++) {
		for (lun = 0; lun < PQI_MAX_MULTILUN; lun++) {
			if (softs->device_list[target][lun]) {
				smartpqi_lun_rescan(softs, target, lun);
			}
		}
	}

	DBG_FUNC("OUT\n");
}

/*
 * Return the SOP task attribute for the tagged command queueing mode
 * requested by the CCB.
 */
uint8_t
os_get_task_attr(rcb_t *rcb)
{
	union ccb *ccb = rcb->cm_ccb;
	uint8_t tag_action = SOP_TASK_ATTRIBUTE_SIMPLE;

	switch (ccb->csio.tag_action) {
	case MSG_HEAD_OF_Q_TAG:
		tag_action = SOP_TASK_ATTRIBUTE_HEAD_OF_QUEUE;
		break;
	case MSG_ORDERED_Q_TAG:
		tag_action = SOP_TASK_ATTRIBUTE_ORDERED;
		break;
	case MSG_SIMPLE_Q_TAG:
	default:
		tag_action = SOP_TASK_ATTRIBUTE_SIMPLE;
		break;
	}
	return tag_action;
}

/*
 * Complete all outstanding commands
 */
void
os_complete_outstanding_cmds_nodevice(pqisrc_softstate_t *softs)
{
	int tag = 0;
	pqi_scsi_dev_t *dvp = NULL;

	DBG_FUNC("IN\n");

	for (tag = 1; tag <= softs->max_outstanding_io; tag++) {
		rcb_t *prcb = &softs->rcb[tag];
		dvp = prcb->dvp;
		if (prcb->req_pending && prcb->cm_ccb) {
			prcb->req_pending = false;
			prcb->cm_ccb->ccb_h.status = CAM_REQ_ABORTED | CAM_REQ_CMP;
			pqi_complete_scsi_io(&prcb->cm_ccb->csio, prcb);
			if (dvp)
				pqisrc_decrement_device_active_io(softs, dvp);
		}
	}

	DBG_FUNC("OUT\n");
}

/*
 * IO handling functionality entry point
 */
static int
pqisrc_io_start(struct cam_sim *sim, union ccb *ccb)
{
	rcb_t *rcb;
	uint32_t tag, no_transfer = 0;
	pqisrc_softstate_t *softs = (struct pqisrc_softstate *)
					cam_sim_softc(sim);
	int32_t error;
	pqi_scsi_dev_t *dvp;

	DBG_FUNC("IN\n");

	if (softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun] == NULL) {
		ccb->ccb_h.status = CAM_DEV_NOT_THERE;
		DBG_INFO("Device = %d not there\n", ccb->ccb_h.target_id);
		return ENXIO;
	}

	dvp = softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun];
	/* Check controller state */
	if (IN_PQI_RESET(softs)) {
		ccb->ccb_h.status = CAM_SCSI_BUS_RESET
					| CAM_BUSY | CAM_REQ_INPROG;
		DBG_WARN("Device = %d BUSY/IN_RESET\n", ccb->ccb_h.target_id);
		return ENXIO;
	}
	/* Check device state */
	if (pqisrc_ctrl_offline(softs) || DEV_GONE(dvp)) {
		ccb->ccb_h.status = CAM_DEV_NOT_THERE | CAM_REQ_CMP;
		DBG_WARN("Device = %d GONE/OFFLINE\n", ccb->ccb_h.target_id);
		return ENXIO;
	}
	/* Check device reset */
	if (DEVICE_RESET(dvp)) {
		ccb->ccb_h.status = CAM_SCSI_BUSY | CAM_REQ_INPROG | CAM_BUSY;
		DBG_WARN("Device %d reset returned busy\n", ccb->ccb_h.target_id);
		return EBUSY;
	}

	if (dvp->expose_device == false) {
		ccb->ccb_h.status = CAM_DEV_NOT_THERE;
		DBG_INFO("Device = %d not exposed\n", ccb->ccb_h.target_id);
		return ENXIO;
	}

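	/*
	 * Grab a free command tag; if the pool is empty, freeze the simq
	 * until an in-flight command completes and releases it.
	 */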
	tag = pqisrc_get_tag(&softs->taglist);
	if (tag == INVALID_ELEM) {
		DBG_ERR("Get Tag failed\n");
		xpt_freeze_simq(softs->os_specific.sim, 1);
		softs->os_specific.pqi_flags |= PQI_FLAG_BUSY;
		ccb->ccb_h.status |= (CAM_REQUEUE_REQ | CAM_RELEASE_SIMQ);
		return EIO;
	}

	DBG_IO("tag = %d &softs->taglist : %p\n", tag, &softs->taglist);

	rcb = &softs->rcb[tag];
	os_reset_rcb(rcb);
	rcb->tag = tag;
	rcb->softs = softs;
	rcb->cmdlen = ccb->csio.cdb_len;
	ccb->ccb_h.sim_priv.entries[0].ptr = rcb;

	switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
		case CAM_DIR_IN:
			rcb->data_dir = SOP_DATA_DIR_FROM_DEVICE;
			break;
		case CAM_DIR_OUT:
			rcb->data_dir = SOP_DATA_DIR_TO_DEVICE;
			break;
		case CAM_DIR_NONE:
			no_transfer = 1;
			break;
		default:
			DBG_ERR("Unknown Dir\n");
			break;
	}
	rcb->cm_ccb = ccb;
	rcb->dvp = softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun];

	if (!no_transfer) {
		rcb->cm_data = (void *)ccb->csio.data_ptr;
		rcb->bcount = ccb->csio.dxfer_len;
	} else {
		rcb->cm_data = NULL;
		rcb->bcount = 0;
	}
	/*
	 * Submit the request to the adapter.
	 *
	 * Note that this may fail if we're unable to map the request (and
	 * if we ever learn a transport layer other than simple, may fail
	 * if the adapter rejects the command).
	 */
	if ((error = pqi_map_request(rcb)) != BSD_SUCCESS) {
		xpt_freeze_simq(softs->os_specific.sim, 1);
		if (error == EINPROGRESS) {
			/* Release simq in the completion */
			softs->os_specific.pqi_flags |= PQI_FLAG_BUSY;
			error = BSD_SUCCESS;
		} else {
			rcb->req_pending = false;
			ccb->ccb_h.status |= CAM_REQUEUE_REQ | CAM_RELEASE_SIMQ;
			DBG_WARN("Requeue req error = %d target = %d\n", error,
				ccb->ccb_h.target_id);
			pqi_unmap_request(rcb);
			error = EIO;
		}
	}

	DBG_FUNC("OUT error = %d\n", error);

	return error;
}

static inline int
pqi_tmf_status_to_bsd_tmf_status(int pqi_status, rcb_t *rcb)
{
	if (PQI_STATUS_SUCCESS == pqi_status &&
			REQUEST_SUCCESS == rcb->status)
		return BSD_SUCCESS;
	else
		return EIO;
}

/*
 * Abort a task, task management functionality
 */
static int
pqisrc_scsi_abort_task(pqisrc_softstate_t *softs, union ccb *ccb)
{
	struct ccb_hdr *ccb_h = &ccb->ccb_h;
	rcb_t *rcb = NULL;
	rcb_t *prcb = ccb->ccb_h.sim_priv.entries[0].ptr;
	uint32_t tag;
	int rval;

	DBG_FUNC("IN\n");

	tag = pqisrc_get_tag(&softs->taglist);
	rcb = &softs->rcb[tag];
	rcb->tag = tag;

	if (!rcb->dvp) {
		DBG_ERR("dvp is null, tmf type : 0x%x\n", ccb_h->func_code);
		rval = ENXIO;
		goto error_tmf;
	}

	rcb->tm_req = true;

	rval = pqisrc_send_tmf(softs, rcb->dvp, rcb, prcb,
		SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK);

	if ((rval = pqi_tmf_status_to_bsd_tmf_status(rval, rcb)) == BSD_SUCCESS)
		ccb->ccb_h.status = CAM_REQ_ABORTED;

error_tmf:
	os_reset_rcb(rcb);
	pqisrc_put_tag(&softs->taglist, tag);

	DBG_FUNC("OUT rval = %d\n", rval);

	return rval;
}

/*
 * Abort a taskset, task management functionality
 */
static int
pqisrc_scsi_abort_task_set(pqisrc_softstate_t *softs, union ccb *ccb)
{
	struct ccb_hdr *ccb_h = &ccb->ccb_h;
	rcb_t *rcb = NULL;
	uint32_t tag;
	int rval;

	DBG_FUNC("IN\n");

	tag = pqisrc_get_tag(&softs->taglist);
	rcb = &softs->rcb[tag];
	rcb->tag = tag;

	if (!rcb->dvp) {
		DBG_ERR("dvp is null, tmf type : 0x%x\n", ccb_h->func_code);
		rval = ENXIO;
		goto error_tmf;
	}

	rcb->tm_req = true;

	rval = pqisrc_send_tmf(softs, rcb->dvp, rcb, NULL,
			SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK_SET);

	rval = pqi_tmf_status_to_bsd_tmf_status(rval, rcb);

error_tmf:
	os_reset_rcb(rcb);
	pqisrc_put_tag(&softs->taglist, tag);

	DBG_FUNC("OUT rval = %d\n", rval);

	return rval;
}

/*
 * Target reset task management functionality
 */
static int
pqisrc_target_reset(pqisrc_softstate_t *softs, union ccb *ccb)
{
	struct ccb_hdr *ccb_h = &ccb->ccb_h;
	pqi_scsi_dev_t *devp = softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun];
	rcb_t *rcb = NULL;
	uint32_t tag;
	int rval;

	DBG_FUNC("IN\n");

	if (devp == NULL) {
		DBG_ERR("bad target %d, tmf type : 0x%x\n", ccb_h->target_id, ccb_h->func_code);
		return ENXIO;
	}

	tag = pqisrc_get_tag(&softs->taglist);
	rcb = &softs->rcb[tag];
	rcb->tag = tag;

	devp->reset_in_progress = true;

	rcb->tm_req = true;

	rval = pqisrc_send_tmf(softs, devp, rcb, NULL,
		SOP_TASK_MANAGEMENT_LUN_RESET);

	rval = pqi_tmf_status_to_bsd_tmf_status(rval, rcb);
	devp->reset_in_progress = false;

	os_reset_rcb(rcb);
	pqisrc_put_tag(&softs->taglist, tag);

	DBG_FUNC("OUT rval = %d\n", rval);

	return rval;
}

/*
 * CAM entry point of the smartpqi driver.
 */
static void
smartpqi_cam_action(struct cam_sim *sim, union ccb *ccb)
{
	struct pqisrc_softstate *softs = cam_sim_softc(sim);
	struct ccb_hdr *ccb_h = &ccb->ccb_h;

	DBG_FUNC("IN\n");

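	/*
	 * Handlers that complete the CCB themselves (or hand it to the
	 * firmware) return early; everything else falls through to the
	 * common xpt_done() below.
	 */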
	switch (ccb_h->func_code) {
		case XPT_SCSI_IO:
		{
			if (!pqisrc_io_start(sim, ccb)) {
				return;
			}
			break;
		}
		case XPT_CALC_GEOMETRY:
		{
			struct ccb_calc_geometry *ccg;
			ccg = &ccb->ccg;
			if (ccg->block_size == 0) {
				ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
				ccb->ccb_h.status |= CAM_REQ_INVALID;
				break;
			}
			cam_calc_geometry(ccg, /* extended */ 1);
			ccb->ccb_h.status = CAM_REQ_CMP;
			break;
		}
		case XPT_PATH_INQ:
		{
			update_sim_properties(sim, &ccb->cpi);
			ccb->ccb_h.status = CAM_REQ_CMP;
			break;
		}
		case XPT_GET_TRAN_SETTINGS:
			get_transport_settings(softs, &ccb->cts);
			ccb->ccb_h.status = CAM_REQ_CMP;
			break;
		case XPT_ABORT:
			if (pqisrc_scsi_abort_task(softs, ccb)) {
				ccb->ccb_h.status = CAM_REQ_CMP_ERR;
				xpt_done(ccb);
				DBG_ERR("Abort task failed on %d\n",
					ccb->ccb_h.target_id);
				return;
			}
			break;
		case XPT_TERM_IO:
			if (pqisrc_scsi_abort_task_set(softs, ccb)) {
				ccb->ccb_h.status = CAM_REQ_CMP_ERR;
				DBG_ERR("Abort task set failed on %d\n",
					ccb->ccb_h.target_id);
				xpt_done(ccb);
				return;
			}
			break;
		case XPT_RESET_DEV:
			if (pqisrc_target_reset(softs, ccb)) {
				ccb->ccb_h.status = CAM_REQ_CMP_ERR;
				DBG_ERR("Target reset failed on %d\n",
					ccb->ccb_h.target_id);
				xpt_done(ccb);
				return;
			} else {
				ccb->ccb_h.status = CAM_REQ_CMP;
			}
			break;
		case XPT_RESET_BUS:
			ccb->ccb_h.status = CAM_REQ_CMP;
			break;
		case XPT_SET_TRAN_SETTINGS:
			ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
			return;
		default:
			DBG_WARN("UNSUPPORTED FUNC CODE\n");
			ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
			break;
	}
	xpt_done(ccb);

	DBG_FUNC("OUT\n");
}

/*
 * Poll for completed responses when interrupts are unavailable;
 * this also supports taking crash dumps.
 */
static void
smartpqi_poll(struct cam_sim *sim)
{
	struct pqisrc_softstate *softs = cam_sim_softc(sim);
	int i;

	for (i = 1; i < softs->intr_count; i++)
		pqisrc_process_response_queue(softs, i);
}

/*
 * Function to adjust the queue depth of a device
 */
void
smartpqi_adjust_queue_depth(struct cam_path *path, uint32_t queue_depth)
{
	struct ccb_relsim crs;

	DBG_INFO("IN\n");

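	/* Ask CAM to resize this device's command openings. */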
	memset(&crs, 0, sizeof(crs));
	xpt_setup_ccb(&crs.ccb_h, path, 5);
	crs.ccb_h.func_code = XPT_REL_SIMQ;
	crs.ccb_h.flags = CAM_DEV_QFREEZE;
	crs.release_flags = RELSIM_ADJUST_OPENINGS;
	crs.openings = queue_depth;
	xpt_action((union ccb *)&crs);
	if (crs.ccb_h.status != CAM_REQ_CMP) {
		printf("XPT_REL_SIMQ failed stat=%d\n", crs.ccb_h.status);
	}

	DBG_INFO("OUT\n");
}

/*
 * Async callback; adjusts the queue depth when a new device is found.
 */
static void
smartpqi_async(void *callback_arg, u_int32_t code,
		struct cam_path *path, void *arg)
{
	struct pqisrc_softstate *softs;
	softs = (struct pqisrc_softstate *)callback_arg;

	DBG_FUNC("IN\n");

	switch (code) {
		case AC_FOUND_DEVICE:
		{
			struct ccb_getdev *cgd;
			cgd = (struct ccb_getdev *)arg;
			if (cgd == NULL) {
				break;
			}
			uint32_t t_id = cgd->ccb_h.target_id;

			if (t_id <= (PQI_CTLR_INDEX - 1)) {
				if (softs != NULL) {
					pqi_scsi_dev_t *dvp = softs->device_list[t_id][cgd->ccb_h.target_lun];
					if (dvp == NULL) {
						DBG_ERR("Target is null, target id=%d\n", t_id);
						break;
					}
					smartpqi_adjust_queue_depth(path,
							dvp->queue_depth);
				}
			}
			break;
		}
		default:
			break;
	}

	DBG_FUNC("OUT\n");
}

/*
 * Function to register sim with CAM layer for smartpqi driver
 */
int
register_sim(struct pqisrc_softstate *softs, int card_index)
{
	int max_transactions;
	union ccb *ccb = NULL;
	int error;
	struct ccb_setasync csa;
	struct cam_sim *sim;

	DBG_FUNC("IN\n");

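	/* Size the simq to the I/O count exposed to the SCSI midlayer. */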
	max_transactions = softs->max_io_for_scsi_ml;
	softs->os_specific.devq = cam_simq_alloc(max_transactions);
	if (softs->os_specific.devq == NULL) {
		DBG_ERR("cam_simq_alloc failed txns = %d\n",
			max_transactions);
		return ENOMEM;
	}

	sim = cam_sim_alloc(smartpqi_cam_action,
				smartpqi_poll, "smartpqi", softs,
				card_index, &softs->os_specific.cam_lock,
				1, max_transactions, softs->os_specific.devq);
	if (sim == NULL) {
		DBG_ERR("cam_sim_alloc failed txns = %d\n",
			max_transactions);
		cam_simq_free(softs->os_specific.devq);
		return ENOMEM;
	}

	softs->os_specific.sim = sim;
	mtx_lock(&softs->os_specific.cam_lock);
	error = xpt_bus_register(sim, softs->os_specific.pqi_dev, 0);
	if (error != CAM_SUCCESS) {
		DBG_ERR("xpt_bus_register failed errno %d\n", error);
		cam_sim_free(softs->os_specific.sim, FALSE);
		cam_simq_free(softs->os_specific.devq);
		mtx_unlock(&softs->os_specific.cam_lock);
		return ENXIO;
	}

	softs->os_specific.sim_registered = TRUE;
	ccb = xpt_alloc_ccb_nowait();
	if (ccb == NULL) {
		DBG_ERR("xpt_alloc_ccb_nowait failed\n");
		mtx_unlock(&softs->os_specific.cam_lock);
		return ENXIO;
	}

	if (xpt_create_path(&ccb->ccb_h.path, NULL,
			cam_sim_path(softs->os_specific.sim),
			CAM_TARGET_WILDCARD,
			CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		DBG_ERR("xpt_create_path failed\n");
		xpt_free_ccb(ccb);
		xpt_bus_deregister(cam_sim_path(softs->os_specific.sim));
		cam_sim_free(softs->os_specific.sim, TRUE);
		mtx_unlock(&softs->os_specific.cam_lock);
		return ENXIO;
	}
	/*
	 * Callback to set the queue depth per target which is
	 * derived from the FW.
	 */
	softs->os_specific.path = ccb->ccb_h.path;
	memset(&csa, 0, sizeof(csa));
	xpt_setup_ccb(&csa.ccb_h, softs->os_specific.path, 5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = AC_FOUND_DEVICE;
	csa.callback = smartpqi_async;
	csa.callback_arg = softs;
	xpt_action((union ccb *)&csa);
	if (csa.ccb_h.status != CAM_REQ_CMP) {
		DBG_ERR("Unable to register smartpqi_async handler: %d!\n",
			csa.ccb_h.status);
	}

	mtx_unlock(&softs->os_specific.cam_lock);
	DBG_INFO("OUT\n");

	return BSD_SUCCESS;
}

/*
 * Function to deregister smartpqi sim from cam layer
 */
void
deregister_sim(struct pqisrc_softstate *softs)
{
	struct ccb_setasync csa;

	DBG_FUNC("IN\n");

	if (softs->os_specific.mtx_init) {
		mtx_lock(&softs->os_specific.cam_lock);
	}

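	/* Setting event_enable to 0 deregisters our async callback. */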
	memset(&csa, 0, sizeof(csa));
	xpt_setup_ccb(&csa.ccb_h, softs->os_specific.path, 5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = 0;
	csa.callback = smartpqi_async;
	csa.callback_arg = softs;
	xpt_action((union ccb *)&csa);
	xpt_free_path(softs->os_specific.path);

	if (softs->os_specific.sim) {
		xpt_release_simq(softs->os_specific.sim, 0);
		xpt_bus_deregister(cam_sim_path(softs->os_specific.sim));
		softs->os_specific.sim_registered = FALSE;
		cam_sim_free(softs->os_specific.sim, FALSE);
		softs->os_specific.sim = NULL;
	}

	if (softs->os_specific.mtx_init) {
		mtx_unlock(&softs->os_specific.cam_lock);
	}
	if (softs->os_specific.devq != NULL) {
		cam_simq_free(softs->os_specific.devq);
	}
	if (softs->os_specific.mtx_init) {
		mtx_destroy(&softs->os_specific.cam_lock);
		softs->os_specific.mtx_init = FALSE;
	}

	mtx_destroy(&softs->os_specific.map_lock);

	DBG_FUNC("OUT\n");
}

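/*
 * Notify CAM that a target's inquiry data may have changed and let it
 * rescan the device.
 */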
void
os_rescan_target(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
{
	struct cam_path *tmppath;

	DBG_FUNC("IN\n");

	if (softs->os_specific.sim_registered) {
		if (xpt_create_path(&tmppath, NULL,
			cam_sim_path(softs->os_specific.sim),
			device->target, device->lun) != CAM_REQ_CMP) {
			DBG_ERR("unable to create path for async event!!! Bus: %d Target: %d Lun: %d\n",
				device->bus, device->target, device->lun);
			return;
		}
		xpt_async(AC_INQ_CHANGED, tmppath, NULL);
		xpt_free_path(tmppath);
	}

	device->scsi_rescan = false;

	DBG_FUNC("OUT\n");
}
1355