xref: /freebsd/sys/dev/smartpqi/smartpqi_cam.c (revision 190cef3d52236565eb22e18b33e9e865ec634aa3)
1 /*-
2  * Copyright (c) 2018 Microsemi Corporation.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  */
26 
27 /* $FreeBSD$ */
28 /*
29  * CAM interface for smartpqi driver
30  */
31 
32 #include "smartpqi_includes.h"
33 
34 /*
35  * Set cam sim properties of the smartpqi adapter.
36  */
37 static void update_sim_properties(struct cam_sim *sim, struct ccb_pathinq *cpi)
38 {
39 
40 	pqisrc_softstate_t *softs = (struct pqisrc_softstate *)
41 					cam_sim_softc(sim);
42 	DBG_FUNC("IN\n");
43 
44 	cpi->version_num = 1;
45 	cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
46 	cpi->target_sprt = 0;
47 	cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
48 	cpi->hba_eng_cnt = 0;
49 	cpi->max_lun = PQI_MAX_MULTILUN;
50 	cpi->max_target = 1088;
51 	cpi->maxio = (softs->pqi_cap.max_sg_elem - 1) * PAGE_SIZE;
52 	cpi->initiator_id = 255;
53 	strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
54 	strncpy(cpi->hba_vid, "Microsemi", HBA_IDLEN);
55 	strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
56 	cpi->unit_number = cam_sim_unit(sim);
57 	cpi->bus_id = cam_sim_bus(sim);
58 	cpi->base_transfer_speed = 1200000; /* Base bus speed in KB/sec */
59 	cpi->protocol = PROTO_SCSI;
60 	cpi->protocol_version = SCSI_REV_SPC4;
61 	cpi->transport = XPORT_SPI;
62 	cpi->transport_version = 2;
63 	cpi->ccb_h.status = CAM_REQ_CMP;
64 
65 	DBG_FUNC("OUT\n");
66 }
67 
68 /*
69  * Get transport settings of the smartpqi adapter
70  */
71 static void get_transport_settings(struct pqisrc_softstate *softs,
72 		struct ccb_trans_settings *cts)
73 {
74 	struct ccb_trans_settings_scsi	*scsi = &cts->proto_specific.scsi;
75 	struct ccb_trans_settings_sas	*sas = &cts->xport_specific.sas;
76 	struct ccb_trans_settings_spi	*spi = &cts->xport_specific.spi;
77 
78 	DBG_FUNC("IN\n");
79 
80 	cts->protocol = PROTO_SCSI;
81 	cts->protocol_version = SCSI_REV_SPC4;
82 	cts->transport = XPORT_SPI;
83 	cts->transport_version = 2;
84 	spi->valid = CTS_SPI_VALID_DISC;
85 	spi->flags = CTS_SPI_FLAGS_DISC_ENB;
86 	scsi->valid = CTS_SCSI_VALID_TQ;
87 	scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
88 	sas->valid = CTS_SAS_VALID_SPEED;
89 	cts->ccb_h.status = CAM_REQ_CMP;
90 
91 	DBG_FUNC("OUT\n");
92 }
93 
94 /*
95  *  Add the target to CAM layer and rescan, when a new device is found
96  */
97 void os_add_device(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device) {
98 	union ccb			*ccb;
99 
100 	DBG_FUNC("IN\n");
101 
102 	if(softs->os_specific.sim_registered) {
103 		if ((ccb = xpt_alloc_ccb_nowait()) == NULL) {
104 			DBG_ERR("rescan failed (can't allocate CCB)\n");
105 			return;
106 		}
107 
108 		if (xpt_create_path(&ccb->ccb_h.path, NULL,
109 			cam_sim_path(softs->os_specific.sim),
110 			device->target, device->lun) != CAM_REQ_CMP) {
111 			DBG_ERR("rescan failed (can't create path)\n");
112 			xpt_free_ccb(ccb);
113 			return;
114 		}
115 		xpt_rescan(ccb);
116 	}
117 
118 	DBG_FUNC("OUT\n");
119 }
120 
121 /*
122  * Remove the device from CAM layer when deleted or hot removed
123  */
124 void os_remove_device(pqisrc_softstate_t *softs,
125         pqi_scsi_dev_t *device) {
126 	struct cam_path *tmppath;
127 
128 	DBG_FUNC("IN\n");
129 
130 	if(softs->os_specific.sim_registered) {
131 		if (xpt_create_path(&tmppath, NULL,
132 			cam_sim_path(softs->os_specific.sim),
133 			device->target, device->lun) != CAM_REQ_CMP) {
134 			DBG_ERR("unable to create path for async event");
135 			return;
136 		}
137 		xpt_async(AC_LOST_DEVICE, tmppath, NULL);
138 		xpt_free_path(tmppath);
139 		pqisrc_free_device(softs, device);
140 	}
141 
142 	DBG_FUNC("OUT\n");
143 
144 }
145 
146 /*
147  * Function to release the frozen simq
148  */
/*
 * Function to release the frozen simq
 *
 * If this driver previously froze the simq (PQI_FLAG_BUSY set when a tag
 * could not be obtained), release it now.  Either the simq is released
 * directly, or CAM_RELEASE_SIMQ is set on the completing CCB so that CAM
 * releases it when the CCB is returned via xpt_done().
 */
static void pqi_release_camq( rcb_t *rcb )
{
	pqisrc_softstate_t *softs;
	struct ccb_scsiio *csio;

	csio = (struct ccb_scsiio *)&rcb->cm_ccb->csio;
	softs = rcb->softs;

	DBG_FUNC("IN\n");

	if (softs->os_specific.pqi_flags & PQI_FLAG_BUSY) {
		/* Clear the busy flag first so this runs only once per freeze. */
		softs->os_specific.pqi_flags &= ~PQI_FLAG_BUSY;
		if (csio->ccb_h.status & CAM_RELEASE_SIMQ)
			xpt_release_simq(xpt_path_sim(csio->ccb_h.path), 0);
		else
			csio->ccb_h.status |= CAM_RELEASE_SIMQ;
	}

	DBG_FUNC("OUT\n");
}
169 
170 /*
171  * Function to dma-unmap the completed request
172  */
/*
 * Function to dma-unmap the completed request
 *
 * Syncs and unloads the busdma map for the request's data buffer (if any),
 * frees the scatter/gather table built by pqi_request_map_helper(), and
 * returns the request's tag to the free list.
 */
static void pqi_unmap_request(void *arg)
{
	pqisrc_softstate_t *softs;
	rcb_t *rcb;

	DBG_IO("IN rcb = %p\n", arg);

	rcb = (rcb_t *)arg;
	softs = rcb->softs;

	/*
	 * Nothing to undo if the buffer was never mapped.
	 * NOTE(review): this early return also skips pqisrc_put_tag() —
	 * confirm callers never reach here holding a live tag.
	 */
	if (!(rcb->cm_flags & PQI_CMD_MAPPED))
		return;

	if (rcb->bcount != 0 ) {
		/* Sync for the direction the data actually moved. */
		if (rcb->data_dir == SOP_DATA_DIR_FROM_DEVICE)
			bus_dmamap_sync(softs->os_specific.pqi_buffer_dmat,
					rcb->cm_datamap,
					BUS_DMASYNC_POSTREAD);
		if (rcb->data_dir == SOP_DATA_DIR_TO_DEVICE)
			bus_dmamap_sync(softs->os_specific.pqi_buffer_dmat,
					rcb->cm_datamap,
					BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(softs->os_specific.pqi_buffer_dmat,
					rcb->cm_datamap);
	}
	rcb->cm_flags &= ~PQI_CMD_MAPPED;

	/* Free the S/G table allocated during mapping, if one exists. */
	if(rcb->sgt && rcb->nseg)
		os_mem_free(rcb->softs, (void*)rcb->sgt,
			rcb->nseg*sizeof(sgt_t));

	pqisrc_put_tag(&softs->taglist, rcb->tag);

	DBG_IO("OUT\n");
}
208 
209 /*
210  * Construct meaningful LD name for volume here.
211  */
/*
 * Construct meaningful LD name for volume here.
 *
 * Intercepts a completed standard INQUIRY (EVPD clear, data-in, buffer at
 * least SHORT_INQUIRY_LENGTH) targeted at a driver-managed logical volume
 * and rewrites the vendor/product/revision fields so the volume reports
 * its RAID level and online/offline state.  strncpy into the fixed-width
 * inquiry fields is intentional: SCSI inquiry strings are space/NUL padded,
 * not NUL-terminated C strings.
 */
static void
smartpqi_fix_ld_inquiry(pqisrc_softstate_t *softs, struct ccb_scsiio *csio)
{
	struct scsi_inquiry_data *inq = NULL;
	uint8_t *cdb = NULL;
	pqi_scsi_dev_t *device = NULL;

	DBG_FUNC("IN\n");

	/* CDB may be inline in the CCB or referenced via pointer. */
 	cdb = (csio->ccb_h.flags & CAM_CDB_POINTER) ?
		(uint8_t *)csio->cdb_io.cdb_ptr : csio->cdb_io.cdb_bytes;
	if(cdb[0] == INQUIRY &&
		(cdb[1] & SI_EVPD) == 0 &&
		(csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN &&
		csio->dxfer_len >= SHORT_INQUIRY_LENGTH) {

		inq = (struct scsi_inquiry_data *)csio->data_ptr;

		device = softs->device_list[csio->ccb_h.target_id][csio->ccb_h.target_lun];

		/* Let the disks be probed and dealt with via CAM. Only for LD
		  let it fall through and inquiry be tweaked */
		if( !device || 	!pqisrc_is_logical_device(device) ||
				(device->devtype != DISK_DEVICE)  ||
				pqisrc_is_external_raid_device(device)) {
 	 		return;
		}

		strncpy(inq->vendor, "MSCC",
       			SID_VENDOR_SIZE);
		strncpy(inq->product,
			pqisrc_raidlevel_to_string(device->raid_level),
       			SID_PRODUCT_SIZE);
		strncpy(inq->revision, device->volume_offline?"OFF":"OK",
       			SID_REVISION_SIZE);
    	}

	DBG_FUNC("OUT\n");
}
251 
252 /*
253  * Handle completion of a command - pass results back through the CCB
254  */
/*
 * Handle completion of a command - pass results back through the CCB
 *
 * Marks the request successful, optionally rewrites LD inquiry data,
 * releases a frozen simq if needed, unmaps the DMA buffers / frees the
 * tag, and completes the CCB back to CAM.  The call order matters:
 * unmapping must precede xpt_done() so CAM never sees a live mapping.
 */
void
os_io_response_success(rcb_t *rcb)
{
	struct ccb_scsiio		*csio;

	DBG_IO("IN rcb = %p\n", rcb);

	if (rcb == NULL)
		panic("rcb is null");

	csio = (struct ccb_scsiio *)&rcb->cm_ccb->csio;

	if (csio == NULL)
		panic("csio is null");

	rcb->status = REQUEST_SUCCESS;
	csio->ccb_h.status = CAM_REQ_CMP;

	smartpqi_fix_ld_inquiry(rcb->softs, csio);
	pqi_release_camq(rcb);
	pqi_unmap_request(rcb);
	xpt_done((union ccb *)csio);

	DBG_IO("OUT\n");
}
280 
281 /*
282  * Error response handling for raid IO
283  */
/*
 * Error response handling for raid IO
 *
 * Translates a RAID-path error response from the firmware into CAM
 * status on the request's CCB, copies any sense data, releases the
 * simq if this driver froze it, then unmaps and completes the CCB.
 */
void os_raid_response_error(rcb_t *rcb, raid_path_error_info_elem_t *err_info)
{
	struct ccb_scsiio *csio;
	pqisrc_softstate_t *softs;

	DBG_IO("IN\n");

	csio = (struct ccb_scsiio *)&rcb->cm_ccb->csio;

	if (csio == NULL)
		panic("csio is null");

	softs = rcb->softs;

	ASSERT(err_info != NULL);
	csio->scsi_status = err_info->status;
	/* Default to generic completion error; refined per-status below. */
	csio->ccb_h.status = CAM_REQ_CMP_ERR;

	if (csio->ccb_h.func_code == XPT_SCSI_IO) {
		/*
		 * Handle specific SCSI status values.
		 */
		switch(csio->scsi_status) {
			case PQI_RAID_STATUS_QUEUE_FULL:
				/* Queue-full is reported to CAM as complete. */
				csio->ccb_h.status = CAM_REQ_CMP;
				DBG_ERR("Queue Full error");
				break;
				/* check condition, sense data included */
			case PQI_RAID_STATUS_CHECK_CONDITION:
				{
				/* Copy at most sense_len bytes of sense data
				 * into the CCB and record the residual. */
				uint16_t sense_data_len =
					LE_16(err_info->sense_data_len);
				uint8_t *sense_data = NULL;
				if (sense_data_len)
					sense_data = err_info->data;
				memset(&csio->sense_data, 0, csio->sense_len);
				sense_data_len = (sense_data_len >
						csio->sense_len) ?
						csio->sense_len :
						sense_data_len;
				if (sense_data)
					memcpy(&csio->sense_data, sense_data,
						sense_data_len);
				if (csio->sense_len > sense_data_len)
					csio->sense_resid = csio->sense_len
							- sense_data_len;
					else
						csio->sense_resid = 0;
				csio->ccb_h.status = CAM_SCSI_STATUS_ERROR
							| CAM_AUTOSNS_VALID
							| CAM_REQ_CMP_ERR;

				}
				break;

			case PQI_RAID_DATA_IN_OUT_UNDERFLOW:
				{
				/* Short transfer: report residual, complete OK. */
				uint32_t resid = 0;
				resid = rcb->bcount-err_info->data_out_transferred;
			    	csio->resid  = resid;
				csio->ccb_h.status = CAM_REQ_CMP;
				break;
				}
			default:
				csio->ccb_h.status = CAM_REQ_CMP;
				break;
		}
	}

	/* Same simq-release logic as pqi_release_camq(). */
	if (softs->os_specific.pqi_flags & PQI_FLAG_BUSY) {
		softs->os_specific.pqi_flags &= ~PQI_FLAG_BUSY;
		if (csio->ccb_h.status & CAM_RELEASE_SIMQ)
			xpt_release_simq(xpt_path_sim(csio->ccb_h.path), 0);
		else
			csio->ccb_h.status |= CAM_RELEASE_SIMQ;
	}

	pqi_unmap_request(rcb);
	xpt_done((union ccb *)csio);

	DBG_IO("OUT\n");
}
366 
367 
368 /*
369  * Error response handling for aio.
370  */
/*
 * Error response handling for aio.
 *
 * Maps an AIO (accelerated I/O) path error response into CAM status,
 * copies sense data when present, and completes the CCB.  On
 * PQI_AIO_STATUS_AIO_PATH_DISABLED the device's offload flag is cleared
 * so subsequent I/O falls back to the RAID path.
 */
void os_aio_response_error(rcb_t *rcb, aio_path_error_info_elem_t *err_info)
{
	struct ccb_scsiio *csio;
	pqisrc_softstate_t *softs;

	DBG_IO("IN\n");

        if (rcb == NULL)
		panic("rcb is null");

	rcb->status = REQUEST_SUCCESS;
	csio = (struct ccb_scsiio *)&rcb->cm_ccb->csio;
	if (csio == NULL)
                panic("csio is null");

	softs = rcb->softs;

	switch (err_info->service_resp) {
		case PQI_AIO_SERV_RESPONSE_COMPLETE:
			/* Firmware completed the command; pass its status through. */
			csio->ccb_h.status = err_info->status;
			break;
		case PQI_AIO_SERV_RESPONSE_FAILURE:
			switch(err_info->status) {
				case PQI_AIO_STATUS_IO_ABORTED:
					csio->ccb_h.status = CAM_REQ_ABORTED;
					DBG_WARN_BTL(rcb->dvp, "IO aborted\n");
					break;
				case PQI_AIO_STATUS_UNDERRUN:
					/* Short transfer: complete with residual. */
					csio->ccb_h.status = CAM_REQ_CMP;
					csio->resid =
						LE_32(err_info->resd_count);
					break;
				case PQI_AIO_STATUS_OVERRUN:
					csio->ccb_h.status = CAM_REQ_CMP;
					break;
				case PQI_AIO_STATUS_AIO_PATH_DISABLED:
					/* Disable offload and requeue on the RAID path. */
					DBG_WARN_BTL(rcb->dvp,"AIO Path Disabled\n");
					rcb->dvp->offload_enabled = false;
					csio->ccb_h.status |= CAM_REQUEUE_REQ;
					break;
				case PQI_AIO_STATUS_IO_ERROR:
				case PQI_AIO_STATUS_IO_NO_DEVICE:
				case PQI_AIO_STATUS_INVALID_DEVICE:
				default:
					DBG_WARN_BTL(rcb->dvp,"IO Error/Invalid/No device\n");
					csio->ccb_h.status |=
						CAM_SCSI_STATUS_ERROR;
					break;
			}
			break;
		case PQI_AIO_SERV_RESPONSE_TMF_COMPLETE:
		case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED:
			csio->ccb_h.status = CAM_REQ_CMP;
			break;
		case PQI_AIO_SERV_RESPONSE_TMF_REJECTED:
		case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN:
			DBG_WARN_BTL(rcb->dvp,"TMF rejected/Incorrect Lun\n");
			csio->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
			break;
		default:
			DBG_WARN_BTL(rcb->dvp,"Scsi Status Error\n");
			csio->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
			break;
	}
	/* If sense data accompanied the error, copy what fits into the CCB
	 * and report a check condition with autosense valid. */
	if(err_info->data_pres == DATA_PRESENT_SENSE_DATA ) {
		csio->scsi_status = PQI_AIO_STATUS_CHECK_CONDITION;
		uint8_t *sense_data = NULL;
		unsigned sense_data_len = LE_16(err_info->data_len);
		if (sense_data_len)
			sense_data = err_info->data;
		DBG_ERR_BTL(rcb->dvp, "SCSI_STATUS_CHECK_COND  sense size %u\n",
			sense_data_len);
		memset(&csio->sense_data, 0, csio->sense_len);
		if (sense_data)
			memcpy(&csio->sense_data, sense_data, ((sense_data_len >
                        	csio->sense_len) ? csio->sense_len : sense_data_len));
		if (csio->sense_len > sense_data_len)
			csio->sense_resid = csio->sense_len - sense_data_len;
        	else
			csio->sense_resid = 0;
		csio->ccb_h.status = CAM_SCSI_STATUS_ERROR | CAM_AUTOSNS_VALID;
	}

	smartpqi_fix_ld_inquiry(softs, csio);
	pqi_release_camq(rcb);
	pqi_unmap_request(rcb);
	xpt_done((union ccb *)csio);
	DBG_IO("OUT\n");
}
460 
461 /*
462  * Command-mapping helper function - populate this command's s/g table.
463  */
464 static void
465 pqi_request_map_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error)
466 {
467 	pqisrc_softstate_t *softs;
468 	rcb_t *rcb;
469 
470 	rcb = (rcb_t *)arg;
471 	softs = rcb->softs;
472 
473 	if(  error || nseg > softs->pqi_cap.max_sg_elem )
474 	{
475 		xpt_freeze_simq(softs->os_specific.sim, 1);
476 		rcb->cm_ccb->ccb_h.status |= (CAM_REQUEUE_REQ|
477 						CAM_RELEASE_SIMQ);
478 		DBG_ERR_BTL(rcb->dvp, "map failed err = %d or nseg(%d) > sgelem(%d)\n",
479 			error, nseg, softs->pqi_cap.max_sg_elem);
480 		pqi_unmap_request(rcb);
481 		xpt_done((union ccb *)rcb->cm_ccb);
482 		return;
483 	}
484 
485 	rcb->sgt = os_mem_alloc(softs, nseg * sizeof(rcb_t));
486 	rcb->nseg = nseg;
487 	if (rcb->sgt != NULL) {
488 		for (int i = 0; i < nseg; i++) {
489 			rcb->sgt[i].addr = segs[i].ds_addr;
490 			rcb->sgt[i].len = segs[i].ds_len;
491 			rcb->sgt[i].flags = 0;
492 		}
493 	}
494 
495 	if (rcb->data_dir == SOP_DATA_DIR_FROM_DEVICE)
496 		bus_dmamap_sync(softs->os_specific.pqi_buffer_dmat,
497 			rcb->cm_datamap, BUS_DMASYNC_PREREAD);
498 	if (rcb->data_dir == SOP_DATA_DIR_TO_DEVICE)
499 		bus_dmamap_sync(softs->os_specific.pqi_buffer_dmat,
500 			rcb->cm_datamap, BUS_DMASYNC_PREWRITE);
501 
502 	/* Call IO functions depending on pd or ld */
503 	rcb->status = REQUEST_PENDING;
504 
505 	error = pqisrc_build_send_io(softs, rcb);
506 
507 	if (error) {
508 		rcb->req_pending = false;
509 		xpt_freeze_simq(softs->os_specific.sim, 1);
510 		rcb->cm_ccb->ccb_h.status |= (CAM_REQUEUE_REQ
511 						|CAM_RELEASE_SIMQ);
512 		DBG_ERR_BTL(rcb->dvp, "Build IO failed, error = %d\n", error);
513 	   	pqi_unmap_request(rcb);
514 		xpt_done((union ccb *)rcb->cm_ccb);
515 		return;
516 	}
517 }
518 
519 /*
520  * Function to dma-map the request buffer
521  */
/*
 * Function to dma-map the request buffer
 *
 * If the request carries data, load the CCB's buffer through busdma;
 * pqi_request_map_helper() then builds the S/G list and submits the I/O
 * (possibly asynchronously, in which case bus_dmamap_load_ccb returns
 * EINPROGRESS).  Data-less commands bypass busdma and are submitted
 * directly.  Returns 0 on success or a PQI/errno-style error code.
 */
static int pqi_map_request( rcb_t *rcb )
{
	pqisrc_softstate_t *softs = rcb->softs;
	int error = PQI_STATUS_SUCCESS;
	union ccb *ccb = rcb->cm_ccb;

	DBG_FUNC("IN\n");

	/* check that mapping is necessary */
	if (rcb->cm_flags & PQI_CMD_MAPPED)
		return(0);
	rcb->cm_flags |= PQI_CMD_MAPPED;

	if (rcb->bcount) {
		/* Helper is invoked by busdma once segments are available. */
		error = bus_dmamap_load_ccb(softs->os_specific.pqi_buffer_dmat,
			rcb->cm_datamap, ccb, pqi_request_map_helper, rcb, 0);
		if (error != 0){
			DBG_ERR_BTL(rcb->dvp, "bus_dmamap_load_ccb failed = %d count = %d\n",
					error, rcb->bcount);
			return error;
		}
	} else {
		/*
		 * Set up the command to go to the controller.  If there are no
		 * data buffers associated with the command then it can bypass
		 * busdma.
		 */
		/* Call IO functions depending on pd or ld */
		rcb->status = REQUEST_PENDING;

		error = pqisrc_build_send_io(softs, rcb);

	}

	DBG_FUNC("OUT error = %d\n", error);

	return error;
}
560 
561 /*
562  * Function to clear the request control block
563  */
564 void os_reset_rcb( rcb_t *rcb )
565 {
566 	rcb->error_info = NULL;
567 	rcb->req = NULL;
568 	rcb->status = -1;
569 	rcb->tag = INVALID_ELEM;
570 	rcb->dvp = NULL;
571 	rcb->cdbp = NULL;
572 	rcb->softs = NULL;
573 	rcb->cm_flags = 0;
574 	rcb->cm_data = NULL;
575 	rcb->bcount = 0;
576 	rcb->nseg = 0;
577 	rcb->sgt = NULL;
578 	rcb->cm_ccb = NULL;
579 	rcb->encrypt_enable = false;
580 	rcb->ioaccel_handle = 0;
581 	rcb->resp_qid = 0;
582 	rcb->req_pending = false;
583 }
584 
585 /*
586  * Callback function for the lun rescan
587  */
588 static void smartpqi_lunrescan_cb(struct cam_periph *periph, union ccb *ccb)
589 {
590         xpt_free_path(ccb->ccb_h.path);
591         xpt_free_ccb(ccb);
592 }
593 
594 
595 /*
596  * Function to rescan the lun
597  */
598 static void smartpqi_lun_rescan(struct pqisrc_softstate *softs, int target,
599 			int lun)
600 {
601 	union ccb   *ccb = NULL;
602 	cam_status  status = 0;
603 	struct cam_path     *path = NULL;
604 
605 	DBG_FUNC("IN\n");
606 
607 	ccb = xpt_alloc_ccb_nowait();
608 	status = xpt_create_path(&path, NULL,
609 				cam_sim_path(softs->os_specific.sim), target, lun);
610 	if (status != CAM_REQ_CMP) {
611 		DBG_ERR("xpt_create_path status(%d) != CAM_REQ_CMP \n",
612 				 status);
613 		xpt_free_ccb(ccb);
614 		return;
615 	}
616 
617 	bzero(ccb, sizeof(union ccb));
618 	xpt_setup_ccb(&ccb->ccb_h, path, 5);
619 	ccb->ccb_h.func_code = XPT_SCAN_LUN;
620 	ccb->ccb_h.cbfcnp = smartpqi_lunrescan_cb;
621 	ccb->crcn.flags = CAM_FLAG_NONE;
622 
623 	xpt_action(ccb);
624 
625 	DBG_FUNC("OUT\n");
626 }
627 
628 /*
629  * Function to rescan the lun under each target
630  */
631 void smartpqi_target_rescan(struct pqisrc_softstate *softs)
632 {
633 	int target = 0, lun = 0;
634 
635 	DBG_FUNC("IN\n");
636 
637 	for(target = 0; target < PQI_MAX_DEVICES; target++){
638 		for(lun = 0; lun < PQI_MAX_MULTILUN; lun++){
639 			if(softs->device_list[target][lun]){
640 				smartpqi_lun_rescan(softs, target, lun);
641 			}
642 		}
643 	}
644 
645 	DBG_FUNC("OUT\n");
646 }
647 
648 /*
649  * Set the mode of tagged command queueing for the current task.
650  */
651 uint8_t os_get_task_attr(rcb_t *rcb)
652 {
653 	union ccb *ccb = rcb->cm_ccb;
654 	uint8_t tag_action = SOP_TASK_ATTRIBUTE_SIMPLE;
655 
656 	switch(ccb->csio.tag_action) {
657 	case MSG_HEAD_OF_Q_TAG:
658 		tag_action = SOP_TASK_ATTRIBUTE_HEAD_OF_QUEUE;
659 		break;
660 	case MSG_ORDERED_Q_TAG:
661 		tag_action = SOP_TASK_ATTRIBUTE_ORDERED;
662 		break;
663 	case MSG_SIMPLE_Q_TAG:
664 	default:
665 		tag_action = SOP_TASK_ATTRIBUTE_SIMPLE;
666 		break;
667 	}
668 	return tag_action;
669 }
670 
671 /*
672  * Complete all outstanding commands
673  */
/*
 * Complete all outstanding commands
 *
 * Walks every tag slot and aborts any request still pending with an
 * attached CCB, completing it back to CAM.  Tag 0 is skipped (tags
 * start at 1 in this driver's tag list).
 */
void os_complete_outstanding_cmds_nodevice(pqisrc_softstate_t *softs)
{
	int tag = 0;

	DBG_FUNC("IN\n");

	for (tag = 1; tag < softs->max_outstanding_io; tag++) {
		rcb_t *prcb = &softs->rcb[tag];
		if(prcb->req_pending && prcb->cm_ccb ) {
			prcb->req_pending = false;
			/* NOTE(review): ORing CAM_REQ_ABORTED with CAM_REQ_CMP
			 * combines two status codes — confirm this is the
			 * intended CAM status for force-aborted requests. */
			prcb->cm_ccb->ccb_h.status = CAM_REQ_ABORTED | CAM_REQ_CMP;
			xpt_done((union ccb *)prcb->cm_ccb);
			prcb->cm_ccb = NULL;
		}
	}

	DBG_FUNC("OUT\n");
}
692 
693 /*
694  * IO handling functionality entry point
695  */
/*
 * IO handling functionality entry point
 *
 * Validates the target device and controller state, allocates a tag and
 * request control block for the CCB, records the transfer direction and
 * buffer, and hands the request to pqi_map_request() for DMA mapping and
 * submission.  Returns 0 on success (the CCB will be completed
 * asynchronously); on failure the CCB status is set and the caller is
 * expected to complete it.
 */
static int pqisrc_io_start(struct cam_sim *sim, union ccb *ccb)
{
	rcb_t *rcb;
	uint32_t tag, no_transfer = 0;
	pqisrc_softstate_t *softs = (struct pqisrc_softstate *)
					cam_sim_softc(sim);
	int32_t error = PQI_STATUS_FAILURE;
	pqi_scsi_dev_t *dvp;

	DBG_FUNC("IN\n");

	/* Reject I/O to targets the driver does not know about. */
	if( softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun] == NULL ) {
		ccb->ccb_h.status = CAM_DEV_NOT_THERE;
		DBG_INFO("Device  = %d not there\n", ccb->ccb_h.target_id);
		return PQI_STATUS_FAILURE;
	}

	dvp = softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun];
	/* Check  controller state */
	if (IN_PQI_RESET(softs)) {
		ccb->ccb_h.status = CAM_SCSI_BUS_RESET
					| CAM_BUSY | CAM_REQ_INPROG;
		DBG_WARN("Device  = %d BUSY/IN_RESET\n", ccb->ccb_h.target_id);
		return error;
	}
	/* Check device state */
	if (pqisrc_ctrl_offline(softs) || DEV_GONE(dvp)) {
		ccb->ccb_h.status = CAM_DEV_NOT_THERE | CAM_REQ_CMP;
		DBG_WARN("Device  = %d GONE/OFFLINE\n", ccb->ccb_h.target_id);
		return error;
	}
	/* Check device reset */
	if (DEV_RESET(dvp)) {
		ccb->ccb_h.status = CAM_SCSI_BUSY | CAM_REQ_INPROG | CAM_BUSY;
		DBG_WARN("Device %d reset returned busy\n", ccb->ccb_h.target_id);
		return error;
	}

	if (dvp->expose_device == false) {
		ccb->ccb_h.status = CAM_DEV_NOT_THERE;
		DBG_INFO("Device  = %d not exposed\n", ccb->ccb_h.target_id);
		return error;
	}

	/* No free tag: freeze the simq and ask CAM to requeue. */
	tag = pqisrc_get_tag(&softs->taglist);
	if( tag == INVALID_ELEM ) {
		DBG_ERR("Get Tag failed\n");
		xpt_freeze_simq(softs->os_specific.sim, 1);
		softs->os_specific.pqi_flags |= PQI_FLAG_BUSY;
		ccb->ccb_h.status |= (CAM_REQUEUE_REQ | CAM_RELEASE_SIMQ);
		return PQI_STATUS_FAILURE;
	}

	DBG_IO("tag = %d &softs->taglist : %p\n", tag, &softs->taglist);

	/* Bind the tag's rcb to this CCB. */
	rcb = &softs->rcb[tag];
	os_reset_rcb( rcb );
	rcb->tag = tag;
	rcb->softs = softs;
	rcb->cmdlen = ccb->csio.cdb_len;
	ccb->ccb_h.sim_priv.entries[0].ptr = rcb;

	/* Record the data direction for busdma sync and SOP encoding. */
	switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
		case CAM_DIR_IN:
			rcb->data_dir = SOP_DATA_DIR_FROM_DEVICE;
			break;
		case CAM_DIR_OUT:
			rcb->data_dir = SOP_DATA_DIR_TO_DEVICE;
			break;
		case CAM_DIR_NONE:
			no_transfer = 1;
			break;
		default:
			DBG_ERR("Unknown Dir\n");
			break;
	}
	rcb->cm_ccb = ccb;
	rcb->dvp = softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun];

	if (!no_transfer) {
		rcb->cm_data = (void *)ccb->csio.data_ptr;
		rcb->bcount = ccb->csio.dxfer_len;
	} else {
		rcb->cm_data = NULL;
		rcb->bcount = 0;
	}
	/*
	 * Submit the request to the adapter.
	 *
	 * Note that this may fail if we're unable to map the request (and
	 * if we ever learn a transport layer other than simple, may fail
	 * if the adapter rejects the command).
	 */
	if ((error = pqi_map_request(rcb)) != 0) {
		rcb->req_pending = false;
		xpt_freeze_simq(softs->os_specific.sim, 1);
		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
		if (error == EINPROGRESS) {
			/* busdma will invoke the map helper later; not an error. */
			DBG_WARN("In Progress on %d\n", ccb->ccb_h.target_id);
			error = 0;
		} else {
			ccb->ccb_h.status |= CAM_REQUEUE_REQ;
			DBG_WARN("Requeue req error = %d target = %d\n", error,
				ccb->ccb_h.target_id);
			pqi_unmap_request(rcb);
		}
	}

	DBG_FUNC("OUT error = %d\n", error);
	return error;
}
807 
808 /*
809  * Abort a task, task management functionality
810  */
/*
 * Abort a task, task management functionality
 *
 * Sends an ABORT TASK TMF for the request associated with the CCB.  The
 * original request's tag becomes the abort target; a fresh tag/rcb is
 * allocated to carry the TMF itself, reusing the original response queue
 * id.  Both tags are returned to the free list afterwards.
 */
static int
pqisrc_scsi_abort_task(pqisrc_softstate_t *softs,  union ccb *ccb)
{
	rcb_t *rcb = ccb->ccb_h.sim_priv.entries[0].ptr;
	uint32_t abort_tag = rcb->tag;
	uint32_t tag = 0;
	int rval = PQI_STATUS_SUCCESS;
	uint16_t qid;

    DBG_FUNC("IN\n");

	qid = (uint16_t)rcb->resp_qid;

	/* NOTE(review): pqisrc_get_tag() can return INVALID_ELEM; that case
	 * is not checked here — confirm the tag list cannot be exhausted on
	 * this path. */
	tag = pqisrc_get_tag(&softs->taglist);
	rcb = &softs->rcb[tag];
	rcb->tag = tag;
	rcb->resp_qid = qid;

	rval = pqisrc_send_tmf(softs, rcb->dvp, rcb, abort_tag,
		SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK);

	if (PQI_STATUS_SUCCESS == rval) {
		rval = rcb->status;
		if (REQUEST_SUCCESS == rval) {
			ccb->ccb_h.status = CAM_REQ_ABORTED;
		}
	}
	/* Release both the aborted request's tag and the TMF's tag. */
	pqisrc_put_tag(&softs->taglist, abort_tag);
	pqisrc_put_tag(&softs->taglist,rcb->tag);

	DBG_FUNC("OUT rval = %d\n", rval);

	return rval;
}
845 
846 /*
847  * Abort a taskset, task management functionality
848  */
/*
 * Abort a taskset, task management functionality
 *
 * Allocates a tag/rcb and issues an ABORT TASK SET TMF.  On transport
 * success the firmware's per-request status is returned instead.
 */
static int
pqisrc_scsi_abort_task_set(pqisrc_softstate_t *softs, union ccb *ccb)
{
	rcb_t *rcb = NULL;
	uint32_t tag = 0;
	int rval = PQI_STATUS_SUCCESS;

	DBG_FUNC("IN\n");

	tag = pqisrc_get_tag(&softs->taglist);
	rcb = &softs->rcb[tag];
	rcb->tag = tag;

	/* NOTE(review): rcb->dvp is whatever device the recycled rcb last
	 * carried — confirm it matches this ccb's target before the TMF. */
	rval = pqisrc_send_tmf(softs, rcb->dvp, rcb, 0,
			SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK_SET);

	if (rval == PQI_STATUS_SUCCESS) {
		rval = rcb->status;
	}

	pqisrc_put_tag(&softs->taglist,rcb->tag);

	DBG_FUNC("OUT rval = %d\n", rval);

	return rval;
}
875 
876 /*
877  * Target reset task management functionality
878  */
/*
 * Target reset task management functionality
 *
 * Issues a LUN RESET TMF to the CCB's target device.  The device's
 * reset_in_progress flag is set for the duration so the I/O path can
 * return busy.  Returns PQI_STATUS_SUCCESS only if the TMF both sent
 * and completed successfully.
 */
static int
pqisrc_target_reset( pqisrc_softstate_t *softs,  union ccb *ccb)
{
	pqi_scsi_dev_t *devp = softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun];
	rcb_t *rcb = NULL;
	uint32_t tag = 0;
	int rval = PQI_STATUS_SUCCESS;

	DBG_FUNC("IN\n");

	if (devp == NULL) {
		DBG_ERR("bad target t%d\n", ccb->ccb_h.target_id);
		return (-1);
	}

	tag = pqisrc_get_tag(&softs->taglist);
	rcb = &softs->rcb[tag];
	rcb->tag = tag;

	/* Flag the reset so pqisrc_io_start() returns busy meanwhile. */
	devp->reset_in_progress = true;
	rval = pqisrc_send_tmf(softs, devp, rcb, 0,
		SOP_TASK_MANAGEMENT_LUN_RESET);
	if (PQI_STATUS_SUCCESS == rval) {
		rval = rcb->status;
	}
	devp->reset_in_progress = false;
	pqisrc_put_tag(&softs->taglist,rcb->tag);

	DBG_FUNC("OUT rval = %d\n", rval);

	return ((rval == REQUEST_SUCCESS) ?
		PQI_STATUS_SUCCESS : PQI_STATUS_FAILURE);
}
912 
913 /*
914  * cam entry point of the smartpqi module.
915  */
/*
 * cam entry point of the smartpqi module.
 *
 * Dispatches CCBs by function code.  Note the completion convention:
 * most cases fall through to the common xpt_done() at the bottom, but
 * paths that complete the CCB asynchronously (successful XPT_SCSI_IO)
 * or have already called xpt_done() themselves return early.
 */
static void smartpqi_cam_action(struct cam_sim *sim, union ccb *ccb)
{
	struct pqisrc_softstate *softs = cam_sim_softc(sim);
	struct ccb_hdr  *ccb_h = &ccb->ccb_h;

	DBG_FUNC("IN\n");

	switch (ccb_h->func_code) {
		case XPT_SCSI_IO:
		{
			/* On success the CCB completes asynchronously. */
			if(!pqisrc_io_start(sim, ccb)) {
				return;
			}
			break;
		}
		case XPT_CALC_GEOMETRY:
		{
			struct ccb_calc_geometry *ccg;
			ccg = &ccb->ccg;
			if (ccg->block_size == 0) {
				ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
				ccb->ccb_h.status = CAM_REQ_INVALID;
				break;
			}
			cam_calc_geometry(ccg, /* extended */ 1);
			ccb->ccb_h.status = CAM_REQ_CMP;
			break;
		}
		case XPT_PATH_INQ:
		{
			update_sim_properties(sim, &ccb->cpi);
			ccb->ccb_h.status = CAM_REQ_CMP;
			break;
		}
		case XPT_GET_TRAN_SETTINGS:
			get_transport_settings(softs, &ccb->cts);
			ccb->ccb_h.status = CAM_REQ_CMP;
			break;
		case XPT_ABORT:
			if(pqisrc_scsi_abort_task(softs,  ccb)) {
				ccb->ccb_h.status = CAM_REQ_CMP_ERR;
				xpt_done(ccb);
				DBG_ERR("Abort task failed on %d\n",
					ccb->ccb_h.target_id);
				return;
			}
			break;
		case XPT_TERM_IO:
			if (pqisrc_scsi_abort_task_set(softs,  ccb)) {
				ccb->ccb_h.status = CAM_REQ_CMP_ERR;
				DBG_ERR("Abort task set failed on %d\n",
					ccb->ccb_h.target_id);
				xpt_done(ccb);
				return;
			}
			break;
		case XPT_RESET_DEV:
			if(pqisrc_target_reset(softs,  ccb)) {
				ccb->ccb_h.status = CAM_REQ_CMP_ERR;
				DBG_ERR("Target reset failed on %d\n",
					ccb->ccb_h.target_id);
				xpt_done(ccb);
				return;
			} else {
				ccb->ccb_h.status = CAM_REQ_CMP;
			}
			break;
		case XPT_RESET_BUS:
			/* Bus resets are a no-op on this controller. */
			ccb->ccb_h.status = CAM_REQ_CMP;
			break;
		case XPT_SET_TRAN_SETTINGS:
			/* NOTE(review): returns without xpt_done() — confirm
			 * CAM completes NOTAVAIL CCBs on this path. */
			ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
			return;
		default:
			DBG_WARN("UNSUPPORTED FUNC CODE\n");
			ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
			break;
	}
	xpt_done(ccb);

	DBG_FUNC("OUT\n");
}
998 
999 /*
1000  * Function to poll the response, when interrupts are unavailable
1001  * This also serves supporting crash dump.
1002  */
1003 static void smartpqi_poll(struct cam_sim *sim)
1004 {
1005 	struct pqisrc_softstate *softs = cam_sim_softc(sim);
1006 	int i;
1007 
1008 	for (i = 1; i < softs->intr_count; i++ )
1009 		pqisrc_process_response_queue(softs, i);
1010 }
1011 
1012 /*
1013  * Function to adjust the queue depth of a device
1014  */
1015 void smartpqi_adjust_queue_depth(struct cam_path *path, uint32_t queue_depth)
1016 {
1017 	struct ccb_relsim crs;
1018 
1019 	DBG_INFO("IN\n");
1020 
1021 	xpt_setup_ccb(&crs.ccb_h, path, 5);
1022 	crs.ccb_h.func_code = XPT_REL_SIMQ;
1023 	crs.ccb_h.flags = CAM_DEV_QFREEZE;
1024 	crs.release_flags = RELSIM_ADJUST_OPENINGS;
1025 	crs.openings = queue_depth;
1026 	xpt_action((union ccb *)&crs);
1027 	if(crs.ccb_h.status != CAM_REQ_CMP) {
1028 		printf("XPT_REL_SIMQ failed stat=%d\n", crs.ccb_h.status);
1029 	}
1030 
1031 	DBG_INFO("OUT\n");
1032 }
1033 
1034 /*
1035  * Function to register async callback for setting queue depth
1036  */
1037 static void
1038 smartpqi_async(void *callback_arg, u_int32_t code,
1039 		struct cam_path *path, void *arg)
1040 {
1041 	struct pqisrc_softstate *softs;
1042 	softs = (struct pqisrc_softstate*)callback_arg;
1043 
1044 	DBG_FUNC("IN\n");
1045 
1046 	switch (code) {
1047 		case AC_FOUND_DEVICE:
1048 		{
1049 			struct ccb_getdev *cgd;
1050 			cgd = (struct ccb_getdev *)arg;
1051 			if (cgd == NULL) {
1052 				break;
1053 			}
1054 			uint32_t t_id = cgd->ccb_h.target_id;
1055 
1056 			if (t_id <= (PQI_CTLR_INDEX - 1)) {
1057 				if (softs != NULL) {
1058 					pqi_scsi_dev_t *dvp = softs->device_list[t_id][cgd->ccb_h.target_lun];
1059 					smartpqi_adjust_queue_depth(path,
1060 							dvp->queue_depth);
1061 				}
1062 			}
1063 			break;
1064 		}
1065 		default:
1066 			break;
1067 	}
1068 
1069 	DBG_FUNC("OUT\n");
1070 }
1071 
1072 /*
1073  * Function to register sim with CAM layer for smartpqi driver
1074  */
1075 int register_sim(struct pqisrc_softstate *softs, int card_index)
1076 {
1077 	int error = 0;
1078 	int max_transactions;
1079 	union ccb   *ccb = NULL;
1080 	cam_status status = 0;
1081 	struct ccb_setasync csa;
1082 	struct cam_sim *sim;
1083 
1084 	DBG_FUNC("IN\n");
1085 
1086 	max_transactions = softs->max_io_for_scsi_ml;
1087 	softs->os_specific.devq = cam_simq_alloc(max_transactions);
1088 	if (softs->os_specific.devq == NULL) {
1089 		DBG_ERR("cam_simq_alloc failed txns = %d\n",
1090 			max_transactions);
1091 		return PQI_STATUS_FAILURE;
1092 	}
1093 
1094 	sim = cam_sim_alloc(smartpqi_cam_action, \
1095 				smartpqi_poll, "smartpqi", softs, \
1096 				card_index, &softs->os_specific.cam_lock, \
1097 				1, max_transactions, softs->os_specific.devq);
1098 	if (sim == NULL) {
1099 		DBG_ERR("cam_sim_alloc failed txns = %d\n",
1100 			max_transactions);
1101 		cam_simq_free(softs->os_specific.devq);
1102 		return PQI_STATUS_FAILURE;
1103 	}
1104 
1105 	softs->os_specific.sim = sim;
1106 	mtx_lock(&softs->os_specific.cam_lock);
1107 	status = xpt_bus_register(sim, softs->os_specific.pqi_dev, 0);
1108 	if (status != CAM_SUCCESS) {
1109 		DBG_ERR("xpt_bus_register failed status=%d\n", status);
1110 		cam_sim_free(softs->os_specific.sim, FALSE);
1111 		cam_simq_free(softs->os_specific.devq);
1112 		mtx_unlock(&softs->os_specific.cam_lock);
1113 		return PQI_STATUS_FAILURE;
1114 	}
1115 
1116 	softs->os_specific.sim_registered = TRUE;
1117 	ccb = xpt_alloc_ccb_nowait();
1118 	if (ccb == NULL) {
1119 		DBG_ERR("xpt_create_path failed\n");
1120 		return PQI_STATUS_FAILURE;
1121 	}
1122 
1123 	if (xpt_create_path(&ccb->ccb_h.path, NULL,
1124 			cam_sim_path(softs->os_specific.sim),
1125 			CAM_TARGET_WILDCARD,
1126 			CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
1127 		DBG_ERR("xpt_create_path failed\n");
1128 		xpt_free_ccb(ccb);
1129 		xpt_bus_deregister(cam_sim_path(softs->os_specific.sim));
1130 		cam_sim_free(softs->os_specific.sim, TRUE);
1131 		mtx_unlock(&softs->os_specific.cam_lock);
1132 		return PQI_STATUS_FAILURE;
1133 	}
1134 	/*
1135  	 * Callback to set the queue depth per target which is
1136 	 * derived from the FW.
1137  	 */
1138 	softs->os_specific.path = ccb->ccb_h.path;
1139 	xpt_setup_ccb(&csa.ccb_h, softs->os_specific.path, 5);
1140 	csa.ccb_h.func_code = XPT_SASYNC_CB;
1141 	csa.event_enable = AC_FOUND_DEVICE;
1142 	csa.callback = smartpqi_async;
1143 	csa.callback_arg = softs;
1144 	xpt_action((union ccb *)&csa);
1145 	if (csa.ccb_h.status != CAM_REQ_CMP) {
1146 		DBG_ERR("Unable to register smartpqi_aysnc handler: %d!\n",
1147 			csa.ccb_h.status);
1148 	}
1149 
1150 	mtx_unlock(&softs->os_specific.cam_lock);
1151 	DBG_INFO("OUT\n");
1152 	return error;
1153 }
1154 
1155 /*
1156  * Function to deregister smartpqi sim from cam layer
1157  */
/*
 * Function to deregister smartpqi sim from cam layer
 *
 * Reverses register_sim(): disables the async callback, frees the
 * wildcard path, deregisters the bus, and frees the sim, devq, and
 * locks.  All CAM teardown happens under cam_lock when the mutex was
 * initialized.
 */
void deregister_sim(struct pqisrc_softstate *softs)
{
	struct ccb_setasync csa;

	DBG_FUNC("IN\n");

	if (softs->os_specific.mtx_init) {
		mtx_lock(&softs->os_specific.cam_lock);
	}


	/* Disable the AC_FOUND_DEVICE callback registered in register_sim(). */
	xpt_setup_ccb(&csa.ccb_h, softs->os_specific.path, 5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = 0;
	csa.callback = smartpqi_async;
	csa.callback_arg = softs;
	xpt_action((union ccb *)&csa);
	xpt_free_path(softs->os_specific.path);

	/* NOTE(review): releases the simq unconditionally — confirm a
	 * matching freeze is always outstanding at teardown. */
	xpt_release_simq(softs->os_specific.sim, 0);

	xpt_bus_deregister(cam_sim_path(softs->os_specific.sim));
	softs->os_specific.sim_registered = FALSE;

	if (softs->os_specific.sim) {
		cam_sim_free(softs->os_specific.sim, FALSE);
		softs->os_specific.sim = NULL;
	}
	if (softs->os_specific.mtx_init) {
		mtx_unlock(&softs->os_specific.cam_lock);
	}
	/* devq freed separately since cam_sim_free() was called with FALSE. */
	if (softs->os_specific.devq != NULL) {
		cam_simq_free(softs->os_specific.devq);
	}
	if (softs->os_specific.mtx_init) {
		mtx_destroy(&softs->os_specific.cam_lock);
		softs->os_specific.mtx_init = FALSE;
	}

	mtx_destroy(&softs->os_specific.map_lock);

	DBG_FUNC("OUT\n");
}
1201