xref: /freebsd/sys/dev/smartpqi/smartpqi_request.c (revision 271171e0d97b88ba2a7c3bf750c9672b484c1c13)
1 /*-
2  * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
14  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
17  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23  * SUCH DAMAGE.
24  */
25 
26 /* $FreeBSD$ */
27 
28 #include "smartpqi_includes.h"
29 
30 /*
31  * Attempt to perform offload RAID mapping for a logical volume I/O.
32  */
33 
34 #define HPSA_RAID_0		0
35 #define HPSA_RAID_4		1
36 #define HPSA_RAID_1		2	/* also used for RAID 10 */
37 #define HPSA_RAID_5		3	/* also used for RAID 50 */
38 #define HPSA_RAID_51		4
39 #define HPSA_RAID_6		5	/* also used for RAID 60 */
40 #define HPSA_RAID_ADM		6	/* also used for RAID 1+0 ADM */
41 #define HPSA_RAID_MAX		HPSA_RAID_ADM
42 #define HPSA_RAID_UNKNOWN	0xff
43 
44 #define SG_FLAG_LAST	0x40000000
45 #define SG_FLAG_CHAIN	0x80000000
46 
47 /* Subroutine to calculate the embedded SGL count for an IU */
48 static inline uint32_t
49 pqisrc_embedded_sgl_count(uint32_t elem_alloted)
50 {
51 	uint32_t embedded_sgl_count = MAX_EMBEDDED_SG_IN_FIRST_IU;
52 	DBG_FUNC(" IN ");
53 	/**
54 	Calculate the embedded SGL count from the number of elements allotted for this IO.
55 	**/
56 	if(elem_alloted - 1)
57 		embedded_sgl_count += ((elem_alloted - 1) * MAX_EMBEDDED_SG_IN_IU);
58 	DBG_IO("embedded_sgl_count :%d\n",embedded_sgl_count);
59 
60 	DBG_FUNC(" OUT ");
61 
62 	return embedded_sgl_count;
63 
64 }
65 
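/*
 * Illustrative example for pqisrc_embedded_sgl_count(), assuming the
 * typical limits of 4 embedded SG entries in the first IU element and
 * 8 in each additional element: with elem_alloted = 3 the embedded
 * capacity is 4 + (3 - 1) * 8 = 20 SG entries before chaining is needed.
 */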
66 /* Subroutine to calculate the number of contiguous free elements in an IB queue */
67 static inline uint32_t
68 pqisrc_contiguous_free_elem(uint32_t pi, uint32_t ci, uint32_t elem_in_q)
69 {
70 	uint32_t contiguous_free_elem = 0;
71 
72 	DBG_FUNC(" IN ");
73 
74 	if(pi >= ci) {
75 		contiguous_free_elem = (elem_in_q - pi);
76 		if(ci == 0)
77 			contiguous_free_elem -= 1;
78 	} else {
79 		contiguous_free_elem = (ci - pi - 1);
80 	}
81 
82 	DBG_FUNC(" OUT ");
83 
84 	return contiguous_free_elem;
85 }
86 
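/*
 * Example of the circular-queue math in pqisrc_contiguous_free_elem():
 * with elem_in_q = 32, pi = 30 and ci = 5, the producer can only fill
 * slots 30 and 31 before wrapping, so 32 - 30 = 2 contiguous elements
 * are reported free.  One slot is always held back (the ci == 0 and
 * pi < ci cases) so a full queue is never mistaken for an empty one.
 */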
87 /* Subroutine to calculate the number of IU elements needed for the request */
88 static uint32_t
89 pqisrc_num_elem_needed(pqisrc_softstate_t *softs, uint32_t SG_Count)
90 {
91 	uint32_t num_sg;
92 	uint32_t num_elem_required = 1;
93 	DBG_FUNC(" IN ");
94 	DBG_IO("SGL_Count :%d",SG_Count);
95 	/********
96 	If SG_Count is greater than the max SGs per IU, i.e. 4 or 68
97 	(4 without spanning, 68 with spanning), the SGL is chained instead,
98 	or if SG_Count <= MAX_EMBEDDED_SG_IN_FIRST_IU it fits in the first
99 	element; in both cases a single element is enough.
100 	********/
101 	if(SG_Count > softs->max_sg_per_iu || SG_Count <= MAX_EMBEDDED_SG_IN_FIRST_IU)
102 		return num_elem_required;
103 	/*
104 	SG entries beyond those embedded in the first IU element
105 	 */
106 	num_sg = SG_Count - MAX_EMBEDDED_SG_IN_FIRST_IU;
107 	num_elem_required += PQISRC_DIV_ROUND_UP(num_sg, MAX_EMBEDDED_SG_IN_IU);
108 	DBG_FUNC(" OUT ");
109 	return num_elem_required;
110 }
111 
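/*
 * Worked example for pqisrc_num_elem_needed(), assuming 4 embedded SG
 * entries in the first element and 8 per additional element:
 * SG_Count = 20 needs 1 + DIV_ROUND_UP(20 - 4, 8) = 3 queue elements,
 * while SG_Count greater than max_sg_per_iu returns 1 because the SGL
 * is chained instead of being embedded.
 */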
112 /* Subroutine to build the SG list for IU submission */
113 static boolean_t
114 pqisrc_build_sgl(sgt_t *sg_array, rcb_t *rcb, iu_header_t *iu_hdr,
115 			uint32_t num_elem_alloted)
116 {
117 	uint32_t i;
118 	uint32_t num_sg = OS_GET_IO_SG_COUNT(rcb);
119 	sgt_t *sgt = sg_array;
120 	sgt_t *sg_chain = NULL;
121 	boolean_t partial = false;
122 
123 	DBG_FUNC(" IN ");
124 
125 	DBG_IO("SGL_Count :%d",num_sg);
126 	if (0 == num_sg) {
127 		goto out;
128 	}
129 
130 	if (num_sg <= pqisrc_embedded_sgl_count(num_elem_alloted)) {
131 		for (i = 0; i < num_sg; i++, sgt++) {
132 			sgt->addr = OS_GET_IO_SG_ADDR(rcb,i);
133 			sgt->len = OS_GET_IO_SG_LEN(rcb,i);
134 			sgt->flags = 0;
135 		}
136 
137 		sg_array[num_sg - 1].flags = SG_FLAG_LAST;
138 	} else {
139 		/**
140 		SGL Chaining
141 		**/
142 		sg_chain = rcb->sg_chain_virt;
143 		sgt->addr = rcb->sg_chain_dma;
144 		sgt->len = num_sg * sizeof(sgt_t);
145 		sgt->flags = SG_FLAG_CHAIN;
146 
147 		sgt = sg_chain;
148 		for (i = 0; i < num_sg; i++, sgt++) {
149 			sgt->addr = OS_GET_IO_SG_ADDR(rcb,i);
150 			sgt->len = OS_GET_IO_SG_LEN(rcb,i);
151 			sgt->flags = 0;
152 		}
153 
154 		sg_chain[num_sg - 1].flags = SG_FLAG_LAST;
155 		num_sg = 1;
156 		partial = true;
157 
158 	}
159 out:
160 	iu_hdr->iu_length = num_sg * sizeof(sgt_t);
161 	DBG_FUNC(" OUT ");
162 	return partial;
163 
164 }
165 
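/*
 * Note on pqisrc_build_sgl(): when the SG count exceeds the embedded
 * capacity, the complete SG list is written to rcb->sg_chain_virt and a
 * single embedded descriptor flagged SG_FLAG_CHAIN points at that chain
 * buffer, so iu_length accounts for just one descriptor and the caller
 * is told the SGL is partial (chained).
 */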
166 /* Subroutine used to build the RAID IO request */
167 static void
168 pqisrc_build_raid_io(pqisrc_softstate_t *softs, rcb_t *rcb,
169  	pqisrc_raid_req_t *raid_req, uint32_t num_elem_alloted)
170 {
171 	DBG_FUNC(" IN ");
172 
173 	raid_req->header.iu_type = PQI_IU_TYPE_RAID_PATH_IO_REQUEST;
174 	raid_req->header.comp_feature = 0;
175 	raid_req->response_queue_id = OS_GET_IO_RESP_QID(softs, rcb);
176 	raid_req->work_area[0] = 0;
177 	raid_req->work_area[1] = 0;
178 	raid_req->request_id = rcb->tag;
179 	raid_req->nexus_id = 0;
180 	raid_req->buffer_length = GET_SCSI_BUFFLEN(rcb);
181 	memcpy(raid_req->lun_number, rcb->dvp->scsi3addr,
182 		sizeof(raid_req->lun_number));
183 	raid_req->protocol_spec = 0;
184 	raid_req->data_direction = rcb->data_dir;
185 	raid_req->reserved1 = 0;
186 	raid_req->fence = 0;
187 	raid_req->error_index = raid_req->request_id;
188 	raid_req->reserved2 = 0;
189   	raid_req->task_attribute = OS_GET_TASK_ATTR(rcb);
190   	raid_req->command_priority = 0;
191 	raid_req->reserved3 = 0;
192 	raid_req->reserved4 = 0;
193 	raid_req->reserved5 = 0;
194 
195 	/* As cdb and additional_cdb_bytes are contiguous,
196 	   update them in a single statement */
197 	memcpy(raid_req->cdb, rcb->cdbp, rcb->cmdlen);
198 #if 0
199 	DBG_IO("CDB :");
200 	for(int i = 0; i < rcb->cmdlen ; i++)
201 		DBG_IO(" 0x%x \n ",raid_req->cdb[i]);
202 #endif
203 
204 	switch (rcb->cmdlen) {
205 		case 6:
206 		case 10:
207 		case 12:
208 		case 16:
209 			raid_req->additional_cdb_bytes_usage =
210 				PQI_ADDITIONAL_CDB_BYTES_0;
211 			break;
212 		case 20:
213 			raid_req->additional_cdb_bytes_usage =
214 				PQI_ADDITIONAL_CDB_BYTES_4;
215 			break;
216 		case 24:
217 			raid_req->additional_cdb_bytes_usage =
218 				PQI_ADDITIONAL_CDB_BYTES_8;
219 			break;
220 		case 28:
221 			raid_req->additional_cdb_bytes_usage =
222 				PQI_ADDITIONAL_CDB_BYTES_12;
223 			break;
224 		case 32:
225 		default: /* todo:review again */
226 			raid_req->additional_cdb_bytes_usage =
227 				PQI_ADDITIONAL_CDB_BYTES_16;
228 			break;
229 	}
230 
231 	/* Frame SGL Descriptor */
232 	raid_req->partial = pqisrc_build_sgl(&raid_req->sg_descriptors[0], rcb,
233 		&raid_req->header, num_elem_alloted);
234 
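	/*
	 * pqisrc_build_sgl() stored the embedded SGL byte count in iu_length;
	 * add the fixed request fields that follow the IU header so the
	 * firmware sees the complete IU payload length.
	 */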
235 	raid_req->header.iu_length +=
236 			offsetof(pqisrc_raid_req_t, sg_descriptors) - sizeof(iu_header_t);
237 
238 #if 0
239 	DBG_IO("raid_req->header.iu_type : 0x%x", raid_req->header.iu_type);
240 	DBG_IO("raid_req->response_queue_id :%d\n", raid_req->response_queue_id);
241 	DBG_IO("raid_req->request_id : 0x%x", raid_req->request_id);
242 	DBG_IO("raid_req->buffer_length : 0x%x", raid_req->buffer_length);
243 	DBG_IO("raid_req->task_attribute : 0x%x", raid_req->task_attribute);
244 	DBG_IO("raid_req->lun_number  : 0x%x", raid_req->lun_number);
245 	DBG_IO("raid_req->error_index : 0x%x", raid_req->error_index);
246 	DBG_IO("raid_req->sg_descriptors[0].addr : %p", (void*)raid_req->sg_descriptors[0].addr);
247 	DBG_IO("raid_req->sg_descriptors[0].len : 0x%x", raid_req->sg_descriptors[0].len);
248 	DBG_IO("raid_req->sg_descriptors[0].flags : 0x%x", raid_req->sg_descriptors[0].flags);
249 #endif
250 	rcb->success_cmp_callback = pqisrc_process_io_response_success;
251 	rcb->error_cmp_callback = pqisrc_process_raid_response_error;
252 	rcb->resp_qid = raid_req->response_queue_id;
253 
254  	DBG_FUNC(" OUT ");
255 
256 }
257 
258 /* Subroutine used to build the AIO IO request */
259 static void
260 pqisrc_build_aio_io(pqisrc_softstate_t *softs, rcb_t *rcb,
261  				pqi_aio_req_t *aio_req, uint32_t num_elem_alloted)
262 {
263 	DBG_FUNC(" IN ");
264 
265 	aio_req->header.iu_type = PQI_IU_TYPE_AIO_PATH_IO_REQUEST;
266 	aio_req->header.comp_feature = 0;
267 	aio_req->response_queue_id = OS_GET_IO_RESP_QID(softs, rcb);
268 	aio_req->work_area[0] = 0;
269 	aio_req->work_area[1] = 0;
270 	aio_req->req_id = rcb->tag;
271 	aio_req->res1[0] = 0;
272 	aio_req->res1[1] = 0;
273 	aio_req->nexus = rcb->ioaccel_handle;
274 	aio_req->buf_len = GET_SCSI_BUFFLEN(rcb);
275 	aio_req->data_dir = rcb->data_dir;
276 	aio_req->mem_type = 0;
277 	aio_req->fence = 0;
278 	aio_req->res2 = 0;
279 	aio_req->task_attr = OS_GET_TASK_ATTR(rcb);
280 	aio_req->cmd_prio = 0;
281 	aio_req->res3 = 0;
282 	aio_req->err_idx = aio_req->req_id;
283 	/* Clamp the CDB length to what fits in the AIO IU before recording it */
284 	if(rcb->cmdlen > sizeof(aio_req->cdb))
285 		rcb->cmdlen = sizeof(aio_req->cdb);
286 	aio_req->cdb_len = rcb->cmdlen;
287 	memcpy(aio_req->cdb, rcb->cdbp, rcb->cmdlen);
288 #if 0
289 	DBG_IO("CDB : \n");
290 	for(int i = 0; i < rcb->cmdlen ; i++)
291 		 DBG_IO(" 0x%x \n",aio_req->cdb[i]);
292 #endif
293 	memset(aio_req->lun,0,sizeof(aio_req->lun));
294 	memset(aio_req->res4,0,sizeof(aio_req->res4));
295 
296 	if(rcb->encrypt_enable == true) {
297 		aio_req->encrypt_enable = true;
298 		aio_req->encrypt_key_index = LE_16(rcb->enc_info.data_enc_key_index);
299 		aio_req->encrypt_twk_low = LE_32(rcb->enc_info.encrypt_tweak_lower);
300 		aio_req->encrypt_twk_high = LE_32(rcb->enc_info.encrypt_tweak_upper);
301 	} else {
302 		aio_req->encrypt_enable = 0;
303 		aio_req->encrypt_key_index = 0;
304 		aio_req->encrypt_twk_high = 0;
305 		aio_req->encrypt_twk_low = 0;
306 	}
307 
308 	/* Frame SGL Descriptor */
309 	aio_req->partial = pqisrc_build_sgl(&aio_req->sg_desc[0], rcb,
310  		&aio_req->header, num_elem_alloted);
311 
312 	aio_req->num_sg = aio_req->header.iu_length / sizeof(sgt_t);
313 
314 	DBG_INFO("aio_req->num_sg :%d",aio_req->num_sg);
315 
316 	aio_req->header.iu_length += offsetof(pqi_aio_req_t, sg_desc) -
317 		sizeof(iu_header_t);
318 #if 0
319 	DBG_IO("aio_req->header.iu_type : 0x%x \n",aio_req->header.iu_type);
320 	DBG_IO("aio_req->response_queue_id :0x%x",aio_req->response_queue_id);
321 	DBG_IO("aio_req->req_id : 0x%x \n",aio_req->req_id);
322 	DBG_IO("aio_req->nexus : 0x%x  \n",aio_req->nexus);
323 	DBG_IO("aio_req->buf_len : 0x%x \n",aio_req->buf_len);
324 	DBG_IO("aio_req->data_dir : 0x%x \n",aio_req->data_dir);
325 	DBG_IO("aio_req->task_attr : 0x%x \n",aio_req->task_attr);
326 	DBG_IO("aio_req->err_idx : 0x%x \n",aio_req->err_idx);
327 	DBG_IO("aio_req->num_sg :%d",aio_req->num_sg);
328 	DBG_IO("aio_req->sg_desc[0].addr : %p \n", (void*)aio_req->sg_desc[0].addr);
329 	DBG_IO("aio_req->sg_desc[0].len : 0x%x \n", aio_req->sg_desc[0].len);
330 	DBG_IO("aio_req->sg_desc[0].flags : 0x%x \n", aio_req->sg_desc[0].flags);
331 #endif
332 
333 	rcb->success_cmp_callback = pqisrc_process_io_response_success;
334 	rcb->error_cmp_callback = pqisrc_process_aio_response_error;
335 	rcb->resp_qid = aio_req->response_queue_id;
336 
337 	DBG_FUNC(" OUT ");
338 
339 }
340 
341 /* Function used to build and send a RAID or AIO request */
342 int
343 pqisrc_build_send_io(pqisrc_softstate_t *softs,rcb_t *rcb)
344 {
345 	ib_queue_t *ib_q_array = softs->op_aio_ib_q;
346 	ib_queue_t *ib_q = NULL;
347 	char *ib_iu = NULL;
348 	IO_PATH_T io_path = AIO_PATH;
349 	uint32_t TraverseCount = 0;
350 	int first_qindex = OS_GET_IO_REQ_QINDEX(softs, rcb);
351 	int qindex = first_qindex;
352 	uint32_t num_op_ib_q = softs->num_op_aio_ibq;
353 	uint32_t num_elem_needed;
354 	uint32_t num_elem_alloted = 0;
355 	pqi_scsi_dev_t *devp = rcb->dvp;
356 	uint8_t raidbypass_cdb[16];
357 
358 	DBG_FUNC(" IN ");
359 
360 	if(!rcb->aio_retry) {
361 		rcb->cdbp = OS_GET_CDBP(rcb);
362 		if(IS_AIO_PATH(devp)) {
363 			/**  IO for Physical Drive  **/
364 			/** Send in AIO PATH**/
365 			rcb->ioaccel_handle = devp->ioaccel_handle;
366 		} else {
367 			int ret = PQI_STATUS_FAILURE;
368 			/** IO for RAID Volume **/
369 			if (devp->offload_enabled) {
370 				/** Bypass IO, send on the AIO path **/
371 				ret = pqisrc_send_scsi_cmd_raidbypass(softs,
372 					devp, rcb, raidbypass_cdb);
373 			}
374 			if (PQI_STATUS_FAILURE == ret) {
375 				/** Send in RAID PATH **/
376 				io_path = RAID_PATH;
377 				num_op_ib_q = softs->num_op_raid_ibq;
378 				ib_q_array = softs->op_raid_ib_q;
379 			} else {
380 				rcb->cdbp = raidbypass_cdb;
381 			}
382 		}
383 	} else {
384 		/* Retrying failed AIO IO */
385 		io_path = RAID_PATH;
386 		rcb->cdbp = OS_GET_CDBP(rcb);
387 		num_op_ib_q = softs->num_op_raid_ibq;
388 		ib_q_array = softs->op_raid_ib_q;
389 	}
390 
391 	num_elem_needed = pqisrc_num_elem_needed(softs, OS_GET_IO_SG_COUNT(rcb));
392 	DBG_IO("num_elem_needed :%d",num_elem_needed);
393 
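	/*
	 * Look for an IB queue with enough contiguous free elements, starting
	 * at the queue chosen by OS_GET_IO_REQ_QINDEX() and hopping round-robin.
	 * If no queue can hold num_elem_needed after a full pass, make one more
	 * pass asking for a single element (which typically forces SGL chaining);
	 * only if that also fails is the request returned as PQI_STATUS_QFULL.
	 */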
394 	do {
395 		uint32_t num_elem_available;
396 		ib_q = (ib_q_array + qindex);
397 		PQI_LOCK(&ib_q->lock);
398 		num_elem_available = pqisrc_contiguous_free_elem(ib_q->pi_local,
399 					*(ib_q->ci_virt_addr), ib_q->num_elem);
400 
401 		DBG_IO("num_elem_available :%d\n",num_elem_available);
402 		if(num_elem_available >= num_elem_needed) {
403 			num_elem_alloted = num_elem_needed;
404 			break;
405 		}
406 		DBG_IO("Current queue is busy! Hop to next queue\n");
407 
408 		PQI_UNLOCK(&ib_q->lock);
409 		qindex = (qindex + 1) % num_op_ib_q;
410 		if(qindex == first_qindex) {
411 			if (num_elem_needed == 1)
412 				break;
413 			TraverseCount += 1;
414 			num_elem_needed = 1;
415 		}
416 	} while (TraverseCount < 2);
417 
418 	DBG_IO("num_elem_alloted :%d",num_elem_alloted);
419 	if (num_elem_alloted == 0) {
420 		DBG_WARN("OUT: IB Queues were full\n");
421 		return PQI_STATUS_QFULL;
422 	}
423 
424 	pqisrc_increment_device_active_io(softs,devp);
425 
426 	/* Get IB Queue Slot address to build IU */
427 	ib_iu = ib_q->array_virt_addr + (ib_q->pi_local * ib_q->elem_size);
428 
429 	if(io_path == AIO_PATH) {
430 		/** Build AIO structure **/
431  		pqisrc_build_aio_io(softs, rcb, (pqi_aio_req_t*)ib_iu,
432  			num_elem_alloted);
433 	} else {
434 		/** Build RAID structure **/
435 		pqisrc_build_raid_io(softs, rcb, (pqisrc_raid_req_t*)ib_iu,
436 			num_elem_alloted);
437 	}
438 
439 	rcb->req_pending = true;
440 	rcb->req_q = ib_q;
441 	rcb->path = io_path;
442 
443 	/* Update the local PI */
444 	ib_q->pi_local = (ib_q->pi_local + num_elem_alloted) % ib_q->num_elem;
445 
446 	DBG_INFO("ib_q->pi_local : %x\n", ib_q->pi_local);
447 	DBG_INFO("*ib_q->ci_virt_addr: %x\n",*(ib_q->ci_virt_addr));
448 
449 	/* Inform the fw about the new IU */
450 	PCI_MEM_PUT32(softs, ib_q->pi_register_abs, ib_q->pi_register_offset, ib_q->pi_local);
451 
452 	PQI_UNLOCK(&ib_q->lock);
453 	DBG_FUNC(" OUT ");
454 	return PQI_STATUS_SUCCESS;
455 }
456 
457 /* Subroutine used to set encryption info as part of a RAID bypass IO */
458 static inline void
459 pqisrc_set_enc_info(struct pqi_enc_info *enc_info,
460 		struct raid_map *raid_map, uint64_t first_block)
461 {
462 	uint32_t volume_blk_size;
463 
464 	/*
465 	 * Set the encryption tweak values based on logical block address.
466 	 * If the block size is 512, the tweak value is equal to the LBA.
467 	 * For other block sizes, tweak value is (LBA * block size) / 512.
468 	 */
469 	volume_blk_size = GET_LE32((uint8_t *)&raid_map->volume_blk_size);
470 	if (volume_blk_size != 512)
471 		first_block = (first_block * volume_blk_size) / 512;
472 
473 	enc_info->data_enc_key_index =
474 		GET_LE16((uint8_t *)&raid_map->data_encryption_key_index);
475 	enc_info->encrypt_tweak_upper = ((uint32_t)(((first_block) >> 16) >> 16));
476 	enc_info->encrypt_tweak_lower = ((uint32_t)(first_block));
477 }
478 
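/*
 * Example of the tweak calculation above: for a 4096-byte volume block
 * size and first_block (LBA) = 100, the scaled value is
 * 100 * 4096 / 512 = 800, giving encrypt_tweak_lower = 800 and
 * encrypt_tweak_upper = 0.
 */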
479 /* Subroutine used to parse the SCSI opcode and extract the LBA and block count for RAID bypass */
480 int
481 check_for_scsi_opcode(uint8_t *cdb, boolean_t *is_write, uint64_t *fst_blk,
482 				uint32_t *blk_cnt)
483 {
484 
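	/*
	 * Each WRITE case below intentionally falls through to the matching
	 * READ case: the LBA and transfer-length fields are laid out
	 * identically in both CDBs, only the opcode and *is_write differ.
	 */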
485 	switch (cdb[0]) {
486 	case SCMD_WRITE_6:
487 		*is_write = true;
488 	case SCMD_READ_6:
489 		*fst_blk = (uint64_t)(((cdb[1] & 0x1F) << 16) |
490 				(cdb[2] << 8) | cdb[3]);
491 		*blk_cnt = (uint32_t)cdb[4];
492 		if (*blk_cnt == 0)
493 			*blk_cnt = 256;
494 		break;
495 	case SCMD_WRITE_10:
496 		*is_write = true;
497 	case SCMD_READ_10:
498 		*fst_blk = (uint64_t)GET_BE32(&cdb[2]);
499 		*blk_cnt = (uint32_t)GET_BE16(&cdb[7]);
500 		break;
501 	case SCMD_WRITE_12:
502 		*is_write = true;
503 	case SCMD_READ_12:
504 		*fst_blk = (uint64_t)GET_BE32(&cdb[2]);
505 		*blk_cnt = GET_BE32(&cdb[6]);
506 		break;
507 	case SCMD_WRITE_16:
508 		*is_write = true;
509 	case SCMD_READ_16:
510 		*fst_blk = GET_BE64(&cdb[2]);
511 		*blk_cnt = GET_BE32(&cdb[10]);
512 		break;
513 	default:
514 		/* Process via normal I/O path. */
515 		return PQI_STATUS_FAILURE;
516 	}
517 	return PQI_STATUS_SUCCESS;
518 }
519 
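/*
 * Example: a READ(10) CDB of 28 00 00 12 34 00 00 00 08 00 parses to
 * fst_blk = 0x123400 (big-endian bytes 2-5) and blk_cnt = 8 (big-endian
 * bytes 7-8); a READ(6)/WRITE(6) with a zero transfer length is treated
 * as 256 blocks, per the SCSI specification.
 */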
520 /* print any arbitrary buffer of length total_len */
521 void
522 pqisrc_print_buffer(pqisrc_softstate_t *softs, char *msg, void *user_buf,
523 		uint32_t total_len, uint32_t flags)
524 {
525 #define LINE_BUF_LEN 60
526 #define INDEX_PER_LINE 16
527 	uint32_t buf_consumed = 0;
528 	int ii;
529 	char line_buf[LINE_BUF_LEN];
530 	int line_len; /* written length per line */
531 	uint8_t this_char;
532 
533 	if (user_buf == NULL)
534 		return;
535 
536 	/* Print index columns */
537 	if (flags & PRINT_FLAG_HDR_COLUMN)
538 	{
539 		for (ii = 0, line_len = 0; ii < MIN(total_len, 16); ii++)
540 		{
541 			line_len += snprintf(line_buf + line_len, (LINE_BUF_LEN - line_len), "%02d ", ii);
542 			if ((line_len + 4) >= LINE_BUF_LEN)
543 				break;
544 		}
545 		DBG_NOTE("%15.15s:[ %s ]\n", "header", line_buf);
546 	}
547 
548 	/* Print the buffer contents, INDEX_PER_LINE bytes per line */
549 	while(buf_consumed < total_len)
550 	{
551 		memset(line_buf, 0, LINE_BUF_LEN);
552 
553 		for (ii = 0, line_len = 0; ii < INDEX_PER_LINE; ii++)
554 		{
555 			this_char = *((char*)(user_buf) + buf_consumed);
556 			line_len += snprintf(line_buf + line_len, (LINE_BUF_LEN - line_len), "%02x ", this_char);
557 
558 			buf_consumed++;
559 			if (buf_consumed >= total_len || (line_len + 4) >= LINE_BUF_LEN)
560 				break;
561 		}
562 		DBG_NOTE("%15.15s:[ %s ]\n", msg, line_buf);
563 	}
564 }
565 
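/*
 * Example: pqisrc_print_buffer(softs, "cdb", cdb, 16, PRINT_FLAG_HDR_COLUMN)
 * emits one "header" line with column indices 00..15 followed by the 16
 * CDB bytes in hex, INDEX_PER_LINE (16) bytes per output line.
 */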
566 
567 /*
568  * Function used to build and send a RAID bypass request to the adapter
569  */
570 int
571 pqisrc_send_scsi_cmd_raidbypass(pqisrc_softstate_t *softs,
572 				pqi_scsi_dev_t *device, rcb_t *rcb, uint8_t *cdb)
573 {
574 	struct raid_map *raid_map;
575 	boolean_t is_write = false;
576 	uint32_t map_idx;
577 	uint64_t fst_blk, lst_blk;
578 	uint32_t blk_cnt, blks_per_row;
579 	uint64_t fst_row, lst_row;
580 	uint32_t fst_row_offset, lst_row_offset;
581 	uint32_t fst_col, lst_col;
582 	uint32_t r5or6_blks_per_row;
583 	uint64_t r5or6_fst_row, r5or6_lst_row;
584 	uint32_t r5or6_fst_row_offset, r5or6_lst_row_offset;
585 	uint32_t r5or6_fst_col, r5or6_lst_col;
586 	uint16_t data_disks_per_row, total_disks_per_row;
587 	uint16_t layout_map_count;
588 	uint32_t stripesz;
589 	uint16_t strip_sz;
590 	uint32_t fst_grp, lst_grp, cur_grp;
591 	uint32_t map_row;
592 	uint64_t disk_block;
593 	uint32_t disk_blk_cnt;
594 	uint8_t cdb_length;
595 	int offload_to_mirror;
596 	int i;
597 	DBG_FUNC(" IN \n");
598 	DBG_IO("!!!!!\n");
599 
600 	/* Check for eligible opcode, get LBA and block count. */
601 	memcpy(cdb, OS_GET_CDBP(rcb), rcb->cmdlen);
602 
603 	for(i = 0; i < rcb->cmdlen ; i++)
604 		DBG_IO(" CDB [ %d ] : %x\n",i,cdb[i]);
605 	if(check_for_scsi_opcode(cdb, &is_write,
606 		&fst_blk, &blk_cnt) == PQI_STATUS_FAILURE)
607 			return PQI_STATUS_FAILURE;
608 	/* Check for write to non-RAID-0. */
609 	if (is_write && device->raid_level != SA_RAID_0)
610 		return PQI_STATUS_FAILURE;
611 
612 	if(blk_cnt == 0)
613 		return PQI_STATUS_FAILURE;
614 
615 	lst_blk = fst_blk + blk_cnt - 1;
616 	raid_map = device->raid_map;
617 
618 	/* Check for invalid block or wraparound. */
619 	if (lst_blk >= GET_LE64((uint8_t *)&raid_map->volume_blk_cnt) ||
620 		lst_blk < fst_blk)
621 		return PQI_STATUS_FAILURE;
622 
623 	data_disks_per_row = GET_LE16((uint8_t *)&raid_map->data_disks_per_row);
624 	strip_sz = GET_LE16((uint8_t *)(&raid_map->strip_size));
625 	layout_map_count = GET_LE16((uint8_t *)(&raid_map->layout_map_count));
626 
627 	/* Calculate stripe information for the request. */
628 	blks_per_row = data_disks_per_row * strip_sz;
629 	if (!blks_per_row)
630 		return PQI_STATUS_FAILURE;  /* Send the IO on the RAID path itself, not AIO or RAID bypass */
631 
632 	/* use __udivdi3 ? */
633 	fst_row = fst_blk / blks_per_row;
634 	lst_row = lst_blk / blks_per_row;
635 	fst_row_offset = (uint32_t)(fst_blk - (fst_row * blks_per_row));
636 	lst_row_offset = (uint32_t)(lst_blk - (lst_row * blks_per_row));
637 	fst_col = fst_row_offset / strip_sz;
638 	lst_col = lst_row_offset / strip_sz;
639 
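	/*
	 * Illustrative example (hypothetical geometry): with strip_sz = 128 and
	 * data_disks_per_row = 3, blks_per_row = 384.  A request with
	 * fst_blk = 400 and blk_cnt = 16 gives fst_row = lst_row = 1,
	 * fst_row_offset = 16, lst_row_offset = 31 and fst_col = lst_col = 0,
	 * so it stays within a single strip and remains bypass-eligible.
	 */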
640 	/* If this isn't a single row/column request, hand it to the controller. */
641 	if (fst_row != lst_row || fst_col != lst_col)
642 		return PQI_STATUS_FAILURE;
643 
644 	/* Proceeding with driver mapping. */
645 	total_disks_per_row = data_disks_per_row +
646 		GET_LE16((uint8_t *)(&raid_map->metadata_disks_per_row));
647 	map_row = ((uint32_t)(fst_row >> raid_map->parity_rotation_shift)) %
648 		GET_LE16((uint8_t *)(&raid_map->row_cnt));
649 	map_idx = (map_row * total_disks_per_row) + fst_col;
650 
651 	/* RAID 1 */
652 	if (device->raid_level == SA_RAID_1) {
653 		if (device->offload_to_mirror)
654 			map_idx += data_disks_per_row;
655 		device->offload_to_mirror = !device->offload_to_mirror;
656 	} else if (device->raid_level == SA_RAID_ADM) {
657 		/* RAID ADM */
658 		/*
659 		 * Handles N-way mirrors  (R1-ADM) and R10 with # of drives
660 		 * divisible by 3.
661 		 */
662 		offload_to_mirror = device->offload_to_mirror;
663 		if (offload_to_mirror == 0)  {
664 			/* use physical disk in the first mirrored group. */
665 			map_idx %= data_disks_per_row;
666 		} else {
667 			do {
668 				/*
669 				 * Determine mirror group that map_idx
670 				 * indicates.
671 				 */
672 				cur_grp = map_idx / data_disks_per_row;
673 
674 				if (offload_to_mirror != cur_grp) {
675 					if (cur_grp <
676 						layout_map_count - 1) {
677 						/*
678 						 * Select raid index from
679 						 * next group.
680 						 */
681 						map_idx += data_disks_per_row;
682 						cur_grp++;
683 					} else {
684 						/*
685 						 * Select raid index from first
686 						 * group.
687 						 */
688 						map_idx %= data_disks_per_row;
689 						cur_grp = 0;
690 					}
691 				}
692 			} while (offload_to_mirror != cur_grp);
693 		}
694 
695 		/* Set mirror group to use next time. */
696 		offload_to_mirror =
697 			(offload_to_mirror >= layout_map_count - 1) ?
698 				0 : offload_to_mirror + 1;
699 		if(offload_to_mirror >= layout_map_count)
700 			return PQI_STATUS_FAILURE;
701 
702 		device->offload_to_mirror = offload_to_mirror;
703 		/*
704 		 * Avoid direct use of device->offload_to_mirror within this
705 		 * function since multiple threads might simultaneously
706 		 * increment it beyond the range of device->layout_map_count -1.
707 		 */
708 	} else if ((device->raid_level == SA_RAID_5 ||
709 		device->raid_level == SA_RAID_6) && layout_map_count > 1) {
710 		/* RAID 50/60 */
711 		/* Verify first and last block are in same RAID group */
712 		r5or6_blks_per_row = strip_sz * data_disks_per_row;
713 		stripesz = r5or6_blks_per_row * layout_map_count;
714 
715 		fst_grp = (fst_blk % stripesz) / r5or6_blks_per_row;
716 		lst_grp = (lst_blk % stripesz) / r5or6_blks_per_row;
717 
718 		if (fst_grp != lst_grp)
719 			return PQI_STATUS_FAILURE;
720 
721 		/* Verify request is in a single row of RAID 5/6 */
722 		fst_row = r5or6_fst_row =
723 			fst_blk / stripesz;
724 		r5or6_lst_row = lst_blk / stripesz;
725 
726 		if (r5or6_fst_row != r5or6_lst_row)
727 			return PQI_STATUS_FAILURE;
728 
729 		/* Verify request is in a single column */
730 		fst_row_offset = r5or6_fst_row_offset =
731 			(uint32_t)((fst_blk % stripesz) %
732 			r5or6_blks_per_row);
733 
734 		r5or6_lst_row_offset =
735 			(uint32_t)((lst_blk % stripesz) %
736 			r5or6_blks_per_row);
737 
738 		fst_col = r5or6_fst_row_offset / strip_sz;
739 		r5or6_fst_col = fst_col;
740 		r5or6_lst_col = r5or6_lst_row_offset / strip_sz;
741 
742 		if (r5or6_fst_col != r5or6_lst_col)
743 			return PQI_STATUS_FAILURE;
744 
745 		/* Request is eligible */
746 		map_row =
747 			((uint32_t)(fst_row >> raid_map->parity_rotation_shift)) %
748 			GET_LE16((uint8_t *)(&raid_map->row_cnt));
749 
750 		map_idx = (fst_grp *
751 			(GET_LE16((uint8_t *)(&raid_map->row_cnt)) *
752 			total_disks_per_row)) +
753 			(map_row * total_disks_per_row) + fst_col;
754 	}
755 
756 	rcb->ioaccel_handle = raid_map->dev_data[map_idx].ioaccel_handle;
757 	disk_block = GET_LE64((uint8_t *)(&raid_map->disk_starting_blk)) +
758 		fst_row * strip_sz +
759 		(fst_row_offset - fst_col * strip_sz);
760 	disk_blk_cnt = blk_cnt;
761 
762 	/* Handle differing logical/physical block sizes. */
763 	if (raid_map->phys_blk_shift) {
764 		disk_block <<= raid_map->phys_blk_shift;
765 		disk_blk_cnt <<= raid_map->phys_blk_shift;
766 	}
767 
768 	if (disk_blk_cnt > 0xffff)
769 		return PQI_STATUS_FAILURE;
770 
771 	/* Build the new CDB for the physical disk I/O. */
772 	if (disk_block > 0xffffffff) {
773 		cdb[0] = is_write ? SCMD_WRITE_16 : SCMD_READ_16;
774 		cdb[1] = 0;
775 		PUT_BE64(disk_block, &cdb[2]);
776 		PUT_BE32(disk_blk_cnt, &cdb[10]);
777 		cdb[14] = 0;
778 		cdb[15] = 0;
779 		cdb_length = 16;
780 	} else {
781 		cdb[0] = is_write ? SCMD_WRITE_10 : SCMD_READ_10;
782 		cdb[1] = 0;
783 		PUT_BE32(disk_block, &cdb[2]);
784 		cdb[6] = 0;
785 		PUT_BE16(disk_blk_cnt, &cdb[7]);
786 		cdb[9] = 0;
787 		cdb_length = 10;
788 	}
789 
790 	if (GET_LE16((uint8_t *)(&raid_map->flags)) &
791 		RAID_MAP_ENCRYPTION_ENABLED) {
792 		pqisrc_set_enc_info(&rcb->enc_info, raid_map,
793 			fst_blk);
794 		rcb->encrypt_enable = true;
795 	} else {
796 		rcb->encrypt_enable = false;
797 	}
798 
799 	rcb->cmdlen = cdb_length;
800 
801 
802 	DBG_FUNC("OUT");
803 
804 	return PQI_STATUS_SUCCESS;
805 }
806 
807 /* Function used to submit an AIO TMF to the adapter
808  * DEVICE_RESET is not supported.
809  */
810 static int
811 pqisrc_send_aio_tmf(pqisrc_softstate_t *softs, pqi_scsi_dev_t *devp,
812                     rcb_t *rcb, rcb_t *rcb_to_manage, int tmf_type)
813 {
814 	int rval = PQI_STATUS_SUCCESS;
815 	pqi_aio_tmf_req_t tmf_req;
816 	ib_queue_t *op_ib_q = NULL;
817 
818 	memset(&tmf_req, 0, sizeof(pqi_aio_tmf_req_t));
819 
820 	DBG_FUNC("IN");
821 
822 	tmf_req.header.iu_type = PQI_REQUEST_IU_AIO_TASK_MANAGEMENT;
823 	tmf_req.header.iu_length = sizeof(tmf_req) - sizeof(iu_header_t);
824 	tmf_req.req_id = rcb->tag;
825 	tmf_req.error_idx = rcb->tag;
826 	tmf_req.nexus = devp->ioaccel_handle;
827 	//memcpy(tmf_req.lun, devp->scsi3addr, sizeof(tmf_req.lun));
828 	tmf_req.tmf = tmf_type;
829 	tmf_req.resp_qid = OS_GET_TMF_RESP_QID(softs, rcb);
830 	op_ib_q = &softs->op_aio_ib_q[0];
831 
832 	if (tmf_type == SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK) {
833 		tmf_req.req_id_to_manage = rcb_to_manage->tag;
834 		tmf_req.nexus = rcb_to_manage->ioaccel_handle;
835 	}
836 
837 	DBG_INFO("tmf_req.header.iu_type : %x tmf_req.req_id_to_manage :%d \n",tmf_req.header.iu_type,tmf_req.req_id_to_manage);
838 	DBG_INFO("tmf_req.req_id : %d tmf_req.nexus : %x tmf_req.tmf %x QID : %d\n",tmf_req.req_id,tmf_req.nexus,tmf_req.tmf,op_ib_q->q_id);
839 
840 	DBG_WARN("aio tmf: iu_type=0x%x req_id_to_manage=0x%x\n",
841 		tmf_req.header.iu_type, tmf_req.req_id_to_manage);
842 	DBG_WARN("aio tmf: req_id=0x%x nexus=0x%x tmf=0x%x QID=%d\n",
843 		tmf_req.req_id, tmf_req.nexus, tmf_req.tmf, op_ib_q->q_id);
844 
845 	rcb->path = AIO_PATH;
846 	rcb->req_pending = true;
847 	/* Timedout tmf response goes here */
848 	rcb->error_cmp_callback = pqisrc_process_aio_response_error;
849 
850 	rval = pqisrc_submit_cmnd(softs, op_ib_q, &tmf_req);
851 	if (rval != PQI_STATUS_SUCCESS) {
852 		DBG_ERR("Unable to submit command rval=%d\n", rval);
853 		return rval;
854 	}
855 
856 	rval = pqisrc_wait_on_condition(softs, rcb, PQISRC_TMF_TIMEOUT);
857 	if (rval != PQI_STATUS_SUCCESS){
858 		DBG_ERR("Task Management tmf_type : %d timeout\n", tmf_type);
859 		rcb->status = rval;
860 	}
861 
862 	if (rcb->status  != REQUEST_SUCCESS) {
863 		DBG_ERR_BTL(devp, "Task Management failed tmf_type:%d "
864 				"stat:0x%x\n", tmf_type, rcb->status);
865 		rval = PQI_STATUS_FAILURE;
866 	}
867 
868 	DBG_FUNC("OUT");
869 	return rval;
870 }
871 
872 /* Function used to submit a Raid TMF to the adapter */
873 static int
874 pqisrc_send_raid_tmf(pqisrc_softstate_t *softs, pqi_scsi_dev_t *devp,
875                     rcb_t *rcb, rcb_t *rcb_to_manage, int tmf_type)
876 {
877 	int rval = PQI_STATUS_SUCCESS;
878 	pqi_raid_tmf_req_t tmf_req;
879 	ib_queue_t *op_ib_q = NULL;
880 
881 	memset(&tmf_req, 0, sizeof(pqi_raid_tmf_req_t));
882 
883 	DBG_FUNC("IN");
884 
885 	tmf_req.header.iu_type = PQI_REQUEST_IU_RAID_TASK_MANAGEMENT;
886 	tmf_req.header.iu_length = sizeof(tmf_req) - sizeof(iu_header_t);
887 	tmf_req.req_id = rcb->tag;
888 
889 	memcpy(tmf_req.lun, devp->scsi3addr, sizeof(tmf_req.lun));
890 	tmf_req.tmf = tmf_type;
891 	tmf_req.resp_qid = OS_GET_TMF_RESP_QID(softs, rcb);
892 
893 	/* Decide the queue where the tmf request should be submitted */
894 	if (tmf_type == SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK) {
895 		tmf_req.obq_id_to_manage = rcb_to_manage->resp_qid;
896 		tmf_req.req_id_to_manage = rcb_to_manage->tag;
897 	}
898 
899 	if (softs->timeout_in_tmf &&
900 			tmf_type == SOP_TASK_MANAGEMENT_LUN_RESET) {
901 		/* OS_TMF_TIMEOUT_SEC - 1 to accommodate driver processing */
902 		tmf_req.timeout_in_sec = OS_TMF_TIMEOUT_SEC - 1;
903 		/* if OS tmf timeout is 0, set minimum value for timeout */
904 		if (!tmf_req.timeout_in_sec)
905 			tmf_req.timeout_in_sec = 1;
906 	}
907 
908 	op_ib_q = &softs->op_raid_ib_q[0];
909 	rcb->path = RAID_PATH;
910 	rcb->req_pending = true;
911 	/* Timedout tmf response goes here */
912 	rcb->error_cmp_callback = pqisrc_process_raid_response_error;
913 
914 	rval = pqisrc_submit_cmnd(softs, op_ib_q, &tmf_req);
915 	if (rval != PQI_STATUS_SUCCESS) {
916 		DBG_ERR("Unable to submit command rval=%d\n", rval);
917 		return rval;
918 	}
919 
920 	rval = pqisrc_wait_on_condition(softs, rcb, PQISRC_TMF_TIMEOUT);
921 	if (rval != PQI_STATUS_SUCCESS) {
922 		DBG_ERR("Task Management tmf_type : %d timeout\n", tmf_type);
923 		rcb->status = rval;
924 	}
925 
926 	if (rcb->status  != REQUEST_SUCCESS) {
927 		DBG_NOTE("Task Management failed tmf_type:%d "
928 				"stat:0x%x\n", tmf_type, rcb->status);
929 		rval = PQI_STATUS_FAILURE;
930 	}
931 
932 	DBG_FUNC("OUT");
933 	return rval;
934 }
935 
936 int
937 pqisrc_send_tmf(pqisrc_softstate_t *softs, pqi_scsi_dev_t *devp,
938                     rcb_t *rcb, rcb_t *rcb_to_manage, int tmf_type)
939 {
940 	int ret = PQI_STATUS_SUCCESS;
941 
942 	DBG_FUNC("IN");
943 	rcb->softs = softs;
944 
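	/*
	 * Dispatch: task aborts are only supported on the AIO path, so for a
	 * logical volume an abort is sent via AIO only when the bypass
	 * (offload) path is enabled and the command being aborted went out on
	 * it; every other TMF is sent on the RAID path.
	 */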
945 	if(!devp->is_physical_device) {
946 		if (tmf_type == SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK) {
947 			if(rcb_to_manage->path == AIO_PATH) {
948 				if(devp->offload_enabled)
949 					ret = pqisrc_send_aio_tmf(softs, devp, rcb, rcb_to_manage, tmf_type);
950 			}
951 			else {
952 				DBG_INFO("TASK ABORT not supported in raid\n");
953 				ret = PQI_STATUS_FAILURE;
954 			}
955 		}
956 		else {
957 			ret = pqisrc_send_raid_tmf(softs, devp, rcb, rcb_to_manage, tmf_type);
958 		}
959 	} else {
960 		if (tmf_type == SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK)
961 			ret = pqisrc_send_aio_tmf(softs, devp, rcb, rcb_to_manage, tmf_type);
962 		else
963 			ret = pqisrc_send_raid_tmf(softs, devp, rcb, rcb_to_manage, tmf_type);
964 	}
965 
966 	DBG_FUNC("OUT");
967 
968 	return ret;
969 }
970 
971 /*
972  * Function used to build and send the vendor general request
973  * Used for configuring PQI feature bits between firmware and driver
974  */
975 int
976 pqisrc_build_send_vendor_request(
977 	pqisrc_softstate_t *softs,
978 	pqi_vendor_general_request_t *request,
979 	raid_path_error_info_elem_t *error_info)
980 {
981 	int ret = PQI_STATUS_SUCCESS;
982 	ib_queue_t *op_ib_q = &softs->op_raid_ib_q[PQI_DEFAULT_IB_QUEUE];
983 	ob_queue_t *ob_q = &softs->op_ob_q[PQI_DEFAULT_IB_QUEUE];
984 
985 	rcb_t *rcb = NULL;
986 
987 	uint16_t request_id = 0;
988 
989 	/* Get the tag */
990 	request_id = pqisrc_get_tag(&softs->taglist);
991 	if (INVALID_ELEM == request_id) {
992 		DBG_ERR("Tag not available\n");
993 		ret = PQI_STATUS_FAILURE;
994 		goto err_notag;
995 	}
996 
997 	((pqi_vendor_general_request_t *)request)->request_id = request_id;
998 	((pqi_vendor_general_request_t *)request)->response_queue_id = ob_q->q_id;
999 
1000 	rcb = &softs->rcb[request_id];
1001 
1002 	rcb->req_pending = true;
1003 	rcb->tag = request_id;
1004 
1005 	ret = pqisrc_submit_cmnd(softs, op_ib_q, request);
1006 
1007 	if (ret != PQI_STATUS_SUCCESS) {
1008 		DBG_ERR("Unable to submit command\n");
1009 		goto err_out;
1010 	}
1011 
1012 	ret = pqisrc_wait_on_condition(softs, rcb, PQISRC_CMD_TIMEOUT);
1013 	if (ret != PQI_STATUS_SUCCESS) {
1014 		DBG_ERR("Management request timed out!\n");
1015 		goto err_out;
1016 	}
1017 
1018 	ret = rcb->status;
1019 	if (ret) {
1020 		ret = PQI_STATUS_FAILURE;
1021 		if(error_info) {
1022 			// TODO: config table err handling.
1023 		}
1024 	} else {
1025 		if(error_info) {
1026 			ret = PQI_STATUS_SUCCESS;
1027 			memset(error_info, 0, sizeof(*error_info));
1028 		}
1029 	}
1030 
1031 	os_reset_rcb(rcb);
1032 	pqisrc_put_tag(&softs->taglist, ((pqi_vendor_general_request_t *)request)->request_id);
1033 	DBG_FUNC("OUT\n");
1034 	return ret;
1035 
1036 err_out:
1037 	DBG_ERR("Vendor general request submission failed.\n");
1038 	os_reset_rcb(rcb);
1039 	pqisrc_put_tag(&softs->taglist, ((pqi_vendor_general_request_t *)request)->request_id);
1040 err_notag:
1041 	DBG_FUNC("FAILED \n");
1042 	return ret;
1043 }
1044 
1045 /* Return the IO path as an ASCII string */
1046 char *
1047 io_path_to_ascii(IO_PATH_T path)
1048 {
1049 	switch (path)
1050 	{
1051 		case AIO_PATH:		return "Aio";
1052 		case RAID_PATH:		return "Raid";
1053 		default:		return "Unknown";
1054 	}
1055 }
1056