1 /*-
2  * Copyright (c) 2018 Microsemi Corporation.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  */
26 
27 /* $FreeBSD$ */
28 
29 #include "smartpqi_includes.h"
30 
31 #define SG_FLAG_LAST	0x40000000
32 #define SG_FLAG_CHAIN	0x80000000
33 
34 /* Subroutine to find how many SG entries can be embedded in the IU */
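/*
 * Illustrative example (constants not taken from the driver headers): if
 * MAX_EMBEDDED_SG_IN_FIRST_IU were 4 and MAX_EMBEDDED_SG_IN_IU were 8, an
 * allotment of 3 queue elements would allow 4 + 2 * 8 = 20 embedded SG entries.
 */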
35 static inline
36 uint32_t pqisrc_embedded_sgl_count(uint32_t elem_alloted)
37 {
38 	uint32_t embedded_sgl_count = MAX_EMBEDDED_SG_IN_FIRST_IU;
39 	DBG_FUNC(" IN ");
40 	/**
41 	Calculate the embedded SGL count from the number of elements allotted to this IO.
42 	**/
43 	if(elem_alloted - 1)
44 		embedded_sgl_count += ((elem_alloted - 1) * MAX_EMBEDDED_SG_IN_IU);
45 	DBG_IO("embedded_sgl_count :%d\n",embedded_sgl_count);
46 
47 	DBG_FUNC(" OUT ");
48 
49 	return embedded_sgl_count;
50 
51 }
52 
53 /* Subroutine to find the number of contiguous free elements in an IB queue */
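/*
 * The queue is a ring in which one slot is always left unused, so pi == ci
 * unambiguously means "empty".  Only the free elements that are contiguous
 * starting at the producer index are counted, because an IU that spans
 * multiple elements cannot wrap around the end of the queue.
 */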
54 static inline
55 uint32_t pqisrc_contiguous_free_elem(uint32_t pi, uint32_t ci, uint32_t elem_in_q)
56 {
57 	uint32_t contiguous_free_elem = 0;
58 
59 	DBG_FUNC(" IN ");
60 
61 	if(pi >= ci) {
62 		contiguous_free_elem = (elem_in_q - pi);
63 		if(ci == 0)
64 			contiguous_free_elem -= 1;
65 	} else {
66 		contiguous_free_elem = (ci - pi - 1);
67 	}
68 
69 	DBG_FUNC(" OUT ");
70 
71 	return contiguous_free_elem;
72 }
73 
74 /* Subroutine to find the number of queue elements needed for the request */
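/*
 * Illustrative example (constants not taken from the driver headers): if
 * MAX_EMBEDDED_SG_IN_FIRST_IU were 4 and MAX_EMBEDDED_SG_IN_IU were 8, a
 * 20-entry SGL would need 1 + roundup(16 / 8) = 3 queue elements.
 */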
75 static uint32_t
76 pqisrc_num_elem_needed(pqisrc_softstate_t *softs, uint32_t SG_Count)
77 {
78 	uint32_t num_sg;
79 	uint32_t num_elem_required = 1;
80 	DBG_FUNC(" IN ");
81 	DBG_IO("SGL_Count :%d",SG_Count);
82 	/********
83 	One element is enough in two cases: if SG_Count exceeds the max SG entries
84 	per IU (i.e. 4 without spanning or 68 with spanning), SGL chaining is used
85 	and only the chain descriptor is embedded; or, if SG_Count is at most
86 	MAX_EMBEDDED_SG_IN_FIRST_IU, the whole SGL fits in the first element.
87 	********/
88 	if(SG_Count > softs->max_sg_per_iu || SG_Count <= MAX_EMBEDDED_SG_IN_FIRST_IU)
89 		return num_elem_required;
90 	/*
91 	Account for the SG entries that do not fit in the first IU element.
92 	 */
93 	num_sg = SG_Count - MAX_EMBEDDED_SG_IN_FIRST_IU;
94 	num_elem_required += PQISRC_DIV_ROUND_UP(num_sg, MAX_EMBEDDED_SG_IN_IU);
95 	DBG_FUNC(" OUT ");
96 	return num_elem_required;
97 }
98 
99 /* Subroutine to build the SG list for the IU submission */
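/*
 * The SG entries are embedded directly in the IU when they fit in the allotted
 * queue elements; otherwise a single chain descriptor pointing at the rcb's
 * pre-allocated chain buffer is embedded and the full list is written there.
 * The return value reports whether chaining (a partial embedded SGL) was used.
 */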
100 static
101 boolean_t pqisrc_build_sgl(sgt_t *sg_array, rcb_t *rcb, iu_header_t *iu_hdr,
102 			uint32_t num_elem_alloted)
103 {
104 	uint32_t i;
105 	uint32_t num_sg = OS_GET_IO_SG_COUNT(rcb);
106 	sgt_t *sgt = sg_array;
107 	sgt_t *sg_chain = NULL;
108 	boolean_t partial = false;
109 
110 	DBG_FUNC(" IN ");
111 
112 	DBG_IO("SGL_Count :%d",num_sg);
113 	if (0 == num_sg) {
114 		goto out;
115 	}
116 
117 	if (num_sg <= pqisrc_embedded_sgl_count(num_elem_alloted)) {
118 		for (i = 0; i < num_sg; i++, sgt++) {
119 			sgt->addr = OS_GET_IO_SG_ADDR(rcb,i);
120 			sgt->len = OS_GET_IO_SG_LEN(rcb,i);
121 			sgt->flags = 0;
122 		}
123 
124 		sg_array[num_sg - 1].flags = SG_FLAG_LAST;
125 	} else {
126 		/**
127 		SGL Chaining
128 		**/
129 		sg_chain = rcb->sg_chain_virt;
130 		sgt->addr = rcb->sg_chain_dma;
131 		sgt->len = num_sg * sizeof(sgt_t);
132 		sgt->flags = SG_FLAG_CHAIN;
133 
134 		sgt = sg_chain;
135 		for (i = 0; i < num_sg; i++, sgt++) {
136 			sgt->addr = OS_GET_IO_SG_ADDR(rcb,i);
137 			sgt->len = OS_GET_IO_SG_LEN(rcb,i);
138 			sgt->flags = 0;
139 		}
140 
141 		sg_chain[num_sg - 1].flags = SG_FLAG_LAST;
142 		num_sg = 1;
143 		partial = true;
144 
145 	}
146 out:
147 	iu_hdr->iu_length = num_sg * sizeof(sgt_t);
148 	DBG_FUNC(" OUT ");
149 	return partial;
150 
151 }
152 
153 /* Subroutine used to build the RAID request */
154 static void
155 pqisrc_build_raid_io(pqisrc_softstate_t *softs, rcb_t *rcb,
156  	pqisrc_raid_req_t *raid_req, uint32_t num_elem_alloted)
157 {
158 	DBG_FUNC(" IN ");
159 
160 	raid_req->header.iu_type = PQI_IU_TYPE_RAID_PATH_IO_REQUEST;
161 	raid_req->header.comp_feature = 0;
162 	raid_req->response_queue_id = OS_GET_IO_RESP_QID(softs, rcb);
163 	raid_req->work_area[0] = 0;
164 	raid_req->work_area[1] = 0;
165 	raid_req->request_id = rcb->tag;
166 	raid_req->nexus_id = 0;
167 	raid_req->buffer_length = GET_SCSI_BUFFLEN(rcb);
168 	memcpy(raid_req->lun_number, rcb->dvp->scsi3addr,
169  		sizeof(raid_req->lun_number));
170 	raid_req->protocol_spec = 0;
171 	raid_req->data_direction = rcb->data_dir;
172 	raid_req->reserved1 = 0;
173 	raid_req->fence = 0;
174 	raid_req->error_index = raid_req->request_id;
175 	raid_req->reserved2 = 0;
176   	raid_req->task_attribute = OS_GET_TASK_ATTR(rcb);
177   	raid_req->command_priority = 0;
178 	raid_req->reserved3 = 0;
179 	raid_req->reserved4 = 0;
180 	raid_req->reserved5 = 0;
181 
182 	/* As cdb and additional_cdb_bytes are contiguous,
183 	   update them in a single statement */
184 	memcpy(raid_req->cdb, rcb->cdbp, rcb->cmdlen);
185 #if 0
186 	DBG_IO("CDB :");
187 	for(int i = 0; i < rcb->cmdlen ; i++)
188 		DBG_IO(" 0x%x\n",raid_req->cdb[i]);
189 #endif
190 
191 	switch (rcb->cmdlen) {
192 		case 6:
193 		case 10:
194 		case 12:
195 		case 16:
196 			raid_req->additional_cdb_bytes_usage =
197 				PQI_ADDITIONAL_CDB_BYTES_0;
198 			break;
199 		case 20:
200 			raid_req->additional_cdb_bytes_usage =
201 				PQI_ADDITIONAL_CDB_BYTES_4;
202 			break;
203 		case 24:
204 			raid_req->additional_cdb_bytes_usage =
205 				PQI_ADDITIONAL_CDB_BYTES_8;
206 			break;
207 		case 28:
208 			raid_req->additional_cdb_bytes_usage =
209 				PQI_ADDITIONAL_CDB_BYTES_12;
210 			break;
211 		case 32:
212 		default: /* TODO: review again */
213 			raid_req->additional_cdb_bytes_usage =
214 				PQI_ADDITIONAL_CDB_BYTES_16;
215 			break;
216 	}
217 
218 	/* Frame SGL Descriptor */
219 	raid_req->partial = pqisrc_build_sgl(&raid_req->sg_descriptors[0], rcb,
220 		&raid_req->header, num_elem_alloted);
221 
222 	raid_req->header.iu_length +=
223 			offsetof(pqisrc_raid_req_t, sg_descriptors) - sizeof(iu_header_t);
224 
225 #if 0
226 	DBG_IO("raid_req->header.iu_type : 0x%x", raid_req->header.iu_type);
227 	DBG_IO("raid_req->response_queue_id :%d\n", raid_req->response_queue_id);
228 	DBG_IO("raid_req->request_id : 0x%x", raid_req->request_id);
229 	DBG_IO("raid_req->buffer_length : 0x%x", raid_req->buffer_length);
230 	DBG_IO("raid_req->task_attribute : 0x%x", raid_req->task_attribute);
231 	DBG_IO("raid_req->lun_number  : 0x%x", raid_req->lun_number);
232 	DBG_IO("raid_req->error_index : 0x%x", raid_req->error_index);
233 	DBG_IO("raid_req->sg_descriptors[0].addr : %p", (void*)raid_req->sg_descriptors[0].addr);
234 	DBG_IO("raid_req->sg_descriptors[0].len : 0x%x", raid_req->sg_descriptors[0].len);
235 	DBG_IO("raid_req->sg_descriptors[0].flags : 0x%x", raid_req->sg_descriptors[0].flags);
236 #endif
237 	rcb->success_cmp_callback = pqisrc_process_io_response_success;
238 	rcb->error_cmp_callback = pqisrc_process_raid_response_error;
239 	rcb->resp_qid = raid_req->response_queue_id;
240 
241  	DBG_FUNC(" OUT ");
242 
243 }
244 
245 /* Subroutine used to build the AIO request */
246 static void
247 pqisrc_build_aio_io(pqisrc_softstate_t *softs, rcb_t *rcb,
248  				pqi_aio_req_t *aio_req, uint32_t num_elem_alloted)
249 {
250 	DBG_FUNC(" IN ");
251 
252 	aio_req->header.iu_type = PQI_IU_TYPE_AIO_PATH_IO_REQUEST;
253 	aio_req->header.comp_feature = 0;
254 	aio_req->response_queue_id = OS_GET_IO_RESP_QID(softs, rcb);
255 	aio_req->work_area[0] = 0;
256 	aio_req->work_area[1] = 0;
257 	aio_req->req_id = rcb->tag;
258 	aio_req->res1[0] = 0;
259 	aio_req->res1[1] = 0;
260 	aio_req->nexus = rcb->ioaccel_handle;
261 	aio_req->buf_len = GET_SCSI_BUFFLEN(rcb);
262 	aio_req->data_dir = rcb->data_dir;
263 	aio_req->mem_type = 0;
264 	aio_req->fence = 0;
265 	aio_req->res2 = 0;
266 	aio_req->task_attr = OS_GET_TASK_ATTR(rcb);
267 	aio_req->cmd_prio = 0;
268 	aio_req->res3 = 0;
269 	aio_req->err_idx = aio_req->req_id;
270 	if(rcb->cmdlen > sizeof(aio_req->cdb))
271 		rcb->cmdlen = sizeof(aio_req->cdb);
272 	aio_req->cdb_len = rcb->cmdlen;
273 	memcpy(aio_req->cdb, rcb->cdbp, rcb->cmdlen);
274 #if 0
275 	DBG_IO("CDB : \n");
276 	for(int i = 0; i < rcb->cmdlen ; i++)
277 		 DBG_IO(" 0x%x \n",aio_req->cdb[i]);
278 #endif
279 	memset(aio_req->lun,0,sizeof(aio_req->lun));
280 	memset(aio_req->res4,0,sizeof(aio_req->res4));
281 
282 	if(rcb->encrypt_enable == true) {
283 		aio_req->encrypt_enable = true;
284 		aio_req->encrypt_key_index = LE_16(rcb->enc_info.data_enc_key_index);
285 		aio_req->encrypt_twk_low = LE_32(rcb->enc_info.encrypt_tweak_lower);
286 		aio_req->encrypt_twk_high = LE_32(rcb->enc_info.encrypt_tweak_upper);
287 	} else {
288 		aio_req->encrypt_enable = 0;
289 		aio_req->encrypt_key_index = 0;
290 		aio_req->encrypt_twk_high = 0;
291 		aio_req->encrypt_twk_low = 0;
292 	}
293 
294 	/* Frame SGL Descriptor */
295 	aio_req->partial = pqisrc_build_sgl(&aio_req->sg_desc[0], rcb,
296  		&aio_req->header, num_elem_alloted);
297 
298 	aio_req->num_sg = aio_req->header.iu_length / sizeof(sgt_t);
299 
300 	DBG_INFO("aio_req->num_sg :%d",aio_req->num_sg);
301 
302 	aio_req->header.iu_length += offsetof(pqi_aio_req_t, sg_desc) -
303 		sizeof(iu_header_t);
304 #if 0
305 	DBG_IO("aio_req->header.iu_type : 0x%x \n",aio_req->header.iu_type);
306 	DBG_IO("aio_req->response_queue_id : 0x%x \n",aio_req->response_queue_id);
307 	DBG_IO("aio_req->req_id : 0x%x \n",aio_req->req_id);
308 	DBG_IO("aio_req->nexus : 0x%x  \n",aio_req->nexus);
309 	DBG_IO("aio_req->buf_len : 0x%x \n",aio_req->buf_len);
310 	DBG_IO("aio_req->data_dir : 0x%x \n",aio_req->data_dir);
311 	DBG_IO("aio_req->task_attr : 0x%x \n",aio_req->task_attr);
312 	DBG_IO("aio_req->err_idx : 0x%x \n",aio_req->err_idx);
313 	DBG_IO("aio_req->num_sg :%d",aio_req->num_sg);
314 	DBG_IO("aio_req->sg_desc[0].addr : %p \n", (void*)aio_req->sg_desc[0].addr);
315 	DBG_IO("aio_req->sg_desc[0].len : 0x%x \n", aio_req->sg_desc[0].len);
316 	DBG_IO("aio_req->sg_desc[0].flags : 0x%x \n", aio_req->sg_desc[0].flags);
317 #endif
318 
319 	rcb->success_cmp_callback = pqisrc_process_io_response_success;
320 	rcb->error_cmp_callback = pqisrc_process_aio_response_error;
321 	rcb->resp_qid = aio_req->response_queue_id;
322 
323 	DBG_FUNC(" OUT ");
324 
325 }
326 
327 /* Function used to build and send a RAID or AIO request */
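/*
 * The IO is steered either to the AIO path (physical drives and successful
 * RAID-bypass candidates) or to the RAID path, and is then posted to the first
 * operational IB queue that has enough contiguous free elements.  Each queue is
 * tried at most twice; on the second pass the requirement is relaxed to a
 * single element, which forces SGL chaining instead of IU spanning.
 */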
328 int pqisrc_build_send_io(pqisrc_softstate_t *softs,rcb_t *rcb)
329 {
330 	ib_queue_t *ib_q_array = softs->op_aio_ib_q;
331 	ib_queue_t *ib_q = NULL;
332 	char *ib_iu = NULL;
333 	IO_PATH_T io_path = AIO_PATH;
334 	uint32_t TraverseCount = 0;
335 	int first_qindex = OS_GET_IO_REQ_QINDEX(softs, rcb);
336 	int qindex = first_qindex;
337 	uint32_t num_op_ib_q = softs->num_op_aio_ibq;
338 	uint32_t num_elem_needed;
339 	uint32_t num_elem_alloted = 0;
340 	pqi_scsi_dev_t *devp = rcb->dvp;
341 	uint8_t raidbypass_cdb[16];
342 
343 	DBG_FUNC(" IN ");
344 
345 
346 	rcb->cdbp = OS_GET_CDBP(rcb);
347 
348 	if(IS_AIO_PATH(devp)) {
349 		/**  IO for Physical Drive  **/
350 		/** Send in AIO PATH **/
351 		rcb->ioaccel_handle = devp->ioaccel_handle;
352 	} else {
353 		int ret = PQI_STATUS_FAILURE;
354 		/** IO for RAID Volume **/
355 		if (devp->offload_enabled) {
356 			/** Bypass IO, send in AIO PATH **/
357 			ret = pqisrc_send_scsi_cmd_raidbypass(softs,
358 				devp, rcb, raidbypass_cdb);
359 		}
360 
361 		if (PQI_STATUS_FAILURE == ret) {
362 			/** Send in RAID PATH **/
363 			io_path = RAID_PATH;
364 			num_op_ib_q = softs->num_op_raid_ibq;
365 			ib_q_array = softs->op_raid_ib_q;
366 		} else {
367 			rcb->cdbp = raidbypass_cdb;
368 		}
369 	}
370 
371 	num_elem_needed = pqisrc_num_elem_needed(softs, OS_GET_IO_SG_COUNT(rcb));
372 	DBG_IO("num_elem_needed :%d",num_elem_needed);
373 
374 	do {
375 		uint32_t num_elem_available;
376 		ib_q = (ib_q_array + qindex);
377 		PQI_LOCK(&ib_q->lock);
378 		num_elem_available = pqisrc_contiguous_free_elem(ib_q->pi_local,
379 					*(ib_q->ci_virt_addr), ib_q->num_elem);
380 
381 		DBG_IO("num_elem_available :%d\n",num_elem_available);
382 		if(num_elem_available >= num_elem_needed) {
383 			num_elem_alloted = num_elem_needed;
384 			break;
385 		}
386 		DBG_IO("Current queue is busy! Hop to next queue\n");
387 
388 		PQI_UNLOCK(&ib_q->lock);
389 		qindex = (qindex + 1) % num_op_ib_q;
390 		if(qindex == first_qindex) {
391 			if (num_elem_needed == 1)
392 				break;
393 			TraverseCount += 1;
394 			num_elem_needed = 1;
395 		}
396 	} while (TraverseCount < 2);
397 
398 	DBG_IO("num_elem_alloted :%d",num_elem_alloted);
399 	if (num_elem_alloted == 0) {
400 		DBG_WARN("OUT: IB Queues were full\n");
401 		return PQI_STATUS_QFULL;
402 	}
403 
404 	/* Get IB Queue Slot address to build IU */
405 	ib_iu = ib_q->array_virt_addr + (ib_q->pi_local * ib_q->elem_size);
406 
407 	if(io_path == AIO_PATH) {
408 		/** Build AIO structure **/
409  		pqisrc_build_aio_io(softs, rcb, (pqi_aio_req_t*)ib_iu,
410  			num_elem_alloted);
411 	} else {
412 		/** Build RAID structure **/
413 		pqisrc_build_raid_io(softs, rcb, (pqisrc_raid_req_t*)ib_iu,
414 			num_elem_alloted);
415 	}
416 
417 	rcb->req_pending = true;
418 
419 	/* Update the local PI */
420 	ib_q->pi_local = (ib_q->pi_local + num_elem_alloted) % ib_q->num_elem;
421 
422 	DBG_INFO("ib_q->pi_local : %x\n", ib_q->pi_local);
423 	DBG_INFO("*ib_q->ci_virt_addr: %x\n",*(ib_q->ci_virt_addr));
424 
425 	/* Inform the fw about the new IU */
426 	PCI_MEM_PUT32(softs, ib_q->pi_register_abs, ib_q->pi_register_offset, ib_q->pi_local);
427 
428 	PQI_UNLOCK(&ib_q->lock);
429 	DBG_FUNC(" OUT ");
430 	return PQI_STATUS_SUCCESS;
431 }
432 
433 /* Subroutine used to set encryption info as part of RAID bypass IO */
434 static inline void pqisrc_set_enc_info(
435 	struct pqi_enc_info *enc_info, struct raid_map *raid_map,
436 	uint64_t first_block)
437 {
438 	uint32_t volume_blk_size;
439 
440 	/*
441 	 * Set the encryption tweak values based on logical block address.
442 	 * If the block size is 512, the tweak value is equal to the LBA.
443 	 * For other block sizes, tweak value is (LBA * block size) / 512.
444 	 */
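	/* For example, a 4-KiB volume block size maps LBA 100 to a tweak of 800. */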
445 	volume_blk_size = GET_LE32((uint8_t *)&raid_map->volume_blk_size);
446 	if (volume_blk_size != 512)
447 		first_block = (first_block * volume_blk_size) / 512;
448 
449 	enc_info->data_enc_key_index =
450 		GET_LE16((uint8_t *)&raid_map->data_encryption_key_index);
451 	enc_info->encrypt_tweak_upper = ((uint32_t)(((first_block) >> 16) >> 16));
452 	enc_info->encrypt_tweak_lower = ((uint32_t)(first_block));
453 }
454 
455 
456 /*
457  * Attempt to perform offload RAID mapping for a logical volume I/O.
458  */
459 
460 #define HPSA_RAID_0		0
461 #define HPSA_RAID_4		1
462 #define HPSA_RAID_1		2	/* also used for RAID 10 */
463 #define HPSA_RAID_5		3	/* also used for RAID 50 */
464 #define HPSA_RAID_51		4
465 #define HPSA_RAID_6		5	/* also used for RAID 60 */
466 #define HPSA_RAID_ADM		6	/* also used for RAID 1+0 ADM */
467 #define HPSA_RAID_MAX		HPSA_RAID_ADM
468 #define HPSA_RAID_UNKNOWN	0xff
469 
470 /* Subroutine used to parse the SCSI opcode and extract the LBA and block count for RAID bypass */
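/*
 * READ/WRITE(6) carries a 21-bit LBA and an 8-bit transfer length (0 means
 * 256 blocks); the 10-, 12- and 16-byte variants carry big-endian 32-, 32-
 * and 64-bit LBAs with 16-, 32- and 32-bit transfer lengths respectively.
 */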
471 int check_for_scsi_opcode(uint8_t *cdb, boolean_t *is_write, uint64_t *fst_blk,
472 				uint32_t *blk_cnt)
473 {
474 	switch (cdb[0]) {
475 	case SCMD_WRITE_6:
476 		*is_write = true;
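		/* FALLTHROUGH */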
477 	case SCMD_READ_6:
478 		*fst_blk = (uint64_t)(((cdb[1] & 0x1F) << 16) |
479 				(cdb[2] << 8) | cdb[3]);
480 		*blk_cnt = (uint32_t)cdb[4];
481 		if (*blk_cnt == 0)
482 			*blk_cnt = 256;
483 		break;
484 	case SCMD_WRITE_10:
485 		*is_write = true;
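		/* FALLTHROUGH */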
486 	case SCMD_READ_10:
487 		*fst_blk = (uint64_t)GET_BE32(&cdb[2]);
488 		*blk_cnt = (uint32_t)GET_BE16(&cdb[7]);
489 		break;
490 	case SCMD_WRITE_12:
491 		*is_write = true;
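		/* FALLTHROUGH */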
492 	case SCMD_READ_12:
493 		*fst_blk = (uint64_t)GET_BE32(&cdb[2]);
494 		*blk_cnt = GET_BE32(&cdb[6]);
495 		break;
496 	case SCMD_WRITE_16:
497 		*is_write = true;
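		/* FALLTHROUGH */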
498 	case SCMD_READ_16:
499 		*fst_blk = GET_BE64(&cdb[2]);
500 		*blk_cnt = GET_BE32(&cdb[10]);
501 		break;
502 	default:
503 		/* Process via normal I/O path. */
504 		return PQI_STATUS_FAILURE;
505 	}
506 	return PQI_STATUS_SUCCESS;
507 }
508 
509 /*
510  * Function used to build and send RAID bypass request to the adapter
511  */
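/*
 * The request qualifies for bypass only if it is a simple READ or WRITE whose
 * block range stays within a single strip of a single row of the RAID map
 * (and, for RAID 50/60, a single parity group).  The matching map entry
 * supplies the ioaccel handle, and the CDB is rewritten to address the
 * physical disk's blocks.
 */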
512 int pqisrc_send_scsi_cmd_raidbypass(pqisrc_softstate_t *softs,
513 				pqi_scsi_dev_t *device, rcb_t *rcb, uint8_t *cdb)
514 {
515 	struct raid_map *raid_map;
516 	boolean_t is_write = false;
517 	uint32_t map_idx;
518 	uint64_t fst_blk, lst_blk;
519 	uint32_t blk_cnt, blks_per_row;
520 	uint64_t fst_row, lst_row;
521 	uint32_t fst_row_offset, lst_row_offset;
522 	uint32_t fst_col, lst_col;
523 	uint32_t r5or6_blks_per_row;
524 	uint64_t r5or6_fst_row, r5or6_lst_row;
525 	uint32_t r5or6_fst_row_offset, r5or6_lst_row_offset;
526 	uint32_t r5or6_fst_col, r5or6_lst_col;
527 	uint16_t data_disks_per_row, total_disks_per_row;
528 	uint16_t layout_map_count;
529 	uint32_t stripesz;
530 	uint16_t strip_sz;
531 	uint32_t fst_grp, lst_grp, cur_grp;
532 	uint32_t map_row;
533 	uint64_t disk_block;
534 	uint32_t disk_blk_cnt;
535 	uint8_t cdb_length;
536 	int offload_to_mirror;
537 	int i;
538 	DBG_FUNC(" IN \n");
539 	DBG_IO("Attempting RAID bypass\n");
540 
541 	/* Check for eligible opcode, get LBA and block count. */
542 	memcpy(cdb, OS_GET_CDBP(rcb), rcb->cmdlen);
543 
544 	for(i = 0; i < rcb->cmdlen ; i++)
545 		DBG_IO(" CDB [ %d ] : %x\n",i,cdb[i]);
546 	if(check_for_scsi_opcode(cdb, &is_write,
547 		&fst_blk, &blk_cnt) == PQI_STATUS_FAILURE)
548 			return PQI_STATUS_FAILURE;
549 	/* Check for write to non-RAID-0. */
550 	if (is_write && device->raid_level != SA_RAID_0)
551 		return PQI_STATUS_FAILURE;
552 
553 	if(blk_cnt == 0)
554 		return PQI_STATUS_FAILURE;
555 
556 	lst_blk = fst_blk + blk_cnt - 1;
557 	raid_map = device->raid_map;
558 
559 	/* Check for invalid block or wraparound. */
560 	if (lst_blk >= GET_LE64((uint8_t *)&raid_map->volume_blk_cnt) ||
561 		lst_blk < fst_blk)
562 		return PQI_STATUS_FAILURE;
563 
564 	data_disks_per_row = GET_LE16((uint8_t *)&raid_map->data_disks_per_row);
565 	strip_sz = GET_LE16((uint8_t *)(&raid_map->strip_size));
566 	layout_map_count = GET_LE16((uint8_t *)(&raid_map->layout_map_count));
567 
568 	/* Calculate stripe information for the request. */
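	/*
	 * Illustrative example (values not taken from the raid_map): with a
	 * strip size of 128 blocks and 3 data disks per row, blks_per_row is
	 * 384, so LBA 1000 lands in row 2 at row offset 232, i.e. column 1.
	 */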
569 	blks_per_row = data_disks_per_row * strip_sz;
570 	if (!blks_per_row)
571 		return PQI_STATUS_FAILURE;
572 	/* use __udivdi3 ? */
573 	fst_row = fst_blk / blks_per_row;
574 	lst_row = lst_blk / blks_per_row;
575 	fst_row_offset = (uint32_t)(fst_blk - (fst_row * blks_per_row));
576 	lst_row_offset = (uint32_t)(lst_blk - (lst_row * blks_per_row));
577 	fst_col = fst_row_offset / strip_sz;
578 	lst_col = lst_row_offset / strip_sz;
579 
580 	/* If this isn't a single row/column then give to the controller. */
581 	if (fst_row != lst_row || fst_col != lst_col)
582 		return PQI_STATUS_FAILURE;
583 
584 	/* Proceeding with driver mapping. */
585 	total_disks_per_row = data_disks_per_row +
586 		GET_LE16((uint8_t *)(&raid_map->metadata_disks_per_row));
587 	map_row = ((uint32_t)(fst_row >> raid_map->parity_rotation_shift)) %
588 		GET_LE16((uint8_t *)(&raid_map->row_cnt));
589 	map_idx = (map_row * total_disks_per_row) + fst_col;
590 
591 	/* RAID 1 */
592 	if (device->raid_level == SA_RAID_1) {
593 		if (device->offload_to_mirror)
594 			map_idx += data_disks_per_row;
595 		device->offload_to_mirror = !device->offload_to_mirror;
596 	} else if (device->raid_level == SA_RAID_ADM) {
597 		/* RAID ADM */
598 		/*
599 		 * Handles N-way mirrors  (R1-ADM) and R10 with # of drives
600 		 * divisible by 3.
601 		 */
602 		offload_to_mirror = device->offload_to_mirror;
603 		if (offload_to_mirror == 0)  {
604 			/* use physical disk in the first mirrored group. */
605 			map_idx %= data_disks_per_row;
606 		} else {
607 			do {
608 				/*
609 				 * Determine mirror group that map_idx
610 				 * indicates.
611 				 */
612 				cur_grp = map_idx / data_disks_per_row;
613 
614 				if (offload_to_mirror != cur_grp) {
615 					if (cur_grp <
616 						layout_map_count - 1) {
617 						/*
618 						 * Select raid index from
619 						 * next group.
620 						 */
621 						map_idx += data_disks_per_row;
622 						cur_grp++;
623 					} else {
624 						/*
625 						 * Select raid index from first
626 						 * group.
627 						 */
628 						map_idx %= data_disks_per_row;
629 						cur_grp = 0;
630 					}
631 				}
632 			} while (offload_to_mirror != cur_grp);
633 		}
634 
635 		/* Set mirror group to use next time. */
636 		offload_to_mirror =
637 			(offload_to_mirror >= layout_map_count - 1) ?
638 				0 : offload_to_mirror + 1;
639 		if(offload_to_mirror >= layout_map_count)
640 			return PQI_STATUS_FAILURE;
641 
642 		device->offload_to_mirror = offload_to_mirror;
643 		/*
644 		 * Avoid direct use of device->offload_to_mirror within this
645 		 * function since multiple threads might simultaneously
646 		 * increment it beyond the range of device->layout_map_count -1.
647 		 */
648 	} else if ((device->raid_level == SA_RAID_5 ||
649 		device->raid_level == SA_RAID_6) && layout_map_count > 1) {
650 		/* RAID 50/60 */
651 		/* Verify first and last block are in same RAID group */
652 		r5or6_blks_per_row = strip_sz * data_disks_per_row;
653 		stripesz = r5or6_blks_per_row * layout_map_count;
654 
655 		fst_grp = (fst_blk % stripesz) / r5or6_blks_per_row;
656 		lst_grp = (lst_blk % stripesz) / r5or6_blks_per_row;
657 
658 		if (fst_grp != lst_grp)
659 			return PQI_STATUS_FAILURE;
660 
661 		/* Verify request is in a single row of RAID 5/6 */
662 		fst_row = r5or6_fst_row =
663 			fst_blk / stripesz;
664 		r5or6_lst_row = lst_blk / stripesz;
665 
666 		if (r5or6_fst_row != r5or6_lst_row)
667 			return PQI_STATUS_FAILURE;
668 
669 		/* Verify request is in a single column */
670 		fst_row_offset = r5or6_fst_row_offset =
671 			(uint32_t)((fst_blk % stripesz) %
672 			r5or6_blks_per_row);
673 
674 		r5or6_lst_row_offset =
675 			(uint32_t)((lst_blk % stripesz) %
676 			r5or6_blks_per_row);
677 
678 		fst_col = r5or6_fst_row_offset / strip_sz;
679 		r5or6_fst_col = fst_col;
680 		r5or6_lst_col = r5or6_lst_row_offset / strip_sz;
681 
682 		if (r5or6_fst_col != r5or6_lst_col)
683 			return PQI_STATUS_FAILURE;
684 
685 		/* Request is eligible */
686 		map_row =
687 			((uint32_t)(fst_row >> raid_map->parity_rotation_shift)) %
688 			GET_LE16((uint8_t *)(&raid_map->row_cnt));
689 
690 		map_idx = (fst_grp *
691 			(GET_LE16((uint8_t *)(&raid_map->row_cnt)) *
692 			total_disks_per_row)) +
693 			(map_row * total_disks_per_row) + fst_col;
694 	}
695 
696 	if (map_idx >= RAID_MAP_MAX_ENTRIES)
697 		return PQI_STATUS_FAILURE;
698 
699 	rcb->ioaccel_handle = raid_map->dev_data[map_idx].ioaccel_handle;
700 	disk_block = GET_LE64((uint8_t *)(&raid_map->disk_starting_blk)) +
701 		fst_row * strip_sz +
702 		(fst_row_offset - fst_col * strip_sz);
703 	disk_blk_cnt = blk_cnt;
704 
705 	/* Handle differing logical/physical block sizes. */
706 	if (raid_map->phys_blk_shift) {
707 		disk_block <<= raid_map->phys_blk_shift;
708 		disk_blk_cnt <<= raid_map->phys_blk_shift;
709 	}
710 
711 	if (disk_blk_cnt > 0xffff)
712 		return PQI_STATUS_FAILURE;
713 
714 	/* Build the new CDB for the physical disk I/O. */
715 	if (disk_block > 0xffffffff) {
716 		cdb[0] = is_write ? SCMD_WRITE_16 : SCMD_READ_16;
717 		cdb[1] = 0;
718 		PUT_BE64(disk_block, &cdb[2]);
719 		PUT_BE32(disk_blk_cnt, &cdb[10]);
720 		cdb[14] = 0;
721 		cdb[15] = 0;
722 		cdb_length = 16;
723 	} else {
724 		cdb[0] = is_write ? SCMD_WRITE_10 : SCMD_READ_10;
725 		cdb[1] = 0;
726 		PUT_BE32(disk_block, &cdb[2]);
727 		cdb[6] = 0;
728 		PUT_BE16(disk_blk_cnt, &cdb[7]);
729 		cdb[9] = 0;
730 		cdb_length = 10;
731 	}
732 
733 	if (GET_LE16((uint8_t *)(&raid_map->flags)) &
734 		RAID_MAP_ENCRYPTION_ENABLED) {
735 		pqisrc_set_enc_info(&rcb->enc_info, raid_map,
736 			fst_blk);
737 		rcb->encrypt_enable = true;
738 	} else {
739 		rcb->encrypt_enable = false;
740 	}
741 
742 	rcb->cmdlen = cdb_length;
743 
744 
745 	DBG_FUNC("OUT");
746 
747 	return PQI_STATUS_SUCCESS;
748 }
749 
750 /* Function used to submit a TMF to the adapter */
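/*
 * A task-management IU (an abort or a reset, as selected by tmf_type) is
 * posted to a RAID-path IB queue and the caller waits until the firmware
 * responds or the wait times out.
 */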
751 int pqisrc_send_tmf(pqisrc_softstate_t *softs, pqi_scsi_dev_t *devp,
752                     rcb_t *rcb, int req_id, int tmf_type)
753 {
754 	int rval = PQI_STATUS_SUCCESS;
755 	pqi_tmf_req_t tmf_req;
756 
757 	memset(&tmf_req, 0, sizeof(pqi_tmf_req_t));
758 
759 	DBG_FUNC("IN");
760 
761 	tmf_req.header.iu_type = PQI_REQUEST_IU_TASK_MANAGEMENT;
762 	tmf_req.header.iu_length = sizeof(tmf_req) - sizeof(iu_header_t);
763 	tmf_req.req_id = rcb->tag;
764 
765 	memcpy(tmf_req.lun, devp->scsi3addr, sizeof(tmf_req.lun));
766 	tmf_req.tmf = tmf_type;
767 	tmf_req.req_id_to_manage = req_id;
768 	tmf_req.resp_qid = OS_GET_TMF_RESP_QID(softs, rcb);
769 	tmf_req.obq_id_to_manage = rcb->resp_qid;
770 
771 	rcb->req_pending = true;
772 
773 	rval = pqisrc_submit_cmnd(softs,
774 	&softs->op_raid_ib_q[OS_GET_TMF_REQ_QINDEX(softs, rcb)], &tmf_req);
775 	if (rval != PQI_STATUS_SUCCESS) {
776 		DBG_ERR("Unable to submit command rval=%d\n", rval);
777 		return rval;
778 	}
779 
780 	rval = pqisrc_wait_on_condition(softs, rcb);
781 	if (rval != PQI_STATUS_SUCCESS) {
782 		DBG_ERR("Task Management tmf_type : %d timeout\n", tmf_type);
783 		rcb->status = REQUEST_FAILED;
784 	}
785 
786 	if (rcb->status != REQUEST_SUCCESS) {
787 		DBG_ERR_BTL(devp, "Task Management failed tmf_type:%d "
788 				"stat:0x%x\n", tmf_type, rcb->status);
789 		rval = PQI_STATUS_FAILURE;
790 	}
791 
792 	DBG_FUNC("OUT");
793 	return rval;
794 }
795