xref: /freebsd/sys/dev/smartpqi/smartpqi_request.c (revision 2f06449d64298fe508e3c585b45effd69a72d696)
1 /*-
2  * Copyright 2016-2023 Microchip Technology, Inc. and/or its subsidiaries.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
14  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
17  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23  * SUCH DAMAGE.
24  */
25 
26 
27 #include "smartpqi_includes.h"
28 
29 /* Change this if you need to debug why AIO is not being used */
30 #define DBG_AIO DBG_IO
31 
32 #define SG_FLAG_LAST	0x40000000
33 #define SG_FLAG_CHAIN	0x80000000
34 
35 /* Local Prototypes */
36 static void pqisrc_increment_io_counters(pqisrc_softstate_t *softs, rcb_t *rcb);
37 static int fill_lba_for_scsi_rw(pqisrc_softstate_t *softs, uint8_t *cdb, aio_req_locator_t *l);
38 
39 
40 /* Determine how many SG entries can be embedded across the allotted IU elements */
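/*
 * Added note (illustrative, not from the original source): the first queue
 * element contributes MAX_EMBEDDED_SG_IN_FIRST_IU_* slots and every
 * additional allotted element contributes MAX_EMBEDDED_SG_IN_IU more, so
 * with elem_alloted == 3 the result is the first-IU count plus
 * 2 * MAX_EMBEDDED_SG_IN_IU (macro values are defined in the driver headers).
 */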
41 static inline uint32_t
42 pqisrc_embedded_sgl_count(uint32_t elem_alloted, uint8_t iu_type)
43 {
44 	uint32_t embedded_sgl_count = MAX_EMBEDDED_SG_IN_FIRST_IU_DEFAULT;
45 
46 	DBG_FUNC("IN\n");
47 
48 	if (iu_type == PQI_IU_TYPE_RAID5_WRITE_BYPASS_REQUEST ||
49 		iu_type == PQI_IU_TYPE_RAID6_WRITE_BYPASS_REQUEST)
50 		embedded_sgl_count = MAX_EMBEDDED_SG_IN_FIRST_IU_RAID56_AIO;
51 
52 	/**
53 	calculate embedded sgl count using num_elem_alloted for IO
54 	**/
55 	if(elem_alloted - 1)
56 		embedded_sgl_count += ((elem_alloted - 1) * MAX_EMBEDDED_SG_IN_IU);
57 	/* DBG_IO("embedded_sgl_count :%d\n", embedded_sgl_count); */
58 
59 	DBG_FUNC("OUT\n");
60 
61 	return embedded_sgl_count;
62 
63 }
64 
65 /* Determine the number of contiguous free elements in a queue from its producer and consumer indices */
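/*
 * Example (illustrative values): with elem_in_q = 32, pi = 30, ci = 5 the
 * result is 2 (slots 30-31 before the wrap); with pi = 3, ci = 10 it is 6.
 * When ci == 0 one slot is held back so a full queue never looks empty.
 */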
66 static inline uint32_t
67 pqisrc_contiguous_free_elem(uint32_t pi, uint32_t ci, uint32_t elem_in_q)
68 {
69 	uint32_t contiguous_free_elem = 0;
70 
71 	DBG_FUNC("IN\n");
72 
73 	if(pi >= ci) {
74 		contiguous_free_elem = (elem_in_q - pi);
75 		if(ci == 0)
76 			contiguous_free_elem -= 1;
77 	} else {
78 		contiguous_free_elem = (ci - pi - 1);
79 	}
80 
81 	DBG_FUNC("OUT\n");
82 
83 	return contiguous_free_elem;
84 }
85 
86 /* Determine the number of queue elements needed for the request */
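/*
 * Example (illustrative, assuming MAX_EMBEDDED_SG_IN_IU == 8): with
 * sg_in_first_iu == 4 and SG_Count == 20, the remaining 16 SGs need
 * PQISRC_DIV_ROUND_UP(16, 8) == 2 extra elements, i.e. 3 in total.
 */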
87 static uint32_t
88 pqisrc_num_elem_needed(pqisrc_softstate_t *softs, uint32_t SG_Count,
89                 pqi_scsi_dev_t *devp, boolean_t is_write, IO_PATH_T io_path)
90 {
91 	uint32_t num_sg;
92 	uint32_t num_elem_required = 1;
93 	uint32_t sg_in_first_iu = MAX_EMBEDDED_SG_IN_FIRST_IU_DEFAULT;
94 
95 	DBG_FUNC("IN\n");
96 	DBG_IO("SGL_Count :%u\n",SG_Count);
97 
98 	if ((devp->raid_level == SA_RAID_5 || devp->raid_level == SA_RAID_6)
99 		&& is_write && (io_path == AIO_PATH))
100 		sg_in_first_iu = MAX_EMBEDDED_SG_IN_FIRST_IU_RAID56_AIO;
101 	/********
102 	If SG_Count is greater than the max SGs per spanning command
103 	(4 without spanning, 68 with spanning), SG chaining is required.
104 	Otherwise, if SG_Count <= MAX_EMBEDDED_SG_IN_FIRST_IU_*, the SGs fit
105 	in the first IU. In either case a single queue element is enough.
106 	********/
107 	if(SG_Count > softs->max_sg_per_spanning_cmd ||
108 		SG_Count <= sg_in_first_iu)
109 		return num_elem_required;
110 	/*
111 	SG count beyond what is embedded in the first IU
112 	 */
113 	num_sg = SG_Count - sg_in_first_iu;
114 	num_elem_required += PQISRC_DIV_ROUND_UP(num_sg, MAX_EMBEDDED_SG_IN_IU);
115 	DBG_FUNC("OUT\n");
116 	return num_elem_required;
117 }
118 
119 /* Subroutine to build SG list for the IU submission */
120 static boolean_t
121 pqisrc_build_sgl(sgt_t *sg_array, rcb_t *rcb, iu_header_t *iu_hdr,
122 			uint32_t num_elem_alloted)
123 {
124 	uint32_t i;
125 	uint32_t num_sg = OS_GET_IO_SG_COUNT(rcb);
126 	sgt_t *sgt = sg_array;
127 	sgt_t *sg_chain = NULL;
128 	boolean_t partial = false;
129 
130 	DBG_FUNC("IN\n");
131 
132 	/* DBG_IO("SGL_Count :%d",num_sg); */
133 	if (0 == num_sg) {
134 		goto out;
135 	}
136 
137 	if (num_sg <= pqisrc_embedded_sgl_count(num_elem_alloted,
138 		iu_hdr->iu_type)) {
139 
140 		for (i = 0; i < num_sg; i++, sgt++) {
141 			sgt->addr= OS_GET_IO_SG_ADDR(rcb,i);
142 			sgt->len= OS_GET_IO_SG_LEN(rcb,i);
143 			sgt->flags= 0;
144 		}
145 
146 		sg_array[num_sg - 1].flags = SG_FLAG_LAST;
147 	} else {
148 	/**
149 	SGL Chaining
150 	**/
151 		sg_chain = rcb->sg_chain_virt;
152 		sgt->addr = rcb->sg_chain_dma;
153 		sgt->len = num_sg * sizeof(sgt_t);
154 		sgt->flags = SG_FLAG_CHAIN;
155 
156 		sgt = sg_chain;
157 		for (i = 0; i < num_sg; i++, sgt++) {
158 			sgt->addr = OS_GET_IO_SG_ADDR(rcb,i);
159 			sgt->len = OS_GET_IO_SG_LEN(rcb,i);
160 			sgt->flags = 0;
161 		}
162 
163 		sg_chain[num_sg - 1].flags = SG_FLAG_LAST;
164 		num_sg = 1;
165 		partial = true;
166 
167 	}
168 out:
169 	iu_hdr->iu_length = num_sg * sizeof(sgt_t);
170 	DBG_FUNC("OUT\n");
171 	return partial;
172 
173 }
174 
175 #if 0
176 static inline void
177 pqisrc_show_raid_req(pqisrc_softstate_t *softs, pqisrc_raid_req_t *raid_req)
178 {
179 	DBG_IO("%30s: 0x%x\n", "raid_req->header.iu_type",
180 		raid_req->header.iu_type);
181 	DBG_IO("%30s: 0x%d\n", "raid_req->response_queue_id",
182 		raid_req->response_queue_id);
183 	DBG_IO("%30s: 0x%x\n", "raid_req->request_id",
184 		raid_req->request_id);
185 	DBG_IO("%30s: 0x%x\n", "raid_req->buffer_length",
186 		raid_req->buffer_length);
187 	DBG_IO("%30s: 0x%x\n", "raid_req->task_attribute",
188 		raid_req->task_attribute);
189 	DBG_IO("%30s: 0x%llx\n", "raid_req->lun_number",
190 		*((long long unsigned int*)raid_req->lun_number));
191 	DBG_IO("%30s: 0x%x\n", "raid_req->error_index",
192 		raid_req->error_index);
193 	DBG_IO("%30s: 0x%p\n", "raid_req->sg_descriptors[0].addr",
194 		(void *)raid_req->sg_descriptors[0].addr);
195 	DBG_IO("%30s: 0x%x\n", "raid_req->sg_descriptors[0].len",
196 		raid_req->sg_descriptors[0].len);
197 	DBG_IO("%30s: 0x%x\n", "raid_req->sg_descriptors[0].flags",
198 		raid_req->sg_descriptors[0].flags);
199 }
200 #endif
201 
202 /* Subroutine used to build the RAID request */
203 static void
204 pqisrc_build_raid_io(pqisrc_softstate_t *softs, rcb_t *rcb,
205  	pqisrc_raid_req_t *raid_req, uint32_t num_elem_alloted)
206 {
207 	DBG_FUNC("IN\n");
208 
209 	raid_req->header.iu_type = PQI_IU_TYPE_RAID_PATH_IO_REQUEST;
210 	raid_req->header.comp_feature = 0;
211 	raid_req->response_queue_id = OS_GET_IO_RESP_QID(softs, rcb);
212 	raid_req->work_area[0] = 0;
213 	raid_req->work_area[1] = 0;
214 	raid_req->request_id = rcb->tag;
215 	raid_req->nexus_id = 0;
216 	raid_req->buffer_length = GET_SCSI_BUFFLEN(rcb);
217 	memcpy(raid_req->lun_number, rcb->dvp->scsi3addr,
218 		sizeof(raid_req->lun_number));
219 	raid_req->protocol_spec = 0;
220 	raid_req->data_direction = rcb->data_dir;
221 	raid_req->reserved1 = 0;
222 	raid_req->fence = 0;
223 	raid_req->error_index = raid_req->request_id;
224 	raid_req->reserved2 = 0;
225 	raid_req->task_attribute = OS_GET_TASK_ATTR(rcb);
226 	raid_req->command_priority = 0;
227 	raid_req->reserved3 = 0;
228 	raid_req->reserved4 = 0;
229 	raid_req->reserved5 = 0;
230 	raid_req->ml_device_lun_number = (uint8_t)rcb->cm_ccb->ccb_h.target_lun;
231 
232 	/* As cdb and additional_cdb_bytes are contiguous,
233 	   update them in a single statement */
234 	memcpy(raid_req->cmd.cdb, rcb->cdbp, rcb->cmdlen);
235 #if 0
236 	DBG_IO("CDB :");
237 	for(i = 0; i < rcb->cmdlen ; i++)
238 		DBG_IO(" 0x%x \n ",raid_req->cdb[i]);
239 #endif
240 
241 	switch (rcb->cmdlen) {
242 		case 6:
243 		case 10:
244 		case 12:
245 		case 16:
246 			raid_req->additional_cdb_bytes_usage =
247 				PQI_ADDITIONAL_CDB_BYTES_0;
248 			break;
249 		case 20:
250 			raid_req->additional_cdb_bytes_usage =
251 				PQI_ADDITIONAL_CDB_BYTES_4;
252 			break;
253 		case 24:
254 			raid_req->additional_cdb_bytes_usage =
255 				PQI_ADDITIONAL_CDB_BYTES_8;
256 			break;
257 		case 28:
258 			raid_req->additional_cdb_bytes_usage =
259 				PQI_ADDITIONAL_CDB_BYTES_12;
260 			break;
261 		case 32:
262 		default: /* todo:review again */
263 			raid_req->additional_cdb_bytes_usage =
264 				PQI_ADDITIONAL_CDB_BYTES_16;
265 			break;
266 	}
267 
268 	/* Frame SGL Descriptor */
269 	raid_req->partial = pqisrc_build_sgl(&raid_req->sg_descriptors[0], rcb,
270 		&raid_req->header, num_elem_alloted);
271 
272 	raid_req->header.iu_length +=
273 			offsetof(pqisrc_raid_req_t, sg_descriptors) - sizeof(iu_header_t);
274 
275 #if 0
276 	pqisrc_show_raid_req(softs, raid_req);
277 #endif
278 	rcb->success_cmp_callback = pqisrc_process_io_response_success;
279 	rcb->error_cmp_callback = pqisrc_process_raid_response_error;
280 	rcb->resp_qid = raid_req->response_queue_id;
281 
282 	DBG_FUNC("OUT\n");
283 
284 }
285 
286 /* We will need to expand this to handle different types of
287  * aio request structures.
288  */
289 #if 0
290 static inline void
291 pqisrc_show_aio_req(pqisrc_softstate_t *softs, pqi_aio_req_t *aio_req)
292 {
293 	DBG_IO("%30s: 0x%x\n", "aio_req->header.iu_type",
294 		aio_req->header.iu_type);
295 	DBG_IO("%30s: 0x%x\n", "aio_req->resp_qid",
296 		aio_req->response_queue_id);
297 	DBG_IO("%30s: 0x%x\n", "aio_req->req_id",
298 		aio_req->req_id);
299 	DBG_IO("%30s: 0x%x\n", "aio_req->nexus",
300 		aio_req->nexus);
301 	DBG_IO("%30s: 0x%x\n", "aio_req->buf_len",
302 		aio_req->buf_len);
303 	DBG_IO("%30s: 0x%x\n", "aio_req->cmd_flags.data_dir",
304 		aio_req->cmd_flags.data_dir);
305 	DBG_IO("%30s: 0x%x\n", "aio_req->attr_prio.task_attr",
306 		aio_req->attr_prio.task_attr);
307 	DBG_IO("%30s: 0x%x\n", "aio_req->err_idx",
308 		aio_req->err_idx);
309 	DBG_IO("%30s: 0x%x\n", "aio_req->num_sg",
310 		aio_req->num_sg);
311 	DBG_IO("%30s: 0x%p\n", "aio_req->sg_desc[0].addr",
312 		(void *)aio_req->sg_desc[0].addr);
313 	DBG_IO("%30s: 0x%x\n", "aio_req->sg_desc[0].len",
314 		aio_req->sg_desc[0].len);
315 	DBG_IO("%30s: 0x%x\n", "aio_req->sg_desc[0].flags",
316 		aio_req->sg_desc[0].flags);
317 }
318 #endif
319 
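/*
 * Added note: encodes a 64-bit LUN into the 8-byte SCSI LUN field, two bytes
 * per addressing level with each level stored big-endian; e.g. lun 1 becomes
 * 00 01 00 00 00 00 00 00.
 */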
320 void
321 int_to_scsilun(uint64_t lun, uint8_t *scsi_lun)
322 {
323 	int i;
324 
325 	memset(scsi_lun, 0, sizeof(lun));
326 	for (i = 0; i < sizeof(lun); i += 2) {
327 		scsi_lun[i] = (lun >> 8) & 0xFF;
328 		scsi_lun[i+1] = lun & 0xFF;
329 		lun = lun >> 16;
330 	}
331 }
332 
333 
334 /* Subroutine used to populate AIO IUs. */
335 void
336 pqisrc_build_aio_common(pqisrc_softstate_t *softs, pqi_aio_req_t *aio_req,
337                         rcb_t *rcb, uint32_t num_elem_alloted)
338 {
339 	DBG_FUNC("IN\n");
340 	aio_req->header.iu_type = PQI_IU_TYPE_AIO_PATH_IO_REQUEST;
341 	aio_req->header.comp_feature = 0;
342 	aio_req->response_queue_id = OS_GET_IO_RESP_QID(softs, rcb);
343 	aio_req->work_area[0] = 0;
344 	aio_req->work_area[1] = 0;
345 	aio_req->req_id = rcb->tag;
346 	aio_req->res1[0] = 0;
347 	aio_req->res1[1] = 0;
348 	aio_req->nexus = rcb->ioaccel_handle;
349 	aio_req->buf_len = GET_SCSI_BUFFLEN(rcb);
350 	aio_req->cmd_flags.data_dir = rcb->data_dir;
351 	aio_req->cmd_flags.mem_type = 0;
352 	aio_req->cmd_flags.fence = 0;
353 	aio_req->cmd_flags.res2 = 0;
354 	aio_req->attr_prio.task_attr = OS_GET_TASK_ATTR(rcb);
355 	aio_req->attr_prio.cmd_prio = 0;
356 	aio_req->attr_prio.res3 = 0;
357 	aio_req->err_idx = aio_req->req_id;
358 	aio_req->cdb_len = rcb->cmdlen;
359 
360 	if (rcb->cmdlen > sizeof(aio_req->cdb))
361 		rcb->cmdlen = sizeof(aio_req->cdb);
362 	memcpy(aio_req->cdb, rcb->cdbp, rcb->cmdlen);
363 	memset(aio_req->res4, 0, sizeof(aio_req->res4));
364 
365 	uint64_t lun = rcb->cm_ccb->ccb_h.target_lun;
366 	if (lun && (rcb->dvp->is_multi_lun)) {
367 		int_to_scsilun(lun, aio_req->lun);
368 	}
369 	else {
370 		memset(aio_req->lun, 0, sizeof(aio_req->lun));
371 	}
372 
373 	/* handle encryption fields */
374 	if (rcb->encrypt_enable == true) {
375 		aio_req->cmd_flags.encrypt_enable = true;
376 		aio_req->encrypt_key_index =
377 			LE_16(rcb->enc_info.data_enc_key_index);
378 		aio_req->encrypt_twk_low =
379 			LE_32(rcb->enc_info.encrypt_tweak_lower);
380 		aio_req->encrypt_twk_high =
381 			LE_32(rcb->enc_info.encrypt_tweak_upper);
382 	} else {
383 		aio_req->cmd_flags.encrypt_enable = 0;
384 		aio_req->encrypt_key_index = 0;
385 		aio_req->encrypt_twk_high = 0;
386 		aio_req->encrypt_twk_low = 0;
387 	}
388 	/* Frame SGL Descriptor */
389 	aio_req->cmd_flags.partial = pqisrc_build_sgl(&aio_req->sg_desc[0], rcb,
390 		&aio_req->header, num_elem_alloted);
391 
392 	aio_req->num_sg = aio_req->header.iu_length / sizeof(sgt_t);
393 
394 	/* DBG_INFO("aio_req->num_sg :%d\n", aio_req->num_sg); */
395 
396 	aio_req->header.iu_length += offsetof(pqi_aio_req_t, sg_desc) -
397 		sizeof(iu_header_t);
398 	/* set completion and error handlers. */
399 	rcb->success_cmp_callback = pqisrc_process_io_response_success;
400 	rcb->error_cmp_callback = pqisrc_process_aio_response_error;
401 	rcb->resp_qid = aio_req->response_queue_id;
402 	DBG_FUNC("OUT\n");
403 
404 }
405 /* Subroutine used to show standard AIO IU fields */
406 void
407 pqisrc_show_aio_common(pqisrc_softstate_t *softs, rcb_t *rcb,
408                        pqi_aio_req_t *aio_req)
409 {
410 #ifdef DEBUG_AIO
411 	DBG_INFO("AIO IU Content, tag# 0x%08x", rcb->tag);
412 	DBG_INFO("%15s: 0x%x\n", "iu_type",	aio_req->header.iu_type);
413 	DBG_INFO("%15s: 0x%x\n", "comp_feat",	aio_req->header.comp_feature);
414 	DBG_INFO("%15s: 0x%x\n", "length",	aio_req->header.iu_length);
415 	DBG_INFO("%15s: 0x%x\n", "resp_qid",	aio_req->response_queue_id);
416 	DBG_INFO("%15s: 0x%x\n", "req_id",	aio_req->req_id);
417 	DBG_INFO("%15s: 0x%x\n", "nexus",	aio_req->nexus);
418 	DBG_INFO("%15s: 0x%x\n", "buf_len",	aio_req->buf_len);
419 	DBG_INFO("%15s:\n", "cmd_flags");
420 	DBG_INFO("%15s: 0x%x\n", "data_dir",	aio_req->cmd_flags.data_dir);
421 	DBG_INFO("%15s: 0x%x\n", "partial",	aio_req->cmd_flags.partial);
422 	DBG_INFO("%15s: 0x%x\n", "mem_type",	aio_req->cmd_flags.mem_type);
423 	DBG_INFO("%15s: 0x%x\n", "fence",	aio_req->cmd_flags.fence);
424 	DBG_INFO("%15s: 0x%x\n", "encryption",
425 		aio_req->cmd_flags.encrypt_enable);
426 	DBG_INFO("%15s:\n", "attr_prio");
427 	DBG_INFO("%15s: 0x%x\n", "task_attr",	aio_req->attr_prio.task_attr);
428 	DBG_INFO("%15s: 0x%x\n", "cmd_prio",	aio_req->attr_prio.cmd_prio);
429 	DBG_INFO("%15s: 0x%x\n", "dek_index",	aio_req->encrypt_key_index);
430 	DBG_INFO("%15s: 0x%x\n", "tweak_lower",	aio_req->encrypt_twk_low);
431 	DBG_INFO("%15s: 0x%x\n", "tweak_upper",	aio_req->encrypt_twk_high);
432 	pqisrc_show_cdb(softs, "AIOC", rcb, aio_req->cdb);
433 	DBG_INFO("%15s: 0x%x\n", "err_idx",	aio_req->err_idx);
434 	DBG_INFO("%15s: 0x%x\n", "num_sg",	aio_req->num_sg);
435 	DBG_INFO("%15s: 0x%x\n", "cdb_len",	aio_req->cdb_len);
436 #if 0
437 	DBG_INFO("%15s: 0x%x\n", "lun",		aio_req->lun);
438 	DBG_INFO("%15s: 0x%p\n", "sg_desc[0].addr",
439 		(void *)aio_req->sg_desc[0].addr);
440 	DBG_INFO("%15s: 0x%x\n", "sg_desc[0].len",
441 		aio_req->sg_desc[0].len);
442 	DBG_INFO("%15s: 0x%x\n", "sg_desc[0].flags",
443 		aio_req->sg_desc[0].flags);
444 #endif
445 #endif /* DEBUG_AIO */
446 }
447 
448 /* Subroutine used to populate AIO RAID 1 write bypass IU. */
449 void
450 pqisrc_build_aio_R1_write(pqisrc_softstate_t *softs,
451 	pqi_aio_raid1_write_req_t *aio_req, rcb_t *rcb,
452 	uint32_t num_elem_alloted)
453 {
454 	DBG_FUNC("IN\n");
455 	if (!rcb->dvp) {
456 		DBG_WARN("%s: DEBUG: dev ptr is null", __func__);
457 		return;
458 	}
459 	if (!rcb->dvp->raid_map) {
460 		DBG_WARN("%s: DEBUG: raid_map is null", __func__);
461 		return;
462 	}
463 
464 	aio_req->header.iu_type = PQI_IU_TYPE_RAID1_WRITE_BYPASS_REQUEST;
465 	aio_req->header.comp_feature = 0;
466 	aio_req->response_queue_id = OS_GET_IO_RESP_QID(softs, rcb);
467 	aio_req->work_area[0] = 0;
468 	aio_req->work_area[1] = 0;
469 	aio_req->req_id = rcb->tag;
470 	aio_req->volume_id =  (LE_32(rcb->dvp->scsi3addr[0]) & 0x3FFF);
471 	aio_req->nexus_1 = rcb->it_nexus[0];
472 	aio_req->nexus_2 = rcb->it_nexus[1];
473 	aio_req->nexus_3 = rcb->it_nexus[2];
474 	aio_req->buf_len = GET_SCSI_BUFFLEN(rcb);
475 	aio_req->cmd_flags.data_dir = rcb->data_dir;
476 	aio_req->cmd_flags.mem_type = 0;
477 	aio_req->cmd_flags.fence = 0;
478 	aio_req->cmd_flags.res2 = 0;
479 	aio_req->attr_prio.task_attr = OS_GET_TASK_ATTR(rcb);
480 	aio_req->attr_prio.cmd_prio = 0;
481 	aio_req->attr_prio.res3 = 0;
482 	if(rcb->cmdlen > sizeof(aio_req->cdb))
483 		rcb->cmdlen = sizeof(aio_req->cdb);
484 	memcpy(aio_req->cdb, rcb->cdbp, rcb->cmdlen);
485 	aio_req->err_idx = aio_req->req_id;
486 	aio_req->cdb_len = rcb->cmdlen;
487 	aio_req->num_drives = LE_16(rcb->dvp->raid_map->layout_map_count);
488 
489 	/* handle encryption fields */
490 	if (rcb->encrypt_enable == true) {
491 		aio_req->cmd_flags.encrypt_enable = true;
492 		aio_req->encrypt_key_index =
493 			LE_16(rcb->enc_info.data_enc_key_index);
494 		aio_req->encrypt_twk_low =
495 			LE_32(rcb->enc_info.encrypt_tweak_lower);
496 		aio_req->encrypt_twk_high =
497 			LE_32(rcb->enc_info.encrypt_tweak_upper);
498 	} else {
499 		aio_req->cmd_flags.encrypt_enable = 0;
500 		aio_req->encrypt_key_index = 0;
501 		aio_req->encrypt_twk_high = 0;
502 		aio_req->encrypt_twk_low = 0;
503 	}
504 	/* Frame SGL Descriptor */
505 	aio_req->cmd_flags.partial = pqisrc_build_sgl(&aio_req->sg_desc[0], rcb,
506 		&aio_req->header, num_elem_alloted);
507 
508 	aio_req->num_sg = aio_req->header.iu_length / sizeof(sgt_t);
509 
510 	/* DBG_INFO("aio_req->num_sg :%d\n", aio_req->num_sg); */
511 
512 	aio_req->header.iu_length += offsetof(pqi_aio_raid1_write_req_t, sg_desc) -
513 		sizeof(iu_header_t);
514 
515 	/* set completion and error handlers. */
516 	rcb->success_cmp_callback = pqisrc_process_io_response_success;
517 	rcb->error_cmp_callback = pqisrc_process_aio_response_error;
518 	rcb->resp_qid = aio_req->response_queue_id;
519 	DBG_FUNC("OUT\n");
520 
521 }
522 
523 /* Subroutine used to show AIO RAID1 write bypass IU fields */
524 void
525 pqisrc_show_aio_R1_write(pqisrc_softstate_t *softs, rcb_t *rcb,
526 	pqi_aio_raid1_write_req_t *aio_req)
527 {
528 
529 #ifdef DEBUG_AIO
530 	DBG_INFO("AIO RAID1 Write IU Content, tag# 0x%08x", rcb->tag);
531 	DBG_INFO("%15s: 0x%x\n", "iu_type",	aio_req->header.iu_type);
532 	DBG_INFO("%15s: 0x%x\n", "comp_feat",	aio_req->header.comp_feature);
533 	DBG_INFO("%15s: 0x%x\n", "length",	aio_req->header.iu_length);
534 	DBG_INFO("%15s: 0x%x\n", "resp_qid",	aio_req->response_queue_id);
535 	DBG_INFO("%15s: 0x%x\n", "req_id",	aio_req->req_id);
536 	DBG_INFO("%15s: 0x%x\n", "volume_id",	aio_req->volume_id);
537 	DBG_INFO("%15s: 0x%x\n", "nexus_1",	aio_req->nexus_1);
538 	DBG_INFO("%15s: 0x%x\n", "nexus_2",	aio_req->nexus_2);
539 	DBG_INFO("%15s: 0x%x\n", "nexus_3",	aio_req->nexus_3);
540 	DBG_INFO("%15s: 0x%x\n", "buf_len",	aio_req->buf_len);
541 	DBG_INFO("%15s:\n", "cmd_flags");
542 	DBG_INFO("%15s: 0x%x\n", "data_dir",	aio_req->cmd_flags.data_dir);
543 	DBG_INFO("%15s: 0x%x\n", "partial",	aio_req->cmd_flags.partial);
544 	DBG_INFO("%15s: 0x%x\n", "mem_type",	aio_req->cmd_flags.mem_type);
545 	DBG_INFO("%15s: 0x%x\n", "fence",	aio_req->cmd_flags.fence);
546 	DBG_INFO("%15s: 0x%x\n", "encryption",
547 		aio_req->cmd_flags.encrypt_enable);
548 	DBG_INFO("%15s:\n", "attr_prio");
549 	DBG_INFO("%15s: 0x%x\n", "task_attr",	aio_req->attr_prio.task_attr);
550 	DBG_INFO("%15s: 0x%x\n", "cmd_prio",	aio_req->attr_prio.cmd_prio);
551 	DBG_INFO("%15s: 0x%x\n", "dek_index",	aio_req->encrypt_key_index);
552 	pqisrc_show_cdb(softs, "AIOR1W", rcb, aio_req->cdb);
553 	DBG_INFO("%15s: 0x%x\n", "err_idx",	aio_req->err_idx);
554 	DBG_INFO("%15s: 0x%x\n", "num_sg",	aio_req->num_sg);
555 	DBG_INFO("%15s: 0x%x\n", "cdb_len",	aio_req->cdb_len);
556 	DBG_INFO("%15s: 0x%x\n", "num_drives",	aio_req->num_drives);
557 	DBG_INFO("%15s: 0x%x\n", "tweak_lower",	aio_req->encrypt_twk_low);
558 	DBG_INFO("%15s: 0x%x\n", "tweak_upper",	aio_req->encrypt_twk_high);
559 #if 0
560 	DBG_INFO("%15s: 0x%p\n", "sg_desc[0].addr",
561 		(void *)aio_req->sg_desc[0].addr);
562 	DBG_INFO("%15s: 0x%x\n", "sg_desc[0].len",
563 		aio_req->sg_desc[0].len);
564 	DBG_INFO("%15s: 0x%x\n", "sg_desc[0].flags",
565 		aio_req->sg_desc[0].flags);
566 #endif
567 #endif /* DEBUG_AIO */
568 }
569 
570 /* Subroutine used to populate AIO RAID 5 or 6 write bypass IU */
571 void
572 pqisrc_build_aio_R5or6_write(pqisrc_softstate_t *softs,
573 	pqi_aio_raid5or6_write_req_t *aio_req, rcb_t *rcb,
574 	uint32_t num_elem_alloted)
575 {
576 	DBG_FUNC("IN\n");
577 	uint32_t index;
578 	unsigned num_data_disks;
579 	unsigned num_metadata_disks;
580 	unsigned total_disks;
581 	num_data_disks = LE_16(rcb->dvp->raid_map->data_disks_per_row);
582 	num_metadata_disks = LE_16(rcb->dvp->raid_map->metadata_disks_per_row);
583 	total_disks = num_data_disks + num_metadata_disks;
584 
585 	index = PQISRC_DIV_ROUND_UP(rcb->raid_map_index + 1, total_disks);
586 	index *= total_disks;
587 	index -= num_metadata_disks;
588 
589 	switch (rcb->dvp->raid_level) {
590 	case SA_RAID_5:
591 		aio_req->header.iu_type =
592 		PQI_IU_TYPE_RAID5_WRITE_BYPASS_REQUEST;
593 		break;
594 	case SA_RAID_6:
595 		aio_req->header.iu_type =
596 		PQI_IU_TYPE_RAID6_WRITE_BYPASS_REQUEST;
597 		break;
598 	default:
599 		DBG_ERR("WRONG RAID TYPE FOR FUNCTION\n");
600 	}
601 	aio_req->header.comp_feature = 0;
602 	aio_req->response_queue_id = OS_GET_IO_RESP_QID(softs, rcb);
603 	aio_req->work_area[0] = 0;
604 	aio_req->work_area[1] = 0;
605 	aio_req->req_id = rcb->tag;
606 	aio_req->volume_id =  (LE_32(rcb->dvp->scsi3addr[0]) & 0x3FFF);
607 	aio_req->data_it_nexus = rcb->dvp->raid_map->dev_data[rcb->raid_map_index].ioaccel_handle;
608 	aio_req->p_parity_it_nexus =
609 		rcb->dvp->raid_map->dev_data[index].ioaccel_handle;
610 	if (aio_req->header.iu_type ==
611 		PQI_IU_TYPE_RAID6_WRITE_BYPASS_REQUEST) {
612 			aio_req->q_parity_it_nexus =
613 				rcb->dvp->raid_map->dev_data[index + 1].ioaccel_handle;
614 	}
615 	aio_req->xor_multiplier =
616 		rcb->dvp->raid_map->dev_data[rcb->raid_map_index].xor_mult[1];
617 	aio_req->row = rcb->row_num;
618 	/*aio_req->reserved = rcb->row_num * rcb->blocks_per_row +
619 		rcb->dvp->raid_map->disk_starting_blk;*/
620 	aio_req->buf_len = GET_SCSI_BUFFLEN(rcb);
621 	aio_req->cmd_flags.data_dir = rcb->data_dir;
622 	aio_req->cmd_flags.mem_type = 0;
623 	aio_req->cmd_flags.fence = 0;
624 	aio_req->cmd_flags.res2 = 0;
625 	aio_req->attr_prio.task_attr = OS_GET_TASK_ATTR(rcb);
626 	aio_req->attr_prio.cmd_prio = 0;
627 	aio_req->attr_prio.res3 = 0;
628 	if (rcb->cmdlen > sizeof(aio_req->cdb))
629 		rcb->cmdlen = sizeof(aio_req->cdb);
630 	memcpy(aio_req->cdb, rcb->cdbp, rcb->cmdlen);
631 	aio_req->err_idx = aio_req->req_id;
632 	aio_req->cdb_len = rcb->cmdlen;
633 #if 0
634 	/* Stubbed out for later */
635 	aio_req->header.iu_type = iu_type;
636 	aio_req->data_it_nexus = ;
637 	aio_req->p_parity_it_nexus = ;
638 	aio_req->q_parity_it_nexus = ;
639 	aio_req->row = ;
640 	aio_req->stripe_lba = ;
641 #endif
642 	/* handle encryption fields */
643 	if (rcb->encrypt_enable == true) {
644 		aio_req->cmd_flags.encrypt_enable = true;
645 		aio_req->encrypt_key_index =
646 			LE_16(rcb->enc_info.data_enc_key_index);
647 		aio_req->encrypt_twk_low =
648 			LE_32(rcb->enc_info.encrypt_tweak_lower);
649 		aio_req->encrypt_twk_high =
650 			LE_32(rcb->enc_info.encrypt_tweak_upper);
651 	} else {
652 		aio_req->cmd_flags.encrypt_enable = 0;
653 		aio_req->encrypt_key_index = 0;
654 		aio_req->encrypt_twk_high = 0;
655 		aio_req->encrypt_twk_low = 0;
656 	}
657 	/* Frame SGL Descriptor */
658 	aio_req->cmd_flags.partial = pqisrc_build_sgl(&aio_req->sg_desc[0], rcb,
659 		&aio_req->header, num_elem_alloted);
660 
661 	aio_req->num_sg = aio_req->header.iu_length / sizeof(sgt_t);
662 
663 	/* DBG_INFO("aio_req->num_sg :%d\n", aio_req->num_sg); */
664 
665 	aio_req->header.iu_length += offsetof(pqi_aio_raid5or6_write_req_t, sg_desc) -
666 		sizeof(iu_header_t);
667 	/* set completion and error handlers. */
668 	rcb->success_cmp_callback = pqisrc_process_io_response_success;
669 	rcb->error_cmp_callback = pqisrc_process_aio_response_error;
670 	rcb->resp_qid = aio_req->response_queue_id;
671 	DBG_FUNC("OUT\n");
672 
673 }
674 
675 /* Subroutine used to show AIO RAID5/6 write bypass IU fields */
676 void
677 pqisrc_show_aio_R5or6_write(pqisrc_softstate_t *softs, rcb_t *rcb,
678 	pqi_aio_raid5or6_write_req_t *aio_req)
679 {
680 #ifdef DEBUG_AIO
681 	DBG_INFO("AIO RAID5or6 Write IU Content, tag# 0x%08x\n", rcb->tag);
682 	DBG_INFO("%15s: 0x%x\n", "iu_type",	aio_req->header.iu_type);
683 	DBG_INFO("%15s: 0x%x\n", "comp_feat",	aio_req->header.comp_feature);
684 	DBG_INFO("%15s: 0x%x\n", "length",	aio_req->header.iu_length);
685 	DBG_INFO("%15s: 0x%x\n", "resp_qid",	aio_req->response_queue_id);
686 	DBG_INFO("%15s: 0x%x\n", "req_id",	aio_req->req_id);
687 	DBG_INFO("%15s: 0x%x\n", "volume_id",	aio_req->volume_id);
688 	DBG_INFO("%15s: 0x%x\n", "data_it_nexus",
689 		aio_req->data_it_nexus);
690 	DBG_INFO("%15s: 0x%x\n", "p_parity_it_nexus",
691 		aio_req->p_parity_it_nexus);
692 	DBG_INFO("%15s: 0x%x\n", "q_parity_it_nexus",
693 		aio_req->q_parity_it_nexus);
694 	DBG_INFO("%15s: 0x%x\n", "buf_len",	aio_req->buf_len);
695 	DBG_INFO("%15s:\n", "cmd_flags");
696 	DBG_INFO("%15s: 0x%x\n", "data_dir",	aio_req->cmd_flags.data_dir);
697 	DBG_INFO("%15s: 0x%x\n", "partial",	aio_req->cmd_flags.partial);
698 	DBG_INFO("%15s: 0x%x\n", "mem_type",	aio_req->cmd_flags.mem_type);
699 	DBG_INFO("%15s: 0x%x\n", "fence",	aio_req->cmd_flags.fence);
700 	DBG_INFO("%15s: 0x%x\n", "encryption",
701 		aio_req->cmd_flags.encrypt_enable);
702 	DBG_INFO("%15s:\n", "attr_prio");
703 	DBG_INFO("%15s: 0x%x\n", "task_attr",	aio_req->attr_prio.task_attr);
704 	DBG_INFO("%15s: 0x%x\n", "cmd_prio",	aio_req->attr_prio.cmd_prio);
705 	DBG_INFO("%15s: 0x%x\n", "dek_index",	aio_req->encrypt_key_index);
706 	pqisrc_show_cdb(softs, "AIOR56W", rcb, aio_req->cdb);
707 	DBG_INFO("%15s: 0x%x\n", "err_idx",	aio_req->err_idx);
708 	DBG_INFO("%15s: 0x%x\n", "num_sg",	aio_req->num_sg);
709 	DBG_INFO("%15s: 0x%x\n", "cdb_len",	aio_req->cdb_len);
710 	DBG_INFO("%15s: 0x%x\n", "tweak_lower",	aio_req->encrypt_twk_low);
711 	DBG_INFO("%15s: 0x%x\n", "tweak_upper",	aio_req->encrypt_twk_high);
712 	DBG_INFO("%15s: 0x%lx\n", "row",	aio_req->row);
713 #if 0
714 	DBG_INFO("%15s: 0x%lx\n", "stripe_lba",	aio_req->stripe_lba);
715 	DBG_INFO("%15s: 0x%p\n", "sg_desc[0].addr",
716 		(void *)aio_req->sg_desc[0].addr);
717 	DBG_INFO("%15s: 0x%x\n", "sg_desc[0].len",
718 		aio_req->sg_desc[0].len);
719 	DBG_INFO("%15s: 0x%x\n", "sg_desc[0].flags",
720 		aio_req->sg_desc[0].flags);
721 #endif
722 #endif /* DEBUG_AIO */
723 }
724 
725 /* Is the cdb a read command? */
726 boolean_t
727 pqisrc_cdb_is_read(uint8_t *cdb)
728 {
729 	if (cdb[0] == SCMD_READ_6 || cdb[0] == SCMD_READ_10 ||
730 		cdb[0] == SCMD_READ_12 || cdb[0] == SCMD_READ_16)
731 		return true;
732 	return false;
733 }
734 
735 /* Is the cdb a write command? */
736 boolean_t
737 pqisrc_cdb_is_write(uint8_t *cdb)
738 {
739 	if (cdb == NULL)
740 		return false;
741 
742 	if (cdb[0] == SCMD_WRITE_6 || cdb[0] == SCMD_WRITE_10 ||
743 		cdb[0] == SCMD_WRITE_12 || cdb[0] == SCMD_WRITE_16)
744 		return true;
745 	return false;
746 }
747 
748 /* Subroutine used to show the AIO request */
749 void
750 pqisrc_show_aio_io(pqisrc_softstate_t *softs, rcb_t *rcb,
751 	pqi_aio_req_t *aio_req, uint32_t num_elem_alloted)
752 {
753 	boolean_t is_write;
754 	DBG_FUNC("IN\n");
755 
756 	is_write = pqisrc_cdb_is_write(rcb->cdbp);
757 
758 	if (!is_write) {
759 		pqisrc_show_aio_common(softs, rcb, aio_req);
760 		goto out;
761 	}
762 
763 	switch (rcb->dvp->raid_level) {
764 	case SA_RAID_0:
765 		pqisrc_show_aio_common(softs, rcb, aio_req);
766 		break;
767 	case SA_RAID_1:
768 	case SA_RAID_ADM:
769 		pqisrc_show_aio_R1_write(softs, rcb,
770 			(pqi_aio_raid1_write_req_t *)aio_req);
771 		break;
772 	case SA_RAID_5:
773 	case SA_RAID_6:
774 		pqisrc_show_aio_R5or6_write(softs, rcb,
775 			(pqi_aio_raid5or6_write_req_t *)aio_req);
776 		break;
777 	}
778 
779 out:
780 	DBG_FUNC("OUT\n");
781 
782 }
783 
784 
785 void
786 pqisrc_build_aio_io(pqisrc_softstate_t *softs, rcb_t *rcb,
787 	pqi_aio_req_t *aio_req, uint32_t num_elem_alloted)
788 {
789 	boolean_t is_write;
790 	DBG_FUNC("IN\n");
791 
792 	is_write = pqisrc_cdb_is_write(rcb->cdbp);
793 
794 	if (is_write) {
795 		switch (rcb->dvp->raid_level) {
796 		case SA_RAID_0:
797 			pqisrc_build_aio_common(softs, aio_req,
798 				rcb, num_elem_alloted);
799 			break;
800 		case SA_RAID_1:
801 		case SA_RAID_ADM:
802 			pqisrc_build_aio_R1_write(softs,
803 				(pqi_aio_raid1_write_req_t *)aio_req,
804 				rcb, num_elem_alloted);
805 
806 			break;
807 		case SA_RAID_5:
808 		case SA_RAID_6:
809 			pqisrc_build_aio_R5or6_write(softs,
810 				(pqi_aio_raid5or6_write_req_t *)aio_req,
811 				rcb, num_elem_alloted);
812 			break;
813 		}
814 	} else {
815 		pqisrc_build_aio_common(softs, aio_req, rcb, num_elem_alloted);
816 	}
817 
818 	pqisrc_show_aio_io(softs, rcb, aio_req, num_elem_alloted);
819 
820 	DBG_FUNC("OUT\n");
821 }
822 
823 /*
824  *	Return true from this function to prevent AIO from handling this request.
825  *	True is returned if the request is determined to be part of a stream, or
826  *	if the controller does not handle AIO at the appropriate RAID level.
827  */
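/*
 * Added note: NUM_STREAMS_PER_LUN LBA trackers are kept per device. A write
 * that starts at or within one transfer length of a tracked next_lba is
 * treated as part of a sequential stream and kept on the RAID path
 * (presumably so the firmware can coalesce full-stripe writes); other writes
 * update the least-recently-used tracker and remain AIO-eligible.
 */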
828 static boolean_t
829 pqisrc_is_parity_write_stream(pqisrc_softstate_t *softs, rcb_t *rcb)
830 {
831 	os_ticks_t oldest_ticks;
832 	uint8_t lru_index;
833 	int i;
834 	int rc;
835 	pqi_scsi_dev_t *device;
836 	struct pqi_stream_data *pqi_stream_data;
837 	aio_req_locator_t loc;
838 
839 	DBG_FUNC("IN\n");
840 
841 	rc = fill_lba_for_scsi_rw(softs, rcb->cdbp , &loc);
842 	if (rc != PQI_STATUS_SUCCESS) {
843 		return false;
844 	}
845 
846 	/* check writes only */
847 	if (!pqisrc_cdb_is_write(rcb->cdbp)) {
848 	    return false;
849 	}
850 
851 	if (!softs->enable_stream_detection) {
852 		return false;
853 	}
854 
855 	device = rcb->dvp;
856 	if (!device) {
857 		return false;
858 	}
859 
860 	/*
861 	 * check for R5/R6 streams.
862 	 */
863 	if (device->raid_level != SA_RAID_5 && device->raid_level != SA_RAID_6) {
864 		return false;
865 	}
866 
867 	/*
868 	 * If controller does not support AIO R{5,6} writes, need to send
869 	 * requests down non-aio path.
870 	 */
871 	if ((device->raid_level == SA_RAID_5 && !softs->aio_raid5_write_bypass) ||
872 		(device->raid_level == SA_RAID_6 && !softs->aio_raid6_write_bypass)) {
873 		return true;
874 	}
875 
876 	lru_index = 0;
877 	oldest_ticks = INT_MAX;
878 	for (i = 0; i < NUM_STREAMS_PER_LUN; i++) {
879 		pqi_stream_data = &device->stream_data[i];
880 		/*
881 		 * check for adjacent request or request is within
882 		 * the previous request.
883 		 */
884 		if ((pqi_stream_data->next_lba &&
885 			loc.block.first >= pqi_stream_data->next_lba) &&
886 			loc.block.first <= pqi_stream_data->next_lba +
887 				loc.block.cnt) {
888 			pqi_stream_data->next_lba = loc.block.first +
889 				loc.block.cnt;
890 			pqi_stream_data->last_accessed = TICKS;
891 			return true;
892 		}
893 
894 		/* unused entry */
895 		if (pqi_stream_data->last_accessed == 0) {
896 			lru_index = i;
897 			break;
898 		}
899 
900 		/* Find entry with oldest last accessed time */
901 		if (pqi_stream_data->last_accessed <= oldest_ticks) {
902 			oldest_ticks = pqi_stream_data->last_accessed;
903 			lru_index = i;
904 		}
905 	}
906 
907 	/*
908 	 * Set LRU entry
909 	 */
910 	pqi_stream_data = &device->stream_data[lru_index];
911 	pqi_stream_data->last_accessed = TICKS;
912 	pqi_stream_data->next_lba = loc.block.first + loc.block.cnt;
913 
914 	DBG_FUNC("OUT\n");
915 
916 	return false;
917 }
918 
919 /**
920  Determine if a request is eligible for AIO.  Build/map
921  the request if using AIO path to a RAID volume.
922 
923  Return the path that should be used for this request.
924 */
925 static IO_PATH_T
926 determine_io_path_build_bypass(pqisrc_softstate_t *softs,rcb_t *rcb)
927 {
928 	IO_PATH_T io_path = AIO_PATH;
929 	pqi_scsi_dev_t *devp = rcb->dvp;
930 	int ret = PQI_STATUS_FAILURE;
931 
932 	/* Default to using the host CDB directly (will be used if targeting the
933 	   RAID path or HBA mode) */
934 	rcb->cdbp = OS_GET_CDBP(rcb);
935 
936 	if(!rcb->aio_retry) {
937 
938 		/**  IO for Physical Drive, Send in AIO PATH **/
939 		if(IS_AIO_PATH(devp)) {
940 			rcb->ioaccel_handle = devp->ioaccel_handle;
941 			return io_path;
942 		}
943 
944 		/** IO for RAID Volume, ByPass IO, Send in AIO PATH unless part of stream **/
945 		if (devp->offload_enabled && !pqisrc_is_parity_write_stream(softs, rcb)) {
946 			ret = pqisrc_build_scsi_cmd_raidbypass(softs, devp, rcb);
947 		}
948 
949 		if (PQI_STATUS_FAILURE == ret) {
950 			io_path = RAID_PATH;
951 		} else {
952 			ASSERT(rcb->cdbp == rcb->bypass_cdb);
953 		}
954 	} else {
955 		/* Retrying failed AIO IO */
956 		io_path = RAID_PATH;
957 	}
958 
959 	return io_path;
960 }
961 
962 uint8_t
963 pqisrc_get_aio_data_direction(rcb_t *rcb)
964 {
965         switch (rcb->cm_ccb->ccb_h.flags & CAM_DIR_MASK) {
966         case CAM_DIR_IN:  	return SOP_DATA_DIR_FROM_DEVICE;
967         case CAM_DIR_OUT:   	return SOP_DATA_DIR_TO_DEVICE;
968         case CAM_DIR_NONE:  	return SOP_DATA_DIR_NONE;
969         default:		return SOP_DATA_DIR_UNKNOWN;
970         }
971 }
972 
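/*
 * Added note: the RAID-path mapping below is the mirror image of the AIO
 * mapping above (CAM_DIR_IN -> TO_DEVICE, CAM_DIR_OUT -> FROM_DEVICE); the
 * direction appears to be expressed from the controller's point of view
 * rather than the host's.
 */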
973 uint8_t
974 pqisrc_get_raid_data_direction(rcb_t *rcb)
975 {
976         switch (rcb->cm_ccb->ccb_h.flags & CAM_DIR_MASK) {
977         case CAM_DIR_IN:  	return SOP_DATA_DIR_TO_DEVICE;
978         case CAM_DIR_OUT:   	return SOP_DATA_DIR_FROM_DEVICE;
979         case CAM_DIR_NONE:  	return SOP_DATA_DIR_NONE;
980         default:		return SOP_DATA_DIR_UNKNOWN;
981         }
982 }
983 
984 /* Function used to build and send RAID/AIO */
985 int
986 pqisrc_build_send_io(pqisrc_softstate_t *softs,rcb_t *rcb)
987 {
988 	ib_queue_t *ib_q_array = softs->op_aio_ib_q;
989 	ib_queue_t *ib_q = NULL;
990 	char *ib_iu = NULL;
991 	IO_PATH_T io_path;
992 	uint32_t TraverseCount = 0;
993 	int first_qindex = OS_GET_IO_REQ_QINDEX(softs, rcb);
994 	int qindex = first_qindex;
995 	uint32_t num_op_ib_q = softs->num_op_aio_ibq;
996 	uint32_t num_elem_needed;
997 	uint32_t num_elem_alloted = 0;
998 	pqi_scsi_dev_t *devp = rcb->dvp;
999 	boolean_t is_write;
1000 
1001 	DBG_FUNC("IN\n");
1002 
1003 	/* Note: this will determine if the request is eligible for AIO */
1004 	io_path = determine_io_path_build_bypass(softs, rcb);
1005 
1006 	if (io_path == RAID_PATH)
1007 	{
1008 		/* Update direction for RAID path */
1009 		rcb->data_dir = pqisrc_get_raid_data_direction(rcb);
1010 		num_op_ib_q = softs->num_op_raid_ibq;
1011 		ib_q_array = softs->op_raid_ib_q;
1012 	}
1013 	else {
1014 		rcb->data_dir = pqisrc_get_aio_data_direction(rcb);
1015 		if (rcb->data_dir == SOP_DATA_DIR_UNKNOWN) {
1016 			DBG_ERR("Unknown Direction\n");
1017 		}
1018 	}
1019 
1020 	is_write = pqisrc_cdb_is_write(rcb->cdbp);
1021 	/* coverity[unchecked_value] */
1022 	num_elem_needed = pqisrc_num_elem_needed(softs,
1023 		OS_GET_IO_SG_COUNT(rcb), devp, is_write, io_path);
1024 	DBG_IO("num_elem_needed :%u",num_elem_needed);
1025 
1026 	do {
1027 		uint32_t num_elem_available;
1028 		ib_q = (ib_q_array + qindex);
1029 		PQI_LOCK(&ib_q->lock);
1030 		num_elem_available = pqisrc_contiguous_free_elem(ib_q->pi_local,
1031 					*(ib_q->ci_virt_addr), ib_q->num_elem);
1032 
1033 		DBG_IO("num_elem_available :%u\n",num_elem_available);
1034 		if(num_elem_available >= num_elem_needed) {
1035 			num_elem_alloted = num_elem_needed;
1036 			break;
1037 		}
1038 		DBG_IO("Current queue is busy! Hop to next queue\n");
1039 
1040 		PQI_UNLOCK(&ib_q->lock);
1041 		qindex = (qindex + 1) % num_op_ib_q;
1042 		if(qindex == first_qindex) {
1043 			if (num_elem_needed == 1)
1044 				break;
1045 			TraverseCount += 1;
1046 			num_elem_needed = 1;
1047 		}
1048 	}while(TraverseCount < 2);
1049 
1050 	DBG_IO("num_elem_alloted :%u",num_elem_alloted);
1051 	if (num_elem_alloted == 0) {
1052 		DBG_WARN("OUT: IB Queues were full\n");
1053 		return PQI_STATUS_QFULL;
1054 	}
1055 
1056 	pqisrc_increment_device_active_io(softs,devp);
1057 
1058 	/* Get IB Queue Slot address to build IU */
1059 	ib_iu = ib_q->array_virt_addr + (ib_q->pi_local * ib_q->elem_size);
1060 
1061 	if(io_path == AIO_PATH) {
1062 		/* Fill in the AIO IU per request and raid type */
1063 		pqisrc_build_aio_io(softs, rcb, (pqi_aio_req_t *)ib_iu,
1064 			num_elem_alloted);
1065 	} else {
1066 		/** Build RAID structure **/
1067 		pqisrc_build_raid_io(softs, rcb, (pqisrc_raid_req_t *)ib_iu,
1068 			num_elem_alloted);
1069 	}
1070 
1071 	rcb->req_pending = true;
1072 	rcb->req_q = ib_q;
1073 	rcb->path = io_path;
1074 
1075 	pqisrc_increment_io_counters(softs, rcb);
1076 
1077 	/* Update the local PI */
1078 	ib_q->pi_local = (ib_q->pi_local + num_elem_alloted) % ib_q->num_elem;
1079 
1080 	DBG_IO("ib_q->pi_local : %x\n", ib_q->pi_local);
1081 	DBG_IO("*ib_q->ci_virt_addr: %x\n",*(ib_q->ci_virt_addr));
1082 
1083 	/* Inform the fw about the new IU */
1084 	PCI_MEM_PUT32(softs, ib_q->pi_register_abs, ib_q->pi_register_offset, ib_q->pi_local);
1085 
1086 	PQI_UNLOCK(&ib_q->lock);
1087 	DBG_FUNC("OUT\n");
1088 	return PQI_STATUS_SUCCESS;
1089 }
1090 
1091 /* Subroutine used to set encryption info as part of RAID bypass IO */
1092 static inline void
1093 pqisrc_set_enc_info(struct pqi_enc_info *enc_info,
1094 		struct raid_map *raid_map, uint64_t first_block)
1095 {
1096 	uint32_t volume_blk_size;
1097 
1098 	/*
1099 	 * Set the encryption tweak values based on logical block address.
1100 	 * If the block size is 512, the tweak value is equal to the LBA.
1101 	 * For other block sizes, tweak value is (LBA * block size) / 512.
1102 	 */
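	/*
	 * Example (illustrative): a volume with 4096-byte blocks at LBA 0x100
	 * yields a tweak of 0x100 * 4096 / 512 = 0x800.
	 */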
1103 	volume_blk_size = GET_LE32((uint8_t *)&raid_map->volume_blk_size);
1104 	if (volume_blk_size != 512)
1105 		first_block = (first_block * volume_blk_size) / 512;
1106 
1107 	enc_info->data_enc_key_index =
1108 		GET_LE16((uint8_t *)&raid_map->data_encryption_key_index);
1109 	enc_info->encrypt_tweak_upper = ((uint32_t)(((first_block) >> 16) >> 16));
1110 	enc_info->encrypt_tweak_lower = ((uint32_t)(first_block));
1111 }
1112 
1113 
1114 /*
1115  * Attempt to perform offload RAID mapping for a logical volume I/O.
1116  */
1117 
1118 #define HPSA_RAID_0		0
1119 #define HPSA_RAID_4		1
1120 #define HPSA_RAID_1		2	/* also used for RAID 10 */
1121 #define HPSA_RAID_5		3	/* also used for RAID 50 */
1122 #define HPSA_RAID_51		4
1123 #define HPSA_RAID_6		5	/* also used for RAID 60 */
1124 #define HPSA_RAID_ADM		6	/* also used for RAID 1+0 ADM */
1125 #define HPSA_RAID_MAX		HPSA_RAID_ADM
1126 #define HPSA_RAID_UNKNOWN	0xff
1127 
1128 /* Parse the SCSI read/write opcode and fill in the LBA and block count for RAID bypass */
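/*
 * Example (illustrative CDB): READ(10) 28 00 00 00 10 00 00 00 08 00 decodes
 * to block.first = 0x1000 and block.cnt = 8; is_write stays false.
 */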
1129 static int
1130 fill_lba_for_scsi_rw(pqisrc_softstate_t *softs, uint8_t *cdb, aio_req_locator_t *l)
1131 {
1132 
1133 	if (!l) {
1134 		DBG_INFO("No locator ptr: AIO ineligible");
1135 		return PQI_STATUS_FAILURE;
1136 	}
1137 
1138 	if (cdb == NULL)
1139 		return PQI_STATUS_FAILURE;
1140 
1141 	switch (cdb[0]) {
1142 	case SCMD_WRITE_6:
1143 		l->is_write = true;
1144 		/* coverity[fallthrough] */
1145 	case SCMD_READ_6:
1146 		l->block.first = (uint64_t)(((cdb[1] & 0x1F) << 16) |
1147 				(cdb[2] << 8) | cdb[3]);
1148 		l->block.cnt = (uint32_t)cdb[4];
1149 		if (l->block.cnt == 0)
1150 				l->block.cnt = 256; /*blkcnt 0 means 256 */
1151 		break;
1152 	case SCMD_WRITE_10:
1153 		l->is_write = true;
1154 		/* coverity[fallthrough] */
1155 	case SCMD_READ_10:
1156 		l->block.first = (uint64_t)GET_BE32(&cdb[2]);
1157 		l->block.cnt = (uint32_t)GET_BE16(&cdb[7]);
1158 		break;
1159 	case SCMD_WRITE_12:
1160 		l->is_write = true;
1161 		/* coverity[fallthrough] */
1162 	case SCMD_READ_12:
1163 		l->block.first = (uint64_t)GET_BE32(&cdb[2]);
1164 		l->block.cnt = GET_BE32(&cdb[6]);
1165 		break;
1166 	case SCMD_WRITE_16:
1167 		l->is_write = true;
1168 		/* coverity[fallthrough] */
1169 	case SCMD_READ_16:
1170 		l->block.first = GET_BE64(&cdb[2]);
1171 		l->block.cnt = GET_BE32(&cdb[10]);
1172 		break;
1173 	default:
1174 		/* Process via normal I/O path. */
1175 		DBG_AIO("NOT read or write 6/10/12/16: AIO ineligible");
1176 		return PQI_STATUS_FAILURE;
1177 	}
1178 	return PQI_STATUS_SUCCESS;
1179 }
1180 
1181 
1182 /* determine whether writes to certain types of RAID are supported. */
1183 static boolean_t
1184 pqisrc_is_supported_write(pqisrc_softstate_t *softs,
1185 	pqi_scsi_dev_t *device)
1186 {
1187 
1188 	DBG_FUNC("IN\n");
1189 
1190 	/* Raid0 was always supported */
1191 	if (device->raid_level == SA_RAID_0)
1192 		return true;
1193 
1194 	/* Module params for individual advanced AIO write features may be on;
1195 	 * they apply to ALL controllers, but some controllers do not support
1196 	 * advanced AIO writes.
1197 	 */
1198 	if (!softs->adv_aio_capable)
1199 		return false;
1200 
1201 	/* if the raid write bypass feature is turned on,
1202 	 * then the write is supported.
1203 	 */
1204 	switch (device->raid_level) {
1205 	case SA_RAID_1:
1206 	case SA_RAID_ADM:
1207 		if (softs->aio_raid1_write_bypass)
1208 			return true;
1209 		break;
1210 	case SA_RAID_5:
1211 		if (softs->aio_raid5_write_bypass)
1212 			return true;
1213 		break;
1214 	case SA_RAID_6:
1215 		if (softs->aio_raid6_write_bypass)
1216 			return true;
1217 	}
1218 
1219 	/* otherwise, it must be an unsupported write. */
1220 	DBG_IO("AIO ineligible: write not supported for raid type\n");
1221 	DBG_FUNC("OUT\n");
1222 	return false;
1223 
1224 }
1225 
1226 /* check for zero-byte transfers, invalid blocks, and wraparound */
1227 static inline boolean_t
1228 pqisrc_is_invalid_block(pqisrc_softstate_t *softs, aio_req_locator_t *l)
1229 {
1230 	DBG_FUNC("IN\n");
1231 
1232 	if (l->block.cnt == 0) {
1233 		DBG_AIO("AIO ineligible: blk_cnt=0\n");
1234 		DBG_FUNC("OUT\n");
1235 		return true;
1236 	}
1237 
1238 	if (l->block.last < l->block.first ||
1239 		l->block.last >=
1240 			GET_LE64((uint8_t *)&l->raid_map->volume_blk_cnt)) {
1241 		DBG_AIO("AIO ineligible: last block < first\n");
1242 		DBG_FUNC("OUT\n");
1243 		return true;
1244 	}
1245 
1246 	DBG_FUNC("OUT\n");
1247 	return false;
1248 }
1249 
1250 /* Compute various attributes of request's location */
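/*
 * Example (illustrative): with data_disks = 3 and strip_sz = 128,
 * blks_per_row = 384; block.first = 1000 then maps to row 2, row offset 232
 * and column 1.
 */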
1251 static inline boolean_t
1252 pqisrc_calc_disk_params(pqisrc_softstate_t *softs, aio_req_locator_t *l,  rcb_t *rcb)
1253 {
1254 	DBG_FUNC("IN\n");
1255 
1256 	/* grab #disks, strip size, and layout map count from raid map */
1257 	l->row.data_disks =
1258 		GET_LE16((uint8_t *)&l->raid_map->data_disks_per_row);
1259 	l->strip_sz =
1260 		GET_LE16((uint8_t *)(&l->raid_map->strip_size));
1261 	l->map.layout_map_count =
1262 		GET_LE16((uint8_t *)(&l->raid_map->layout_map_count));
1263 
1264 	/* Calculate stripe information for the request. */
1265 	l->row.blks_per_row =  l->row.data_disks * l->strip_sz;
1266 	if (!l->row.blks_per_row || !l->strip_sz) {
1267 		DBG_AIO("AIO ineligible\n");
1268 		DBG_FUNC("OUT\n");
1269 		return false;
1270 	}
1271 	/* use __udivdi3 ? */
1272 	rcb->blocks_per_row = l->row.blks_per_row;
1273 	l->row.first = l->block.first / l->row.blks_per_row;
1274 	rcb->row_num = l->row.first;
1275 	l->row.last = l->block.last / l->row.blks_per_row;
1276 	l->row.offset_first = (uint32_t)(l->block.first -
1277 		(l->row.first * l->row.blks_per_row));
1278 	l->row.offset_last = (uint32_t)(l->block.last -
1279 		(l->row.last * l->row.blks_per_row));
1280 	l->col.first = l->row.offset_first / l->strip_sz;
1281 	l->col.last = l->row.offset_last / l->strip_sz;
1282 
1283 	DBG_FUNC("OUT\n");
1284 	return true;
1285 }
1286 
1287 /* Not AIO-eligible if it isn't a single row/column. */
1288 static inline boolean_t
1289 pqisrc_is_single_row_column(pqisrc_softstate_t *softs, aio_req_locator_t *l)
1290 {
1291 	boolean_t ret = true;
1292 	DBG_FUNC("IN\n");
1293 
1294 	if (l->row.first != l->row.last || l->col.first != l->col.last) {
1295 		DBG_AIO("AIO ineligible\n");
1296 		ret = false;
1297 	}
1298 	DBG_FUNC("OUT\n");
1299 	return ret;
1300 }
1301 
1302 /* figure out disks/row, row, and map index. */
1303 static inline boolean_t
1304 pqisrc_set_map_row_and_idx(pqisrc_softstate_t *softs, aio_req_locator_t *l, rcb_t *rcb)
1305 {
1306 	if (!l->row.data_disks) {
1307 		DBG_INFO("AIO ineligible: no data disks?\n");
1308 		return false;
1309 	}
1310 
1311 	l->row.total_disks = l->row.data_disks +
1312 		LE_16(l->raid_map->metadata_disks_per_row);
1313 
1314 	l->map.row = ((uint32_t)(l->row.first >>
1315 		l->raid_map->parity_rotation_shift)) %
1316 		GET_LE16((uint8_t *)(&l->raid_map->row_cnt));
1317 
1318 	l->map.idx = (l->map.row * l->row.total_disks) + l->col.first;
1319 	rcb->raid_map_index = l->map.idx;
1320 	rcb->raid_map_row = l->map.row;
1321 
1322 	return true;
1323 }
1324 
1325 /* Select the mirror to read from for a RAID 1/10/ADM read */
1326 static inline void
1327 pqisrc_set_read_mirror(pqisrc_softstate_t *softs,
1328 	pqi_scsi_dev_t *device, aio_req_locator_t *l)
1329 {
1330 	/* Avoid direct use of device->offload_to_mirror within this
1331 	 * function since multiple threads might simultaneously
1332 	 * increment it beyond the range of device->layout_map_count -1.
1333 	 */
1334 
1335 	int mirror = device->offload_to_mirror[l->map.idx];
1336 	int next_mirror = mirror + 1;
1337 
1338 	if (next_mirror >= l->map.layout_map_count)
1339 		next_mirror = 0;
1340 
1341 	device->offload_to_mirror[l->map.idx] = next_mirror;
1342 	l->map.idx += mirror * l->row.data_disks;
1343 }
1344 
1345 /* collect ioaccel handles for mirrors of given location. */
1346 static inline boolean_t
1347 pqisrc_set_write_mirrors(
1348 	pqisrc_softstate_t *softs,
1349 	pqi_scsi_dev_t *device,
1350 	aio_req_locator_t *l,
1351 	rcb_t *rcb)
1352 {
1353 	uint32_t mirror = 0;
1354 	uint32_t index;
1355 
1356 	if (l->map.layout_map_count > PQISRC_MAX_SUPPORTED_MIRRORS)
1357 		return false;
1358 
1359 	do {
1360 		index = l->map.idx + (l->row.data_disks * mirror);
1361 		rcb->it_nexus[mirror] =
1362 			l->raid_map->dev_data[index].ioaccel_handle;
1363 		mirror++;
1364 	} while (mirror != l->map.layout_map_count);
1365 
1366 	return true;
1367 }
1368 
1369 /* Make sure first and last block are in the same R5/R6 RAID group. */
1370 static inline boolean_t
1371 pqisrc_is_r5or6_single_group(pqisrc_softstate_t *softs, aio_req_locator_t *l)
1372 {
1373 	boolean_t ret = true;
1374 
1375 	DBG_FUNC("IN\n");
1376 	l->r5or6.row.blks_per_row = l->strip_sz * l->row.data_disks;
1377 	l->stripesz = l->r5or6.row.blks_per_row * l->map.layout_map_count;
1378 	l->group.first = (l->block.first % l->stripesz) /
1379 				l->r5or6.row.blks_per_row;
1380 	l->group.last = (l->block.last % l->stripesz) /
1381 				l->r5or6.row.blks_per_row;
1382 
1383 	if (l->group.first != l->group.last) {
1384 		DBG_AIO("AIO ineligible");
1385 		ret = false;
1386 	}
1387 
1388 	DBG_FUNC("OUT\n");
1389 	ASSERT(ret == true);
1390 	return ret;
1391 }
1392 /* Make sure R5 or R6 request doesn't span rows. */
1393 static inline boolean_t
1394 pqisrc_is_r5or6_single_row(pqisrc_softstate_t *softs, aio_req_locator_t *l)
1395 {
1396 	boolean_t ret = true;
1397 
1398 	DBG_FUNC("IN\n");
1399 
1400 	/* figure row nums containing first & last block */
1401 	l->row.first = l->r5or6.row.first =
1402 		l->block.first / l->stripesz;
1403 	l->r5or6.row.last = l->block.last / l->stripesz;
1404 
1405 	if (l->r5or6.row.first != l->r5or6.row.last) {
1406 		DBG_AIO("AIO ineligible");
1407 		ret = false;
1408 	}
1409 
1410 	DBG_FUNC("OUT\n");
1411 	ASSERT(ret == true);
1412 	return ret;
1413 }
1414 
1415 /* Make sure R5 or R6 request doesn't span columns. */
1416 static inline boolean_t
1417 pqisrc_is_r5or6_single_column(pqisrc_softstate_t *softs, aio_req_locator_t *l)
1418 {
1419 	boolean_t ret = true;
1420 
1421 	/* Find the columns of the first and last block */
1422 	l->row.offset_first = l->r5or6.row.offset_first =
1423 		(uint32_t)((l->block.first % l->stripesz) %
1424 		l->r5or6.row.blks_per_row);
1425 	l->r5or6.row.offset_last =
1426 		(uint32_t)((l->block.last % l->stripesz) %
1427 		l->r5or6.row.blks_per_row);
1428 
1429 	l->col.first = l->r5or6.row.offset_first / l->strip_sz;
1430 	l->r5or6.col.first = l->col.first;
1431 	l->r5or6.col.last = l->r5or6.row.offset_last / l->strip_sz;
1432 
1433 	if (l->r5or6.col.first != l->r5or6.col.last) {
1434 		DBG_AIO("AIO ineligible");
1435 		ret = false;
1436 	}
1437 
1438 	ASSERT(ret == true);
1439 	return ret;
1440 }
1441 
1442 
1443 /* Set the map row and index for a R5 or R6 AIO request */
1444 static inline void
1445 pqisrc_set_r5or6_row_and_index(aio_req_locator_t *l,
1446 	rcb_t *rcb)
1447 {
1448 	l->map.row = ((uint32_t)
1449 		(l->row.first >> l->raid_map->parity_rotation_shift)) %
1450 		GET_LE16((uint8_t *)(&l->raid_map->row_cnt));
1451 
1452 	l->map.idx = (l->group.first *
1453 		(GET_LE16((uint8_t *)(&l->raid_map->row_cnt))
1454 		* l->row.total_disks))
1455 		+ (l->map.row * l->row.total_disks)
1456 		+ l->col.first;
1457 
1458 	rcb->raid_map_index = l->map.idx;
1459 	rcb->raid_map_row = l->map.row;
1460 }
1461 
1462 /* calculate physical disk block for aio request */
1463 static inline boolean_t
1464 pqisrc_calc_aio_block(aio_req_locator_t *l)
1465 {
1466 	boolean_t ret = true;
1467 
1468 	l->block.disk_block =
1469 		GET_LE64((uint8_t *) (&l->raid_map->disk_starting_blk))
1470 		+ (l->row.first * l->strip_sz)
1471 		+ ((uint64_t)(l->row.offset_first) - (uint64_t)(l->col.first) * l->strip_sz);
1472 
1473 	/* any values we should be checking here? if not convert to void */
1474 	return ret;
1475 }
1476 
1477 /* Handle differing logical/physical block sizes. */
1478 static inline uint32_t
1479 pqisrc_handle_blk_size_diffs(aio_req_locator_t *l)
1480 {
1481 	uint32_t disk_blk_cnt;
1482 	disk_blk_cnt = l->block.cnt;
1483 
1484 	if (l->raid_map->phys_blk_shift) {
1485 		l->block.disk_block <<= l->raid_map->phys_blk_shift;
1486 		disk_blk_cnt <<= l->raid_map->phys_blk_shift;
1487 	}
1488 	return disk_blk_cnt;
1489 }
1490 
1491 /* Make sure AIO request doesn't exceed the max that AIO device can
1492  * handle based on dev type, Raid level, and encryption status.
1493  * TODO: make limits dynamic when this becomes possible.
1494  */
1495 static boolean_t
1496 pqisrc_aio_req_too_big(pqisrc_softstate_t *softs,
1497 	pqi_scsi_dev_t *device, rcb_t *rcb,
1498 	aio_req_locator_t *l, uint32_t disk_blk_cnt)
1499 {
1500 	boolean_t ret = false;
1501 	uint32_t dev_max;
1502 	uint32_t size = disk_blk_cnt * device->raid_map->volume_blk_size;
1503 	dev_max = size;
1504 
1505 	/* filter for nvme crypto */
1506 	if (device->is_nvme && rcb->encrypt_enable) {
1507 		if (softs->max_aio_rw_xfer_crypto_nvme != 0) {
1508 			dev_max = MIN(dev_max,softs->max_aio_rw_xfer_crypto_nvme);
1509 		}
1510 	}
1511 
1512 	/* filter for RAID 5/6/50/60 */
1513 	if (!device->is_physical_device &&
1514 		(device->raid_level == SA_RAID_5 ||
1515 		device->raid_level == SA_RAID_51 ||
1516 		device->raid_level == SA_RAID_6)) {
1517 		if (softs->max_aio_write_raid5_6 != 0) {
1518 			dev_max = MIN(dev_max,softs->max_aio_write_raid5_6);
1519 		}
1520 	}
1521 
1522 	/* filter for RAID ADM */
1523 	if (!device->is_physical_device &&
1524 		(device->raid_level == SA_RAID_ADM) &&
1525 		(softs->max_aio_write_raid1_10_3drv != 0)) {
1526 			dev_max = MIN(dev_max,
1527 				softs->max_aio_write_raid1_10_3drv);
1528 	}
1529 
1530 	/* filter for RAID 1/10 */
1531 	if (!device->is_physical_device &&
1532 		(device->raid_level == SA_RAID_1) &&
1533 		(softs->max_aio_write_raid1_10_2drv != 0)) {
1534 			dev_max = MIN(dev_max,
1535 				softs->max_aio_write_raid1_10_2drv);
1536 	}
1537 
1538 
1539 	if (size > dev_max) {
1540 		DBG_AIO("AIO ineligible: size=%u, max=%u", size, dev_max);
1541 		ret = true;
1542 	}
1543 
1544 	return ret;
1545 }
1546 
1547 
1548 #ifdef DEBUG_RAID_MAP
1549 static inline void
1550 pqisrc_aio_show_raid_map(pqisrc_softstate_t *softs, struct raid_map *m)
1551 {
1552 	int i;
1553 
1554 	if (!m) {
1555 		DBG_WARN("No RAID MAP!\n");
1556 		return;
1557 	}
1558 	DBG_INFO("======= Raid Map ================\n");
1559 	DBG_INFO("%-25s: 0x%x\n", "StructureSize", m->structure_size);
1560 	DBG_INFO("%-25s: 0x%x\n", "LogicalBlockSize", m->volume_blk_size);
1561 	DBG_INFO("%-25s: 0x%lx\n", "LogicalBlockCount", m->volume_blk_cnt);
1562 	DBG_INFO("%-25s: 0x%x\n", "PhysicalBlockShift", m->phys_blk_shift);
1563 	DBG_INFO("%-25s: 0x%x\n", "ParityRotationShift",
1564 				m->parity_rotation_shift);
1565 	DBG_INFO("%-25s: 0x%x\n", "StripSize", m->strip_size);
1566 	DBG_INFO("%-25s: 0x%lx\n", "DiskStartingBlock", m->disk_starting_blk);
1567 	DBG_INFO("%-25s: 0x%lx\n", "DiskBlockCount", m->disk_blk_cnt);
1568 	DBG_INFO("%-25s: 0x%x\n", "DataDisksPerRow", m->data_disks_per_row);
1569 	DBG_INFO("%-25s: 0x%x\n", "MetadataDisksPerRow",
1570 				m->metadata_disks_per_row);
1571 	DBG_INFO("%-25s: 0x%x\n", "RowCount", m->row_cnt);
1572 	DBG_INFO("%-25s: 0x%x\n", "LayoutMapCnt", m->layout_map_count);
1573 	DBG_INFO("%-25s: 0x%x\n", "fEncryption", m->flags);
1574 	DBG_INFO("%-25s: 0x%x\n", "DEK", m->data_encryption_key_index);
1575 	for (i = 0; i < RAID_MAP_MAX_ENTRIES; i++) {
1576 		if (m->dev_data[i].ioaccel_handle == 0)
1577 			break;
1578 		DBG_INFO("%-25s: %d: 0x%04x\n", "ioaccel_handle, disk",
1579 			i, m->dev_data[i].ioaccel_handle);
1580 	}
1581 }
1582 #endif /* DEBUG_RAID_MAP */
1583 
1584 static inline void
1585 pqisrc_aio_show_locator_info(pqisrc_softstate_t *softs,
1586 	aio_req_locator_t *l, uint32_t disk_blk_cnt, rcb_t *rcb)
1587 {
1588 #ifdef DEBUG_AIO_LOCATOR
1589 	pqisrc_aio_show_raid_map(softs, l->raid_map);
1590 
1591 	DBG_INFO("======= AIO Locator Content, tag#0x%08x =====\n", rcb->tag);
1592 	DBG_INFO("%-25s: 0x%lx\n", "block.first", l->block.first);
1593 	DBG_INFO("%-25s: 0x%lx\n", "block.last", l->block.last);
1594 	DBG_INFO("%-25s: 0x%x\n", "block.cnt", l->block.cnt);
1595 	DBG_INFO("%-25s: 0x%lx\n", "block.disk_block", l->block.disk_block);
1596 	DBG_INFO("%-25s: 0x%x\n", "row.blks_per_row", l->row.blks_per_row);
1597 	DBG_INFO("%-25s: 0x%lx\n", "row.first", l->row.first);
1598 	DBG_INFO("%-25s: 0x%lx\n", "row.last", l->row.last);
1599 	DBG_INFO("%-25s: 0x%x\n", "row.offset_first", l->row.offset_first);
1600 	DBG_INFO("%-25s: 0x%x\n", "row.offset_last", l->row.offset_last);
1601 	DBG_INFO("%-25s: 0x%x\n", "row.data_disks", l->row.data_disks);
1602 	DBG_INFO("%-25s: 0x%x\n", "row.total_disks", l->row.total_disks);
1603 	DBG_INFO("%-25s: 0x%x\n", "col.first", l->col.first);
1604 	DBG_INFO("%-25s: 0x%x\n", "col.last", l->col.last);
1605 
1606 	if (l->raid_level == SA_RAID_5 || l->raid_level == SA_RAID_6) {
1607 		DBG_INFO("%-25s: 0x%x\n", "r5or6.row.blks_per_row",
1608 				l->r5or6.row.blks_per_row);
1609 		DBG_INFO("%-25s: 0x%lx\n", "r5or6.row.first", l->r5or6.row.first);
1610 		DBG_INFO("%-25s: 0x%lx\n", "r5or6.row.last", l->r5or6.row.last);
1611 		DBG_INFO("%-25s: 0x%x\n", "r5or6.row.offset_first",
1612 					l->r5or6.row.offset_first);
1613 		DBG_INFO("%-25s: 0x%x\n", "r5or6.row.offset_last",
1614 					l->r5or6.row.offset_last);
1615 		DBG_INFO("%-25s: 0x%x\n", "r5or6.row.data_disks",
1616 					l->r5or6.row.data_disks);
1617 		DBG_INFO("%-25s: 0x%x\n", "r5or6.row.total_disks",
1618 					l->r5or6.row.total_disks);
1619 		DBG_INFO("%-25s: 0x%x\n", "r5or6.col.first", l->r5or6.col.first);
1620 		DBG_INFO("%-25s: 0x%x\n", "r5or6.col.last", l->r5or6.col.last);
1621 	}
1622 	DBG_INFO("%-25s: 0x%x\n", "map.row", l->map.row);
1623 	DBG_INFO("%-25s: 0x%x\n", "map.idx", l->map.idx);
1624 	DBG_INFO("%-25s: 0x%x\n", "map.layout_map_count",
1625 				l->map.layout_map_count);
1626 	DBG_INFO("%-25s: 0x%x\n", "group.first", l->group.first);
1627 	DBG_INFO("%-25s: 0x%x\n", "group.last", l->group.last);
1628 	DBG_INFO("%-25s: 0x%x\n", "group.cur", l->group.cur);
1629 	DBG_INFO("%-25s: %d\n", "is_write", l->is_write);
1630 	DBG_INFO("%-25s: 0x%x\n", "stripesz", l->stripesz);
1631 	DBG_INFO("%-25s: 0x%x\n", "strip_sz", l->strip_sz);
1632 	DBG_INFO("%-25s: %d\n", "offload_to_mirror", l->offload_to_mirror);
1633 	DBG_INFO("%-25s: %d\n", "raid_level", l->raid_level);
1634 
1635 #endif /* DEBUG_AIO_LOCATOR */
1636 }
1637 
1638 /* Build the AIO CDB */
1639 static void
1640 pqisrc_aio_build_cdb(aio_req_locator_t *l,
1641 		uint32_t disk_blk_cnt, rcb_t *rcb, uint8_t *cdb)
1642 {
1643 	uint8_t cdb_length;
1644 
1645 	if (l->block.disk_block > 0xffffffff) {
1646 		cdb[0] = l->is_write ? SCMD_WRITE_16 : SCMD_READ_16;
1647 		cdb[1] = 0;
1648 		PUT_BE64(l->block.disk_block, &cdb[2]);
1649 		PUT_BE32(disk_blk_cnt, &cdb[10]);
1650 		cdb[15] = 0;
1651 		cdb_length = 16;
1652 	} else {
1653 		cdb[0] = l->is_write ? SCMD_WRITE_10 : SCMD_READ_10;
1654 		cdb[1] = 0;
1655 		PUT_BE32(l->block.disk_block, &cdb[2]);
1656 		cdb[6] = 0;
1657 		PUT_BE16(disk_blk_cnt, &cdb[7]);
1658 		cdb[9] = 0;
1659 		cdb_length = 10;
1660 	}
1661 
1662 	rcb->cmdlen = cdb_length;
1663 
1664 }
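/*
 * Resulting CDB layouts (standard SCSI READ/WRITE formats):
 *   10-byte: byte 0 opcode, bytes 2-5 LBA (big endian), bytes 7-8 count
 *   16-byte: byte 0 opcode, bytes 2-9 LBA (big endian), bytes 10-13 count
 * The 16-byte form is used only when the physical-disk LBA does not fit
 * in 32 bits.
 */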
1665 
1666 /* print an arbitrary buffer of length total_len */
1667 void
1668 pqisrc_print_buffer(pqisrc_softstate_t *softs, char *msg, void *user_buf,
1669 		uint32_t total_len, uint32_t flags)
1670 {
1671 #define LINE_BUF_LEN 60
1672 #define INDEX_PER_LINE 16
1673 	uint32_t buf_consumed = 0;
1674 	int ii;
1675 	char line_buf[LINE_BUF_LEN];
1676 	int line_len; /* written length per line */
1677 	uint8_t this_char;
1678 
1679 	if (user_buf == NULL)
1680 		return;
1681 
1682 	memset(line_buf, 0, LINE_BUF_LEN);
1683 
1684 	/* Print index columns */
1685 	if (flags & PRINT_FLAG_HDR_COLUMN)
1686 	{
1687 		for (ii = 0, line_len = 0; ii < MIN(total_len, INDEX_PER_LINE); ii++)
1688 		{
1689 			line_len += snprintf(line_buf + line_len, (LINE_BUF_LEN - line_len), "%02d ", ii);
1690 			if ((line_len + 4) >= LINE_BUF_LEN)
1691 				break;
1692 		}
1693 		DBG_INFO("%15.15s:[ %s ]\n", "header", line_buf);
1694 	}
1695 
1696 	/* Print the buffer contents */
1697 	while(buf_consumed < total_len)
1698 	{
1699 		memset(line_buf, 0, LINE_BUF_LEN);
1700 
1701 		for (ii = 0, line_len = 0; ii < INDEX_PER_LINE; ii++)
1702 		{
1703 			this_char = *((char*)(user_buf) + buf_consumed);
1704 			line_len += snprintf(line_buf + line_len, (LINE_BUF_LEN - line_len), "%02x ", this_char);
1705 
1706 			buf_consumed++;
1707 			if (buf_consumed >= total_len || (line_len + 4) >= LINE_BUF_LEN)
1708 				break;
1709 		}
1710 		DBG_INFO("%15.15s:[ %s ]\n", msg, line_buf);
1711 	}
1712 }
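/*
 * Example output (hypothetical bytes) for a 10-byte CDB dumped with
 * PRINT_FLAG_HDR_COLUMN set:
 *          header:[ 00 01 02 03 04 05 06 07 08 09 ]
 *        host cdb:[ 28 00 00 00 08 00 00 00 10 00 ]
 */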
1713 
1714 /* print CDB with column header */
1715 void
1716 pqisrc_show_cdb(pqisrc_softstate_t *softs, char *msg, rcb_t *rcb, uint8_t *cdb)
1717 {
1718 	/* Print the CDB contents */
1719 	pqisrc_print_buffer(softs, msg, cdb, rcb->cmdlen, PRINT_FLAG_HDR_COLUMN);
1720 }
1721 
1722 void
1723 pqisrc_show_rcb_details(pqisrc_softstate_t *softs, rcb_t *rcb, char *msg, void *err_info)
1724 {
1725 	pqi_scsi_dev_t *devp;
1726 
1727 	if (rcb == NULL || rcb->dvp == NULL)
1728 	{
1729 		DBG_ERR("Invalid rcb or dev ptr! rcb=%p\n", rcb);
1730 		return;
1731 	}
1732 
1733 	devp = rcb->dvp;
1734 
1735 	/* print the host and mapped CDB */
1736 	DBG_INFO("\n");
1737 	DBG_INFO("----- Start Dump: %s -----\n", msg);
1738 	pqisrc_print_buffer(softs, "host cdb", OS_GET_CDBP(rcb), rcb->cmdlen, PRINT_FLAG_HDR_COLUMN);
1739 	if (OS_GET_CDBP(rcb) != rcb->cdbp)
1740 		pqisrc_print_buffer(softs, "aio mapped cdb", rcb->cdbp, rcb->cmdlen, 0);
1741 
1742 	DBG_INFO("tag=0x%x dir=%u host_timeout=%ums\n", rcb->tag,
1743 		rcb->data_dir, (uint32_t)rcb->host_timeout_ms);
1744 
1745 	DBG_INFO("BTL: %d:%d:%d addr=0x%x\n", devp->bus, devp->target,
1746 		devp->lun, GET_LE32(devp->scsi3addr));
1747 
1748 	if (rcb->path == AIO_PATH)
1749 	{
1750 		DBG_INFO("handle=0x%x\n", rcb->ioaccel_handle);
1751 		DBG_INFO("row=%u blk/row=%u index=%u map_row=%u\n",
1752 			rcb->row_num, rcb->blocks_per_row, rcb->raid_map_index, rcb->raid_map_row);
1753 
1754 		if (err_info)
1755 			pqisrc_show_aio_error_info(softs, rcb, err_info);
1756 	}
1757 
1758 	else /* RAID path */
1759 	{
1760 		if (err_info)
1761 			pqisrc_show_raid_error_info(softs, rcb, err_info);
1762 	}
1763 
1764 
1765 	DBG_INFO("-----  Done -----\n\n");
1766 }
1767 
1768 
1769 /*
1770  * Function used to build and send RAID bypass request to the adapter
1771  */
1772 int
1773 pqisrc_build_scsi_cmd_raidbypass(pqisrc_softstate_t *softs,
1774 			pqi_scsi_dev_t *device, rcb_t *rcb)
1775 {
1776 	uint32_t disk_blk_cnt;
1777 	struct aio_req_locator loc;
1778 	struct aio_req_locator *l = &loc;
1779 	int rc;
1780 	memset(l, 0, sizeof(*l));
1781 
1782 	DBG_FUNC("IN\n");
1783 
1784 	if (device == NULL) {
1785 		DBG_INFO("device is NULL\n");
1786 		return PQI_STATUS_FAILURE;
1787 	}
1788 	if (device->raid_map == NULL) {
1789 		DBG_INFO("tag=0x%x BTL: %d:%d:%d Raid map is NULL\n",
1790 			rcb->tag, device->bus, device->target, device->lun);
1791 		return PQI_STATUS_FAILURE;
1792 	}
1793 
1794 	/* Check for eligible op, get LBA and block count. */
1795 	rc = fill_lba_for_scsi_rw(softs, OS_GET_CDBP(rcb), l);
1796 	if (rc == PQI_STATUS_FAILURE)
1797 		return PQI_STATUS_FAILURE;
1798 
1799 	if (l->is_write && !pqisrc_is_supported_write(softs, device))
1800 		return PQI_STATUS_FAILURE;
1801 
1802 	l->raid_map = device->raid_map;
1803 	l->block.last = l->block.first + l->block.cnt - 1;
1804 	l->raid_level = device->raid_level;
1805 
1806 	if (pqisrc_is_invalid_block(softs, l))
1807 		return PQI_STATUS_FAILURE;
1808 
1809 	if (!pqisrc_calc_disk_params(softs, l, rcb))
1810 		return PQI_STATUS_FAILURE;
1811 
1812 	if (!pqisrc_is_single_row_column(softs, l))
1813 		return PQI_STATUS_FAILURE;
1814 
1815 	if (!pqisrc_set_map_row_and_idx(softs, l, rcb))
1816 		return PQI_STATUS_FAILURE;
1817 
1818 	/* Proceeding with driver mapping. */
1819 
1820 
1821 	switch (device->raid_level) {
1822 	case SA_RAID_1:
1823 	case SA_RAID_ADM:
1824 		if (l->is_write) {
1825 			if (!pqisrc_set_write_mirrors(softs, device, l, rcb))
1826 				return PQI_STATUS_FAILURE;
1827 		} else
1828 			pqisrc_set_read_mirror(softs, device, l);
1829 		break;
1830 	case SA_RAID_5:
1831 	case SA_RAID_6:
1832 		if (l->map.layout_map_count > 1 || l->is_write) {
1833 
1834 			if (!pqisrc_is_r5or6_single_group(softs, l))
1835 				return PQI_STATUS_FAILURE;
1836 
1837 			if (!pqisrc_is_r5or6_single_row(softs, l))
1838 				return PQI_STATUS_FAILURE;
1839 
1840 			if (!pqisrc_is_r5or6_single_column(softs, l))
1841 				return PQI_STATUS_FAILURE;
1842 
1843 			pqisrc_set_r5or6_row_and_index(l, rcb);
1844 		}
1845 		break;
1846 	}
1847 
1848 	if (l->map.idx >= RAID_MAP_MAX_ENTRIES) {
1849 		DBG_INFO("AIO ineligible: index exceeds max map entries\n");
1850 		return PQI_STATUS_FAILURE;
1851 	}
1852 
1853 	rcb->ioaccel_handle =
1854 		l->raid_map->dev_data[l->map.idx].ioaccel_handle;
1855 
1856 	if (!pqisrc_calc_aio_block(l))
1857 		return PQI_STATUS_FAILURE;
1858 
1859 	disk_blk_cnt = pqisrc_handle_blk_size_diffs(l);
1860 
1861 
1862 	/* Set encryption flag if needed. */
1863 	rcb->encrypt_enable = false;
1864 	if (GET_LE16((uint8_t *)(&l->raid_map->flags)) &
1865 		RAID_MAP_ENCRYPTION_ENABLED) {
1866 		pqisrc_set_enc_info(&rcb->enc_info, l->raid_map,
1867 			l->block.first);
1868 		rcb->encrypt_enable = true;
1869 	}
1870 
1871 	if (pqisrc_aio_req_too_big(softs, device, rcb, l, disk_blk_cnt))
1872 		return PQI_STATUS_FAILURE;
1873 
1874 	/* set the cdb ptr to the local bypass cdb */
1875 	rcb->cdbp = &rcb->bypass_cdb[0];
1876 
1877 	/* Build the new CDB for the physical disk I/O. */
1878 	pqisrc_aio_build_cdb(l, disk_blk_cnt, rcb, rcb->cdbp);
1879 
1880 	pqisrc_aio_show_locator_info(softs, l, disk_blk_cnt, rcb);
1881 
1882 	DBG_FUNC("OUT\n");
1883 
1884 	return PQI_STATUS_SUCCESS;
1885 }
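/*
 * Overview of the bypass flow above: the host CDB is decoded into an
 * aio_req_locator, validated against the RAID map (block range, single
 * row/column, mirror or group selection, transfer-size limits), and only
 * if every check passes is a physical-disk CDB built for the AIO path.
 * Any failure is reported to the caller, which is expected to fall back
 * to the normal RAID path.
 */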
1886 
1887 /* Function used to submit an AIO TMF to the adapter
1888  * DEVICE_RESET is not supported.
1889  */
1890 
1891 static int
1892 pqisrc_send_aio_tmf(pqisrc_softstate_t *softs, pqi_scsi_dev_t *devp,
1893                     rcb_t *rcb, rcb_t *rcb_to_manage, int tmf_type)
1894 {
1895 	int rval = PQI_STATUS_SUCCESS;
1896 	pqi_aio_tmf_req_t tmf_req;
1897 	ib_queue_t *op_ib_q = NULL;
1898 	boolean_t is_write;
1899 
1900 	memset(&tmf_req, 0, sizeof(pqi_aio_tmf_req_t));
1901 
1902 	DBG_FUNC("IN\n");
1903 
1904 	tmf_req.header.iu_type = PQI_REQUEST_IU_AIO_TASK_MANAGEMENT;
1905 	tmf_req.header.iu_length = sizeof(tmf_req) - sizeof(iu_header_t);
1906 	tmf_req.req_id = rcb->tag;
1907 	tmf_req.error_idx = rcb->tag;
1908 	tmf_req.nexus = devp->ioaccel_handle;
1909 	/* memcpy(tmf_req.lun, devp->scsi3addr, sizeof(tmf_req.lun)); */
1910 	tmf_req.tmf = tmf_type;
1911 	tmf_req.resp_qid = OS_GET_TMF_RESP_QID(softs, rcb);
1912 	op_ib_q = &softs->op_aio_ib_q[0];
1913 	is_write = pqisrc_cdb_is_write(rcb->cdbp);
1914 
1915 	uint64_t lun = rcb->cm_ccb->ccb_h.target_lun;
1916 	if (lun && (rcb->dvp->is_multi_lun)) {
1917 		int_to_scsilun(lun, tmf_req.lun);
1918 	}
1919 	else {
1920 		memset(tmf_req.lun, 0, sizeof(tmf_req.lun));
1921 	}
1922 
1923 	if (tmf_type == SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK) {
1924 		tmf_req.req_id_to_manage = rcb_to_manage->tag;
1925 		tmf_req.nexus = rcb_to_manage->ioaccel_handle;
1926 	}
1927 
1928 	if (devp->raid_level == SA_RAID_1 ||
1929 	    devp->raid_level == SA_RAID_5 ||
1930 	    devp->raid_level == SA_RAID_6) {
1931 		if (tmf_type == SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK && is_write)
1932 			tmf_req.header.iu_type = PQI_REQUEST_IU_AIO_BYPASS_TASK_MGMT;
1933 	}
1934 
1935 	DBG_WARN("aio tmf: iu_type=0x%x req_id_to_manage=0x%x\n",
1936 		tmf_req.header.iu_type, tmf_req.req_id_to_manage);
1937 	DBG_WARN("aio tmf: req_id=0x%x nexus=0x%x tmf=0x%x QID=%u\n",
1938 		tmf_req.req_id, tmf_req.nexus, tmf_req.tmf, op_ib_q->q_id);
1939 
1940 	rcb->path = AIO_PATH;
1941 	rcb->req_pending = true;
1942 	/* A timed-out TMF response is handled by this callback */
1943 	rcb->error_cmp_callback = pqisrc_process_aio_response_error;
1944 
1945 	rval = pqisrc_submit_cmnd(softs, op_ib_q, &tmf_req);
1946 	if (rval != PQI_STATUS_SUCCESS) {
1947 		DBG_ERR("Unable to submit command rval=%d\n", rval);
1948 		return rval;
1949 	}
1950 
1951 	rval = pqisrc_wait_on_condition(softs, rcb, PQISRC_TMF_TIMEOUT);
1952 	if (rval != PQI_STATUS_SUCCESS) {
1953 		DBG_ERR("Task Management tmf_type=%d timed out\n", tmf_type);
1954 		rcb->status = rval;
1955 	}
1956 
1957 	if (rcb->status != PQI_STATUS_SUCCESS) {
1958 		DBG_ERR_BTL(devp, "Task Management failed tmf_type:%d "
1959 				"stat:0x%x\n", tmf_type, rcb->status);
1960 		rval = PQI_STATUS_FAILURE;
1961 	}
1962 
1963 	DBG_FUNC("OUT\n");
1964 	return rval;
1965 }
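/*
 * Note on the IU type selection above: aborting a write that was issued
 * as a RAID 1/5/6 write bypass uses the dedicated bypass task-management
 * IU; every other AIO TMF uses the regular AIO task-management IU.
 */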
1966 
1967 /* Function used to submit a Raid TMF to the adapter */
1968 static int
1969 pqisrc_send_raid_tmf(pqisrc_softstate_t *softs, pqi_scsi_dev_t *devp,
1970                     rcb_t *rcb, rcb_t *rcb_to_manage, int tmf_type)
1971 {
1972 	int rval = PQI_STATUS_SUCCESS;
1973 	pqi_raid_tmf_req_t tmf_req;
1974 	ib_queue_t *op_ib_q = NULL;
1975 
1976 	memset(&tmf_req, 0, sizeof(pqi_raid_tmf_req_t));
1977 
1978 	DBG_FUNC("IN\n");
1979 
1980 	tmf_req.header.iu_type = PQI_REQUEST_IU_RAID_TASK_MANAGEMENT;
1981 	tmf_req.header.iu_length = sizeof(tmf_req) - sizeof(iu_header_t);
1982 	tmf_req.req_id = rcb->tag;
1983 
1984 	memcpy(tmf_req.lun, devp->scsi3addr, sizeof(tmf_req.lun));
1985 	tmf_req.ml_device_lun_number = (uint8_t)rcb->cm_ccb->ccb_h.target_lun;
1986 
1987 	tmf_req.tmf = tmf_type;
1988 	tmf_req.resp_qid = OS_GET_TMF_RESP_QID(softs, rcb);
1989 
1990 	/* Decide the queue where the tmf request should be submitted */
1991 	if (tmf_type == SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK) {
1992 		tmf_req.obq_id_to_manage = rcb_to_manage->resp_qid;
1993 		tmf_req.req_id_to_manage = rcb_to_manage->tag;
1994 	}
1995 
1996 	if (softs->timeout_in_tmf &&
1997 			tmf_type == SOP_TASK_MANAGEMENT_LUN_RESET) {
1998 		/* OS_TMF_TIMEOUT_SEC - 1 to accommodate driver processing */
1999 		tmf_req.timeout_in_sec = OS_TMF_TIMEOUT_SEC - 1;
2000 		/* if OS tmf timeout is 0, set minimum value for timeout */
2001 		if (!tmf_req.timeout_in_sec)
2002 			tmf_req.timeout_in_sec = 1;
2003 	}
2004 
2005 	op_ib_q = &softs->op_raid_ib_q[0];
2006 
2007 	DBG_WARN("raid tmf: iu_type=0x%x req_id_to_manage=%d\n",
2008 		tmf_req.header.iu_type, tmf_req.req_id_to_manage);
2009 
2010 	rcb->path = RAID_PATH;
2011 	rcb->req_pending = true;
2012 	/* A timed-out TMF response is handled by this callback */
2013 	rcb->error_cmp_callback = pqisrc_process_raid_response_error;
2014 
2015 	rval = pqisrc_submit_cmnd(softs, op_ib_q, &tmf_req);
2016 	if (rval != PQI_STATUS_SUCCESS) {
2017 		DBG_ERR("Unable to submit command rval=%d\n", rval);
2018 		return rval;
2019 	}
2020 
2021 	rval = pqisrc_wait_on_condition(softs, rcb, PQISRC_TMF_TIMEOUT);
2022 	if (rval != PQI_STATUS_SUCCESS) {
2023 		DBG_ERR("Task Management tmf_type=%d timed out\n", tmf_type);
2024 		rcb->status = rval;
2025 	}
2026 
2027 	if (rcb->status != PQI_STATUS_SUCCESS) {
2028 		DBG_NOTE("Task Management failed tmf_type:%d "
2029 				"stat:0x%x\n", tmf_type, rcb->status);
2030 		rval = PQI_STATUS_FAILURE;
2031 	}
2032 
2033 	DBG_FUNC("OUT\n");
2034 	return rval;
2035 }
2036 
2037 void
2038 dump_tmf_details(pqisrc_softstate_t *softs, rcb_t *rcb, char *msg)
2039 {
2040 	uint32_t qid = rcb->req_q ? rcb->req_q->q_id : -1;
2041 
2042 	DBG_INFO("%s: pending=%d path=%d tag=0x%x=%u qid=%u timeout=%ums\n",
2043 		msg, rcb->req_pending, rcb->path, rcb->tag,
2044 		rcb->tag, qid, (uint32_t)rcb->host_timeout_ms);
2045 }
2046 
2047 int
2048 pqisrc_send_tmf(pqisrc_softstate_t *softs, pqi_scsi_dev_t *devp,
2049                     rcb_t *rcb, rcb_t *rcb_to_manage, int tmf_type)
2050 {
2051 	int ret = PQI_STATUS_SUCCESS;
2052 
2053 	DBG_FUNC("IN\n");
2054 
2055 	DBG_WARN("sending TMF. io outstanding=%u\n",
2056 		softs->max_outstanding_io - softs->taglist.num_elem);
2057 
2058 	rcb->is_abort_cmd_from_host = true;
2059 	rcb->softs = softs;
2060 
2061 	/* No target rcb for general purpose TMFs like LUN RESET */
2062 	if (tmf_type == SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK)
2063 	{
2064 		rcb_to_manage->host_wants_to_abort_this = true;
2065 		dump_tmf_details(softs, rcb_to_manage, "rcb_to_manage");
2066 	}
2067 
2068 
2069 	dump_tmf_details(softs, rcb, "rcb");
2070 
2071 	if(!devp->is_physical_device) {
2072 		if (tmf_type == SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK) {
2073 			if(rcb_to_manage->path == AIO_PATH) {
2074 				if(devp->offload_enabled)
2075 					ret = pqisrc_send_aio_tmf(softs, devp, rcb, rcb_to_manage, tmf_type);
2076 			}
2077 			else {
2078 				DBG_INFO("TASK ABORT not supported in raid\n");
2079 				ret = PQI_STATUS_FAILURE;
2080 			}
2081 		}
2082 		else {
2083 			ret = pqisrc_send_raid_tmf(softs, devp, rcb, rcb_to_manage, tmf_type);
2084 		}
2085 	} else {
2086 		if (tmf_type == SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK)
2087 			ret = pqisrc_send_aio_tmf(softs, devp, rcb, rcb_to_manage, tmf_type);
2088 		else
2089 			ret = pqisrc_send_raid_tmf(softs, devp, rcb, rcb_to_manage, tmf_type);
2090 	}
2091 
2092 	DBG_FUNC("OUT\n");
2093 
2094 	return ret;
2095 }
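/*
 * Routing summary for the code above: ABORT_TASK is sent as an AIO TMF
 * when the command being aborted went out on the AIO path (for logical
 * devices, only if offload is enabled) or when the device is physical;
 * aborting a RAID-path command on a logical device is not supported.
 * All other TMFs, such as LUN RESET, are sent as RAID TMFs.
 */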
2096 
2097 /* return index into the global (softs) counters based on raid level */
2098 static counter_types_t
2099 get_counter_index(rcb_t *rcb)
2100 {
2101 	if (IS_AIO_PATH(rcb->dvp))
2102 		return HBA_COUNTER;
2103 
2104 	switch (rcb->dvp->raid_level) {
2105 		case SA_RAID_0:	return RAID0_COUNTER;
2106 		case SA_RAID_1:
2107 		case SA_RAID_ADM:	return RAID1_COUNTER;
2108 		case SA_RAID_5:	return RAID5_COUNTER;
2109 		case SA_RAID_6:	return RAID6_COUNTER;
2110 		case SA_RAID_UNKNOWN:
2111 		default:
2112 		{
2113 			static boolean_t asserted = false;
2114 			if (!asserted)
2115 			{
2116 				asserted = true;
2117 				ASSERT(rcb->path == RAID_PATH);
2118 				ASSERT(0);
2119 			}
2120 			return UNKNOWN_COUNTER;
2121 		}
2122 	}
2123 }
2124 
2125 /* return the counter type as an ASCII string */
2126 static char *
2127 counter_type_to_raid_ascii(counter_types_t type)
2128 {
2129 	switch (type)
2130 	{
2131 		case UNKNOWN_COUNTER: return "Unknown";
2132 		case HBA_COUNTER:		return "HbaPath";
2133 		case RAID0_COUNTER:	return "Raid0";
2134 		case RAID1_COUNTER:	return "Raid1";
2135 		case RAID5_COUNTER:	return "Raid5";
2136 		case RAID6_COUNTER:	return "Raid6";
2137 		default:					return "Unsupported";
2138 	}
2139 }
2140 
2141 /* return the path as an ASCII string */
2142 char *
2143 io_path_to_ascii(IO_PATH_T path)
2144 {
2145 	switch (path)
2146 	{
2147 		case AIO_PATH:		return "Aio";
2148 		case RAID_PATH:	return "Raid";
2149 		default:				return "Unknown";
2150 	}
2151 }
2152 
2153 /* return the I/O type as an ASCII string */
2154 static char *
2155 io_type_to_ascii(io_type_t io_type)
2156 {
2157 	switch (io_type)
2158 	{
2159 		case UNKNOWN_IO_TYPE:	return "Unknown";
2160 		case READ_IO_TYPE:		return "Read";
2161 		case WRITE_IO_TYPE:		return "Write";
2162 		case NON_RW_IO_TYPE:		return "NonRW";
2163 		default:						return "Unsupported";
2164 	}
2165 }
2166 
2167 
2168 /* return the io type based on cdb */
2169 io_type_t
2170 get_io_type_from_cdb(uint8_t *cdb)
2171 {
2172 	if (cdb == NULL)
2173 		return UNKNOWN_IO_TYPE;
2174 
2175 	else if (pqisrc_cdb_is_read(cdb))
2176 		return READ_IO_TYPE;
2177 
2178 	else if (pqisrc_cdb_is_write(cdb))
2179 		return WRITE_IO_TYPE;
2180 
2181 	return NON_RW_IO_TYPE;
2182 }
2183 
2184 /* increment this counter based on path and read/write */
2185 OS_ATOMIC64_T
2186 increment_this_counter(io_counters_t *pcounter, IO_PATH_T path, io_type_t io_type)
2187 {
2188 	OS_ATOMIC64_T ret_val;
2189 
2190 	if (path == AIO_PATH)
2191 	{
2192 		if (io_type == READ_IO_TYPE)
2193 			ret_val = OS_ATOMIC64_INC(&pcounter->aio_read_cnt);
2194 		else if (io_type == WRITE_IO_TYPE)
2195 			ret_val = OS_ATOMIC64_INC(&pcounter->aio_write_cnt);
2196 		else
2197 			ret_val = OS_ATOMIC64_INC(&pcounter->aio_non_read_write);
2198 	}
2199 	else
2200 	{
2201 		if (io_type == READ_IO_TYPE)
2202 			ret_val = OS_ATOMIC64_INC(&pcounter->raid_read_cnt);
2203 		else if (io_type == WRITE_IO_TYPE)
2204 			ret_val = OS_ATOMIC64_INC(&pcounter->raid_write_cnt);
2205 		else
2206 			ret_val = OS_ATOMIC64_INC(&pcounter->raid_non_read_write);
2207 	}
2208 
2209 	return ret_val;
2210 }
2211 
2212 /* increment appropriate counter(s) anytime we post a new request */
2213 static void
2214 pqisrc_increment_io_counters(pqisrc_softstate_t *softs, rcb_t *rcb)
2215 {
2216 	io_type_t io_type = get_io_type_from_cdb(rcb->cdbp);
2217 	counter_types_t type_index = get_counter_index(rcb);
2218 	io_counters_t *pcounter = &softs->counters[type_index];
2219 	OS_ATOMIC64_T ret_val;
2220 
2221 	ret_val = increment_this_counter(pcounter, rcb->path, io_type);
2222 
2223 #if 1 /* leave this enabled while we gain confidence for each io path */
2224 	if (ret_val == 1)
2225 	{
2226 		char *raid_type = counter_type_to_raid_ascii(type_index);
2227 		char *path = io_path_to_ascii(rcb->path);
2228 		char *io_ascii = io_type_to_ascii(io_type);
2229 
2230 		DBG_INFO("Got first path/type hit. "
2231 			"Path=%s RaidType=%s IoType=%s\n",
2232 			path, raid_type, io_ascii);
2233 	}
2234 #endif
2235 
2236 	/* @todo future: may want to make a per-dev counter */
2237 }
2238 
2239 /* public routine to print a particular counter with header msg */
2240 void
2241 print_this_counter(pqisrc_softstate_t *softs, io_counters_t *pcounter, char *msg)
2242 {
2243 	io_counters_t counter;
2244 	uint32_t percent_reads;
2245 	uint32_t percent_aio;
2246 
2247 	if (!softs->log_io_counters)
2248 		return;
2249 
2250 	/* Use a cached copy so percentages are based on the data that is printed */
2251 	memcpy(&counter, pcounter, sizeof(counter));
2252 
2253 	DBG_NOTE("Counter: %s (ptr=%p)\n", msg, pcounter);
2254 
2255 	percent_reads = CALC_PERCENT_VS(counter.aio_read_cnt + counter.raid_read_cnt,
2256 											counter.aio_write_cnt + counter.raid_write_cnt);
2257 
2258 	percent_aio = CALC_PERCENT_VS(counter.aio_read_cnt + counter.aio_write_cnt,
2259 											counter.raid_read_cnt + counter.raid_write_cnt);
2260 
2261 	DBG_NOTE("   R/W Percentages: Reads=%3u%% AIO=%3u%%\n", percent_reads, percent_aio);
2262 
2263 	/* Print the Read counts */
2264 	percent_aio = CALC_PERCENT_VS(counter.aio_read_cnt, counter.raid_read_cnt);
2265 	DBG_NOTE("   Reads : AIO=%8u(%3u%%) RAID=%8u\n",
2266 		(uint32_t)counter.aio_read_cnt, percent_aio, (uint32_t)counter.raid_read_cnt);
2267 
2268 	/* Print the Write counts */
2269 	percent_aio = CALC_PERCENT_VS(counter.aio_write_cnt, counter.raid_write_cnt);
2270 	DBG_NOTE("   Writes: AIO=%8u(%3u%%) RAID=%8u\n",
2271 		(uint32_t)counter.aio_write_cnt, percent_aio, (uint32_t)counter.raid_write_cnt);
2272 
2273 	/* Print the Non-Rw counts */
2274 	percent_aio = CALC_PERCENT_VS(counter.aio_non_read_write, counter.raid_non_read_write);
2275 	DBG_NOTE("   Non-RW: AIO=%8u(%3u%%) RAID=%8u\n",
2276 		(uint32_t)counter.aio_non_read_write, percent_aio, (uint32_t)counter.raid_non_read_write);
2277 }
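/*
 * The percentages above are computed from a snapshot of the counters:
 * the overall read-vs-write and AIO-vs-RAID ratios first, then the AIO
 * share of reads, writes, and non-R/W commands relative to the RAID
 * path.  Nothing is printed unless softs->log_io_counters is set.
 */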
2278 
2279 /* return true if buffer is all zeroes */
2280 boolean_t
2281 is_buffer_zero(void *buffer, uint32_t size)
2282 {
2283 	char *buf = buffer;
2284 	DWORD ii;
2285 
2286 	if (buffer == NULL || size == 0)
2287 		return false;
2288 
2289 	for (ii = 0; ii < size; ii++)
2290 	{
2291 		if (buf[ii] != 0x00)
2292 			return false;
2293 	}
2294 	return true;
2295 }
2296 
2297 /* public routine to print all global counter types */
2298 void
2299 print_all_counters(pqisrc_softstate_t *softs, uint32_t flags)
2300 {
2301 	int ii;
2302 	io_counters_t *pcounter;
2303 	char *raid_type;
2304 
2305 	for (ii = 0; ii < MAX_IO_COUNTER; ii++)
2306 	{
2307 		pcounter = &softs->counters[ii];
2308 		raid_type = counter_type_to_raid_ascii(ii);
2309 
2310 		if ((flags & COUNTER_FLAG_ONLY_NON_ZERO) &&
2311 			is_buffer_zero(pcounter, sizeof(*pcounter)))
2312 		{
2313 			continue;
2314 		}
2315 
2316 		print_this_counter(softs, pcounter, raid_type);
2317 	}
2318 
2319 	if (flags & COUNTER_FLAG_CLEAR_COUNTS)
2320 	{
2321 		DBG_NOTE("Clearing all counters\n");
2322 		memset(softs->counters, 0, sizeof(softs->counters));
2323 	}
2324 }
2325