xref: /freebsd/sys/dev/smartpqi/smartpqi_request.c (revision 7ea28254ec5376b5deb86c136e1838d0134dbb22)
11e66f787SSean Bruno /*-
2*7ea28254SJohn Hall  * Copyright 2016-2023 Microchip Technology, Inc. and/or its subsidiaries.
31e66f787SSean Bruno  *
41e66f787SSean Bruno  * Redistribution and use in source and binary forms, with or without
51e66f787SSean Bruno  * modification, are permitted provided that the following conditions
61e66f787SSean Bruno  * are met:
71e66f787SSean Bruno  * 1. Redistributions of source code must retain the above copyright
81e66f787SSean Bruno  *    notice, this list of conditions and the following disclaimer.
91e66f787SSean Bruno  * 2. Redistributions in binary form must reproduce the above copyright
101e66f787SSean Bruno  *    notice, this list of conditions and the following disclaimer in the
111e66f787SSean Bruno  *    documentation and/or other materials provided with the distribution.
121e66f787SSean Bruno  *
131e66f787SSean Bruno  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
141e66f787SSean Bruno  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
151e66f787SSean Bruno  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
161e66f787SSean Bruno  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
171e66f787SSean Bruno  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
181e66f787SSean Bruno  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
191e66f787SSean Bruno  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
201e66f787SSean Bruno  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
211e66f787SSean Bruno  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
221e66f787SSean Bruno  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
231e66f787SSean Bruno  * SUCH DAMAGE.
241e66f787SSean Bruno  */
251e66f787SSean Bruno 
261e66f787SSean Bruno 
271e66f787SSean Bruno #include "smartpqi_includes.h"
281e66f787SSean Bruno 
29*7ea28254SJohn Hall /* Change this if you need to debug why AIO is not being used */
30*7ea28254SJohn Hall #define DBG_AIO DBG_IO
319fac68fcSPAPANI SRIKANTH 
321e66f787SSean Bruno #define SG_FLAG_LAST	0x40000000
331e66f787SSean Bruno #define SG_FLAG_CHAIN	0x80000000
341e66f787SSean Bruno 
35*7ea28254SJohn Hall /* Local Prototypes */
36*7ea28254SJohn Hall static void pqisrc_increment_io_counters(pqisrc_softstate_t *softs, rcb_t *rcb);
37*7ea28254SJohn Hall static int fill_lba_for_scsi_rw(pqisrc_softstate_t *softs, uint8_t *cdb, aio_req_locator_t *l);
38*7ea28254SJohn Hall 
39*7ea28254SJohn Hall 
401e66f787SSean Bruno /* Subroutine to determine the embedded SGL count in an IU */
419fac68fcSPAPANI SRIKANTH static inline uint32_t
42*7ea28254SJohn Hall pqisrc_embedded_sgl_count(uint32_t elem_alloted, uint8_t iu_type)
431e66f787SSean Bruno {
44*7ea28254SJohn Hall 	uint32_t embedded_sgl_count = MAX_EMBEDDED_SG_IN_FIRST_IU_DEFAULT;
45*7ea28254SJohn Hall 
46*7ea28254SJohn Hall 	DBG_FUNC("IN\n");
47*7ea28254SJohn Hall 
48*7ea28254SJohn Hall 	if (iu_type == PQI_IU_TYPE_RAID5_WRITE_BYPASS_REQUEST ||
49*7ea28254SJohn Hall 		iu_type == PQI_IU_TYPE_RAID6_WRITE_BYPASS_REQUEST)
50*7ea28254SJohn Hall 		embedded_sgl_count = MAX_EMBEDDED_SG_IN_FIRST_IU_RAID56_AIO;
51*7ea28254SJohn Hall 
521e66f787SSean Bruno 	/**
531e66f787SSean Bruno 	Calculate the embedded SGL count using num_elem_alloted for this IO.
541e66f787SSean Bruno 	**/
551e66f787SSean Bruno 	if(elem_alloted - 1)
561e66f787SSean Bruno 		embedded_sgl_count += ((elem_alloted - 1) * MAX_EMBEDDED_SG_IN_IU);
57*7ea28254SJohn Hall 	/* DBG_IO("embedded_sgl_count :%d\n", embedded_sgl_count); */
581e66f787SSean Bruno 
59*7ea28254SJohn Hall 	DBG_FUNC("OUT\n");
601e66f787SSean Bruno 
611e66f787SSean Bruno 	return embedded_sgl_count;
621e66f787SSean Bruno 
631e66f787SSean Bruno }
641e66f787SSean Bruno 
651e66f787SSean Bruno /* Subroutine to find the number of contiguous free elements in a queue */
669fac68fcSPAPANI SRIKANTH static inline uint32_t
679fac68fcSPAPANI SRIKANTH pqisrc_contiguous_free_elem(uint32_t pi, uint32_t ci, uint32_t elem_in_q)
681e66f787SSean Bruno {
691e66f787SSean Bruno 	uint32_t contiguous_free_elem = 0;
701e66f787SSean Bruno 
71*7ea28254SJohn Hall 	DBG_FUNC("IN\n");
721e66f787SSean Bruno 
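	/*
	 * pi/ci are the producer and consumer indices of a circular queue.
	 * If the producer has not wrapped past the consumer, the contiguous
	 * free region runs from pi to the end of the queue (one slot is held
	 * back when ci == 0 so pi can never catch up to ci); if it has
	 * wrapped, the free region is the gap between pi and ci, again minus
	 * the one reserved slot.
	 */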
731e66f787SSean Bruno 	if(pi >= ci) {
741e66f787SSean Bruno 		contiguous_free_elem = (elem_in_q - pi);
751e66f787SSean Bruno 		if(ci == 0)
761e66f787SSean Bruno 			contiguous_free_elem -= 1;
771e66f787SSean Bruno 	} else {
781e66f787SSean Bruno 		contiguous_free_elem = (ci - pi - 1);
791e66f787SSean Bruno 	}
801e66f787SSean Bruno 
81*7ea28254SJohn Hall 	DBG_FUNC("OUT\n");
821e66f787SSean Bruno 
831e66f787SSean Bruno 	return contiguous_free_elem;
841e66f787SSean Bruno }
851e66f787SSean Bruno 
861e66f787SSean Bruno /* Subroutine to find out the number of elements needed for the request */
871e66f787SSean Bruno static uint32_t
88*7ea28254SJohn Hall pqisrc_num_elem_needed(pqisrc_softstate_t *softs, uint32_t SG_Count,
89*7ea28254SJohn Hall                 pqi_scsi_dev_t *devp, boolean_t is_write, IO_PATH_T io_path)
901e66f787SSean Bruno {
911e66f787SSean Bruno 	uint32_t num_sg;
921e66f787SSean Bruno 	uint32_t num_elem_required = 1;
93*7ea28254SJohn Hall 	uint32_t sg_in_first_iu = MAX_EMBEDDED_SG_IN_FIRST_IU_DEFAULT;
94*7ea28254SJohn Hall 
95*7ea28254SJohn Hall 	DBG_FUNC("IN\n");
96*7ea28254SJohn Hall 	DBG_IO("SGL_Count :%u\n",SG_Count);
97*7ea28254SJohn Hall 
98*7ea28254SJohn Hall 	if ((devp->raid_level == SA_RAID_5 || devp->raid_level == SA_RAID_6)
99*7ea28254SJohn Hall 		&& is_write && (io_path == AIO_PATH))
100*7ea28254SJohn Hall 		sg_in_first_iu = MAX_EMBEDDED_SG_IN_FIRST_IU_RAID56_AIO;
1011e66f787SSean Bruno 	/********
1021e66f787SSean Bruno 	If SG_Count is greater than the max SGs per IU, i.e. 4 or 68
1031e66f787SSean Bruno 	(4 without spanning, 68 with spanning), SG chaining is used.
104*7ea28254SJohn Hall 	Alternatively, if SG_Count <= MAX_EMBEDDED_SG_IN_FIRST_IU_*, the
1051e66f787SSean Bruno 	list fits in the first IU.  In either case one element is enough.
1061e66f787SSean Bruno 	********/
107*7ea28254SJohn Hall 	if(SG_Count > softs->max_sg_per_spanning_cmd ||
108*7ea28254SJohn Hall 		SG_Count <= sg_in_first_iu)
1091e66f787SSean Bruno 		return num_elem_required;
1101e66f787SSean Bruno 	/*
1111e66f787SSean Bruno 	SG entries remaining beyond those embedded in the first IU
1121e66f787SSean Bruno 	 */
113*7ea28254SJohn Hall 	num_sg = SG_Count - sg_in_first_iu;
1141e66f787SSean Bruno 	num_elem_required += PQISRC_DIV_ROUND_UP(num_sg, MAX_EMBEDDED_SG_IN_IU);
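	/* Worked example (illustrative sizes: 4 SG entries in the first
	 * element, 8 per additional element): a 20-entry SGL needs
	 * 1 + DIV_ROUND_UP(20 - 4, 8) = 3 queue elements. */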
115*7ea28254SJohn Hall 	DBG_FUNC("OUT\n");
1161e66f787SSean Bruno 	return num_elem_required;
1171e66f787SSean Bruno }
1181e66f787SSean Bruno 
1191e66f787SSean Bruno /* Subroutine to build the SG list for the IU submission */
1209fac68fcSPAPANI SRIKANTH static boolean_t
1219fac68fcSPAPANI SRIKANTH pqisrc_build_sgl(sgt_t *sg_array, rcb_t *rcb, iu_header_t *iu_hdr,
1221e66f787SSean Bruno 			uint32_t num_elem_alloted)
1231e66f787SSean Bruno {
1241e66f787SSean Bruno 	uint32_t i;
1251e66f787SSean Bruno 	uint32_t num_sg = OS_GET_IO_SG_COUNT(rcb);
1261e66f787SSean Bruno 	sgt_t *sgt = sg_array;
1271e66f787SSean Bruno 	sgt_t *sg_chain = NULL;
1281e66f787SSean Bruno 	boolean_t partial = false;
1291e66f787SSean Bruno 
130*7ea28254SJohn Hall 	DBG_FUNC("IN\n");
1311e66f787SSean Bruno 
132*7ea28254SJohn Hall 	/* DBG_IO("SGL_Count :%d",num_sg); */
1331e66f787SSean Bruno 	if (0 == num_sg) {
1341e66f787SSean Bruno 		goto out;
1351e66f787SSean Bruno 	}
1361e66f787SSean Bruno 
137*7ea28254SJohn Hall 	if (num_sg <= pqisrc_embedded_sgl_count(num_elem_alloted,
138*7ea28254SJohn Hall 		iu_hdr->iu_type)) {
139*7ea28254SJohn Hall 
1401e66f787SSean Bruno 		for (i = 0; i < num_sg; i++, sgt++) {
1411e66f787SSean Bruno 			sgt->addr= OS_GET_IO_SG_ADDR(rcb,i);
1421e66f787SSean Bruno 			sgt->len= OS_GET_IO_SG_LEN(rcb,i);
1431e66f787SSean Bruno 			sgt->flags= 0;
1441e66f787SSean Bruno 		}
1451e66f787SSean Bruno 
1461e66f787SSean Bruno 		sg_array[num_sg - 1].flags = SG_FLAG_LAST;
1471e66f787SSean Bruno 	} else {
1481e66f787SSean Bruno 	/**
1491e66f787SSean Bruno 	SGL Chaining
1501e66f787SSean Bruno 	**/
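		/* The lone embedded descriptor points at the pre-allocated chain
		 * buffer (rcb->sg_chain_dma); the real SG entries are written to
		 * its virtual mapping below, and the IU length then counts just
		 * this one descriptor. */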
1511e66f787SSean Bruno 		sg_chain = rcb->sg_chain_virt;
1521e66f787SSean Bruno 		sgt->addr = rcb->sg_chain_dma;
1531e66f787SSean Bruno 		sgt->len = num_sg * sizeof(sgt_t);
1541e66f787SSean Bruno 		sgt->flags = SG_FLAG_CHAIN;
1551e66f787SSean Bruno 
1561e66f787SSean Bruno 		sgt = sg_chain;
1571e66f787SSean Bruno 		for (i = 0; i < num_sg; i++, sgt++) {
1581e66f787SSean Bruno 			sgt->addr = OS_GET_IO_SG_ADDR(rcb,i);
1591e66f787SSean Bruno 			sgt->len = OS_GET_IO_SG_LEN(rcb,i);
1601e66f787SSean Bruno 			sgt->flags = 0;
1611e66f787SSean Bruno 		}
1621e66f787SSean Bruno 
1631e66f787SSean Bruno 		sg_chain[num_sg - 1].flags = SG_FLAG_LAST;
1641e66f787SSean Bruno 		num_sg = 1;
1651e66f787SSean Bruno 		partial = true;
1669fac68fcSPAPANI SRIKANTH 
1671e66f787SSean Bruno 	}
1681e66f787SSean Bruno out:
1691e66f787SSean Bruno 	iu_hdr->iu_length = num_sg * sizeof(sgt_t);
170*7ea28254SJohn Hall 	DBG_FUNC("OUT\n");
1711e66f787SSean Bruno 	return partial;
1721e66f787SSean Bruno 
1731e66f787SSean Bruno }
1741e66f787SSean Bruno 
175*7ea28254SJohn Hall #if 0
176*7ea28254SJohn Hall static inline void
177*7ea28254SJohn Hall pqisrc_show_raid_req(pqisrc_softstate_t *softs, pqisrc_raid_req_t *raid_req)
178*7ea28254SJohn Hall {
179*7ea28254SJohn Hall 	DBG_IO("%30s: 0x%x\n", "raid_req->header.iu_type",
180*7ea28254SJohn Hall 		raid_req->header.iu_type);
181*7ea28254SJohn Hall 	DBG_IO("%30s: 0x%d\n", "raid_req->response_queue_id",
182*7ea28254SJohn Hall 		raid_req->response_queue_id);
183*7ea28254SJohn Hall 	DBG_IO("%30s: 0x%x\n", "raid_req->request_id",
184*7ea28254SJohn Hall 		raid_req->request_id);
185*7ea28254SJohn Hall 	DBG_IO("%30s: 0x%x\n", "raid_req->buffer_length",
186*7ea28254SJohn Hall 		raid_req->buffer_length);
187*7ea28254SJohn Hall 	DBG_IO("%30s: 0x%x\n", "raid_req->task_attribute",
188*7ea28254SJohn Hall 		raid_req->task_attribute);
189*7ea28254SJohn Hall 	DBG_IO("%30s: 0x%llx\n", "raid_req->lun_number",
190*7ea28254SJohn Hall 		*((long long unsigned int*)raid_req->lun_number));
191*7ea28254SJohn Hall 	DBG_IO("%30s: 0x%x\n", "raid_req->error_index",
192*7ea28254SJohn Hall 		raid_req->error_index);
193*7ea28254SJohn Hall 	DBG_IO("%30s: 0x%p\n", "raid_req->sg_descriptors[0].addr",
194*7ea28254SJohn Hall 		(void *)raid_req->sg_descriptors[0].addr);
195*7ea28254SJohn Hall 	DBG_IO("%30s: 0x%x\n", "raid_req->sg_descriptors[0].len",
196*7ea28254SJohn Hall 		raid_req->sg_descriptors[0].len);
197*7ea28254SJohn Hall 	DBG_IO("%30s: 0x%x\n", "raid_req->sg_descriptors[0].flags",
198*7ea28254SJohn Hall 		raid_req->sg_descriptors[0].flags);
199*7ea28254SJohn Hall }
200*7ea28254SJohn Hall #endif
201*7ea28254SJohn Hall 
2021e66f787SSean Bruno /*Subroutine used to Build the RAID request */
2031e66f787SSean Bruno static void
2041e66f787SSean Bruno pqisrc_build_raid_io(pqisrc_softstate_t *softs, rcb_t *rcb,
2051e66f787SSean Bruno  	pqisrc_raid_req_t *raid_req, uint32_t num_elem_alloted)
2061e66f787SSean Bruno {
207*7ea28254SJohn Hall 	DBG_FUNC("IN\n");
2081e66f787SSean Bruno 
2091e66f787SSean Bruno 	raid_req->header.iu_type = PQI_IU_TYPE_RAID_PATH_IO_REQUEST;
2101e66f787SSean Bruno 	raid_req->header.comp_feature = 0;
2111e66f787SSean Bruno 	raid_req->response_queue_id = OS_GET_IO_RESP_QID(softs, rcb);
2121e66f787SSean Bruno 	raid_req->work_area[0] = 0;
2131e66f787SSean Bruno 	raid_req->work_area[1] = 0;
2141e66f787SSean Bruno 	raid_req->request_id = rcb->tag;
2151e66f787SSean Bruno 	raid_req->nexus_id = 0;
2161e66f787SSean Bruno 	raid_req->buffer_length = GET_SCSI_BUFFLEN(rcb);
2171e66f787SSean Bruno 	memcpy(raid_req->lun_number, rcb->dvp->scsi3addr,
2181e66f787SSean Bruno 		sizeof(raid_req->lun_number));
2191e66f787SSean Bruno 	raid_req->protocol_spec = 0;
2201e66f787SSean Bruno 	raid_req->data_direction = rcb->data_dir;
2211e66f787SSean Bruno 	raid_req->reserved1 = 0;
2221e66f787SSean Bruno 	raid_req->fence = 0;
2231e66f787SSean Bruno 	raid_req->error_index = raid_req->request_id;
2241e66f787SSean Bruno 	raid_req->reserved2 = 0;
2251e66f787SSean Bruno 	raid_req->task_attribute = OS_GET_TASK_ATTR(rcb);
2261e66f787SSean Bruno 	raid_req->command_priority = 0;
2271e66f787SSean Bruno 	raid_req->reserved3 = 0;
2281e66f787SSean Bruno 	raid_req->reserved4 = 0;
2291e66f787SSean Bruno 	raid_req->reserved5 = 0;
230*7ea28254SJohn Hall 	raid_req->ml_device_lun_number = (uint8_t)rcb->cm_ccb->ccb_h.target_lun;
2311e66f787SSean Bruno 
2321e66f787SSean Bruno 	/* As cdb and additional_cdb_bytes are contiguous,
2331e66f787SSean Bruno 	   update them in a single statement */
234*7ea28254SJohn Hall 	memcpy(raid_req->cmd.cdb, rcb->cdbp, rcb->cmdlen);
2351e66f787SSean Bruno #if 0
2361e66f787SSean Bruno 	DBG_IO("CDB :");
2371e66f787SSean Bruno 	for(i = 0; i < rcb->cmdlen ; i++)
2381e66f787SSean Bruno 		DBG_IO(" 0x%x \n ",raid_req->cdb[i]);
2391e66f787SSean Bruno #endif
2401e66f787SSean Bruno 
2411e66f787SSean Bruno 	switch (rcb->cmdlen) {
2421e66f787SSean Bruno 		case 6:
2431e66f787SSean Bruno 		case 10:
2441e66f787SSean Bruno 		case 12:
2451e66f787SSean Bruno 		case 16:
2461e66f787SSean Bruno 			raid_req->additional_cdb_bytes_usage =
2471e66f787SSean Bruno 				PQI_ADDITIONAL_CDB_BYTES_0;
2481e66f787SSean Bruno 			break;
2491e66f787SSean Bruno 		case 20:
2501e66f787SSean Bruno 			raid_req->additional_cdb_bytes_usage =
2511e66f787SSean Bruno 				PQI_ADDITIONAL_CDB_BYTES_4;
2521e66f787SSean Bruno 			break;
2531e66f787SSean Bruno 		case 24:
2541e66f787SSean Bruno 			raid_req->additional_cdb_bytes_usage =
2551e66f787SSean Bruno 				PQI_ADDITIONAL_CDB_BYTES_8;
2561e66f787SSean Bruno 			break;
2571e66f787SSean Bruno 		case 28:
2581e66f787SSean Bruno 			raid_req->additional_cdb_bytes_usage =
2591e66f787SSean Bruno 				PQI_ADDITIONAL_CDB_BYTES_12;
2601e66f787SSean Bruno 			break;
2611e66f787SSean Bruno 		case 32:
2621e66f787SSean Bruno 		default: /* todo:review again */
2631e66f787SSean Bruno 			raid_req->additional_cdb_bytes_usage =
2641e66f787SSean Bruno 				PQI_ADDITIONAL_CDB_BYTES_16;
2651e66f787SSean Bruno 			break;
2661e66f787SSean Bruno 	}
2671e66f787SSean Bruno 
2681e66f787SSean Bruno 	/* Frame SGL Descriptor */
2691e66f787SSean Bruno 	raid_req->partial = pqisrc_build_sgl(&raid_req->sg_descriptors[0], rcb,
2701e66f787SSean Bruno 		&raid_req->header, num_elem_alloted);
2711e66f787SSean Bruno 
2721e66f787SSean Bruno 	raid_req->header.iu_length +=
2731e66f787SSean Bruno 			offsetof(pqisrc_raid_req_t, sg_descriptors) - sizeof(iu_header_t);
2741e66f787SSean Bruno 
2751e66f787SSean Bruno #if 0
276*7ea28254SJohn Hall 	pqisrc_show_raid_req(softs, raid_req);
2771e66f787SSean Bruno #endif
2781e66f787SSean Bruno 	rcb->success_cmp_callback = pqisrc_process_io_response_success;
2791e66f787SSean Bruno 	rcb->error_cmp_callback = pqisrc_process_raid_response_error;
2801e66f787SSean Bruno 	rcb->resp_qid = raid_req->response_queue_id;
2811e66f787SSean Bruno 
282*7ea28254SJohn Hall 	DBG_FUNC("OUT\n");
2831e66f787SSean Bruno 
2841e66f787SSean Bruno }
2851e66f787SSean Bruno 
286*7ea28254SJohn Hall /* We will need to expand this to handle different types of
287*7ea28254SJohn Hall  * aio request structures.
288*7ea28254SJohn Hall  */
289*7ea28254SJohn Hall #if 0
290*7ea28254SJohn Hall static inline void
291*7ea28254SJohn Hall pqisrc_show_aio_req(pqisrc_softstate_t *softs, pqi_aio_req_t *aio_req)
2921e66f787SSean Bruno {
293*7ea28254SJohn Hall 	DBG_IO("%30s: 0x%x\n", "aio_req->header.iu_type",
294*7ea28254SJohn Hall 		aio_req->header.iu_type);
295*7ea28254SJohn Hall 	DBG_IO("%30s: 0x%x\n", "aio_req->resp_qid",
296*7ea28254SJohn Hall 		aio_req->response_queue_id);
297*7ea28254SJohn Hall 	DBG_IO("%30s: 0x%x\n", "aio_req->req_id",
298*7ea28254SJohn Hall 		aio_req->req_id);
299*7ea28254SJohn Hall 	DBG_IO("%30s: 0x%x\n", "aio_req->nexus",
300*7ea28254SJohn Hall 		aio_req->nexus);
301*7ea28254SJohn Hall 	DBG_IO("%30s: 0x%x\n", "aio_req->buf_len",
302*7ea28254SJohn Hall 		aio_req->buf_len);
303*7ea28254SJohn Hall 	DBG_IO("%30s: 0x%x\n", "aio_req->cmd_flags.data_dir",
304*7ea28254SJohn Hall 		aio_req->cmd_flags.data_dir);
305*7ea28254SJohn Hall 	DBG_IO("%30s: 0x%x\n", "aio_req->attr_prio.task_attr",
306*7ea28254SJohn Hall 		aio_req->attr_prio.task_attr);
307*7ea28254SJohn Hall 	DBG_IO("%30s: 0x%x\n", "aio_req->err_idx",
308*7ea28254SJohn Hall 		aio_req->err_idx);
309*7ea28254SJohn Hall 	DBG_IO("%30s: 0x%x\n", "aio_req->num_sg",
310*7ea28254SJohn Hall 		aio_req->num_sg);
311*7ea28254SJohn Hall 	DBG_IO("%30s: 0x%p\n", "aio_req->sg_desc[0].addr",
312*7ea28254SJohn Hall 		(void *)aio_req->sg_desc[0].addr);
313*7ea28254SJohn Hall 	DBG_IO("%30s: 0x%x\n", "aio_req->sg_desc[0].len",
314*7ea28254SJohn Hall 		aio_req->sg_desc[0].len);
315*7ea28254SJohn Hall 	DBG_IO("%30s: 0x%x\n", "aio_req->sg_desc[0].flags",
316*7ea28254SJohn Hall 		aio_req->sg_desc[0].flags);
317*7ea28254SJohn Hall }
318*7ea28254SJohn Hall #endif
3191e66f787SSean Bruno 
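/*
 * Pack a 64-bit LUN value into the 8-byte SCSI LUN field: two bytes per
 * addressing level, most-significant byte of each level first.
 */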
320*7ea28254SJohn Hall void
321*7ea28254SJohn Hall int_to_scsilun(uint64_t lun, uint8_t *scsi_lun)
322*7ea28254SJohn Hall {
323*7ea28254SJohn Hall 	int i;
324*7ea28254SJohn Hall 
325*7ea28254SJohn Hall 	memset(scsi_lun, 0, sizeof(lun));
326*7ea28254SJohn Hall 	for (i = 0; i < sizeof(lun); i += 2) {
327*7ea28254SJohn Hall 		scsi_lun[i] = (lun >> 8) & 0xFF;
328*7ea28254SJohn Hall 		scsi_lun[i+1] = lun & 0xFF;
329*7ea28254SJohn Hall 		lun = lun >> 16;
330*7ea28254SJohn Hall 	}
331*7ea28254SJohn Hall }
332*7ea28254SJohn Hall 
333*7ea28254SJohn Hall 
334*7ea28254SJohn Hall /*Subroutine used to populate AIO IUs. */
335*7ea28254SJohn Hall void
336*7ea28254SJohn Hall pqisrc_build_aio_common(pqisrc_softstate_t *softs, pqi_aio_req_t *aio_req,
337*7ea28254SJohn Hall                         rcb_t *rcb, uint32_t num_elem_alloted)
338*7ea28254SJohn Hall {
339*7ea28254SJohn Hall 	DBG_FUNC("IN\n");
3401e66f787SSean Bruno 	aio_req->header.iu_type = PQI_IU_TYPE_AIO_PATH_IO_REQUEST;
3411e66f787SSean Bruno 	aio_req->header.comp_feature = 0;
3421e66f787SSean Bruno 	aio_req->response_queue_id = OS_GET_IO_RESP_QID(softs, rcb);
3431e66f787SSean Bruno 	aio_req->work_area[0] = 0;
3441e66f787SSean Bruno 	aio_req->work_area[1] = 0;
3451e66f787SSean Bruno 	aio_req->req_id = rcb->tag;
3461e66f787SSean Bruno 	aio_req->res1[0] = 0;
3471e66f787SSean Bruno 	aio_req->res1[1] = 0;
3481e66f787SSean Bruno 	aio_req->nexus = rcb->ioaccel_handle;
3491e66f787SSean Bruno 	aio_req->buf_len = GET_SCSI_BUFFLEN(rcb);
350*7ea28254SJohn Hall 	aio_req->cmd_flags.data_dir = rcb->data_dir;
351*7ea28254SJohn Hall 	aio_req->cmd_flags.mem_type = 0;
352*7ea28254SJohn Hall 	aio_req->cmd_flags.fence = 0;
353*7ea28254SJohn Hall 	aio_req->cmd_flags.res2 = 0;
354*7ea28254SJohn Hall 	aio_req->attr_prio.task_attr = OS_GET_TASK_ATTR(rcb);
355*7ea28254SJohn Hall 	aio_req->attr_prio.cmd_prio = 0;
356*7ea28254SJohn Hall 	aio_req->attr_prio.res3 = 0;
3571e66f787SSean Bruno 	aio_req->err_idx = aio_req->req_id;
3581e66f787SSean Bruno 	aio_req->cdb_len = rcb->cmdlen;
3599fac68fcSPAPANI SRIKANTH 
360b17f4335SSean Bruno 	if (rcb->cmdlen > sizeof(aio_req->cdb))
361b17f4335SSean Bruno 		rcb->cmdlen = sizeof(aio_req->cdb);
3621e66f787SSean Bruno 	memcpy(aio_req->cdb, rcb->cdbp, rcb->cmdlen);
3631e66f787SSean Bruno 	memset(aio_req->res4, 0, sizeof(aio_req->res4));
3641e66f787SSean Bruno 
365*7ea28254SJohn Hall 	uint64_t lun = rcb->cm_ccb->ccb_h.target_lun;
366*7ea28254SJohn Hall 	if (lun && (rcb->dvp->is_multi_lun)) {
367*7ea28254SJohn Hall 		int_to_scsilun(lun, aio_req->lun);
368*7ea28254SJohn Hall 	}
369*7ea28254SJohn Hall 	else {
370*7ea28254SJohn Hall 		memset(aio_req->lun, 0, sizeof(aio_req->lun));
371*7ea28254SJohn Hall 	}
372*7ea28254SJohn Hall 
373*7ea28254SJohn Hall 	/* handle encryption fields */
3741e66f787SSean Bruno 	if (rcb->encrypt_enable == true) {
375*7ea28254SJohn Hall 		aio_req->cmd_flags.encrypt_enable = true;
376*7ea28254SJohn Hall 		aio_req->encrypt_key_index =
377*7ea28254SJohn Hall 			LE_16(rcb->enc_info.data_enc_key_index);
378*7ea28254SJohn Hall 		aio_req->encrypt_twk_low =
379*7ea28254SJohn Hall 			LE_32(rcb->enc_info.encrypt_tweak_lower);
380*7ea28254SJohn Hall 		aio_req->encrypt_twk_high =
381*7ea28254SJohn Hall 			LE_32(rcb->enc_info.encrypt_tweak_upper);
3821e66f787SSean Bruno 	} else {
383*7ea28254SJohn Hall 		aio_req->cmd_flags.encrypt_enable = 0;
3841e66f787SSean Bruno 		aio_req->encrypt_key_index = 0;
3851e66f787SSean Bruno 		aio_req->encrypt_twk_high = 0;
3861e66f787SSean Bruno 		aio_req->encrypt_twk_low = 0;
3871e66f787SSean Bruno 	}
3881e66f787SSean Bruno 	/* Frame SGL Descriptor */
389*7ea28254SJohn Hall 	aio_req->cmd_flags.partial = pqisrc_build_sgl(&aio_req->sg_desc[0], rcb,
3901e66f787SSean Bruno 		&aio_req->header, num_elem_alloted);
3911e66f787SSean Bruno 
3921e66f787SSean Bruno 	aio_req->num_sg = aio_req->header.iu_length / sizeof(sgt_t);
3931e66f787SSean Bruno 
394*7ea28254SJohn Hall 	/* DBG_INFO("aio_req->num_sg :%d\n", aio_req->num_sg); */
3951e66f787SSean Bruno 
3961e66f787SSean Bruno 	aio_req->header.iu_length += offsetof(pqi_aio_req_t, sg_desc) -
3971e66f787SSean Bruno 		sizeof(iu_header_t);
398*7ea28254SJohn Hall 	/* set completion and error handlers. */
3991e66f787SSean Bruno 	rcb->success_cmp_callback = pqisrc_process_io_response_success;
4001e66f787SSean Bruno 	rcb->error_cmp_callback = pqisrc_process_aio_response_error;
4011e66f787SSean Bruno 	rcb->resp_qid = aio_req->response_queue_id;
402*7ea28254SJohn Hall 	DBG_FUNC("OUT\n");
4031e66f787SSean Bruno 
404*7ea28254SJohn Hall }
405*7ea28254SJohn Hall /*Subroutine used to show standard AIO IU fields */
406*7ea28254SJohn Hall void
407*7ea28254SJohn Hall pqisrc_show_aio_common(pqisrc_softstate_t *softs, rcb_t *rcb,
408*7ea28254SJohn Hall                        pqi_aio_req_t *aio_req)
409*7ea28254SJohn Hall {
410*7ea28254SJohn Hall #ifdef DEBUG_AIO
411*7ea28254SJohn Hall 	DBG_INFO("AIO IU Content, tag# 0x%08x", rcb->tag);
412*7ea28254SJohn Hall 	DBG_INFO("%15s: 0x%x\n", "iu_type",	aio_req->header.iu_type);
413*7ea28254SJohn Hall 	DBG_INFO("%15s: 0x%x\n", "comp_feat",	aio_req->header.comp_feature);
414*7ea28254SJohn Hall 	DBG_INFO("%15s: 0x%x\n", "length",	aio_req->header.iu_length);
415*7ea28254SJohn Hall 	DBG_INFO("%15s: 0x%x\n", "resp_qid",	aio_req->response_queue_id);
416*7ea28254SJohn Hall 	DBG_INFO("%15s: 0x%x\n", "req_id",	aio_req->req_id);
417*7ea28254SJohn Hall 	DBG_INFO("%15s: 0x%x\n", "nexus",	aio_req->nexus);
418*7ea28254SJohn Hall 	DBG_INFO("%15s: 0x%x\n", "buf_len",	aio_req->buf_len);
419*7ea28254SJohn Hall 	DBG_INFO("%15s:\n", "cmd_flags");
420*7ea28254SJohn Hall 	DBG_INFO("%15s: 0x%x\n", "data_dir",	aio_req->cmd_flags.data_dir);
421*7ea28254SJohn Hall 	DBG_INFO("%15s: 0x%x\n", "partial",	aio_req->cmd_flags.partial);
422*7ea28254SJohn Hall 	DBG_INFO("%15s: 0x%x\n", "mem_type",	aio_req->cmd_flags.mem_type);
423*7ea28254SJohn Hall 	DBG_INFO("%15s: 0x%x\n", "fence",	aio_req->cmd_flags.fence);
424*7ea28254SJohn Hall 	DBG_INFO("%15s: 0x%x\n", "encryption",
425*7ea28254SJohn Hall 		aio_req->cmd_flags.encrypt_enable);
426*7ea28254SJohn Hall 	DBG_INFO("%15s:\n", "attr_prio");
427*7ea28254SJohn Hall 	DBG_INFO("%15s: 0x%x\n", "task_attr",	aio_req->attr_prio.task_attr);
428*7ea28254SJohn Hall 	DBG_INFO("%15s: 0x%x\n", "cmd_prio",	aio_req->attr_prio.cmd_prio);
429*7ea28254SJohn Hall 	DBG_INFO("%15s: 0x%x\n", "dek_index",	aio_req->encrypt_key_index);
430*7ea28254SJohn Hall 	DBG_INFO("%15s: 0x%x\n", "tweak_lower",	aio_req->encrypt_twk_low);
431*7ea28254SJohn Hall 	DBG_INFO("%15s: 0x%x\n", "tweak_upper",	aio_req->encrypt_twk_high);
432*7ea28254SJohn Hall 	pqisrc_show_cdb(softs, "AIOC", rcb, aio_req->cdb);
433*7ea28254SJohn Hall 	DBG_INFO("%15s: 0x%x\n", "err_idx",	aio_req->err_idx);
434*7ea28254SJohn Hall 	DBG_INFO("%15s: 0x%x\n", "num_sg",	aio_req->num_sg);
435*7ea28254SJohn Hall 	DBG_INFO("%15s: 0x%x\n", "cdb_len",	aio_req->cdb_len);
436*7ea28254SJohn Hall #if 0
437*7ea28254SJohn Hall 	DBG_INFO("%15s: 0x%x\n", "lun",		aio_req->lun);
438*7ea28254SJohn Hall 	DBG_INFO("%15s: 0x%p\n", "sg_desc[0].addr",
439*7ea28254SJohn Hall 		(void *)aio_req->sg_desc[0].addr);
440*7ea28254SJohn Hall 	DBG_INFO("%15s: 0x%x\n", "sg_desc[0].len",
441*7ea28254SJohn Hall 		aio_req->sg_desc[0].len);
442*7ea28254SJohn Hall 	DBG_INFO("%15s: 0x%x\n", "sg_desc[0].flags",
443*7ea28254SJohn Hall 		aio_req->sg_desc[0].flags);
444*7ea28254SJohn Hall #endif
445*7ea28254SJohn Hall #endif /* DEBUG_AIO */
446*7ea28254SJohn Hall }
4471e66f787SSean Bruno 
448*7ea28254SJohn Hall /*Subroutine used to populate AIO RAID 1 write bypass IU. */
449*7ea28254SJohn Hall void
450*7ea28254SJohn Hall pqisrc_build_aio_R1_write(pqisrc_softstate_t *softs,
451*7ea28254SJohn Hall 	pqi_aio_raid1_write_req_t *aio_req, rcb_t *rcb,
452*7ea28254SJohn Hall 	uint32_t num_elem_alloted)
453*7ea28254SJohn Hall {
454*7ea28254SJohn Hall 	DBG_FUNC("IN\n");
455*7ea28254SJohn Hall 	if (!rcb->dvp) {
456*7ea28254SJohn Hall 		DBG_WARN("%s: DEBUG: dev ptr is null", __func__);
457*7ea28254SJohn Hall 		return;
458*7ea28254SJohn Hall 	}
459*7ea28254SJohn Hall 	if (!rcb->dvp->raid_map) {
460*7ea28254SJohn Hall 		DBG_WARN("%s: DEBUG: raid_map is null", __func__);
461*7ea28254SJohn Hall 		return;
462*7ea28254SJohn Hall 	}
463*7ea28254SJohn Hall 
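	/* A RAID 1 (or triple-mirror ADM) write bypass IU carries an I/T nexus
	 * for each mirror leg (nexus_1..nexus_3) so the firmware can write all
	 * members; num_drives below reflects the layout map count. */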
464*7ea28254SJohn Hall 	aio_req->header.iu_type = PQI_IU_TYPE_RAID1_WRITE_BYPASS_REQUEST;
465*7ea28254SJohn Hall 	aio_req->header.comp_feature = 0;
466*7ea28254SJohn Hall 	aio_req->response_queue_id = OS_GET_IO_RESP_QID(softs, rcb);
467*7ea28254SJohn Hall 	aio_req->work_area[0] = 0;
468*7ea28254SJohn Hall 	aio_req->work_area[1] = 0;
469*7ea28254SJohn Hall 	aio_req->req_id = rcb->tag;
470*7ea28254SJohn Hall 	aio_req->volume_id =  (LE_32(rcb->dvp->scsi3addr[0]) & 0x3FFF);
471*7ea28254SJohn Hall 	aio_req->nexus_1 = rcb->it_nexus[0];
472*7ea28254SJohn Hall 	aio_req->nexus_2 = rcb->it_nexus[1];
473*7ea28254SJohn Hall 	aio_req->nexus_3 = rcb->it_nexus[2];
474*7ea28254SJohn Hall 	aio_req->buf_len = GET_SCSI_BUFFLEN(rcb);
475*7ea28254SJohn Hall 	aio_req->cmd_flags.data_dir = rcb->data_dir;
476*7ea28254SJohn Hall 	aio_req->cmd_flags.mem_type = 0;
477*7ea28254SJohn Hall 	aio_req->cmd_flags.fence = 0;
478*7ea28254SJohn Hall 	aio_req->cmd_flags.res2 = 0;
479*7ea28254SJohn Hall 	aio_req->attr_prio.task_attr = OS_GET_TASK_ATTR(rcb);
480*7ea28254SJohn Hall 	aio_req->attr_prio.cmd_prio = 0;
481*7ea28254SJohn Hall 	aio_req->attr_prio.res3 = 0;
482*7ea28254SJohn Hall 	if(rcb->cmdlen > sizeof(aio_req->cdb))
483*7ea28254SJohn Hall 		rcb->cmdlen = sizeof(aio_req->cdb);
484*7ea28254SJohn Hall 	memcpy(aio_req->cdb, rcb->cdbp, rcb->cmdlen);
485*7ea28254SJohn Hall 	aio_req->err_idx = aio_req->req_id;
486*7ea28254SJohn Hall 	aio_req->cdb_len = rcb->cmdlen;
487*7ea28254SJohn Hall 	aio_req->num_drives = LE_16(rcb->dvp->raid_map->layout_map_count);
488*7ea28254SJohn Hall 
489*7ea28254SJohn Hall 	/* handle encryption fields */
490*7ea28254SJohn Hall 	if (rcb->encrypt_enable == true) {
491*7ea28254SJohn Hall 		aio_req->cmd_flags.encrypt_enable = true;
492*7ea28254SJohn Hall 		aio_req->encrypt_key_index =
493*7ea28254SJohn Hall 			LE_16(rcb->enc_info.data_enc_key_index);
494*7ea28254SJohn Hall 		aio_req->encrypt_twk_low =
495*7ea28254SJohn Hall 			LE_32(rcb->enc_info.encrypt_tweak_lower);
496*7ea28254SJohn Hall 		aio_req->encrypt_twk_high =
497*7ea28254SJohn Hall 			LE_32(rcb->enc_info.encrypt_tweak_upper);
498*7ea28254SJohn Hall 	} else {
499*7ea28254SJohn Hall 		aio_req->cmd_flags.encrypt_enable = 0;
500*7ea28254SJohn Hall 		aio_req->encrypt_key_index = 0;
501*7ea28254SJohn Hall 		aio_req->encrypt_twk_high = 0;
502*7ea28254SJohn Hall 		aio_req->encrypt_twk_low = 0;
503*7ea28254SJohn Hall 	}
504*7ea28254SJohn Hall 	/* Frame SGL Descriptor */
505*7ea28254SJohn Hall 	aio_req->cmd_flags.partial = pqisrc_build_sgl(&aio_req->sg_desc[0], rcb,
506*7ea28254SJohn Hall 		&aio_req->header, num_elem_alloted);
507*7ea28254SJohn Hall 
508*7ea28254SJohn Hall 	aio_req->num_sg = aio_req->header.iu_length / sizeof(sgt_t);
509*7ea28254SJohn Hall 
510*7ea28254SJohn Hall 	/* DBG_INFO("aio_req->num_sg :%d\n", aio_req->num_sg); */
511*7ea28254SJohn Hall 
512*7ea28254SJohn Hall 	aio_req->header.iu_length += offsetof(pqi_aio_raid1_write_req_t, sg_desc) -
513*7ea28254SJohn Hall 		sizeof(iu_header_t);
514*7ea28254SJohn Hall 
515*7ea28254SJohn Hall 	/* set completion and error handlers. */
516*7ea28254SJohn Hall 	rcb->success_cmp_callback = pqisrc_process_io_response_success;
517*7ea28254SJohn Hall 	rcb->error_cmp_callback = pqisrc_process_aio_response_error;
518*7ea28254SJohn Hall 	rcb->resp_qid = aio_req->response_queue_id;
519*7ea28254SJohn Hall 	DBG_FUNC("OUT\n");
520*7ea28254SJohn Hall 
521*7ea28254SJohn Hall }
522*7ea28254SJohn Hall 
523*7ea28254SJohn Hall /*Subroutine used to show AIO RAID1 Write bypass IU fields */
524*7ea28254SJohn Hall void
525*7ea28254SJohn Hall pqisrc_show_aio_R1_write(pqisrc_softstate_t *softs, rcb_t *rcb,
526*7ea28254SJohn Hall 	pqi_aio_raid1_write_req_t *aio_req)
527*7ea28254SJohn Hall {
528*7ea28254SJohn Hall 
529*7ea28254SJohn Hall #ifdef DEBUG_AIO
530*7ea28254SJohn Hall 	DBG_INFO("AIO RAID1 Write IU Content, tag# 0x%08x", rcb->tag);
531*7ea28254SJohn Hall 	DBG_INFO("%15s: 0x%x\n", "iu_type",	aio_req->header.iu_type);
532*7ea28254SJohn Hall 	DBG_INFO("%15s: 0x%x\n", "comp_feat",	aio_req->header.comp_feature);
533*7ea28254SJohn Hall 	DBG_INFO("%15s: 0x%x\n", "length",	aio_req->header.iu_length);
534*7ea28254SJohn Hall 	DBG_INFO("%15s: 0x%x\n", "resp_qid",	aio_req->response_queue_id);
535*7ea28254SJohn Hall 	DBG_INFO("%15s: 0x%x\n", "req_id",	aio_req->req_id);
536*7ea28254SJohn Hall 	DBG_INFO("%15s: 0x%x\n", "volume_id",	aio_req->volume_id);
537*7ea28254SJohn Hall 	DBG_INFO("%15s: 0x%x\n", "nexus_1",	aio_req->nexus_1);
538*7ea28254SJohn Hall 	DBG_INFO("%15s: 0x%x\n", "nexus_2",	aio_req->nexus_2);
539*7ea28254SJohn Hall 	DBG_INFO("%15s: 0x%x\n", "nexus_3",	aio_req->nexus_3);
540*7ea28254SJohn Hall 	DBG_INFO("%15s: 0x%x\n", "buf_len",	aio_req->buf_len);
541*7ea28254SJohn Hall 	DBG_INFO("%15s:\n", "cmd_flags");
542*7ea28254SJohn Hall 	DBG_INFO("%15s: 0x%x\n", "data_dir",	aio_req->cmd_flags.data_dir);
543*7ea28254SJohn Hall 	DBG_INFO("%15s: 0x%x\n", "partial",	aio_req->cmd_flags.partial);
544*7ea28254SJohn Hall 	DBG_INFO("%15s: 0x%x\n", "mem_type",	aio_req->cmd_flags.mem_type);
545*7ea28254SJohn Hall 	DBG_INFO("%15s: 0x%x\n", "fence",	aio_req->cmd_flags.fence);
546*7ea28254SJohn Hall 	DBG_INFO("%15s: 0x%x\n", "encryption",
547*7ea28254SJohn Hall 		aio_req->cmd_flags.encrypt_enable);
548*7ea28254SJohn Hall 	DBG_INFO("%15s:\n", "attr_prio");
549*7ea28254SJohn Hall 	DBG_INFO("%15s: 0x%x\n", "task_attr",	aio_req->attr_prio.task_attr);
550*7ea28254SJohn Hall 	DBG_INFO("%15s: 0x%x\n", "cmd_prio",	aio_req->attr_prio.cmd_prio);
551*7ea28254SJohn Hall 	DBG_INFO("%15s: 0x%x\n", "dek_index",	aio_req->encrypt_key_index);
552*7ea28254SJohn Hall 	pqisrc_show_cdb(softs, "AIOR1W", rcb, aio_req->cdb);
553*7ea28254SJohn Hall 	DBG_INFO("%15s: 0x%x\n", "err_idx",	aio_req->err_idx);
554*7ea28254SJohn Hall 	DBG_INFO("%15s: 0x%x\n", "num_sg",	aio_req->num_sg);
555*7ea28254SJohn Hall 	DBG_INFO("%15s: 0x%x\n", "cdb_len",	aio_req->cdb_len);
556*7ea28254SJohn Hall 	DBG_INFO("%15s: 0x%x\n", "num_drives",	aio_req->num_drives);
557*7ea28254SJohn Hall 	DBG_INFO("%15s: 0x%x\n", "tweak_lower",	aio_req->encrypt_twk_low);
558*7ea28254SJohn Hall 	DBG_INFO("%15s: 0x%x\n", "tweak_upper",	aio_req->encrypt_twk_high);
559*7ea28254SJohn Hall #if 0
560*7ea28254SJohn Hall 	DBG_INFO("%15s: 0x%p\n", "sg_desc[0].addr",
561*7ea28254SJohn Hall 		(void *)aio_req->sg_desc[0].addr);
562*7ea28254SJohn Hall 	DBG_INFO("%15s: 0x%x\n", "sg_desc[0].len",
563*7ea28254SJohn Hall 		aio_req->sg_desc[0].len);
564*7ea28254SJohn Hall 	DBG_INFO("%15s: 0x%x\n", "sg_desc[0].flags",
565*7ea28254SJohn Hall 		aio_req->sg_desc[0].flags);
566*7ea28254SJohn Hall #endif
567*7ea28254SJohn Hall #endif /* DEBUG_AIO */
568*7ea28254SJohn Hall }
569*7ea28254SJohn Hall 
570*7ea28254SJohn Hall /*Subroutine used to populate AIO Raid5 or 6 write bypass IU */
571*7ea28254SJohn Hall void
572*7ea28254SJohn Hall pqisrc_build_aio_R5or6_write(pqisrc_softstate_t *softs,
573*7ea28254SJohn Hall 	pqi_aio_raid5or6_write_req_t *aio_req, rcb_t *rcb,
574*7ea28254SJohn Hall 	uint32_t num_elem_alloted)
575*7ea28254SJohn Hall {
576*7ea28254SJohn Hall 	DBG_FUNC("IN\n");
577*7ea28254SJohn Hall 	uint32_t index;
578*7ea28254SJohn Hall 	unsigned num_data_disks;
579*7ea28254SJohn Hall 	unsigned num_metadata_disks;
580*7ea28254SJohn Hall 	unsigned total_disks;
581*7ea28254SJohn Hall 	num_data_disks = LE_16(rcb->dvp->raid_map->data_disks_per_row);
582*7ea28254SJohn Hall 	num_metadata_disks = LE_16(rcb->dvp->raid_map->metadata_disks_per_row);
583*7ea28254SJohn Hall 	total_disks = num_data_disks + num_metadata_disks;
584*7ea28254SJohn Hall 
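	/* raid_map_index selects the data-drive entry for this request; round
	 * up to the end of its row in the raid map and step back over the
	 * metadata disks so 'index' points at the row's P parity entry
	 * (index + 1 is the Q parity entry used for RAID 6). */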
585*7ea28254SJohn Hall 	index = PQISRC_DIV_ROUND_UP(rcb->raid_map_index + 1, total_disks);
586*7ea28254SJohn Hall 	index *= total_disks;
587*7ea28254SJohn Hall 	index -= num_metadata_disks;
588*7ea28254SJohn Hall 
589*7ea28254SJohn Hall 	switch (rcb->dvp->raid_level) {
590*7ea28254SJohn Hall 	case SA_RAID_5:
591*7ea28254SJohn Hall 		aio_req->header.iu_type =
592*7ea28254SJohn Hall 		PQI_IU_TYPE_RAID5_WRITE_BYPASS_REQUEST;
593*7ea28254SJohn Hall 		break;
594*7ea28254SJohn Hall 	case SA_RAID_6:
595*7ea28254SJohn Hall 		aio_req->header.iu_type =
596*7ea28254SJohn Hall 		PQI_IU_TYPE_RAID6_WRITE_BYPASS_REQUEST;
597*7ea28254SJohn Hall 		break;
598*7ea28254SJohn Hall 	default:
599*7ea28254SJohn Hall 		DBG_ERR("WRONG RAID TYPE FOR FUNCTION\n");
600*7ea28254SJohn Hall 	}
601*7ea28254SJohn Hall 	aio_req->header.comp_feature = 0;
602*7ea28254SJohn Hall 	aio_req->response_queue_id = OS_GET_IO_RESP_QID(softs, rcb);
603*7ea28254SJohn Hall 	aio_req->work_area[0] = 0;
604*7ea28254SJohn Hall 	aio_req->work_area[1] = 0;
605*7ea28254SJohn Hall 	aio_req->req_id = rcb->tag;
606*7ea28254SJohn Hall 	aio_req->volume_id =  (LE_32(rcb->dvp->scsi3addr[0]) & 0x3FFF);
607*7ea28254SJohn Hall 	aio_req->data_it_nexus = rcb->dvp->raid_map->dev_data[rcb->raid_map_index].ioaccel_handle;
608*7ea28254SJohn Hall 	aio_req->p_parity_it_nexus =
609*7ea28254SJohn Hall 		rcb->dvp->raid_map->dev_data[index].ioaccel_handle;
610*7ea28254SJohn Hall 	if (aio_req->header.iu_type ==
611*7ea28254SJohn Hall 		PQI_IU_TYPE_RAID6_WRITE_BYPASS_REQUEST) {
612*7ea28254SJohn Hall 			aio_req->q_parity_it_nexus =
613*7ea28254SJohn Hall 				rcb->dvp->raid_map->dev_data[index + 1].ioaccel_handle;
614*7ea28254SJohn Hall 	}
615*7ea28254SJohn Hall 	aio_req->xor_multiplier =
616*7ea28254SJohn Hall 		rcb->dvp->raid_map->dev_data[rcb->raid_map_index].xor_mult[1];
617*7ea28254SJohn Hall 	aio_req->row = rcb->row_num;
618*7ea28254SJohn Hall 	/*aio_req->reserved = rcb->row_num * rcb->blocks_per_row +
619*7ea28254SJohn Hall 		rcb->dvp->raid_map->disk_starting_blk;*/
620*7ea28254SJohn Hall 	aio_req->buf_len = GET_SCSI_BUFFLEN(rcb);
621*7ea28254SJohn Hall 	aio_req->cmd_flags.data_dir = rcb->data_dir;
622*7ea28254SJohn Hall 	aio_req->cmd_flags.mem_type = 0;
623*7ea28254SJohn Hall 	aio_req->cmd_flags.fence = 0;
624*7ea28254SJohn Hall 	aio_req->cmd_flags.res2 = 0;
625*7ea28254SJohn Hall 	aio_req->attr_prio.task_attr = OS_GET_TASK_ATTR(rcb);
626*7ea28254SJohn Hall 	aio_req->attr_prio.cmd_prio = 0;
627*7ea28254SJohn Hall 	aio_req->attr_prio.res3 = 0;
628*7ea28254SJohn Hall 	if (rcb->cmdlen > sizeof(aio_req->cdb))
629*7ea28254SJohn Hall 		rcb->cmdlen = sizeof(aio_req->cdb);
630*7ea28254SJohn Hall 	memcpy(aio_req->cdb, rcb->cdbp, rcb->cmdlen);
631*7ea28254SJohn Hall 	aio_req->err_idx = aio_req->req_id;
632*7ea28254SJohn Hall 	aio_req->cdb_len = rcb->cmdlen;
633*7ea28254SJohn Hall #if 0
634*7ea28254SJohn Hall 	/* Stubbed out for later */
635*7ea28254SJohn Hall 	aio_req->header.iu_type = iu_type;
636*7ea28254SJohn Hall 	aio_req->data_it_nexus = ;
637*7ea28254SJohn Hall 	aio_req->p_parity_it_nexus = ;
638*7ea28254SJohn Hall 	aio_req->q_parity_it_nexus = ;
639*7ea28254SJohn Hall 	aio_req->row = ;
640*7ea28254SJohn Hall 	aio_req->stripe_lba = ;
641*7ea28254SJohn Hall #endif
642*7ea28254SJohn Hall 	/* handle encryption fields */
643*7ea28254SJohn Hall 	if (rcb->encrypt_enable == true) {
644*7ea28254SJohn Hall 		aio_req->cmd_flags.encrypt_enable = true;
645*7ea28254SJohn Hall 		aio_req->encrypt_key_index =
646*7ea28254SJohn Hall 			LE_16(rcb->enc_info.data_enc_key_index);
647*7ea28254SJohn Hall 		aio_req->encrypt_twk_low =
648*7ea28254SJohn Hall 			LE_32(rcb->enc_info.encrypt_tweak_lower);
649*7ea28254SJohn Hall 		aio_req->encrypt_twk_high =
650*7ea28254SJohn Hall 			LE_32(rcb->enc_info.encrypt_tweak_upper);
651*7ea28254SJohn Hall 	} else {
652*7ea28254SJohn Hall 		aio_req->cmd_flags.encrypt_enable = 0;
653*7ea28254SJohn Hall 		aio_req->encrypt_key_index = 0;
654*7ea28254SJohn Hall 		aio_req->encrypt_twk_high = 0;
655*7ea28254SJohn Hall 		aio_req->encrypt_twk_low = 0;
656*7ea28254SJohn Hall 	}
657*7ea28254SJohn Hall 	/* Frame SGL Descriptor */
658*7ea28254SJohn Hall 	aio_req->cmd_flags.partial = pqisrc_build_sgl(&aio_req->sg_desc[0], rcb,
659*7ea28254SJohn Hall 		&aio_req->header, num_elem_alloted);
660*7ea28254SJohn Hall 
661*7ea28254SJohn Hall 	aio_req->num_sg = aio_req->header.iu_length / sizeof(sgt_t);
662*7ea28254SJohn Hall 
663*7ea28254SJohn Hall 	/* DBG_INFO("aio_req->num_sg :%d\n", aio_req->num_sg); */
664*7ea28254SJohn Hall 
665*7ea28254SJohn Hall 	aio_req->header.iu_length += offsetof(pqi_aio_raid5or6_write_req_t, sg_desc) -
666*7ea28254SJohn Hall 		sizeof(iu_header_t);
667*7ea28254SJohn Hall 	/* set completion and error handlers. */
668*7ea28254SJohn Hall 	rcb->success_cmp_callback = pqisrc_process_io_response_success;
669*7ea28254SJohn Hall 	rcb->error_cmp_callback = pqisrc_process_aio_response_error;
670*7ea28254SJohn Hall 	rcb->resp_qid = aio_req->response_queue_id;
671*7ea28254SJohn Hall 	DBG_FUNC("OUT\n");
672*7ea28254SJohn Hall 
673*7ea28254SJohn Hall }
674*7ea28254SJohn Hall 
675*7ea28254SJohn Hall /*Subroutine used to show AIO RAID5/6 Write bypass IU fields */
676*7ea28254SJohn Hall void
677*7ea28254SJohn Hall pqisrc_show_aio_R5or6_write(pqisrc_softstate_t *softs, rcb_t *rcb,
678*7ea28254SJohn Hall 	pqi_aio_raid5or6_write_req_t *aio_req)
679*7ea28254SJohn Hall {
680*7ea28254SJohn Hall #ifdef DEBUG_AIO
681*7ea28254SJohn Hall 	DBG_INFO("AIO RAID5or6 Write IU Content, tag# 0x%08x\n", rcb->tag);
682*7ea28254SJohn Hall 	DBG_INFO("%15s: 0x%x\n", "iu_type",	aio_req->header.iu_type);
683*7ea28254SJohn Hall 	DBG_INFO("%15s: 0x%x\n", "comp_feat",	aio_req->header.comp_feature);
684*7ea28254SJohn Hall 	DBG_INFO("%15s: 0x%x\n", "length",	aio_req->header.iu_length);
685*7ea28254SJohn Hall 	DBG_INFO("%15s: 0x%x\n", "resp_qid",	aio_req->response_queue_id);
686*7ea28254SJohn Hall 	DBG_INFO("%15s: 0x%x\n", "req_id",	aio_req->req_id);
687*7ea28254SJohn Hall 	DBG_INFO("%15s: 0x%x\n", "volume_id",	aio_req->volume_id);
688*7ea28254SJohn Hall 	DBG_INFO("%15s: 0x%x\n", "data_it_nexus",
689*7ea28254SJohn Hall 		aio_req->data_it_nexus);
690*7ea28254SJohn Hall 	DBG_INFO("%15s: 0x%x\n", "p_parity_it_nexus",
691*7ea28254SJohn Hall 		aio_req->p_parity_it_nexus);
692*7ea28254SJohn Hall 	DBG_INFO("%15s: 0x%x\n", "q_parity_it_nexus",
693*7ea28254SJohn Hall 		aio_req->q_parity_it_nexus);
694*7ea28254SJohn Hall 	DBG_INFO("%15s: 0x%x\n", "buf_len",	aio_req->buf_len);
695*7ea28254SJohn Hall 	DBG_INFO("%15s:\n", "cmd_flags");
696*7ea28254SJohn Hall 	DBG_INFO("%15s: 0x%x\n", "data_dir",	aio_req->cmd_flags.data_dir);
697*7ea28254SJohn Hall 	DBG_INFO("%15s: 0x%x\n", "partial",	aio_req->cmd_flags.partial);
698*7ea28254SJohn Hall 	DBG_INFO("%15s: 0x%x\n", "mem_type",	aio_req->cmd_flags.mem_type);
699*7ea28254SJohn Hall 	DBG_INFO("%15s: 0x%x\n", "fence",	aio_req->cmd_flags.fence);
700*7ea28254SJohn Hall 	DBG_INFO("%15s: 0x%x\n", "encryption",
701*7ea28254SJohn Hall 		aio_req->cmd_flags.encrypt_enable);
702*7ea28254SJohn Hall 	DBG_INFO("%15s:\n", "attr_prio");
703*7ea28254SJohn Hall 	DBG_INFO("%15s: 0x%x\n", "task_attr",	aio_req->attr_prio.task_attr);
704*7ea28254SJohn Hall 	DBG_INFO("%15s: 0x%x\n", "cmd_prio",	aio_req->attr_prio.cmd_prio);
705*7ea28254SJohn Hall 	DBG_INFO("%15s: 0x%x\n", "dek_index",	aio_req->encrypt_key_index);
706*7ea28254SJohn Hall 	pqisrc_show_cdb(softs, "AIOR56W", rcb, aio_req->cdb);
707*7ea28254SJohn Hall 	DBG_INFO("%15s: 0x%x\n", "err_idx",	aio_req->err_idx);
708*7ea28254SJohn Hall 	DBG_INFO("%15s: 0x%x\n", "num_sg",	aio_req->num_sg);
709*7ea28254SJohn Hall 	DBG_INFO("%15s: 0x%x\n", "cdb_len",	aio_req->cdb_len);
710*7ea28254SJohn Hall 	DBG_INFO("%15s: 0x%x\n", "tweak_lower",	aio_req->encrypt_twk_low);
711*7ea28254SJohn Hall 	DBG_INFO("%15s: 0x%x\n", "tweak_upper",	aio_req->encrypt_twk_high);
712*7ea28254SJohn Hall 	DBG_INFO("%15s: 0x%lx\n", "row",	aio_req->row);
713*7ea28254SJohn Hall #if 0
714*7ea28254SJohn Hall 	DBG_INFO("%15s: 0x%lx\n", "stripe_lba",	aio_req->stripe_lba);
715*7ea28254SJohn Hall 	DBG_INFO("%15s: 0x%p\n", "sg_desc[0].addr",
716*7ea28254SJohn Hall 		(void *)aio_req->sg_desc[0].addr);
717*7ea28254SJohn Hall 	DBG_INFO("%15s: 0x%x\n", "sg_desc[0].len",
718*7ea28254SJohn Hall 		aio_req->sg_desc[0].len);
719*7ea28254SJohn Hall 	DBG_INFO("%15s: 0x%x\n", "sg_desc[0].flags",
720*7ea28254SJohn Hall 		aio_req->sg_desc[0].flags);
721*7ea28254SJohn Hall #endif
722*7ea28254SJohn Hall #endif /* DEBUG_AIO */
723*7ea28254SJohn Hall }
724*7ea28254SJohn Hall 
725*7ea28254SJohn Hall /* Is the cdb a read command? */
726*7ea28254SJohn Hall boolean_t
727*7ea28254SJohn Hall pqisrc_cdb_is_read(uint8_t *cdb)
728*7ea28254SJohn Hall {
729*7ea28254SJohn Hall 	if (cdb[0] == SCMD_READ_6 || cdb[0] == SCMD_READ_10 ||
730*7ea28254SJohn Hall 		cdb[0] == SCMD_READ_12 || cdb[0] == SCMD_READ_16)
731*7ea28254SJohn Hall 		return true;
732*7ea28254SJohn Hall 	return false;
733*7ea28254SJohn Hall }
734*7ea28254SJohn Hall 
735*7ea28254SJohn Hall /* Is the cdb a write command? */
736*7ea28254SJohn Hall boolean_t
737*7ea28254SJohn Hall pqisrc_cdb_is_write(uint8_t *cdb)
738*7ea28254SJohn Hall {
739*7ea28254SJohn Hall 	if (cdb == NULL)
740*7ea28254SJohn Hall 		return false;
741*7ea28254SJohn Hall 
742*7ea28254SJohn Hall 	if (cdb[0] == SCMD_WRITE_6 || cdb[0] == SCMD_WRITE_10 ||
743*7ea28254SJohn Hall 		cdb[0] == SCMD_WRITE_12 || cdb[0] == SCMD_WRITE_16)
744*7ea28254SJohn Hall 		return true;
745*7ea28254SJohn Hall 	return false;
746*7ea28254SJohn Hall }
747*7ea28254SJohn Hall 
748*7ea28254SJohn Hall /*Subroutine used to show the AIO request */
749*7ea28254SJohn Hall void
750*7ea28254SJohn Hall pqisrc_show_aio_io(pqisrc_softstate_t *softs, rcb_t *rcb,
751*7ea28254SJohn Hall 	pqi_aio_req_t *aio_req, uint32_t num_elem_alloted)
752*7ea28254SJohn Hall {
753*7ea28254SJohn Hall 	boolean_t is_write;
754*7ea28254SJohn Hall 	DBG_FUNC("IN\n");
755*7ea28254SJohn Hall 
756*7ea28254SJohn Hall 	is_write = pqisrc_cdb_is_write(rcb->cdbp);
757*7ea28254SJohn Hall 
758*7ea28254SJohn Hall 	if (!is_write) {
759*7ea28254SJohn Hall 		pqisrc_show_aio_common(softs, rcb, aio_req);
760*7ea28254SJohn Hall 		goto out;
761*7ea28254SJohn Hall 	}
762*7ea28254SJohn Hall 
763*7ea28254SJohn Hall 	switch (rcb->dvp->raid_level) {
764*7ea28254SJohn Hall 	case SA_RAID_0:
765*7ea28254SJohn Hall 		pqisrc_show_aio_common(softs, rcb, aio_req);
766*7ea28254SJohn Hall 		break;
767*7ea28254SJohn Hall 	case SA_RAID_1:
768*7ea28254SJohn Hall 	case SA_RAID_ADM:
769*7ea28254SJohn Hall 		pqisrc_show_aio_R1_write(softs, rcb,
770*7ea28254SJohn Hall 			(pqi_aio_raid1_write_req_t *)aio_req);
771*7ea28254SJohn Hall 		break;
772*7ea28254SJohn Hall 	case SA_RAID_5:
773*7ea28254SJohn Hall 	case SA_RAID_6:
774*7ea28254SJohn Hall 		pqisrc_show_aio_R5or6_write(softs, rcb,
775*7ea28254SJohn Hall 			(pqi_aio_raid5or6_write_req_t *)aio_req);
776*7ea28254SJohn Hall 		break;
777*7ea28254SJohn Hall 	}
778*7ea28254SJohn Hall 
779*7ea28254SJohn Hall out:
780*7ea28254SJohn Hall 	DBG_FUNC("OUT\n");
781*7ea28254SJohn Hall 
782*7ea28254SJohn Hall }
783*7ea28254SJohn Hall 
784*7ea28254SJohn Hall 
785*7ea28254SJohn Hall void
786*7ea28254SJohn Hall pqisrc_build_aio_io(pqisrc_softstate_t *softs, rcb_t *rcb,
787*7ea28254SJohn Hall 	pqi_aio_req_t *aio_req, uint32_t num_elem_alloted)
788*7ea28254SJohn Hall {
789*7ea28254SJohn Hall 	boolean_t is_write;
790*7ea28254SJohn Hall 	DBG_FUNC("IN\n");
791*7ea28254SJohn Hall 
792*7ea28254SJohn Hall 	is_write = pqisrc_cdb_is_write(rcb->cdbp);
793*7ea28254SJohn Hall 
794*7ea28254SJohn Hall 	if (is_write) {
795*7ea28254SJohn Hall 		switch (rcb->dvp->raid_level) {
796*7ea28254SJohn Hall 		case SA_RAID_0:
797*7ea28254SJohn Hall 			pqisrc_build_aio_common(softs, aio_req,
798*7ea28254SJohn Hall 				rcb, num_elem_alloted);
799*7ea28254SJohn Hall 			break;
800*7ea28254SJohn Hall 		case SA_RAID_1:
801*7ea28254SJohn Hall 		case SA_RAID_ADM:
802*7ea28254SJohn Hall 			pqisrc_build_aio_R1_write(softs,
803*7ea28254SJohn Hall 				(pqi_aio_raid1_write_req_t *)aio_req,
804*7ea28254SJohn Hall 				rcb, num_elem_alloted);
805*7ea28254SJohn Hall 
806*7ea28254SJohn Hall 			break;
807*7ea28254SJohn Hall 		case SA_RAID_5:
808*7ea28254SJohn Hall 		case SA_RAID_6:
809*7ea28254SJohn Hall 			pqisrc_build_aio_R5or6_write(softs,
810*7ea28254SJohn Hall 				(pqi_aio_raid5or6_write_req_t *)aio_req,
811*7ea28254SJohn Hall 				rcb, num_elem_alloted);
812*7ea28254SJohn Hall 			break;
813*7ea28254SJohn Hall 		}
814*7ea28254SJohn Hall 	} else {
815*7ea28254SJohn Hall 		pqisrc_build_aio_common(softs, aio_req, rcb, num_elem_alloted);
816*7ea28254SJohn Hall 	}
817*7ea28254SJohn Hall 
818*7ea28254SJohn Hall 	pqisrc_show_aio_io(softs, rcb, aio_req, num_elem_alloted);
819*7ea28254SJohn Hall 
820*7ea28254SJohn Hall 	DBG_FUNC("OUT\n");
821*7ea28254SJohn Hall }
822*7ea28254SJohn Hall 
823*7ea28254SJohn Hall /*
824*7ea28254SJohn Hall  *	Return true from this function to prevent AIO from handling this request.
825*7ea28254SJohn Hall  *	True is returned if the request is determined to be part of a stream, or
826*7ea28254SJohn Hall  *	if the controller does not handle AIO at the appropriate RAID level.
827*7ea28254SJohn Hall  */
828*7ea28254SJohn Hall static boolean_t
829*7ea28254SJohn Hall pqisrc_is_parity_write_stream(pqisrc_softstate_t *softs, rcb_t *rcb)
830*7ea28254SJohn Hall {
831*7ea28254SJohn Hall 	os_ticks_t oldest_ticks;
832*7ea28254SJohn Hall 	uint8_t lru_index;
833*7ea28254SJohn Hall 	int i;
834*7ea28254SJohn Hall 	int rc;
835*7ea28254SJohn Hall 	pqi_scsi_dev_t *device;
836*7ea28254SJohn Hall 	struct pqi_stream_data *pqi_stream_data;
837*7ea28254SJohn Hall 	aio_req_locator_t loc;
838*7ea28254SJohn Hall 
839*7ea28254SJohn Hall 	DBG_FUNC("IN\n");
840*7ea28254SJohn Hall 
841*7ea28254SJohn Hall 	rc = fill_lba_for_scsi_rw(softs, rcb->cdbp , &loc);
842*7ea28254SJohn Hall 	if (rc != PQI_STATUS_SUCCESS) {
843*7ea28254SJohn Hall 		return false;
844*7ea28254SJohn Hall 	}
845*7ea28254SJohn Hall 
846*7ea28254SJohn Hall 	/* check writes only */
847*7ea28254SJohn Hall 	if (!pqisrc_cdb_is_write(rcb->cdbp)) {
848*7ea28254SJohn Hall 	    return false;
849*7ea28254SJohn Hall 	}
850*7ea28254SJohn Hall 
851*7ea28254SJohn Hall 	if (!softs->enable_stream_detection) {
852*7ea28254SJohn Hall 		return false;
853*7ea28254SJohn Hall 	}
854*7ea28254SJohn Hall 
855*7ea28254SJohn Hall 	device = rcb->dvp;
856*7ea28254SJohn Hall 	if (!device) {
857*7ea28254SJohn Hall 		return false;
858*7ea28254SJohn Hall 	}
859*7ea28254SJohn Hall 
860*7ea28254SJohn Hall 	/*
861*7ea28254SJohn Hall 	 * check for R5/R6 streams.
862*7ea28254SJohn Hall 	 */
863*7ea28254SJohn Hall 	if (device->raid_level != SA_RAID_5 && device->raid_level != SA_RAID_6) {
864*7ea28254SJohn Hall 		return false;
865*7ea28254SJohn Hall 	}
866*7ea28254SJohn Hall 
867*7ea28254SJohn Hall 	/*
868*7ea28254SJohn Hall 	 * If controller does not support AIO R{5,6} writes, need to send
869*7ea28254SJohn Hall 	 * requests down non-aio path.
870*7ea28254SJohn Hall 	 */
871*7ea28254SJohn Hall 	if ((device->raid_level == SA_RAID_5 && !softs->aio_raid5_write_bypass) ||
872*7ea28254SJohn Hall 		(device->raid_level == SA_RAID_6 && !softs->aio_raid6_write_bypass)) {
873*7ea28254SJohn Hall 		return true;
874*7ea28254SJohn Hall 	}
875*7ea28254SJohn Hall 
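	/*
	 * Scan the per-LUN stream slots: a write whose starting LBA lands at
	 * or just past a tracked next_lba is treated as part of a sequential
	 * write stream and is kept on the RAID path (return true); otherwise
	 * the least recently used slot is recycled to start tracking it.
	 */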
876*7ea28254SJohn Hall 	lru_index = 0;
877*7ea28254SJohn Hall 	oldest_ticks = INT_MAX;
878*7ea28254SJohn Hall 	for (i = 0; i < NUM_STREAMS_PER_LUN; i++) {
879*7ea28254SJohn Hall 		pqi_stream_data = &device->stream_data[i];
880*7ea28254SJohn Hall 		/*
881*7ea28254SJohn Hall 		 * check for adjacent request or request is within
882*7ea28254SJohn Hall 		 * the previous request.
883*7ea28254SJohn Hall 		 */
884*7ea28254SJohn Hall 		if ((pqi_stream_data->next_lba &&
885*7ea28254SJohn Hall 			loc.block.first >= pqi_stream_data->next_lba) &&
886*7ea28254SJohn Hall 			loc.block.first <= pqi_stream_data->next_lba +
887*7ea28254SJohn Hall 				loc.block.cnt) {
888*7ea28254SJohn Hall 			pqi_stream_data->next_lba = loc.block.first +
889*7ea28254SJohn Hall 				loc.block.cnt;
890*7ea28254SJohn Hall 			pqi_stream_data->last_accessed = TICKS;
891*7ea28254SJohn Hall 			return true;
892*7ea28254SJohn Hall 		}
893*7ea28254SJohn Hall 
894*7ea28254SJohn Hall 		/* unused entry */
895*7ea28254SJohn Hall 		if (pqi_stream_data->last_accessed == 0) {
896*7ea28254SJohn Hall 			lru_index = i;
897*7ea28254SJohn Hall 			break;
898*7ea28254SJohn Hall 		}
899*7ea28254SJohn Hall 
900*7ea28254SJohn Hall 		/* Find entry with oldest last accessed time */
901*7ea28254SJohn Hall 		if (pqi_stream_data->last_accessed <= oldest_ticks) {
902*7ea28254SJohn Hall 			oldest_ticks = pqi_stream_data->last_accessed;
903*7ea28254SJohn Hall 			lru_index = i;
904*7ea28254SJohn Hall 		}
905*7ea28254SJohn Hall 	}
906*7ea28254SJohn Hall 
907*7ea28254SJohn Hall 	/*
908*7ea28254SJohn Hall 	 * Set LRU entry
909*7ea28254SJohn Hall 	 */
910*7ea28254SJohn Hall 	pqi_stream_data = &device->stream_data[lru_index];
911*7ea28254SJohn Hall 	pqi_stream_data->last_accessed = TICKS;
912*7ea28254SJohn Hall 	pqi_stream_data->next_lba = loc.block.first + loc.block.cnt;
913*7ea28254SJohn Hall 
914*7ea28254SJohn Hall 	DBG_FUNC("OUT\n");
915*7ea28254SJohn Hall 
916*7ea28254SJohn Hall 	return false;
917*7ea28254SJohn Hall }
918*7ea28254SJohn Hall 
919*7ea28254SJohn Hall /**
920*7ea28254SJohn Hall  Determine if a request is eligible for AIO.  Build/map
921*7ea28254SJohn Hall  the request if using AIO path to a RAID volume.
922*7ea28254SJohn Hall 
923*7ea28254SJohn Hall  Return the path that should be used for this request.
924*7ea28254SJohn Hall */
925*7ea28254SJohn Hall static IO_PATH_T
926*7ea28254SJohn Hall determine_io_path_build_bypass(pqisrc_softstate_t *softs,rcb_t *rcb)
927*7ea28254SJohn Hall {
928*7ea28254SJohn Hall 	IO_PATH_T io_path = AIO_PATH;
929*7ea28254SJohn Hall 	pqi_scsi_dev_t *devp = rcb->dvp;
930*7ea28254SJohn Hall 	int ret = PQI_STATUS_FAILURE;
931*7ea28254SJohn Hall 
932*7ea28254SJohn Hall 	/* Default to using the host CDB directly (it will be used if targeting
933*7ea28254SJohn Hall 		the RAID path or HBA mode). */
934*7ea28254SJohn Hall 	rcb->cdbp = OS_GET_CDBP(rcb);
935*7ea28254SJohn Hall 
936*7ea28254SJohn Hall 	if(!rcb->aio_retry) {
937*7ea28254SJohn Hall 
938*7ea28254SJohn Hall 		/**  IO for Physical Drive, Send in AIO PATH **/
939*7ea28254SJohn Hall 		if(IS_AIO_PATH(devp)) {
940*7ea28254SJohn Hall 			rcb->ioaccel_handle = devp->ioaccel_handle;
941*7ea28254SJohn Hall 			return io_path;
942*7ea28254SJohn Hall 		}
943*7ea28254SJohn Hall 
944*7ea28254SJohn Hall 		/** IO for RAID Volume, ByPass IO, Send in AIO PATH unless part of stream **/
945*7ea28254SJohn Hall 		if (devp->offload_enabled && !pqisrc_is_parity_write_stream(softs, rcb)) {
946*7ea28254SJohn Hall 			ret = pqisrc_build_scsi_cmd_raidbypass(softs, devp, rcb);
947*7ea28254SJohn Hall 		}
948*7ea28254SJohn Hall 
949*7ea28254SJohn Hall 		if (PQI_STATUS_FAILURE == ret) {
950*7ea28254SJohn Hall 			io_path = RAID_PATH;
951*7ea28254SJohn Hall 		} else {
952*7ea28254SJohn Hall 			ASSERT(rcb->cdbp == rcb->bypass_cdb);
953*7ea28254SJohn Hall 		}
954*7ea28254SJohn Hall 	} else {
955*7ea28254SJohn Hall 		/* Retrying failed AIO IO */
956*7ea28254SJohn Hall 		io_path = RAID_PATH;
957*7ea28254SJohn Hall 	}
958*7ea28254SJohn Hall 
959*7ea28254SJohn Hall 	return io_path;
960*7ea28254SJohn Hall }
961*7ea28254SJohn Hall 
962*7ea28254SJohn Hall uint8_t
963*7ea28254SJohn Hall pqisrc_get_aio_data_direction(rcb_t *rcb)
964*7ea28254SJohn Hall {
965*7ea28254SJohn Hall         switch (rcb->cm_ccb->ccb_h.flags & CAM_DIR_MASK) {
966*7ea28254SJohn Hall         case CAM_DIR_IN:  	return SOP_DATA_DIR_FROM_DEVICE;
967*7ea28254SJohn Hall         case CAM_DIR_OUT:   	return SOP_DATA_DIR_TO_DEVICE;
968*7ea28254SJohn Hall         case CAM_DIR_NONE:  	return SOP_DATA_DIR_NONE;
969*7ea28254SJohn Hall         default:		return SOP_DATA_DIR_UNKNOWN;
970*7ea28254SJohn Hall         }
971*7ea28254SJohn Hall }
972*7ea28254SJohn Hall 
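/*
 * Note: this mapping is the inverse of pqisrc_get_aio_data_direction()
 * above; on the RAID path CAM_DIR_IN maps to SOP_DATA_DIR_TO_DEVICE and
 * CAM_DIR_OUT to SOP_DATA_DIR_FROM_DEVICE.
 */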
973*7ea28254SJohn Hall uint8_t
974*7ea28254SJohn Hall pqisrc_get_raid_data_direction(rcb_t *rcb)
975*7ea28254SJohn Hall {
976*7ea28254SJohn Hall         switch (rcb->cm_ccb->ccb_h.flags & CAM_DIR_MASK) {
977*7ea28254SJohn Hall         case CAM_DIR_IN:  	return SOP_DATA_DIR_TO_DEVICE;
978*7ea28254SJohn Hall         case CAM_DIR_OUT:   	return SOP_DATA_DIR_FROM_DEVICE;
979*7ea28254SJohn Hall         case CAM_DIR_NONE:  	return SOP_DATA_DIR_NONE;
980*7ea28254SJohn Hall         default:		return SOP_DATA_DIR_UNKNOWN;
981*7ea28254SJohn Hall         }
9821e66f787SSean Bruno }
9831e66f787SSean Bruno 
9841e66f787SSean Bruno /* Function used to build and send RAID/AIO */
9859fac68fcSPAPANI SRIKANTH int
9869fac68fcSPAPANI SRIKANTH pqisrc_build_send_io(pqisrc_softstate_t *softs,rcb_t *rcb)
9871e66f787SSean Bruno {
9881e66f787SSean Bruno 	ib_queue_t *ib_q_array = softs->op_aio_ib_q;
9891e66f787SSean Bruno 	ib_queue_t *ib_q = NULL;
9901e66f787SSean Bruno 	char *ib_iu = NULL;
991*7ea28254SJohn Hall 	IO_PATH_T io_path;
9921e66f787SSean Bruno 	uint32_t TraverseCount = 0;
9931e66f787SSean Bruno 	int first_qindex = OS_GET_IO_REQ_QINDEX(softs, rcb);
9941e66f787SSean Bruno 	int qindex = first_qindex;
9951e66f787SSean Bruno 	uint32_t num_op_ib_q = softs->num_op_aio_ibq;
9961e66f787SSean Bruno 	uint32_t num_elem_needed;
9971e66f787SSean Bruno 	uint32_t num_elem_alloted = 0;
9981e66f787SSean Bruno 	pqi_scsi_dev_t *devp = rcb->dvp;
999*7ea28254SJohn Hall 	boolean_t is_write;
10001e66f787SSean Bruno 
1001*7ea28254SJohn Hall 	DBG_FUNC("IN\n");
10021e66f787SSean Bruno 
1003*7ea28254SJohn Hall 	/* Note: this will determine if the request is eligible for AIO */
1004*7ea28254SJohn Hall 	io_path = determine_io_path_build_bypass(softs, rcb);
1005*7ea28254SJohn Hall 
1006*7ea28254SJohn Hall 	if (io_path == RAID_PATH)
1007*7ea28254SJohn Hall 	{
1008*7ea28254SJohn Hall 		/* Update direction for RAID path */
1009*7ea28254SJohn Hall 		rcb->data_dir = pqisrc_get_raid_data_direction(rcb);
10109fac68fcSPAPANI SRIKANTH 		num_op_ib_q = softs->num_op_raid_ibq;
10119fac68fcSPAPANI SRIKANTH 		ib_q_array = softs->op_raid_ib_q;
10129fac68fcSPAPANI SRIKANTH 	}
1013*7ea28254SJohn Hall 	else {
1014*7ea28254SJohn Hall 		rcb->data_dir = pqisrc_get_aio_data_direction(rcb);
1015*7ea28254SJohn Hall 		if (rcb->data_dir == SOP_DATA_DIR_UNKNOWN) {
1016*7ea28254SJohn Hall 			DBG_ERR("Unknown Direction\n");
1017*7ea28254SJohn Hall 		}
1018*7ea28254SJohn Hall 	}
10191e66f787SSean Bruno 
1020*7ea28254SJohn Hall 	is_write = pqisrc_cdb_is_write(rcb->cdbp);
1021*7ea28254SJohn Hall 	/* coverity[unchecked_value] */
1022*7ea28254SJohn Hall 	num_elem_needed = pqisrc_num_elem_needed(softs,
1023*7ea28254SJohn Hall 		OS_GET_IO_SG_COUNT(rcb), devp, is_write, io_path);
1024*7ea28254SJohn Hall 	DBG_IO("num_elem_needed :%u",num_elem_needed);
10251e66f787SSean Bruno 
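	/*
	 * Find an IB queue with enough contiguous free elements for the whole
	 * IU.  If every queue is busy on the first pass, retry asking for a
	 * single element (the SG list will then be chained rather than the IU
	 * spanned) before giving up with QFULL.
	 */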
10261e66f787SSean Bruno 	do {
10271e66f787SSean Bruno 		uint32_t num_elem_available;
10281e66f787SSean Bruno 		ib_q = (ib_q_array + qindex);
10291e66f787SSean Bruno 		PQI_LOCK(&ib_q->lock);
10301e66f787SSean Bruno 		num_elem_available = pqisrc_contiguous_free_elem(ib_q->pi_local,
10311e66f787SSean Bruno 					*(ib_q->ci_virt_addr), ib_q->num_elem);
10321e66f787SSean Bruno 
1033*7ea28254SJohn Hall 		DBG_IO("num_elem_available :%u\n",num_elem_available);
10341e66f787SSean Bruno 		if(num_elem_available >= num_elem_needed) {
10351e66f787SSean Bruno 			num_elem_alloted = num_elem_needed;
10361e66f787SSean Bruno 			break;
10371e66f787SSean Bruno 		}
10381e66f787SSean Bruno 		DBG_IO("Current queue is busy! Hop to next queue\n");
10391e66f787SSean Bruno 
10401e66f787SSean Bruno 		PQI_UNLOCK(&ib_q->lock);
10411e66f787SSean Bruno 		qindex = (qindex + 1) % num_op_ib_q;
10421e66f787SSean Bruno 		if(qindex == first_qindex) {
10431e66f787SSean Bruno 			if (num_elem_needed == 1)
10441e66f787SSean Bruno 				break;
10451e66f787SSean Bruno 			TraverseCount += 1;
10461e66f787SSean Bruno 			num_elem_needed = 1;
10471e66f787SSean Bruno 		}
10481e66f787SSean Bruno 	}while(TraverseCount < 2);
10491e66f787SSean Bruno 
1050*7ea28254SJohn Hall 	DBG_IO("num_elem_alloted :%u",num_elem_alloted);
10511e66f787SSean Bruno 	if (num_elem_alloted == 0) {
10521e66f787SSean Bruno 		DBG_WARN("OUT: IB Queues were full\n");
10531e66f787SSean Bruno 		return PQI_STATUS_QFULL;
10541e66f787SSean Bruno 	}
10551e66f787SSean Bruno 
10569fac68fcSPAPANI SRIKANTH 	pqisrc_increment_device_active_io(softs,devp);
10579fac68fcSPAPANI SRIKANTH 
10581e66f787SSean Bruno 	/* Get IB Queue Slot address to build IU */
10591e66f787SSean Bruno 	ib_iu = ib_q->array_virt_addr + (ib_q->pi_local * ib_q->elem_size);
10601e66f787SSean Bruno 
10611e66f787SSean Bruno 	if(io_path == AIO_PATH) {
1062*7ea28254SJohn Hall 		/* Fill in the AIO IU per request and raid type */
10631e66f787SSean Bruno 		pqisrc_build_aio_io(softs, rcb, (pqi_aio_req_t *)ib_iu,
10641e66f787SSean Bruno 			num_elem_alloted);
10651e66f787SSean Bruno 	} else {
10661e66f787SSean Bruno 		/** Build RAID structure **/
10671e66f787SSean Bruno 		pqisrc_build_raid_io(softs, rcb, (pqisrc_raid_req_t *)ib_iu,
10681e66f787SSean Bruno 			num_elem_alloted);
10691e66f787SSean Bruno 	}
10701e66f787SSean Bruno 
10711e66f787SSean Bruno 	rcb->req_pending = true;
10729fac68fcSPAPANI SRIKANTH 	rcb->req_q = ib_q;
10739fac68fcSPAPANI SRIKANTH 	rcb->path = io_path;
10741e66f787SSean Bruno 
1075*7ea28254SJohn Hall 	pqisrc_increment_io_counters(softs, rcb);
1076*7ea28254SJohn Hall 
10771e66f787SSean Bruno 	/* Update the local PI */
10781e66f787SSean Bruno 	ib_q->pi_local = (ib_q->pi_local + num_elem_alloted) % ib_q->num_elem;
10791e66f787SSean Bruno 
1080*7ea28254SJohn Hall 	DBG_IO("ib_q->pi_local : %x\n", ib_q->pi_local);
1081*7ea28254SJohn Hall 	DBG_IO("*ib_q->ci_virt_addr: %x\n",*(ib_q->ci_virt_addr));
10821e66f787SSean Bruno 
10831e66f787SSean Bruno 	/* Inform the fw about the new IU */
10841e66f787SSean Bruno 	PCI_MEM_PUT32(softs, ib_q->pi_register_abs, ib_q->pi_register_offset, ib_q->pi_local);
10851e66f787SSean Bruno 
10861e66f787SSean Bruno 	PQI_UNLOCK(&ib_q->lock);
1087*7ea28254SJohn Hall 	DBG_FUNC("OUT\n");
10881e66f787SSean Bruno 	return PQI_STATUS_SUCCESS;
10891e66f787SSean Bruno }
10901e66f787SSean Bruno 
10911e66f787SSean Bruno /* Subroutine used to set encryption info as part of RAID bypass IO*/
10929fac68fcSPAPANI SRIKANTH static inline void
10939fac68fcSPAPANI SRIKANTH pqisrc_set_enc_info(struct pqi_enc_info *enc_info,
10949fac68fcSPAPANI SRIKANTH 		struct raid_map *raid_map, uint64_t first_block)
10951e66f787SSean Bruno {
10961e66f787SSean Bruno 	uint32_t volume_blk_size;
10971e66f787SSean Bruno 
10981e66f787SSean Bruno 	/*
10991e66f787SSean Bruno 	 * Set the encryption tweak values based on logical block address.
11001e66f787SSean Bruno 	 * If the block size is 512, the tweak value is equal to the LBA.
11011e66f787SSean Bruno 	 * For other block sizes, tweak value is (LBA * block size) / 512.
11021e66f787SSean Bruno 	 */
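	/*
	 * Illustrative example (hypothetical values): with a 4096-byte volume
	 * block size and first_block = 100, the tweak source becomes
	 * (100 * 4096) / 512 = 800; its upper and lower 32-bit halves are
	 * stored in encrypt_tweak_upper/lower below.
	 */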
11031e66f787SSean Bruno 	volume_blk_size = GET_LE32((uint8_t *)&raid_map->volume_blk_size);
11041e66f787SSean Bruno 	if (volume_blk_size != 512)
11051e66f787SSean Bruno 		first_block = (first_block * volume_blk_size) / 512;
11061e66f787SSean Bruno 
11071e66f787SSean Bruno 	enc_info->data_enc_key_index =
11081e66f787SSean Bruno 		GET_LE16((uint8_t *)&raid_map->data_encryption_key_index);
11091e66f787SSean Bruno 	enc_info->encrypt_tweak_upper = ((uint32_t)(((first_block) >> 16) >> 16));
11101e66f787SSean Bruno 	enc_info->encrypt_tweak_lower = ((uint32_t)(first_block));
11111e66f787SSean Bruno }
11121e66f787SSean Bruno 
1113*7ea28254SJohn Hall 
1114*7ea28254SJohn Hall /*
1115*7ea28254SJohn Hall  * Attempt to perform offload RAID mapping for a logical volume I/O.
1116*7ea28254SJohn Hall  */
1117*7ea28254SJohn Hall 
1118*7ea28254SJohn Hall #define HPSA_RAID_0		0
1119*7ea28254SJohn Hall #define HPSA_RAID_4		1
1120*7ea28254SJohn Hall #define HPSA_RAID_1		2	/* also used for RAID 10 */
1121*7ea28254SJohn Hall #define HPSA_RAID_5		3	/* also used for RAID 50 */
1122*7ea28254SJohn Hall #define HPSA_RAID_51		4
1123*7ea28254SJohn Hall #define HPSA_RAID_6		5	/* also used for RAID 60 */
1124*7ea28254SJohn Hall #define HPSA_RAID_ADM		6	/* also used for RAID 1+0 ADM */
1125*7ea28254SJohn Hall #define HPSA_RAID_MAX		HPSA_RAID_ADM
1126*7ea28254SJohn Hall #define HPSA_RAID_UNKNOWN	0xff
1127*7ea28254SJohn Hall 
11281e66f787SSean Bruno /* Subroutine used to parse the scsi opcode and build the CDB for RAID bypass*/
1129*7ea28254SJohn Hall static int
1130*7ea28254SJohn Hall fill_lba_for_scsi_rw(pqisrc_softstate_t *softs, uint8_t *cdb, aio_req_locator_t *l)
11319fac68fcSPAPANI SRIKANTH {
11329fac68fcSPAPANI SRIKANTH 
1133*7ea28254SJohn Hall 	if (!l) {
1134*7ea28254SJohn Hall 		DBG_INFO("No locator ptr: AIO ineligible\n");
1135*7ea28254SJohn Hall 		return PQI_STATUS_FAILURE;
1136*7ea28254SJohn Hall 	}
1137*7ea28254SJohn Hall 
1138*7ea28254SJohn Hall 	if (cdb == NULL)
1139*7ea28254SJohn Hall 		return PQI_STATUS_FAILURE;
1140*7ea28254SJohn Hall 
11411e66f787SSean Bruno 	switch (cdb[0]) {
11421e66f787SSean Bruno 	case SCMD_WRITE_6:
1143*7ea28254SJohn Hall 		l->is_write = true;
1144*7ea28254SJohn Hall 		/* coverity[fallthrough] */
11451e66f787SSean Bruno 	case SCMD_READ_6:
1146*7ea28254SJohn Hall 		l->block.first = (uint64_t)(((cdb[1] & 0x1F) << 16) |
11471e66f787SSean Bruno 				(cdb[2] << 8) | cdb[3]);
1148*7ea28254SJohn Hall 		l->block.cnt = (uint32_t)cdb[4];
1149*7ea28254SJohn Hall 		if (l->block.cnt == 0)
1150*7ea28254SJohn Hall 			l->block.cnt = 256; /* blkcnt 0 means 256 */
11511e66f787SSean Bruno 		break;
11521e66f787SSean Bruno 	case SCMD_WRITE_10:
1153*7ea28254SJohn Hall 		l->is_write = true;
1154*7ea28254SJohn Hall 		/* coverity[fallthrough] */
11551e66f787SSean Bruno 	case SCMD_READ_10:
1156*7ea28254SJohn Hall 		l->block.first = (uint64_t)GET_BE32(&cdb[2]);
1157*7ea28254SJohn Hall 		l->block.cnt = (uint32_t)GET_BE16(&cdb[7]);
11581e66f787SSean Bruno 		break;
11591e66f787SSean Bruno 	case SCMD_WRITE_12:
1160*7ea28254SJohn Hall 		l->is_write = true;
1161*7ea28254SJohn Hall 		/* coverity[fallthrough] */
11621e66f787SSean Bruno 	case SCMD_READ_12:
1163*7ea28254SJohn Hall 		l->block.first = (uint64_t)GET_BE32(&cdb[2]);
1164*7ea28254SJohn Hall 		l->block.cnt = GET_BE32(&cdb[6]);
11651e66f787SSean Bruno 		break;
11661e66f787SSean Bruno 	case SCMD_WRITE_16:
1167*7ea28254SJohn Hall 		l->is_write = true;
1168*7ea28254SJohn Hall 		/* coverity[fallthrough] */
11691e66f787SSean Bruno 	case SCMD_READ_16:
1170*7ea28254SJohn Hall 		l->block.first = GET_BE64(&cdb[2]);
1171*7ea28254SJohn Hall 		l->block.cnt = GET_BE32(&cdb[10]);
11721e66f787SSean Bruno 		break;
11731e66f787SSean Bruno 	default:
11741e66f787SSean Bruno 		/* Process via normal I/O path. */
1175*7ea28254SJohn Hall 		DBG_AIO("NOT read or write 6/10/12/16: AIO ineligible\n");
11761e66f787SSean Bruno 		return PQI_STATUS_FAILURE;
11771e66f787SSean Bruno 	}
11781e66f787SSean Bruno 	return PQI_STATUS_SUCCESS;
11791e66f787SSean Bruno }
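
/*
 * Illustrative example (hypothetical CDB contents): a READ(10) of 8 blocks
 * at LBA 0x1000 arrives as
 *   cdb[] = { 0x28, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x08, 0x00 }
 * for which fill_lba_for_scsi_rw() sets l->block.first = 0x1000,
 * l->block.cnt = 8 and leaves l->is_write false.
 */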
11801e66f787SSean Bruno 
1181*7ea28254SJohn Hall 
1182*7ea28254SJohn Hall /* determine whether writes to certain types of RAID are supported. */
1183*7ea28254SJohn Hall inline boolean_t
1184*7ea28254SJohn Hall pqisrc_is_supported_write(pqisrc_softstate_t *softs,
1185*7ea28254SJohn Hall 	pqi_scsi_dev_t *device)
1186*7ea28254SJohn Hall {
1187*7ea28254SJohn Hall 
1188*7ea28254SJohn Hall 	DBG_FUNC("IN\n");
1189*7ea28254SJohn Hall 
1190*7ea28254SJohn Hall 	/* Raid0 was always supported */
1191*7ea28254SJohn Hall 	if (device->raid_level == SA_RAID_0)
1192*7ea28254SJohn Hall 		return true;
1193*7ea28254SJohn Hall 
1194*7ea28254SJohn Hall 	/* Module params for the individual advanced AIO write features
1195*7ea28254SJohn Hall 	 * may be enabled (they apply to ALL controllers), but some
1196*7ea28254SJohn Hall 	 * controllers do not support advanced AIO writes at all.
1197*7ea28254SJohn Hall 	 */
1198*7ea28254SJohn Hall 	if (!softs->adv_aio_capable)
1199*7ea28254SJohn Hall 		return false;
1200*7ea28254SJohn Hall 
1201*7ea28254SJohn Hall 	/* if the raid write bypass feature is turned on,
1202*7ea28254SJohn Hall 	 * then the write is supported.
1203*7ea28254SJohn Hall 	 */
1204*7ea28254SJohn Hall 	switch (device->raid_level) {
1205*7ea28254SJohn Hall 	case SA_RAID_1:
1206*7ea28254SJohn Hall 	case SA_RAID_ADM:
1207*7ea28254SJohn Hall 		if (softs->aio_raid1_write_bypass)
1208*7ea28254SJohn Hall 			return true;
1209*7ea28254SJohn Hall 		break;
1210*7ea28254SJohn Hall 	case SA_RAID_5:
1211*7ea28254SJohn Hall 		if (softs->aio_raid5_write_bypass)
1212*7ea28254SJohn Hall 			return true;
1213*7ea28254SJohn Hall 		break;
1214*7ea28254SJohn Hall 	case SA_RAID_6:
1215*7ea28254SJohn Hall 		if (softs->aio_raid6_write_bypass)
1216*7ea28254SJohn Hall 			return true;
1217*7ea28254SJohn Hall 	}
1218*7ea28254SJohn Hall 
1219*7ea28254SJohn Hall 	/* otherwise, it must be an unsupported write. */
1220*7ea28254SJohn Hall 	DBG_IO("AIO ineligible: write not supported for raid type\n");
1221*7ea28254SJohn Hall 	DBG_FUNC("OUT\n");
1222*7ea28254SJohn Hall 	return false;
1223*7ea28254SJohn Hall 
1224*7ea28254SJohn Hall }
1225*7ea28254SJohn Hall 
1226*7ea28254SJohn Hall /* check for zero-byte transfers, invalid blocks, and wraparound */
1227*7ea28254SJohn Hall static inline boolean_t
1228*7ea28254SJohn Hall pqisrc_is_invalid_block(pqisrc_softstate_t *softs, aio_req_locator_t *l)
1229*7ea28254SJohn Hall {
1230*7ea28254SJohn Hall 	DBG_FUNC("IN\n");
1231*7ea28254SJohn Hall 
1232*7ea28254SJohn Hall 	if (l->block.cnt == 0) {
1233*7ea28254SJohn Hall 		DBG_AIO("AIO ineligible: blk_cnt=0\n");
1234*7ea28254SJohn Hall 		DBG_FUNC("OUT\n");
1235*7ea28254SJohn Hall 		return true;
1236*7ea28254SJohn Hall 	}
1237*7ea28254SJohn Hall 
1238*7ea28254SJohn Hall 	if (l->block.last < l->block.first ||
1239*7ea28254SJohn Hall 		l->block.last >=
1240*7ea28254SJohn Hall 			GET_LE64((uint8_t *)&l->raid_map->volume_blk_cnt)) {
1241*7ea28254SJohn Hall 		DBG_AIO("AIO ineligible: last block < first or beyond volume end\n");
1242*7ea28254SJohn Hall 		DBG_FUNC("OUT\n");
1243*7ea28254SJohn Hall 		return true;
1244*7ea28254SJohn Hall 	}
1245*7ea28254SJohn Hall 
1246*7ea28254SJohn Hall 	DBG_FUNC("OUT\n");
1247*7ea28254SJohn Hall 	return false;
1248*7ea28254SJohn Hall }
1249*7ea28254SJohn Hall 
1250*7ea28254SJohn Hall /* Compute various attributes of request's location */
1251*7ea28254SJohn Hall static inline boolean_t
1252*7ea28254SJohn Hall pqisrc_calc_disk_params(pqisrc_softstate_t *softs, aio_req_locator_t *l,  rcb_t *rcb)
1253*7ea28254SJohn Hall {
1254*7ea28254SJohn Hall 	DBG_FUNC("IN\n");
1255*7ea28254SJohn Hall 
1256*7ea28254SJohn Hall 	/* grab #disks, strip size, and layout map count from raid map */
1257*7ea28254SJohn Hall 	l->row.data_disks =
1258*7ea28254SJohn Hall 		GET_LE16((uint8_t *)&l->raid_map->data_disks_per_row);
1259*7ea28254SJohn Hall 	l->strip_sz =
1260*7ea28254SJohn Hall 		GET_LE16((uint8_t *)(&l->raid_map->strip_size));
1261*7ea28254SJohn Hall 	l->map.layout_map_count =
1262*7ea28254SJohn Hall 		GET_LE16((uint8_t *)(&l->raid_map->layout_map_count));
1263*7ea28254SJohn Hall 
1264*7ea28254SJohn Hall 	/* Calculate stripe information for the request. */
1265*7ea28254SJohn Hall 	l->row.blks_per_row =  l->row.data_disks * l->strip_sz;
1266*7ea28254SJohn Hall 	if (!l->row.blks_per_row || !l->strip_sz) {
1267*7ea28254SJohn Hall 		DBG_AIO("AIO ineligible: zero strip size or blocks per row\n");
1268*7ea28254SJohn Hall 		DBG_FUNC("OUT\n");
1269*7ea28254SJohn Hall 		return false;
1270*7ea28254SJohn Hall 	}
1271*7ea28254SJohn Hall 	/* use __udivdi3 ? */
1272*7ea28254SJohn Hall 	rcb->blocks_per_row = l->row.blks_per_row;
1273*7ea28254SJohn Hall 	l->row.first = l->block.first / l->row.blks_per_row;
1274*7ea28254SJohn Hall 	rcb->row_num = l->row.first;
1275*7ea28254SJohn Hall 	l->row.last = l->block.last / l->row.blks_per_row;
1276*7ea28254SJohn Hall 	l->row.offset_first = (uint32_t)(l->block.first -
1277*7ea28254SJohn Hall 		(l->row.first * l->row.blks_per_row));
1278*7ea28254SJohn Hall 	l->row.offset_last = (uint32_t)(l->block.last -
1279*7ea28254SJohn Hall 		(l->row.last * l->row.blks_per_row));
1280*7ea28254SJohn Hall 	l->col.first = l->row.offset_first / l->strip_sz;
1281*7ea28254SJohn Hall 	l->col.last = l->row.offset_last / l->strip_sz;
1282*7ea28254SJohn Hall 
1283*7ea28254SJohn Hall 	DBG_FUNC("OUT\n");
1284*7ea28254SJohn Hall 	return true;
1285*7ea28254SJohn Hall }
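
/*
 * Illustrative example (hypothetical values): with strip_sz = 128 and
 * data_disks = 3, blks_per_row = 384.  For block.first = 1000 this yields
 * row.first = 1000 / 384 = 2, row.offset_first = 1000 - 2 * 384 = 232 and
 * col.first = 232 / 128 = 1, i.e. the request starts in row 2, column 1.
 */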
1286*7ea28254SJohn Hall 
1287*7ea28254SJohn Hall /* Not AIO-eligible if it isn't a single row/column. */
1288*7ea28254SJohn Hall static inline boolean_t
1289*7ea28254SJohn Hall pqisrc_is_single_row_column(pqisrc_softstate_t *softs, aio_req_locator_t *l)
1290*7ea28254SJohn Hall {
1291*7ea28254SJohn Hall 	boolean_t ret = true;
1292*7ea28254SJohn Hall 	DBG_FUNC("IN\n");
1293*7ea28254SJohn Hall 
1294*7ea28254SJohn Hall 	if (l->row.first != l->row.last || l->col.first != l->col.last) {
1295*7ea28254SJohn Hall 		DBG_AIO("AIO ineligible: request spans multiple rows or columns\n");
1296*7ea28254SJohn Hall 		ret = false;
1297*7ea28254SJohn Hall 	}
1298*7ea28254SJohn Hall 	DBG_FUNC("OUT\n");
1299*7ea28254SJohn Hall 	return ret;
1300*7ea28254SJohn Hall }
1301*7ea28254SJohn Hall 
1302*7ea28254SJohn Hall /* figure out disks/row, row, and map index. */
1303*7ea28254SJohn Hall static inline boolean_t
1304*7ea28254SJohn Hall pqisrc_set_map_row_and_idx(pqisrc_softstate_t *softs, aio_req_locator_t *l, rcb_t *rcb)
1305*7ea28254SJohn Hall {
1306*7ea28254SJohn Hall 	if (!l->row.data_disks) {
1307*7ea28254SJohn Hall 		DBG_INFO("AIO ineligible: no data disks?\n");
1308*7ea28254SJohn Hall 		return false;
1309*7ea28254SJohn Hall 	}
1310*7ea28254SJohn Hall 
1311*7ea28254SJohn Hall 	l->row.total_disks = l->row.data_disks +
1312*7ea28254SJohn Hall 		LE_16(l->raid_map->metadata_disks_per_row);
1313*7ea28254SJohn Hall 
1314*7ea28254SJohn Hall 	l->map.row = ((uint32_t)(l->row.first >>
1315*7ea28254SJohn Hall 		l->raid_map->parity_rotation_shift)) %
1316*7ea28254SJohn Hall 		GET_LE16((uint8_t *)(&l->raid_map->row_cnt));
1317*7ea28254SJohn Hall 
1318*7ea28254SJohn Hall 	l->map.idx = (l->map.row * l->row.total_disks) + l->col.first;
1319*7ea28254SJohn Hall 	rcb->raid_map_index = l->map.idx;
1320*7ea28254SJohn Hall 	rcb->raid_map_row = l->map.row;
1321*7ea28254SJohn Hall 
1322*7ea28254SJohn Hall 	return true;
1323*7ea28254SJohn Hall }
1324*7ea28254SJohn Hall 
1325*7ea28254SJohn Hall /* set the mirror for a raid 1/10/ADM */
1326*7ea28254SJohn Hall static inline void
1327*7ea28254SJohn Hall pqisrc_set_read_mirror(pqisrc_softstate_t *softs,
1328*7ea28254SJohn Hall 	pqi_scsi_dev_t *device, aio_req_locator_t *l)
1329*7ea28254SJohn Hall {
1330*7ea28254SJohn Hall 	/* Avoid direct use of device->offload_to_mirror within this
1331*7ea28254SJohn Hall 	 * function since multiple threads might simultaneously
1332*7ea28254SJohn Hall 	 * increment it beyond the range of device->layout_map_count - 1.
1333*7ea28254SJohn Hall 	 */
1334*7ea28254SJohn Hall 
1335*7ea28254SJohn Hall 	int mirror = device->offload_to_mirror[l->map.idx];
1336*7ea28254SJohn Hall 	int next_mirror = mirror + 1;
1337*7ea28254SJohn Hall 
1338*7ea28254SJohn Hall 	if (next_mirror >= l->map.layout_map_count)
1339*7ea28254SJohn Hall 		next_mirror = 0;
1340*7ea28254SJohn Hall 
1341*7ea28254SJohn Hall 	device->offload_to_mirror[l->map.idx] = next_mirror;
1342*7ea28254SJohn Hall 	l->map.idx += mirror * l->row.data_disks;
1343*7ea28254SJohn Hall }
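
/*
 * Illustrative example (hypothetical values): on a two-way RAID 1 volume
 * (layout_map_count = 2) with offload_to_mirror[idx] currently 0, the read
 * above goes to mirror 0 and the per-device counter advances to 1, so the
 * next read of the same map index is steered to the second copy; map.idx is
 * bumped by mirror * data_disks to select that copy's entry in the map.
 */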
1344*7ea28254SJohn Hall 
1345*7ea28254SJohn Hall /* collect ioaccel handles for mirrors of given location. */
1346*7ea28254SJohn Hall static inline boolean_t
1347*7ea28254SJohn Hall pqisrc_set_write_mirrors(
1348*7ea28254SJohn Hall 	pqisrc_softstate_t *softs,
1349*7ea28254SJohn Hall 	pqi_scsi_dev_t *device,
1350*7ea28254SJohn Hall 	aio_req_locator_t *l,
1351*7ea28254SJohn Hall 	rcb_t *rcb)
1352*7ea28254SJohn Hall {
1353*7ea28254SJohn Hall 	uint32_t mirror = 0;
1354*7ea28254SJohn Hall 	uint32_t index;
1355*7ea28254SJohn Hall 
1356*7ea28254SJohn Hall 	if (l->map.layout_map_count > PQISRC_MAX_SUPPORTED_MIRRORS)
1357*7ea28254SJohn Hall 		return false;
1358*7ea28254SJohn Hall 
1359*7ea28254SJohn Hall 	do {
1360*7ea28254SJohn Hall 		index = l->map.idx + (l->row.data_disks * mirror);
1361*7ea28254SJohn Hall 		rcb->it_nexus[mirror] =
1362*7ea28254SJohn Hall 			l->raid_map->dev_data[index].ioaccel_handle;
1363*7ea28254SJohn Hall 		mirror++;
1364*7ea28254SJohn Hall 	} while (mirror != l->map.layout_map_count);
1365*7ea28254SJohn Hall 
1366*7ea28254SJohn Hall 	return true;
1367*7ea28254SJohn Hall }
1368*7ea28254SJohn Hall 
1369*7ea28254SJohn Hall /* Make sure first and last block are in the same R5/R6 RAID group. */
1370*7ea28254SJohn Hall static inline boolean_t
1371*7ea28254SJohn Hall pqisrc_is_r5or6_single_group(pqisrc_softstate_t *softs, aio_req_locator_t *l)
1372*7ea28254SJohn Hall {
1373*7ea28254SJohn Hall 	boolean_t ret = true;
1374*7ea28254SJohn Hall 
1375*7ea28254SJohn Hall 	DBG_FUNC("IN\n");
1376*7ea28254SJohn Hall 	l->r5or6.row.blks_per_row = l->strip_sz * l->row.data_disks;
1377*7ea28254SJohn Hall 	l->stripesz = l->r5or6.row.blks_per_row * l->map.layout_map_count;
1378*7ea28254SJohn Hall 	l->group.first = (l->block.first % l->stripesz) /
1379*7ea28254SJohn Hall 				l->r5or6.row.blks_per_row;
1380*7ea28254SJohn Hall 	l->group.last = (l->block.last % l->stripesz) /
1381*7ea28254SJohn Hall 				l->r5or6.row.blks_per_row;
1382*7ea28254SJohn Hall 
1383*7ea28254SJohn Hall 	if (l->group.first != l->group.last) {
1384*7ea28254SJohn Hall 		DBG_AIO("AIO ineligible: R5/R6 request spans RAID groups\n");
1385*7ea28254SJohn Hall 		ret = false;
1386*7ea28254SJohn Hall 	}
1387*7ea28254SJohn Hall 
1388*7ea28254SJohn Hall 	DBG_FUNC("OUT\n");
1389*7ea28254SJohn Hall 	ASSERT(ret == true);
1390*7ea28254SJohn Hall 	return ret;
1391*7ea28254SJohn Hall }
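
/*
 * Illustrative example (hypothetical values): with strip_sz = 128,
 * data_disks = 3 and layout_map_count = 2, blks_per_row = 384 and
 * stripesz = 768.  A request covering blocks 700..710 lands entirely in
 * group (700 % 768) / 384 = 1 and stays AIO-eligible, whereas blocks
 * 380..390 would straddle groups 0 and 1 and be rejected.
 */
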
1392*7ea28254SJohn Hall /* Make sure R5 or R6 request doesn't span rows. */
1393*7ea28254SJohn Hall static inline boolean_t
1394*7ea28254SJohn Hall pqisrc_is_r5or6_single_row(pqisrc_softstate_t *softs, aio_req_locator_t *l)
1395*7ea28254SJohn Hall {
1396*7ea28254SJohn Hall 	boolean_t ret = true;
1397*7ea28254SJohn Hall 
1398*7ea28254SJohn Hall 	DBG_FUNC("IN\n");
1399*7ea28254SJohn Hall 
1400*7ea28254SJohn Hall 	/* figure row nums containing first & last block */
1401*7ea28254SJohn Hall 	l->row.first = l->r5or6.row.first =
1402*7ea28254SJohn Hall 		l->block.first / l->stripesz;
1403*7ea28254SJohn Hall 	l->r5or6.row.last = l->block.last / l->stripesz;
1404*7ea28254SJohn Hall 
1405*7ea28254SJohn Hall 	if (l->r5or6.row.first != l->r5or6.row.last) {
1406*7ea28254SJohn Hall 		DBG_AIO("AIO ineligible: R5/R6 request spans rows\n");
1407*7ea28254SJohn Hall 		ret = false;
1408*7ea28254SJohn Hall 	}
1409*7ea28254SJohn Hall 
1410*7ea28254SJohn Hall 	DBG_FUNC("OUT\n");
1411*7ea28254SJohn Hall 	ASSERT(ret == true);
1412*7ea28254SJohn Hall 	return ret;
1413*7ea28254SJohn Hall }
1414*7ea28254SJohn Hall 
1415*7ea28254SJohn Hall /* Make sure R5 or R6 request doesn't span columns. */
1416*7ea28254SJohn Hall static inline boolean_t
1417*7ea28254SJohn Hall pqisrc_is_r5or6_single_column(pqisrc_softstate_t *softs, aio_req_locator_t *l)
1418*7ea28254SJohn Hall {
1419*7ea28254SJohn Hall 	boolean_t ret = true;
1420*7ea28254SJohn Hall 
1421*7ea28254SJohn Hall 	/* Find the columns of the first and last block */
1422*7ea28254SJohn Hall 	l->row.offset_first = l->r5or6.row.offset_first =
1423*7ea28254SJohn Hall 		(uint32_t)((l->block.first % l->stripesz) %
1424*7ea28254SJohn Hall 		l->r5or6.row.blks_per_row);
1425*7ea28254SJohn Hall 	l->r5or6.row.offset_last =
1426*7ea28254SJohn Hall 		(uint32_t)((l->block.last % l->stripesz) %
1427*7ea28254SJohn Hall 		l->r5or6.row.blks_per_row);
1428*7ea28254SJohn Hall 
1429*7ea28254SJohn Hall 	l->col.first = l->r5or6.row.offset_first / l->strip_sz;
1430*7ea28254SJohn Hall 	l->r5or6.col.first = l->col.first;
1431*7ea28254SJohn Hall 	l->r5or6.col.last = l->r5or6.row.offset_last / l->strip_sz;
1432*7ea28254SJohn Hall 
1433*7ea28254SJohn Hall 	if (l->r5or6.col.first != l->r5or6.col.last) {
1434*7ea28254SJohn Hall 		DBG_AIO("AIO ineligible: R5/R6 request spans columns\n");
1435*7ea28254SJohn Hall 		ret = false;
1436*7ea28254SJohn Hall 	}
1437*7ea28254SJohn Hall 
1438*7ea28254SJohn Hall 	ASSERT(ret == true);
1439*7ea28254SJohn Hall 	return ret;
1440*7ea28254SJohn Hall }
1441*7ea28254SJohn Hall 
1442*7ea28254SJohn Hall 
1443*7ea28254SJohn Hall /* Set the map row and index for a R5 or R6 AIO request */
1444*7ea28254SJohn Hall static inline void
1445*7ea28254SJohn Hall pqisrc_set_r5or6_row_and_index(aio_req_locator_t *l,
1446*7ea28254SJohn Hall 	rcb_t *rcb)
1447*7ea28254SJohn Hall {
1448*7ea28254SJohn Hall 	l->map.row = ((uint32_t)
1449*7ea28254SJohn Hall 		(l->row.first >> l->raid_map->parity_rotation_shift)) %
1450*7ea28254SJohn Hall 		GET_LE16((uint8_t *)(&l->raid_map->row_cnt));
1451*7ea28254SJohn Hall 
1452*7ea28254SJohn Hall 	l->map.idx = (l->group.first *
1453*7ea28254SJohn Hall 		(GET_LE16((uint8_t *)(&l->raid_map->row_cnt))
1454*7ea28254SJohn Hall 		* l->row.total_disks))
1455*7ea28254SJohn Hall 		+ (l->map.row * l->row.total_disks)
1456*7ea28254SJohn Hall 		+ l->col.first;
1457*7ea28254SJohn Hall 
1458*7ea28254SJohn Hall 	rcb->raid_map_index = l->map.idx;
1459*7ea28254SJohn Hall 	rcb->raid_map_row = l->map.row;
1460*7ea28254SJohn Hall }
1461*7ea28254SJohn Hall 
1462*7ea28254SJohn Hall /* calculate physical disk block for aio request */
1463*7ea28254SJohn Hall static inline boolean_t
1464*7ea28254SJohn Hall pqisrc_calc_aio_block(aio_req_locator_t *l)
1465*7ea28254SJohn Hall {
1466*7ea28254SJohn Hall 	boolean_t ret = true;
1467*7ea28254SJohn Hall 
1468*7ea28254SJohn Hall 	l->block.disk_block =
1469*7ea28254SJohn Hall 		GET_LE64((uint8_t *) (&l->raid_map->disk_starting_blk))
1470*7ea28254SJohn Hall 		+ (l->row.first * l->strip_sz)
1471*7ea28254SJohn Hall 		+ ((uint64_t)(l->row.offset_first) - (uint64_t)(l->col.first) * l->strip_sz);
1472*7ea28254SJohn Hall 
1473*7ea28254SJohn Hall 	/* any values we should be checking here? if not convert to void */
1474*7ea28254SJohn Hall 	return ret;
1475*7ea28254SJohn Hall }
1476*7ea28254SJohn Hall 
1477*7ea28254SJohn Hall /* Handle differing logical/physical block sizes. */
1478*7ea28254SJohn Hall static inline uint32_t
1479*7ea28254SJohn Hall pqisrc_handle_blk_size_diffs(aio_req_locator_t *l)
1480*7ea28254SJohn Hall {
1481*7ea28254SJohn Hall 	uint32_t disk_blk_cnt;
1482*7ea28254SJohn Hall 	disk_blk_cnt = l->block.cnt;
1483*7ea28254SJohn Hall 
1484*7ea28254SJohn Hall 	if (l->raid_map->phys_blk_shift) {
1485*7ea28254SJohn Hall 		l->block.disk_block <<= l->raid_map->phys_blk_shift;
1486*7ea28254SJohn Hall 		disk_blk_cnt <<= l->raid_map->phys_blk_shift;
1487*7ea28254SJohn Hall 	}
1488*7ea28254SJohn Hall 	return disk_blk_cnt;
1489*7ea28254SJohn Hall }
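
/*
 * Illustrative example (hypothetical values): if the RAID map advertises
 * phys_blk_shift = 3 (e.g. 4K logical blocks on 512-byte physical blocks),
 * a request for 8 logical blocks at disk_block 100 is converted to 64
 * physical blocks starting at physical block 800 before the CDB is built.
 */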
1490*7ea28254SJohn Hall 
1491*7ea28254SJohn Hall /* Make sure AIO request doesn't exceed the max that AIO device can
1492*7ea28254SJohn Hall  * handle based on dev type, Raid level, and encryption status.
1493*7ea28254SJohn Hall  * TODO: make limits dynamic when this becomes possible.
1494*7ea28254SJohn Hall  */
1495*7ea28254SJohn Hall inline boolean_t
1496*7ea28254SJohn Hall pqisrc_aio_req_too_big(pqisrc_softstate_t *softs,
1497*7ea28254SJohn Hall 	pqi_scsi_dev_t *device, rcb_t *rcb,
1498*7ea28254SJohn Hall 	aio_req_locator_t *l, uint32_t disk_blk_cnt)
1499*7ea28254SJohn Hall {
1500*7ea28254SJohn Hall 	boolean_t ret = false;
1501*7ea28254SJohn Hall 	uint32_t dev_max;
1502*7ea28254SJohn Hall 	uint32_t size = disk_blk_cnt * device->raid_map->volume_blk_size;
1503*7ea28254SJohn Hall 	dev_max = size;
1504*7ea28254SJohn Hall 
1505*7ea28254SJohn Hall 	/* filter for nvme crypto */
1506*7ea28254SJohn Hall 	if (device->is_nvme && rcb->encrypt_enable) {
1507*7ea28254SJohn Hall 		if (softs->max_aio_rw_xfer_crypto_nvme != 0) {
1508*7ea28254SJohn Hall 			dev_max = MIN(dev_max,softs->max_aio_rw_xfer_crypto_nvme);
1509*7ea28254SJohn Hall 		}
1510*7ea28254SJohn Hall 	}
1511*7ea28254SJohn Hall 
1512*7ea28254SJohn Hall 	/* filter for RAID 5/6/50/60 */
1513*7ea28254SJohn Hall 	if (!device->is_physical_device &&
1514*7ea28254SJohn Hall 		(device->raid_level == SA_RAID_5 ||
1515*7ea28254SJohn Hall 		device->raid_level == SA_RAID_51 ||
1516*7ea28254SJohn Hall 		device->raid_level == SA_RAID_6)) {
1517*7ea28254SJohn Hall 		if (softs->max_aio_write_raid5_6 != 0) {
1518*7ea28254SJohn Hall 			dev_max = MIN(dev_max,softs->max_aio_write_raid5_6);
1519*7ea28254SJohn Hall 		}
1520*7ea28254SJohn Hall 	}
1521*7ea28254SJohn Hall 
1522*7ea28254SJohn Hall 	/* filter for RAID ADM */
1523*7ea28254SJohn Hall 	if (!device->is_physical_device &&
1524*7ea28254SJohn Hall 		(device->raid_level == SA_RAID_ADM) &&
1525*7ea28254SJohn Hall 		(softs->max_aio_write_raid1_10_3drv != 0)) {
1526*7ea28254SJohn Hall 			dev_max = MIN(dev_max,
1527*7ea28254SJohn Hall 				softs->max_aio_write_raid1_10_3drv);
1528*7ea28254SJohn Hall 	}
1529*7ea28254SJohn Hall 
1530*7ea28254SJohn Hall 	/* filter for RAID 1/10 */
1531*7ea28254SJohn Hall 	if (!device->is_physical_device &&
1532*7ea28254SJohn Hall 		(device->raid_level == SA_RAID_1) &&
1533*7ea28254SJohn Hall 		(softs->max_aio_write_raid1_10_2drv != 0)) {
1534*7ea28254SJohn Hall 			dev_max = MIN(dev_max,
1535*7ea28254SJohn Hall 				softs->max_aio_write_raid1_10_2drv);
1536*7ea28254SJohn Hall 	}
1537*7ea28254SJohn Hall 
1538*7ea28254SJohn Hall 
1539*7ea28254SJohn Hall 	if (size > dev_max) {
1540*7ea28254SJohn Hall 		DBG_AIO("AIO ineligible: size=%u, max=%u\n", size, dev_max);
1541*7ea28254SJohn Hall 		ret = true;
1542*7ea28254SJohn Hall 	}
1543*7ea28254SJohn Hall 
1544*7ea28254SJohn Hall 	return ret;
1545*7ea28254SJohn Hall }
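
/*
 * Illustrative example (hypothetical values): an encrypted 64-block write
 * to an NVMe-backed volume with volume_blk_size = 512 gives size = 32768;
 * if max_aio_rw_xfer_crypto_nvme were 16384 the request would exceed
 * dev_max, pqisrc_aio_req_too_big() would return true and the I/O would be
 * sent down the RAID path instead.
 */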
1546*7ea28254SJohn Hall 
1547*7ea28254SJohn Hall 
1548*7ea28254SJohn Hall #ifdef DEBUG_RAID_MAP
1549*7ea28254SJohn Hall static inline void
1550*7ea28254SJohn Hall pqisrc_aio_show_raid_map(pqisrc_softstate_t *softs, struct raid_map *m)
1551*7ea28254SJohn Hall {
1552*7ea28254SJohn Hall 	int i;
1553*7ea28254SJohn Hall 
1554*7ea28254SJohn Hall 	if (!m) {
1555*7ea28254SJohn Hall 		DBG_WARN("No RAID MAP!\n");
1556*7ea28254SJohn Hall 		return;
1557*7ea28254SJohn Hall 	}
1558*7ea28254SJohn Hall 	DBG_INFO("======= Raid Map ================\n");
1559*7ea28254SJohn Hall 	DBG_INFO("%-25s: 0x%x\n", "StructureSize", m->structure_size);
1560*7ea28254SJohn Hall 	DBG_INFO("%-25s: 0x%x\n", "LogicalBlockSize", m->volume_blk_size);
1561*7ea28254SJohn Hall 	DBG_INFO("%-25s: 0x%lx\n", "LogicalBlockCount", m->volume_blk_cnt);
1562*7ea28254SJohn Hall 	DBG_INFO("%-25s: 0x%x\n", "PhysicalBlockShift", m->phys_blk_shift);
1563*7ea28254SJohn Hall 	DBG_INFO("%-25s: 0x%x\n", "ParityRotationShift",
1564*7ea28254SJohn Hall 				m->parity_rotation_shift);
1565*7ea28254SJohn Hall 	DBG_INFO("%-25s: 0x%x\n", "StripSize", m->strip_size);
1566*7ea28254SJohn Hall 	DBG_INFO("%-25s: 0x%lx\n", "DiskStartingBlock", m->disk_starting_blk);
1567*7ea28254SJohn Hall 	DBG_INFO("%-25s: 0x%lx\n", "DiskBlockCount", m->disk_blk_cnt);
1568*7ea28254SJohn Hall 	DBG_INFO("%-25s: 0x%x\n", "DataDisksPerRow", m->data_disks_per_row);
1569*7ea28254SJohn Hall 	DBG_INFO("%-25s: 0x%x\n", "MetadataDisksPerRow",
1570*7ea28254SJohn Hall 				m->metadata_disks_per_row);
1571*7ea28254SJohn Hall 	DBG_INFO("%-25s: 0x%x\n", "RowCount", m->row_cnt);
1572*7ea28254SJohn Hall 	DBG_INFO("%-25s: 0x%x\n", "LayoutMapCnt", m->layout_map_count);
1573*7ea28254SJohn Hall 	DBG_INFO("%-25s: 0x%x\n", "fEncryption", m->flags);
1574*7ea28254SJohn Hall 	DBG_INFO("%-25s: 0x%x\n", "DEK", m->data_encryption_key_index);
1575*7ea28254SJohn Hall 	for (i = 0; i < RAID_MAP_MAX_ENTRIES; i++) {
1576*7ea28254SJohn Hall 		if (m->dev_data[i].ioaccel_handle == 0)
1577*7ea28254SJohn Hall 			break;
1578*7ea28254SJohn Hall 		DBG_INFO("%-25s: %d: 0x%04x\n", "ioaccel_handle, disk",
1579*7ea28254SJohn Hall 			i, m->dev_data[i].ioaccel_handle);
1580*7ea28254SJohn Hall 	}
1581*7ea28254SJohn Hall }
1582*7ea28254SJohn Hall #endif /* DEBUG_RAID_MAP */
1583*7ea28254SJohn Hall 
1584*7ea28254SJohn Hall static inline void
1585*7ea28254SJohn Hall pqisrc_aio_show_locator_info(pqisrc_softstate_t *softs,
1586*7ea28254SJohn Hall 	aio_req_locator_t *l, uint32_t disk_blk_cnt, rcb_t *rcb)
1587*7ea28254SJohn Hall {
1588*7ea28254SJohn Hall #ifdef DEBUG_AIO_LOCATOR
1589*7ea28254SJohn Hall 	pqisrc_aio_show_raid_map(softs, l->raid_map);
1590*7ea28254SJohn Hall 
1591*7ea28254SJohn Hall 	DBG_INFO("======= AIO Locator Content, tag#0x%08x =====\n", rcb->tag);
1592*7ea28254SJohn Hall 	DBG_INFO("%-25s: 0x%lx\n", "block.first", l->block.first);
1593*7ea28254SJohn Hall 	DBG_INFO("%-25s: 0x%lx\n", "block.last", l->block.last);
1594*7ea28254SJohn Hall 	DBG_INFO("%-25s: 0x%x\n", "block.cnt", l->block.cnt);
1595*7ea28254SJohn Hall 	DBG_INFO("%-25s: 0x%lx\n", "block.disk_block", l->block.disk_block);
1596*7ea28254SJohn Hall 	DBG_INFO("%-25s: 0x%x\n", "row.blks_per_row", l->row.blks_per_row);
1597*7ea28254SJohn Hall 	DBG_INFO("%-25s: 0x%lx\n", "row.first", l->row.first);
1598*7ea28254SJohn Hall 	DBG_INFO("%-25s: 0x%lx\n", "row.last", l->row.last);
1599*7ea28254SJohn Hall 	DBG_INFO("%-25s: 0x%x\n", "row.offset_first", l->row.offset_first);
1600*7ea28254SJohn Hall 	DBG_INFO("%-25s: 0x%x\n", "row.offset_last", l->row.offset_last);
1601*7ea28254SJohn Hall 	DBG_INFO("%-25s: 0x%x\n", "row.data_disks", l->row.data_disks);
1602*7ea28254SJohn Hall 	DBG_INFO("%-25s: 0x%x\n", "row.total_disks", l->row.total_disks);
1603*7ea28254SJohn Hall 	DBG_INFO("%-25s: 0x%x\n", "col.first", l->col.first);
1604*7ea28254SJohn Hall 	DBG_INFO("%-25s: 0x%x\n", "col.last", l->col.last);
1605*7ea28254SJohn Hall 
1606*7ea28254SJohn Hall 	if (l->raid_level == SA_RAID_5 || l->raid_level == SA_RAID_6) {
1607*7ea28254SJohn Hall 		DBG_INFO("%-25s: 0x%x\n", "r5or6.row.blks_per_row",
1608*7ea28254SJohn Hall 				l->r5or6.row.blks_per_row);
1609*7ea28254SJohn Hall 		DBG_INFO("%-25s: 0x%lx\n", "r5or6.row.first", l->r5or6.row.first);
1610*7ea28254SJohn Hall 		DBG_INFO("%-25s: 0x%lx\n", "r5or6.row.last", l->r5or6.row.last);
1611*7ea28254SJohn Hall 		DBG_INFO("%-25s: 0x%x\n", "r5or6.row.offset_first",
1612*7ea28254SJohn Hall 					l->r5or6.row.offset_first);
1613*7ea28254SJohn Hall 		DBG_INFO("%-25s: 0x%x\n", "r5or6.row.offset_last",
1614*7ea28254SJohn Hall 					l->r5or6.row.offset_last);
1615*7ea28254SJohn Hall 		DBG_INFO("%-25s: 0x%x\n", "r5or6.row.data_disks",
1616*7ea28254SJohn Hall 					l->r5or6.row.data_disks);
1617*7ea28254SJohn Hall 		DBG_INFO("%-25s: 0x%x\n", "r5or6.row.total_disks",
1618*7ea28254SJohn Hall 					l->r5or6.row.total_disks);
1619*7ea28254SJohn Hall 		DBG_INFO("%-25s: 0x%x\n", "r5or6.col.first", l->r5or6.col.first);
1620*7ea28254SJohn Hall 		DBG_INFO("%-25s: 0x%x\n", "r5or6.col.last", l->r5or6.col.last);
1621*7ea28254SJohn Hall 	}
1622*7ea28254SJohn Hall 	DBG_INFO("%-25s: 0x%x\n", "map.row", l->map.row);
1623*7ea28254SJohn Hall 	DBG_INFO("%-25s: 0x%x\n", "map.idx", l->map.idx);
1624*7ea28254SJohn Hall 	DBG_INFO("%-25s: 0x%x\n", "map.layout_map_count",
1625*7ea28254SJohn Hall 				l->map.layout_map_count);
1626*7ea28254SJohn Hall 	DBG_INFO("%-25s: 0x%x\n", "group.first", l->group.first);
1627*7ea28254SJohn Hall 	DBG_INFO("%-25s: 0x%x\n", "group.last", l->group.last);
1628*7ea28254SJohn Hall 	DBG_INFO("%-25s: 0x%x\n", "group.cur", l->group.cur);
1629*7ea28254SJohn Hall 	DBG_INFO("%-25s: %d\n", "is_write", l->is_write);
1630*7ea28254SJohn Hall 	DBG_INFO("%-25s: 0x%x\n", "stripesz", l->stripesz);
1631*7ea28254SJohn Hall 	DBG_INFO("%-25s: 0x%x\n", "strip_sz", l->strip_sz);
1632*7ea28254SJohn Hall 	DBG_INFO("%-25s: %d\n", "offload_to_mirror", l->offload_to_mirror);
1633*7ea28254SJohn Hall 	DBG_INFO("%-25s: %d\n", "raid_level", l->raid_level);
1634*7ea28254SJohn Hall 
1635*7ea28254SJohn Hall #endif /* DEBUG_AIO_LOCATOR */
1636*7ea28254SJohn Hall }
1637*7ea28254SJohn Hall 
1638*7ea28254SJohn Hall /* build the aio cdb */
1639*7ea28254SJohn Hall inline void
1640*7ea28254SJohn Hall pqisrc_aio_build_cdb(aio_req_locator_t *l,
1641*7ea28254SJohn Hall 		uint32_t disk_blk_cnt, rcb_t *rcb, uint8_t *cdb)
1642*7ea28254SJohn Hall {
1643*7ea28254SJohn Hall 	uint8_t cdb_length;
1644*7ea28254SJohn Hall 
1645*7ea28254SJohn Hall 	if (l->block.disk_block > 0xffffffff) {
1646*7ea28254SJohn Hall 		cdb[0] = l->is_write ? SCMD_WRITE_16 : SCMD_READ_16;
1647*7ea28254SJohn Hall 		cdb[1] = 0;
1648*7ea28254SJohn Hall 		PUT_BE64(l->block.disk_block, &cdb[2]);
1649*7ea28254SJohn Hall 		PUT_BE32(disk_blk_cnt, &cdb[10]);
1650*7ea28254SJohn Hall 		cdb[15] = 0;
1651*7ea28254SJohn Hall 		cdb_length = 16;
1652*7ea28254SJohn Hall 	} else {
1653*7ea28254SJohn Hall 		cdb[0] = l->is_write ? SCMD_WRITE_10 : SCMD_READ_10;
1654*7ea28254SJohn Hall 		cdb[1] = 0;
1655*7ea28254SJohn Hall 		PUT_BE32(l->block.disk_block, &cdb[2]);
1656*7ea28254SJohn Hall 		cdb[6] = 0;
1657*7ea28254SJohn Hall 		PUT_BE16(disk_blk_cnt, &cdb[7]);
1658*7ea28254SJohn Hall 		cdb[9] = 0;
1659*7ea28254SJohn Hall 		cdb_length = 10;
1660*7ea28254SJohn Hall 	}
1661*7ea28254SJohn Hall 
1662*7ea28254SJohn Hall 	rcb->cmdlen = cdb_length;
1663*7ea28254SJohn Hall 
1664*7ea28254SJohn Hall }
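
/*
 * Illustrative example: a mapped disk block of 0x100000000 (just past the
 * 32-bit limit) forces the 16-byte CDB form above, while anything that fits
 * in 32 bits is issued as a 10-byte READ/WRITE with a 16-bit block count.
 */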
1665*7ea28254SJohn Hall 
16669fac68fcSPAPANI SRIKANTH /* print any arbitrary buffer of length total_len */
16679fac68fcSPAPANI SRIKANTH void
16689fac68fcSPAPANI SRIKANTH pqisrc_print_buffer(pqisrc_softstate_t *softs, char *msg, void *user_buf,
16699fac68fcSPAPANI SRIKANTH 		uint32_t total_len, uint32_t flags)
16709fac68fcSPAPANI SRIKANTH {
16719fac68fcSPAPANI SRIKANTH #define LINE_BUF_LEN 60
16729fac68fcSPAPANI SRIKANTH #define INDEX_PER_LINE 16
16739fac68fcSPAPANI SRIKANTH 	uint32_t buf_consumed = 0;
16749fac68fcSPAPANI SRIKANTH 	int ii;
16759fac68fcSPAPANI SRIKANTH 	char line_buf[LINE_BUF_LEN];
16769fac68fcSPAPANI SRIKANTH 	int line_len; /* written length per line */
16779fac68fcSPAPANI SRIKANTH 	uint8_t this_char;
16789fac68fcSPAPANI SRIKANTH 
16799fac68fcSPAPANI SRIKANTH 	if (user_buf == NULL)
16809fac68fcSPAPANI SRIKANTH 		return;
16819fac68fcSPAPANI SRIKANTH 
1682*7ea28254SJohn Hall 	memset(line_buf, 0, LINE_BUF_LEN);
1683*7ea28254SJohn Hall 
16849fac68fcSPAPANI SRIKANTH 	/* Print index columns */
16859fac68fcSPAPANI SRIKANTH 	if (flags & PRINT_FLAG_HDR_COLUMN)
16869fac68fcSPAPANI SRIKANTH 	{
16879fac68fcSPAPANI SRIKANTH 		for (ii = 0, line_len = 0; ii < MIN(total_len, 16); ii++)
16889fac68fcSPAPANI SRIKANTH 		{
16899fac68fcSPAPANI SRIKANTH 			line_len += snprintf(line_buf + line_len, (LINE_BUF_LEN - line_len), "%02d ", ii);
16909fac68fcSPAPANI SRIKANTH 			if ((line_len + 4) >= LINE_BUF_LEN)
16919fac68fcSPAPANI SRIKANTH 				break;
16929fac68fcSPAPANI SRIKANTH 		}
1693*7ea28254SJohn Hall 		DBG_INFO("%15.15s:[ %s ]\n", "header", line_buf);
16949fac68fcSPAPANI SRIKANTH 	}
16959fac68fcSPAPANI SRIKANTH 
16969fac68fcSPAPANI SRIKANTH 	/* Print the buffer contents, INDEX_PER_LINE bytes per line */
16979fac68fcSPAPANI SRIKANTH 	while(buf_consumed < total_len)
16989fac68fcSPAPANI SRIKANTH 	{
16999fac68fcSPAPANI SRIKANTH 		memset(line_buf, 0, LINE_BUF_LEN);
17009fac68fcSPAPANI SRIKANTH 
17019fac68fcSPAPANI SRIKANTH 		for (ii = 0, line_len = 0; ii < INDEX_PER_LINE; ii++)
17029fac68fcSPAPANI SRIKANTH 		{
17039fac68fcSPAPANI SRIKANTH 			this_char = *((char*)(user_buf) + buf_consumed);
17049fac68fcSPAPANI SRIKANTH 			line_len += snprintf(line_buf + line_len, (LINE_BUF_LEN - line_len), "%02x ", this_char);
17059fac68fcSPAPANI SRIKANTH 
17069fac68fcSPAPANI SRIKANTH 			buf_consumed++;
17079fac68fcSPAPANI SRIKANTH 			if (buf_consumed >= total_len || (line_len + 4) >= LINE_BUF_LEN)
17089fac68fcSPAPANI SRIKANTH 				break;
17099fac68fcSPAPANI SRIKANTH 		}
1710*7ea28254SJohn Hall 		DBG_INFO("%15.15s:[ %s ]\n", msg, line_buf);
17119fac68fcSPAPANI SRIKANTH 	}
17129fac68fcSPAPANI SRIKANTH }
17139fac68fcSPAPANI SRIKANTH 
1714*7ea28254SJohn Hall /* print CDB with column header */
1715*7ea28254SJohn Hall void
1716*7ea28254SJohn Hall pqisrc_show_cdb(pqisrc_softstate_t *softs, char *msg, rcb_t *rcb, uint8_t *cdb)
1717*7ea28254SJohn Hall {
1718*7ea28254SJohn Hall 	/* Print the CDB contents */
1719*7ea28254SJohn Hall 	pqisrc_print_buffer(softs, msg, cdb, rcb->cmdlen, PRINT_FLAG_HDR_COLUMN);
1720*7ea28254SJohn Hall }
1721*7ea28254SJohn Hall 
1722*7ea28254SJohn Hall void
1723*7ea28254SJohn Hall pqisrc_show_rcb_details(pqisrc_softstate_t *softs, rcb_t *rcb, char *msg, void *err_info)
1724*7ea28254SJohn Hall {
1725*7ea28254SJohn Hall 	pqi_scsi_dev_t *devp;
1726*7ea28254SJohn Hall 
1727*7ea28254SJohn Hall 	if (rcb == NULL || rcb->dvp == NULL)
1728*7ea28254SJohn Hall 	{
1729*7ea28254SJohn Hall 		DBG_ERR("Invalid rcb or dev ptr! rcb=%p\n", rcb);
1730*7ea28254SJohn Hall 		return;
1731*7ea28254SJohn Hall 	}
1732*7ea28254SJohn Hall 
1733*7ea28254SJohn Hall 	devp = rcb->dvp;
1734*7ea28254SJohn Hall 
1735*7ea28254SJohn Hall 	/* print the host and mapped CDB */
1736*7ea28254SJohn Hall 	DBG_INFO("\n");
1737*7ea28254SJohn Hall 	DBG_INFO("----- Start Dump: %s -----\n", msg);
1738*7ea28254SJohn Hall 	pqisrc_print_buffer(softs, "host cdb", OS_GET_CDBP(rcb), rcb->cmdlen, PRINT_FLAG_HDR_COLUMN);
1739*7ea28254SJohn Hall 	if (OS_GET_CDBP(rcb) != rcb->cdbp)
1740*7ea28254SJohn Hall 		pqisrc_print_buffer(softs, "aio mapped cdb", rcb->cdbp, rcb->cmdlen, 0);
1741*7ea28254SJohn Hall 
1742*7ea28254SJohn Hall 	DBG_INFO("tag=0x%x dir=%u host_timeout=%ums\n", rcb->tag,
1743*7ea28254SJohn Hall 		rcb->data_dir, (uint32_t)rcb->host_timeout_ms);
1744*7ea28254SJohn Hall 
1745*7ea28254SJohn Hall 	DBG_INFO("BTL: %d:%d:%d addr=0x%x\n", devp->bus, devp->target,
1746*7ea28254SJohn Hall 		devp->lun, GET_LE32(devp->scsi3addr));
1747*7ea28254SJohn Hall 
1748*7ea28254SJohn Hall 	if (rcb->path == AIO_PATH)
1749*7ea28254SJohn Hall 	{
1750*7ea28254SJohn Hall 		DBG_INFO("handle=0x%x\n", rcb->ioaccel_handle);
1751*7ea28254SJohn Hall 		DBG_INFO("row=%u blk/row=%u index=%u map_row=%u\n",
1752*7ea28254SJohn Hall 			rcb->row_num, rcb->blocks_per_row, rcb->raid_map_index, rcb->raid_map_row);
1753*7ea28254SJohn Hall 
1754*7ea28254SJohn Hall 		if (err_info)
1755*7ea28254SJohn Hall 			pqisrc_show_aio_error_info(softs, rcb, err_info);
1756*7ea28254SJohn Hall 	}
1757*7ea28254SJohn Hall 
1758*7ea28254SJohn Hall 	else /* RAID path */
1759*7ea28254SJohn Hall 	{
1760*7ea28254SJohn Hall 		if (err_info)
1761*7ea28254SJohn Hall 			pqisrc_show_raid_error_info(softs, rcb, err_info);
1762*7ea28254SJohn Hall 	}
1763*7ea28254SJohn Hall 
1764*7ea28254SJohn Hall 
1765*7ea28254SJohn Hall 	DBG_INFO("-----  Done -----\n\n");
1766*7ea28254SJohn Hall }
1767*7ea28254SJohn Hall 
17689fac68fcSPAPANI SRIKANTH 
17691e66f787SSean Bruno /*
17701e66f787SSean Bruno  * Function used to build and send RAID bypass request to the adapter
17711e66f787SSean Bruno  */
17729fac68fcSPAPANI SRIKANTH int
1773*7ea28254SJohn Hall pqisrc_build_scsi_cmd_raidbypass(pqisrc_softstate_t *softs,
1774*7ea28254SJohn Hall 			pqi_scsi_dev_t *device, rcb_t *rcb)
17751e66f787SSean Bruno {
17761e66f787SSean Bruno 	uint32_t disk_blk_cnt;
1777*7ea28254SJohn Hall 	struct aio_req_locator loc;
1778*7ea28254SJohn Hall 	struct aio_req_locator *l = &loc;
1779*7ea28254SJohn Hall 	int rc;
1780*7ea28254SJohn Hall 	memset(l, 0, sizeof(*l));
1781*7ea28254SJohn Hall 
17821e66f787SSean Bruno 	DBG_FUNC("IN\n");
17831e66f787SSean Bruno 
1784*7ea28254SJohn Hall 	if (device == NULL) {
1785*7ea28254SJohn Hall 		DBG_INFO("device is NULL\n");
17861e66f787SSean Bruno 		return PQI_STATUS_FAILURE;
1787*7ea28254SJohn Hall 	}
1788*7ea28254SJohn Hall 	if (device->raid_map == NULL) {
1789*7ea28254SJohn Hall 		DBG_INFO("tag=0x%x BTL: %d:%d:%d Raid map is NULL\n",
1790*7ea28254SJohn Hall 			rcb->tag, device->bus, device->target, device->lun);
1791*7ea28254SJohn Hall 		return PQI_STATUS_FAILURE;
1792*7ea28254SJohn Hall 	}
1793*7ea28254SJohn Hall 
1794*7ea28254SJohn Hall 	/* Check for eligible op, get LBA and block count. */
1795*7ea28254SJohn Hall 	rc = fill_lba_for_scsi_rw(softs, OS_GET_CDBP(rcb), l);
1796*7ea28254SJohn Hall 	if (rc == PQI_STATUS_FAILURE)
1797aeb665b5SEd Maste 		return PQI_STATUS_FAILURE;
17981e66f787SSean Bruno 
1799*7ea28254SJohn Hall 	if (l->is_write && !pqisrc_is_supported_write(softs, device))
18001e66f787SSean Bruno 		return PQI_STATUS_FAILURE;
18011e66f787SSean Bruno 
1802*7ea28254SJohn Hall 	l->raid_map = device->raid_map;
1803*7ea28254SJohn Hall 	l->block.last = l->block.first + l->block.cnt - 1;
1804*7ea28254SJohn Hall 	l->raid_level = device->raid_level;
18051e66f787SSean Bruno 
1806*7ea28254SJohn Hall 	if (pqisrc_is_invalid_block(softs, l))
18071e66f787SSean Bruno 		return PQI_STATUS_FAILURE;
18081e66f787SSean Bruno 
1809*7ea28254SJohn Hall 	if (!pqisrc_calc_disk_params(softs, l, rcb))
1810*7ea28254SJohn Hall 		return PQI_STATUS_FAILURE;
18111e66f787SSean Bruno 
1812*7ea28254SJohn Hall 	if (!pqisrc_is_single_row_column(softs, l))
1813*7ea28254SJohn Hall 		return PQI_STATUS_FAILURE;
18149fac68fcSPAPANI SRIKANTH 
1815*7ea28254SJohn Hall 	if (!pqisrc_set_map_row_and_idx(softs, l, rcb))
18161e66f787SSean Bruno 		return PQI_STATUS_FAILURE;
18171e66f787SSean Bruno 
18181e66f787SSean Bruno 	/* Proceeding with driver mapping. */
18191e66f787SSean Bruno 
18201e66f787SSean Bruno 
1821*7ea28254SJohn Hall 	switch (device->raid_level) {
1822*7ea28254SJohn Hall 	case SA_RAID_1:
1823*7ea28254SJohn Hall 	case SA_RAID_ADM:
1824*7ea28254SJohn Hall 		if (l->is_write) {
1825*7ea28254SJohn Hall 			if (!pqisrc_set_write_mirrors(softs, device, l, rcb))
1826*7ea28254SJohn Hall 				return PQI_STATUS_FAILURE;
1827*7ea28254SJohn Hall 		} else
1828*7ea28254SJohn Hall 			pqisrc_set_read_mirror(softs, device, l);
1829*7ea28254SJohn Hall 		break;
1830*7ea28254SJohn Hall 	case SA_RAID_5:
1831*7ea28254SJohn Hall 	case SA_RAID_6:
1832*7ea28254SJohn Hall 		if (l->map.layout_map_count > 1 || l->is_write) {
18331e66f787SSean Bruno 
1834*7ea28254SJohn Hall 			if (!pqisrc_is_r5or6_single_group(softs, l))
18351e66f787SSean Bruno 				return PQI_STATUS_FAILURE;
18361e66f787SSean Bruno 
1837*7ea28254SJohn Hall 			if (!pqisrc_is_r5or6_single_row(softs, l))
18381e66f787SSean Bruno 				return PQI_STATUS_FAILURE;
18391e66f787SSean Bruno 
1840*7ea28254SJohn Hall 			if (!pqisrc_is_r5or6_single_column(softs, l))
18411e66f787SSean Bruno 				return PQI_STATUS_FAILURE;
18421e66f787SSean Bruno 
1843*7ea28254SJohn Hall 			pqisrc_set_r5or6_row_and_index(l, rcb);
1844*7ea28254SJohn Hall 		}
1845*7ea28254SJohn Hall 		break;
18461e66f787SSean Bruno 	}
18471e66f787SSean Bruno 
1848*7ea28254SJohn Hall 	if (l->map.idx >= RAID_MAP_MAX_ENTRIES) {
1849*7ea28254SJohn Hall 		DBG_INFO("AIO ineligible: index exceeds max map entries\n");
1850*7ea28254SJohn Hall 		return PQI_STATUS_FAILURE;
18511e66f787SSean Bruno 	}
18521e66f787SSean Bruno 
1853*7ea28254SJohn Hall 	rcb->ioaccel_handle =
1854*7ea28254SJohn Hall 		l->raid_map->dev_data[l->map.idx].ioaccel_handle;
1855*7ea28254SJohn Hall 
1856*7ea28254SJohn Hall 	if (!pqisrc_calc_aio_block(l))
18571e66f787SSean Bruno 		return PQI_STATUS_FAILURE;
18581e66f787SSean Bruno 
1859*7ea28254SJohn Hall 	disk_blk_cnt = pqisrc_handle_blk_size_diffs(l);
1860*7ea28254SJohn Hall 
1861*7ea28254SJohn Hall 
1862*7ea28254SJohn Hall 	/* Set encryption flag if needed. */
1863*7ea28254SJohn Hall 	rcb->encrypt_enable = false;
1864*7ea28254SJohn Hall 	if (GET_LE16((uint8_t *)(&l->raid_map->flags)) &
1865*7ea28254SJohn Hall 		RAID_MAP_ENCRYPTION_ENABLED) {
1866*7ea28254SJohn Hall 		pqisrc_set_enc_info(&rcb->enc_info, l->raid_map,
1867*7ea28254SJohn Hall 			l->block.first);
1868*7ea28254SJohn Hall 		rcb->encrypt_enable = true;
1869*7ea28254SJohn Hall 	}
1870*7ea28254SJohn Hall 
1871*7ea28254SJohn Hall 	if (pqisrc_aio_req_too_big(softs, device, rcb, l, disk_blk_cnt))
1872*7ea28254SJohn Hall 		return PQI_STATUS_FAILURE;
1873*7ea28254SJohn Hall 
1874*7ea28254SJohn Hall 	/* set the cdb ptr to the local bypass cdb */
1875*7ea28254SJohn Hall 	rcb->cdbp = &rcb->bypass_cdb[0];
1876*7ea28254SJohn Hall 
18771e66f787SSean Bruno 	/* Build the new CDB for the physical disk I/O. */
1878*7ea28254SJohn Hall 	pqisrc_aio_build_cdb(l, disk_blk_cnt, rcb, rcb->cdbp);
18791e66f787SSean Bruno 
1880*7ea28254SJohn Hall 	pqisrc_aio_show_locator_info(softs, l, disk_blk_cnt, rcb);
18811e66f787SSean Bruno 
1882*7ea28254SJohn Hall 	DBG_FUNC("OUT\n");
18831e66f787SSean Bruno 
18841e66f787SSean Bruno 	return PQI_STATUS_SUCCESS;
18851e66f787SSean Bruno }
18861e66f787SSean Bruno 
18879fac68fcSPAPANI SRIKANTH /* Function used to submit an AIO TMF to the adapter
18889fac68fcSPAPANI SRIKANTH  * DEVICE_RESET is not supported.
18899fac68fcSPAPANI SRIKANTH  */
1890*7ea28254SJohn Hall 
18919fac68fcSPAPANI SRIKANTH static int
18929fac68fcSPAPANI SRIKANTH pqisrc_send_aio_tmf(pqisrc_softstate_t *softs, pqi_scsi_dev_t *devp,
18939fac68fcSPAPANI SRIKANTH                     rcb_t *rcb, rcb_t *rcb_to_manage, int tmf_type)
18941e66f787SSean Bruno {
18951e66f787SSean Bruno 	int rval = PQI_STATUS_SUCCESS;
18969fac68fcSPAPANI SRIKANTH 	pqi_aio_tmf_req_t tmf_req;
18979fac68fcSPAPANI SRIKANTH 	ib_queue_t *op_ib_q = NULL;
1898*7ea28254SJohn Hall 	boolean_t is_write;
18991e66f787SSean Bruno 
19009fac68fcSPAPANI SRIKANTH 	memset(&tmf_req, 0, sizeof(pqi_aio_tmf_req_t));
19011e66f787SSean Bruno 
1902*7ea28254SJohn Hall 	DBG_FUNC("IN\n");
19031e66f787SSean Bruno 
19049fac68fcSPAPANI SRIKANTH 	tmf_req.header.iu_type = PQI_REQUEST_IU_AIO_TASK_MANAGEMENT;
19051e66f787SSean Bruno 	tmf_req.header.iu_length = sizeof(tmf_req) - sizeof(iu_header_t);
19061e66f787SSean Bruno 	tmf_req.req_id = rcb->tag;
19079fac68fcSPAPANI SRIKANTH 	tmf_req.error_idx = rcb->tag;
19089fac68fcSPAPANI SRIKANTH 	tmf_req.nexus = devp->ioaccel_handle;
1909*7ea28254SJohn Hall 	/* memcpy(tmf_req.lun, devp->scsi3addr, sizeof(tmf_req.lun)); */
19101e66f787SSean Bruno 	tmf_req.tmf = tmf_type;
19111e66f787SSean Bruno 	tmf_req.resp_qid = OS_GET_TMF_RESP_QID(softs, rcb);
19129fac68fcSPAPANI SRIKANTH 	op_ib_q = &softs->op_aio_ib_q[0];
1913*7ea28254SJohn Hall 	is_write = pqisrc_cdb_is_write(rcb->cdbp);
1914*7ea28254SJohn Hall 
1915*7ea28254SJohn Hall 	uint64_t lun = rcb->cm_ccb->ccb_h.target_lun;
1916*7ea28254SJohn Hall 	if (lun && (rcb->dvp->is_multi_lun)) {
1917*7ea28254SJohn Hall 		int_to_scsilun(lun, tmf_req.lun);
1918*7ea28254SJohn Hall 	}
1919*7ea28254SJohn Hall 	else {
1920*7ea28254SJohn Hall 		memset(tmf_req.lun, 0, sizeof(tmf_req.lun));
1921*7ea28254SJohn Hall 	}
19229fac68fcSPAPANI SRIKANTH 
19239fac68fcSPAPANI SRIKANTH 	if (tmf_type == SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK) {
19249fac68fcSPAPANI SRIKANTH 		tmf_req.req_id_to_manage = rcb_to_manage->tag;
19259fac68fcSPAPANI SRIKANTH 		tmf_req.nexus = rcb_to_manage->ioaccel_handle;
19269fac68fcSPAPANI SRIKANTH 	}
19279fac68fcSPAPANI SRIKANTH 
1928*7ea28254SJohn Hall 	if (devp->raid_level == SA_RAID_1 ||
1929*7ea28254SJohn Hall 	    devp->raid_level == SA_RAID_5 ||
1930*7ea28254SJohn Hall 	    devp->raid_level == SA_RAID_6) {
1931*7ea28254SJohn Hall 		if (tmf_type == SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK && is_write)
1932*7ea28254SJohn Hall 			tmf_req.header.iu_type = PQI_REQUEST_IU_AIO_BYPASS_TASK_MGMT;
1933*7ea28254SJohn Hall 	}
19341e66f787SSean Bruno 
19354f77349dSWarner Losh 	DBG_WARN("aio tmf: iu_type=0x%x req_id_to_manage=0x%x\n",
19364f77349dSWarner Losh 		tmf_req.header.iu_type, tmf_req.req_id_to_manage);
1937*7ea28254SJohn Hall 	DBG_WARN("aio tmf: req_id=0x%x nexus=0x%x tmf=0x%x QID=%u\n",
19384f77349dSWarner Losh 		tmf_req.req_id, tmf_req.nexus, tmf_req.tmf, op_ib_q->q_id);
19394f77349dSWarner Losh 
19404f77349dSWarner Losh 	rcb->path = AIO_PATH;
19411e66f787SSean Bruno 	rcb->req_pending = true;
19429fac68fcSPAPANI SRIKANTH 	/* Timed-out TMF response goes here */
19439fac68fcSPAPANI SRIKANTH 	rcb->error_cmp_callback = pqisrc_process_aio_response_error;
19441e66f787SSean Bruno 
19459fac68fcSPAPANI SRIKANTH 	rval = pqisrc_submit_cmnd(softs, op_ib_q, &tmf_req);
19461e66f787SSean Bruno 	if (rval != PQI_STATUS_SUCCESS) {
19471e66f787SSean Bruno 		DBG_ERR("Unable to submit command rval=%d\n", rval);
19481e66f787SSean Bruno 		return rval;
19491e66f787SSean Bruno 	}
19501e66f787SSean Bruno 
19519fac68fcSPAPANI SRIKANTH 	rval = pqisrc_wait_on_condition(softs, rcb, PQISRC_TMF_TIMEOUT);
19521e66f787SSean Bruno 	if (rval != PQI_STATUS_SUCCESS) {
19531e66f787SSean Bruno 		DBG_ERR("Task Management tmf_type : %d timeout\n", tmf_type);
19549fac68fcSPAPANI SRIKANTH 		rcb->status = rval;
19551e66f787SSean Bruno 	}
19561e66f787SSean Bruno 
1957*7ea28254SJohn Hall 	if (rcb->status != PQI_STATUS_SUCCESS) {
19581e66f787SSean Bruno 		DBG_ERR_BTL(devp, "Task Management failed tmf_type:%d "
19591e66f787SSean Bruno 				"stat:0x%x\n", tmf_type, rcb->status);
19601e66f787SSean Bruno 		rval = PQI_STATUS_FAILURE;
19611e66f787SSean Bruno 	}
19621e66f787SSean Bruno 
1963*7ea28254SJohn Hall 	DBG_FUNC("OUT\n");
19641e66f787SSean Bruno 	return rval;
19651e66f787SSean Bruno }
19669fac68fcSPAPANI SRIKANTH 
19679fac68fcSPAPANI SRIKANTH /* Function used to submit a Raid TMF to the adapter */
19689fac68fcSPAPANI SRIKANTH static int
19699fac68fcSPAPANI SRIKANTH pqisrc_send_raid_tmf(pqisrc_softstate_t *softs, pqi_scsi_dev_t *devp,
19709fac68fcSPAPANI SRIKANTH                     rcb_t *rcb, rcb_t *rcb_to_manage, int tmf_type)
19719fac68fcSPAPANI SRIKANTH {
19729fac68fcSPAPANI SRIKANTH 	int rval = PQI_STATUS_SUCCESS;
19739fac68fcSPAPANI SRIKANTH 	pqi_raid_tmf_req_t tmf_req;
19749fac68fcSPAPANI SRIKANTH 	ib_queue_t *op_ib_q = NULL;
19759fac68fcSPAPANI SRIKANTH 
19769fac68fcSPAPANI SRIKANTH 	memset(&tmf_req, 0, sizeof(pqi_raid_tmf_req_t));
19779fac68fcSPAPANI SRIKANTH 
1978*7ea28254SJohn Hall 	DBG_FUNC("IN\n");
19799fac68fcSPAPANI SRIKANTH 
19809fac68fcSPAPANI SRIKANTH 	tmf_req.header.iu_type = PQI_REQUEST_IU_RAID_TASK_MANAGEMENT;
19819fac68fcSPAPANI SRIKANTH 	tmf_req.header.iu_length = sizeof(tmf_req) - sizeof(iu_header_t);
19829fac68fcSPAPANI SRIKANTH 	tmf_req.req_id = rcb->tag;
19839fac68fcSPAPANI SRIKANTH 
19849fac68fcSPAPANI SRIKANTH 	memcpy(tmf_req.lun, devp->scsi3addr, sizeof(tmf_req.lun));
1985*7ea28254SJohn Hall 	tmf_req.ml_device_lun_number = (uint8_t)rcb->cm_ccb->ccb_h.target_lun;
1986*7ea28254SJohn Hall 
19879fac68fcSPAPANI SRIKANTH 	tmf_req.tmf = tmf_type;
19889fac68fcSPAPANI SRIKANTH 	tmf_req.resp_qid = OS_GET_TMF_RESP_QID(softs, rcb);
19899fac68fcSPAPANI SRIKANTH 
19909fac68fcSPAPANI SRIKANTH 	/* Decide the queue where the tmf request should be submitted */
19919fac68fcSPAPANI SRIKANTH 	if (tmf_type == SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK) {
19929fac68fcSPAPANI SRIKANTH 		tmf_req.obq_id_to_manage = rcb_to_manage->resp_qid;
19939fac68fcSPAPANI SRIKANTH 		tmf_req.req_id_to_manage = rcb_to_manage->tag;
19949fac68fcSPAPANI SRIKANTH 	}
19959fac68fcSPAPANI SRIKANTH 
19969fac68fcSPAPANI SRIKANTH 	if (softs->timeout_in_tmf &&
19979fac68fcSPAPANI SRIKANTH 			tmf_type == SOP_TASK_MANAGEMENT_LUN_RESET) {
1998*7ea28254SJohn Hall 		/* OS_TMF_TIMEOUT_SEC - 1 to accommodate driver processing */
19999fac68fcSPAPANI SRIKANTH 		tmf_req.timeout_in_sec = OS_TMF_TIMEOUT_SEC - 1;
20009fac68fcSPAPANI SRIKANTH 		/* if OS tmf timeout is 0, set minimum value for timeout */
20019fac68fcSPAPANI SRIKANTH 		if (!tmf_req.timeout_in_sec)
20029fac68fcSPAPANI SRIKANTH 			tmf_req.timeout_in_sec = 1;
20039fac68fcSPAPANI SRIKANTH 	}
20049fac68fcSPAPANI SRIKANTH 
20059fac68fcSPAPANI SRIKANTH 	op_ib_q = &softs->op_raid_ib_q[0];
2006*7ea28254SJohn Hall 
2007*7ea28254SJohn Hall 	DBG_WARN("raid tmf: iu_type=0x%x req_id_to_manage=%d\n",
2008*7ea28254SJohn Hall 		tmf_req.header.iu_type, tmf_req.req_id_to_manage);
2009*7ea28254SJohn Hall 
20104f77349dSWarner Losh 	rcb->path = RAID_PATH;
20119fac68fcSPAPANI SRIKANTH 	rcb->req_pending = true;
20129fac68fcSPAPANI SRIKANTH 	/* Timed-out TMF response goes here */
20139fac68fcSPAPANI SRIKANTH 	rcb->error_cmp_callback = pqisrc_process_raid_response_error;
20149fac68fcSPAPANI SRIKANTH 
20159fac68fcSPAPANI SRIKANTH 	rval = pqisrc_submit_cmnd(softs, op_ib_q, &tmf_req);
20169fac68fcSPAPANI SRIKANTH 	if (rval != PQI_STATUS_SUCCESS) {
20179fac68fcSPAPANI SRIKANTH 		DBG_ERR("Unable to submit command rval=%d\n", rval);
20189fac68fcSPAPANI SRIKANTH 		return rval;
20199fac68fcSPAPANI SRIKANTH 	}
20209fac68fcSPAPANI SRIKANTH 
20219fac68fcSPAPANI SRIKANTH 	rval = pqisrc_wait_on_condition(softs, rcb, PQISRC_TMF_TIMEOUT);
20229fac68fcSPAPANI SRIKANTH 	if (rval != PQI_STATUS_SUCCESS) {
20239fac68fcSPAPANI SRIKANTH 		DBG_ERR("Task Management tmf_type : %d timeout\n", tmf_type);
20249fac68fcSPAPANI SRIKANTH 		rcb->status = rval;
20259fac68fcSPAPANI SRIKANTH 	}
20269fac68fcSPAPANI SRIKANTH 
2027*7ea28254SJohn Hall 	if (rcb->status != PQI_STATUS_SUCCESS) {
20289fac68fcSPAPANI SRIKANTH 		DBG_NOTE("Task Management failed tmf_type:%d "
20299fac68fcSPAPANI SRIKANTH 				"stat:0x%x\n", tmf_type, rcb->status);
20309fac68fcSPAPANI SRIKANTH 		rval = PQI_STATUS_FAILURE;
20319fac68fcSPAPANI SRIKANTH 	}
20329fac68fcSPAPANI SRIKANTH 
2033*7ea28254SJohn Hall 	DBG_FUNC("OUT\n");
20349fac68fcSPAPANI SRIKANTH 	return rval;
20359fac68fcSPAPANI SRIKANTH }
20369fac68fcSPAPANI SRIKANTH 
2037*7ea28254SJohn Hall void
2038*7ea28254SJohn Hall dump_tmf_details(pqisrc_softstate_t *softs, rcb_t *rcb, char *msg)
2039*7ea28254SJohn Hall {
2040*7ea28254SJohn Hall 	uint32_t qid = rcb->req_q ? rcb->req_q->q_id : -1;
2041*7ea28254SJohn Hall 
2042*7ea28254SJohn Hall 	DBG_INFO("%s: pending=%d path=%d tag=0x%x=%u qid=%u timeout=%ums\n",
2043*7ea28254SJohn Hall 		msg, rcb->req_pending, rcb->path, rcb->tag,
2044*7ea28254SJohn Hall 		rcb->tag, qid, (uint32_t)rcb->host_timeout_ms);
2045*7ea28254SJohn Hall }
2046*7ea28254SJohn Hall 
20479fac68fcSPAPANI SRIKANTH int
20489fac68fcSPAPANI SRIKANTH pqisrc_send_tmf(pqisrc_softstate_t *softs, pqi_scsi_dev_t *devp,
20499fac68fcSPAPANI SRIKANTH                     rcb_t *rcb, rcb_t *rcb_to_manage, int tmf_type)
20509fac68fcSPAPANI SRIKANTH {
20519fac68fcSPAPANI SRIKANTH 	int ret = PQI_STATUS_SUCCESS;
20529fac68fcSPAPANI SRIKANTH 
2053*7ea28254SJohn Hall 	DBG_FUNC("IN\n");
2054*7ea28254SJohn Hall 
2055*7ea28254SJohn Hall 	DBG_WARN("sending TMF. io outstanding=%u\n",
2056*7ea28254SJohn Hall 		softs->max_outstanding_io - softs->taglist.num_elem);
2057*7ea28254SJohn Hall 
2058*7ea28254SJohn Hall 	rcb->is_abort_cmd_from_host = true;
20594f77349dSWarner Losh 	rcb->softs = softs;
20609fac68fcSPAPANI SRIKANTH 
2061*7ea28254SJohn Hall 	/* No target rcb for general purpose TMFs like LUN RESET */
2062*7ea28254SJohn Hall 	if (tmf_type == SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK)
2063*7ea28254SJohn Hall 	{
2064*7ea28254SJohn Hall 		rcb_to_manage->host_wants_to_abort_this = true;
2065*7ea28254SJohn Hall 		dump_tmf_details(softs, rcb_to_manage, "rcb_to_manage");
2066*7ea28254SJohn Hall 	}
2067*7ea28254SJohn Hall 
2068*7ea28254SJohn Hall 
2069*7ea28254SJohn Hall 	dump_tmf_details(softs, rcb, "rcb");
2070*7ea28254SJohn Hall 
20719fac68fcSPAPANI SRIKANTH 	if(!devp->is_physical_device) {
20729fac68fcSPAPANI SRIKANTH 		if (tmf_type == SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK) {
20739fac68fcSPAPANI SRIKANTH 			if(rcb_to_manage->path == AIO_PATH) {
20749fac68fcSPAPANI SRIKANTH 				if(devp->offload_enabled)
20759fac68fcSPAPANI SRIKANTH 					ret = pqisrc_send_aio_tmf(softs, devp, rcb, rcb_to_manage, tmf_type);
20769fac68fcSPAPANI SRIKANTH 			}
20779fac68fcSPAPANI SRIKANTH 			else {
20789fac68fcSPAPANI SRIKANTH 				DBG_INFO("TASK ABORT not supported in raid\n");
20799fac68fcSPAPANI SRIKANTH 				ret = PQI_STATUS_FAILURE;
20809fac68fcSPAPANI SRIKANTH 			}
20819fac68fcSPAPANI SRIKANTH 		}
20829fac68fcSPAPANI SRIKANTH 		else {
20839fac68fcSPAPANI SRIKANTH 			ret = pqisrc_send_raid_tmf(softs, devp, rcb, rcb_to_manage, tmf_type);
20849fac68fcSPAPANI SRIKANTH 		}
20859fac68fcSPAPANI SRIKANTH 	} else {
20869fac68fcSPAPANI SRIKANTH 		if (tmf_type == SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK)
20879fac68fcSPAPANI SRIKANTH 			ret = pqisrc_send_aio_tmf(softs, devp, rcb, rcb_to_manage, tmf_type);
20889fac68fcSPAPANI SRIKANTH 		else
20899fac68fcSPAPANI SRIKANTH 			ret = pqisrc_send_raid_tmf(softs, devp, rcb, rcb_to_manage, tmf_type);
20909fac68fcSPAPANI SRIKANTH 	}
20919fac68fcSPAPANI SRIKANTH 
20929fac68fcSPAPANI SRIKANTH 	DBG_FUNC("OUT\n");
20939fac68fcSPAPANI SRIKANTH 
20949fac68fcSPAPANI SRIKANTH 	return ret;
20959fac68fcSPAPANI SRIKANTH }
20969fac68fcSPAPANI SRIKANTH 
2097*7ea28254SJohn Hall /* return index into the global (softs) counters based on raid level */
2098*7ea28254SJohn Hall static counter_types_t
2099*7ea28254SJohn Hall get_counter_index(rcb_t *rcb)
2100*7ea28254SJohn Hall {
2101*7ea28254SJohn Hall 	if (IS_AIO_PATH(rcb->dvp))
2102*7ea28254SJohn Hall 		return HBA_COUNTER;
2103*7ea28254SJohn Hall 
2104*7ea28254SJohn Hall 	switch (rcb->dvp->raid_level) {
2105*7ea28254SJohn Hall 		case SA_RAID_0:	return RAID0_COUNTER;
2106*7ea28254SJohn Hall 		case SA_RAID_1:
2107*7ea28254SJohn Hall 		case SA_RAID_ADM:	return RAID1_COUNTER;
2108*7ea28254SJohn Hall 		case SA_RAID_5:	return RAID5_COUNTER;
2109*7ea28254SJohn Hall 		case SA_RAID_6:	return RAID6_COUNTER;
2110*7ea28254SJohn Hall 		case SA_RAID_UNKNOWN:
2111*7ea28254SJohn Hall 		default:
2112*7ea28254SJohn Hall 		{
2113*7ea28254SJohn Hall 			static boolean_t asserted = false;
2114*7ea28254SJohn Hall 			if (!asserted)
2115*7ea28254SJohn Hall 			{
2116*7ea28254SJohn Hall 				asserted = true;
2117*7ea28254SJohn Hall 				ASSERT(rcb->path == RAID_PATH);
2118*7ea28254SJohn Hall 				ASSERT(0);
2119*7ea28254SJohn Hall 			}
2120*7ea28254SJohn Hall 			return UNKNOWN_COUNTER;
2121*7ea28254SJohn Hall 		}
2122*7ea28254SJohn Hall 	}
2123*7ea28254SJohn Hall }
2124*7ea28254SJohn Hall 
2125*7ea28254SJohn Hall /* return the counter type as ASCII-string */
2126*7ea28254SJohn Hall static char *
2127*7ea28254SJohn Hall counter_type_to_raid_ascii(counter_types_t type)
2128*7ea28254SJohn Hall {
2129*7ea28254SJohn Hall 	switch (type)
2130*7ea28254SJohn Hall 	{
2131*7ea28254SJohn Hall 		case UNKNOWN_COUNTER:	return "Unknown";
2132*7ea28254SJohn Hall 		case HBA_COUNTER:	return "HbaPath";
2133*7ea28254SJohn Hall 		case RAID0_COUNTER:	return "Raid0";
2134*7ea28254SJohn Hall 		case RAID1_COUNTER:	return "Raid1";
2135*7ea28254SJohn Hall 		case RAID5_COUNTER:	return "Raid5";
2136*7ea28254SJohn Hall 		case RAID6_COUNTER:	return "Raid6";
2137*7ea28254SJohn Hall 		default:		return "Unsupported";
2138*7ea28254SJohn Hall 	}
2139*7ea28254SJohn Hall }
2140*7ea28254SJohn Hall 
21419fac68fcSPAPANI SRIKANTH /* return the path as an ASCII string */
21429fac68fcSPAPANI SRIKANTH char *
21439fac68fcSPAPANI SRIKANTH io_path_to_ascii(IO_PATH_T path)
21449fac68fcSPAPANI SRIKANTH {
21459fac68fcSPAPANI SRIKANTH 	switch (path)
21469fac68fcSPAPANI SRIKANTH 	{
21479fac68fcSPAPANI SRIKANTH 		case AIO_PATH:	return "Aio";
21489fac68fcSPAPANI SRIKANTH 		case RAID_PATH:	return "Raid";
21499fac68fcSPAPANI SRIKANTH 		default:	return "Unknown";
21509fac68fcSPAPANI SRIKANTH 	}
21519fac68fcSPAPANI SRIKANTH }
2152*7ea28254SJohn Hall 
2153*7ea28254SJohn Hall /* return the io type as an ASCII string */
2154*7ea28254SJohn Hall static char *
2155*7ea28254SJohn Hall io_type_to_ascii(io_type_t io_type)
2156*7ea28254SJohn Hall {
2157*7ea28254SJohn Hall 	switch (io_type)
2158*7ea28254SJohn Hall 	{
2159*7ea28254SJohn Hall 		case UNKNOWN_IO_TYPE:	return "Unknown";
2160*7ea28254SJohn Hall 		case READ_IO_TYPE:	return "Read";
2161*7ea28254SJohn Hall 		case WRITE_IO_TYPE:	return "Write";
2162*7ea28254SJohn Hall 		case NON_RW_IO_TYPE:	return "NonRW";
2163*7ea28254SJohn Hall 		default:		return "Unsupported";
2164*7ea28254SJohn Hall 	}
2165*7ea28254SJohn Hall }
2166*7ea28254SJohn Hall 
2167*7ea28254SJohn Hall 
2168*7ea28254SJohn Hall /* return the io type based on cdb */
2169*7ea28254SJohn Hall io_type_t
2170*7ea28254SJohn Hall get_io_type_from_cdb(uint8_t *cdb)
2171*7ea28254SJohn Hall {
2172*7ea28254SJohn Hall 	if (cdb == NULL)
2173*7ea28254SJohn Hall 		return UNKNOWN_IO_TYPE;
2174*7ea28254SJohn Hall 
2175*7ea28254SJohn Hall 	else if (pqisrc_cdb_is_read(cdb))
2176*7ea28254SJohn Hall 		return READ_IO_TYPE;
2177*7ea28254SJohn Hall 
2178*7ea28254SJohn Hall 	else if (pqisrc_cdb_is_write(cdb))
2179*7ea28254SJohn Hall 		return WRITE_IO_TYPE;
2180*7ea28254SJohn Hall 
2181*7ea28254SJohn Hall 	return NON_RW_IO_TYPE;
2182*7ea28254SJohn Hall }
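
/*
 * Example (assuming pqisrc_cdb_is_read()/pqisrc_cdb_is_write() recognize the
 * standard SCSI READ/WRITE opcodes): a READ(10) CDB (opcode 0x28) maps to
 * READ_IO_TYPE, a WRITE(16) CDB (opcode 0x8A) maps to WRITE_IO_TYPE, and a
 * TEST UNIT READY CDB (opcode 0x00) falls through to NON_RW_IO_TYPE.
 */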
2183*7ea28254SJohn Hall 
2184*7ea28254SJohn Hall /* increment the matching counter based on path and read/write; returns the updated count */
2185*7ea28254SJohn Hall OS_ATOMIC64_T
2186*7ea28254SJohn Hall increment_this_counter(io_counters_t *pcounter, IO_PATH_T path, io_type_t io_type)
2187*7ea28254SJohn Hall {
2188*7ea28254SJohn Hall 	OS_ATOMIC64_T ret_val;
2189*7ea28254SJohn Hall 
2190*7ea28254SJohn Hall 	if (path == AIO_PATH)
2191*7ea28254SJohn Hall 	{
2192*7ea28254SJohn Hall 		if (io_type == READ_IO_TYPE)
2193*7ea28254SJohn Hall 			ret_val = OS_ATOMIC64_INC(&pcounter->aio_read_cnt);
2194*7ea28254SJohn Hall 		else if (io_type == WRITE_IO_TYPE)
2195*7ea28254SJohn Hall 			ret_val = OS_ATOMIC64_INC(&pcounter->aio_write_cnt);
2196*7ea28254SJohn Hall 		else
2197*7ea28254SJohn Hall 			ret_val = OS_ATOMIC64_INC(&pcounter->aio_non_read_write);
2198*7ea28254SJohn Hall 	}
2199*7ea28254SJohn Hall 	else
2200*7ea28254SJohn Hall 	{
2201*7ea28254SJohn Hall 		if (io_type == READ_IO_TYPE)
2202*7ea28254SJohn Hall 			ret_val = OS_ATOMIC64_INC(&pcounter->raid_read_cnt);
2203*7ea28254SJohn Hall 		else if (io_type == WRITE_IO_TYPE)
2204*7ea28254SJohn Hall 			ret_val = OS_ATOMIC64_INC(&pcounter->raid_write_cnt);
2205*7ea28254SJohn Hall 		else
2206*7ea28254SJohn Hall 			ret_val = OS_ATOMIC64_INC(&pcounter->raid_non_read_write);
2207*7ea28254SJohn Hall 	}
2208*7ea28254SJohn Hall 
2209*7ea28254SJohn Hall 	return ret_val;
2210*7ea28254SJohn Hall }
2211*7ea28254SJohn Hall 
2212*7ea28254SJohn Hall /* increment appropriate counter(s) anytime we post a new request */
2213*7ea28254SJohn Hall static void
2214*7ea28254SJohn Hall pqisrc_increment_io_counters(pqisrc_softstate_t *softs, rcb_t *rcb)
2215*7ea28254SJohn Hall {
2216*7ea28254SJohn Hall 	io_type_t io_type = get_io_type_from_cdb(rcb->cdbp);
2217*7ea28254SJohn Hall 	counter_types_t type_index = get_counter_index(rcb);
2218*7ea28254SJohn Hall 	io_counters_t *pcounter = &softs->counters[type_index];
2219*7ea28254SJohn Hall 	OS_ATOMIC64_T ret_val;
2220*7ea28254SJohn Hall 
2221*7ea28254SJohn Hall 	ret_val = increment_this_counter(pcounter, rcb->path, io_type);
2222*7ea28254SJohn Hall 
2223*7ea28254SJohn Hall #if 1 /* leave this enabled while we gain confidence for each io path */
2224*7ea28254SJohn Hall 	if (ret_val == 1)
2225*7ea28254SJohn Hall 	{
2226*7ea28254SJohn Hall 		char *raid_type = counter_type_to_raid_ascii(type_index);
2227*7ea28254SJohn Hall 		char *path = io_path_to_ascii(rcb->path);
2228*7ea28254SJohn Hall 		char *io_ascii = io_type_to_ascii(io_type);
2229*7ea28254SJohn Hall 
2230*7ea28254SJohn Hall 		DBG_INFO("Got first path/type hit. "
2231*7ea28254SJohn Hall 			"Path=%s RaidType=%s IoType=%s\n",
2232*7ea28254SJohn Hall 			path, raid_type, io_ascii);
2233*7ea28254SJohn Hall 	}
2234*7ea28254SJohn Hall #endif
2235*7ea28254SJohn Hall 
2236*7ea28254SJohn Hall 	/* @todo future: may want to make a per-dev counter */
2237*7ea28254SJohn Hall }
2238*7ea28254SJohn Hall 
2239*7ea28254SJohn Hall /* public routine to print a particular counter with a header message */
2240*7ea28254SJohn Hall void
2241*7ea28254SJohn Hall print_this_counter(pqisrc_softstate_t *softs, io_counters_t *pcounter, char *msg)
2242*7ea28254SJohn Hall {
2243*7ea28254SJohn Hall 	io_counters_t counter;
2244*7ea28254SJohn Hall 	uint32_t percent_reads;
2245*7ea28254SJohn Hall 	uint32_t percent_aio;
2246*7ea28254SJohn Hall 
2247*7ea28254SJohn Hall 	if (!softs->log_io_counters)
2248*7ea28254SJohn Hall 		return;
2249*7ea28254SJohn Hall 
2250*7ea28254SJohn Hall 	/* Use a cached copy so percentages are based on the data that is printed */
2251*7ea28254SJohn Hall 	memcpy(&counter, pcounter, sizeof(counter));
2252*7ea28254SJohn Hall 
2253*7ea28254SJohn Hall 	DBG_NOTE("Counter: %s (ptr=%p)\n", msg, pcounter);
2254*7ea28254SJohn Hall 
2255*7ea28254SJohn Hall 	percent_reads = CALC_PERCENT_VS(counter.aio_read_cnt + counter.raid_read_cnt,
2256*7ea28254SJohn Hall 											counter.aio_write_cnt + counter.raid_write_cnt);
2257*7ea28254SJohn Hall 
2258*7ea28254SJohn Hall 	percent_aio = CALC_PERCENT_VS(counter.aio_read_cnt + counter.aio_write_cnt,
2259*7ea28254SJohn Hall 											counter.raid_read_cnt + counter.raid_write_cnt);
2260*7ea28254SJohn Hall 
2261*7ea28254SJohn Hall 	DBG_NOTE("   R/W Percentages: Reads=%3u%% AIO=%3u%%\n", percent_reads, percent_aio);
2262*7ea28254SJohn Hall 
2263*7ea28254SJohn Hall 	/* Print the Read counts */
2264*7ea28254SJohn Hall 	percent_aio = CALC_PERCENT_VS(counter.aio_read_cnt, counter.raid_read_cnt);
2265*7ea28254SJohn Hall 	DBG_NOTE("   Reads : AIO=%8u(%3u%%) RAID=%8u\n",
2266*7ea28254SJohn Hall 		(uint32_t)counter.aio_read_cnt, percent_aio, (uint32_t)counter.raid_read_cnt);
2267*7ea28254SJohn Hall 
2268*7ea28254SJohn Hall 	/* Print the Write counts */
2269*7ea28254SJohn Hall 	percent_aio = CALC_PERCENT_VS(counter.aio_write_cnt, counter.raid_write_cnt);
2270*7ea28254SJohn Hall 	DBG_NOTE("   Writes: AIO=%8u(%3u%%) RAID=%8u\n",
2271*7ea28254SJohn Hall 		(uint32_t)counter.aio_write_cnt, percent_aio, (uint32_t)counter.raid_write_cnt);
2272*7ea28254SJohn Hall 
2273*7ea28254SJohn Hall 	/* Print the Non-Rw counts */
2274*7ea28254SJohn Hall 	percent_aio = CALC_PERCENT_VS(counter.aio_non_read_write, counter.raid_non_read_write);
2275*7ea28254SJohn Hall 	DBG_NOTE("   Non-RW: AIO=%8u(%3u%%) RAID=%8u\n",
2276*7ea28254SJohn Hall 		(uint32_t)counter.aio_non_read_write, percent_aio, (uint32_t)counter.raid_non_read_write);
2277*7ea28254SJohn Hall }
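
/*
 * Worked example (assuming CALC_PERCENT_VS(this, vs) evaluates to
 * this * 100 / (this + vs)): with aio_read_cnt=300, raid_read_cnt=100,
 * aio_write_cnt=50 and raid_write_cnt=50, the summary line reports
 * Reads=80% (400 of 500 IOs) and AIO=70% (350 of 500), and the
 * "Reads" line reports AIO=     300( 75%) RAID=     100.
 */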
2278*7ea28254SJohn Hall 
2279*7ea28254SJohn Hall /* return true if buffer is all zeroes */
2280*7ea28254SJohn Hall boolean_t
2281*7ea28254SJohn Hall is_buffer_zero(void *buffer, uint32_t size)
2282*7ea28254SJohn Hall {
2283*7ea28254SJohn Hall 	char *buf = buffer;
2284*7ea28254SJohn Hall 	uint32_t ii;
2285*7ea28254SJohn Hall 
2286*7ea28254SJohn Hall 	if (buffer == NULL || size == 0)
2287*7ea28254SJohn Hall 		return false;
2288*7ea28254SJohn Hall 
2289*7ea28254SJohn Hall 	for (ii = 0; ii < size; ii++)
2290*7ea28254SJohn Hall 	{
2291*7ea28254SJohn Hall 		if (buf[ii] != 0x00)
2292*7ea28254SJohn Hall 			return false;
2293*7ea28254SJohn Hall 	}
2294*7ea28254SJohn Hall 	return true;
2295*7ea28254SJohn Hall }
2296*7ea28254SJohn Hall 
2297*7ea28254SJohn Hall /* public routine to print all global counter types */
2298*7ea28254SJohn Hall void
2299*7ea28254SJohn Hall print_all_counters(pqisrc_softstate_t *softs, uint32_t flags)
2300*7ea28254SJohn Hall {
2301*7ea28254SJohn Hall 	int ii;
2302*7ea28254SJohn Hall 	io_counters_t *pcounter;
2303*7ea28254SJohn Hall 	char *raid_type;
2304*7ea28254SJohn Hall 
2305*7ea28254SJohn Hall 	for (ii = 0; ii < MAX_IO_COUNTER; ii++)
2306*7ea28254SJohn Hall 	{
2307*7ea28254SJohn Hall 		pcounter = &softs->counters[ii];
2308*7ea28254SJohn Hall 		raid_type = counter_type_to_raid_ascii(ii);
2309*7ea28254SJohn Hall 
2310*7ea28254SJohn Hall 		if ((flags & COUNTER_FLAG_ONLY_NON_ZERO) &&
2311*7ea28254SJohn Hall 			is_buffer_zero(pcounter, sizeof(*pcounter)))
2312*7ea28254SJohn Hall 		{
2313*7ea28254SJohn Hall 			continue;
2314*7ea28254SJohn Hall 		}
2315*7ea28254SJohn Hall 
2316*7ea28254SJohn Hall 		print_this_counter(softs, pcounter, raid_type);
2317*7ea28254SJohn Hall 	}
2318*7ea28254SJohn Hall 
2319*7ea28254SJohn Hall 	if (flags & COUNTER_FLAG_CLEAR_COUNTS)
2320*7ea28254SJohn Hall 	{
2321*7ea28254SJohn Hall 		DBG_NOTE("Clearing all counters\n");
2322*7ea28254SJohn Hall 		memset(softs->counters, 0, sizeof(softs->counters));
2323*7ea28254SJohn Hall 	}
2324*7ea28254SJohn Hall }
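
/*
 * Usage sketch (hypothetical call site; the actual hook-up, e.g. from a sysctl
 * handler or the shutdown path, lives elsewhere in the driver): dump only the
 * buckets that saw IO, then reset them for the next measurement interval:
 *
 *	print_all_counters(softs, COUNTER_FLAG_ONLY_NON_ZERO | COUNTER_FLAG_CLEAR_COUNTS);
 */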
2325