xref: /illumos-gate/usr/src/uts/common/io/mr_sas/mr_sas_tbolt.c (revision c193478586214940af708897e19c9a878b6a6223)
1 /*
2  * mr_sas_tbolt.c: source for mr_sas driver for New Generation.
3  * i.e. Thunderbolt and Invader
4  *
5  * Solaris MegaRAID device driver for SAS2.0 controllers
6  * Copyright (c) 2008-2012, LSI Logic Corporation.
7  * All rights reserved.
8  *
9  * Version:
10  * Author:
11  *		Swaminathan K S
12  *		Arun Chandrashekhar
13  *		Manju R
14  *		Rasheed
15  *		Shakeel Bukhari
16  */
17 
18 /*
19  * Copyright 2013 Nexenta Systems, Inc.  All rights reserved.
20  * Copyright 2015, 2017 Citrus IT Limited. All rights reserved.
21  * Copyright 2015 Garrett D'Amore <garrett@damore.org>
22  */
23 
24 
25 #include <sys/types.h>
26 #include <sys/file.h>
27 #include <sys/atomic.h>
28 #include <sys/scsi/scsi.h>
29 #include <sys/byteorder.h>
30 #include <sys/sdt.h>
31 #include "ld_pd_map.h"
32 #include "mr_sas.h"
33 #include "fusion.h"
34 
35 /*
36  * FMA header files
37  */
38 #include <sys/ddifm.h>
39 #include <sys/fm/protocol.h>
40 #include <sys/fm/util.h>
41 #include <sys/fm/io/ddi.h>
42 
43 
44 /* Pre-TB command size and TB command size. */
45 #define	MR_COMMAND_SIZE (64*20)	/* 1280 bytes */
46 MR_LD_RAID *MR_LdRaidGet(U32 ld, MR_FW_RAID_MAP_ALL *map);
47 U16 MR_TargetIdToLdGet(U32 ldTgtId, MR_FW_RAID_MAP_ALL *map);
48 U16 MR_GetLDTgtId(U32 ld, MR_FW_RAID_MAP_ALL *map);
49 U16 get_updated_dev_handle(PLD_LOAD_BALANCE_INFO, struct IO_REQUEST_INFO *);
50 extern ddi_dma_attr_t mrsas_generic_dma_attr;
51 extern uint32_t mrsas_tbolt_max_cap_maxxfer;
52 extern struct ddi_device_acc_attr endian_attr;
53 extern int	debug_level_g;
54 extern unsigned int	enable_fp;
55 volatile int dump_io_wait_time = 90;
56 extern volatile int  debug_timeout_g;
57 extern int	mrsas_issue_pending_cmds(struct mrsas_instance *);
58 extern int mrsas_complete_pending_cmds(struct mrsas_instance *instance);
59 extern void	push_pending_mfi_pkt(struct mrsas_instance *,
60 			struct mrsas_cmd *);
61 extern U8 MR_BuildRaidContext(struct mrsas_instance *, struct IO_REQUEST_INFO *,
62 	    MPI2_SCSI_IO_VENDOR_UNIQUE *, MR_FW_RAID_MAP_ALL *);
63 
64 /* Local static prototypes. */
65 static struct mrsas_cmd *mrsas_tbolt_build_cmd(struct mrsas_instance *,
66     struct scsi_address *, struct scsi_pkt *, uchar_t *);
67 static void mrsas_tbolt_set_pd_lba(U8 cdb[], uint8_t *cdb_len_ptr,
68     U64 start_blk, U32 num_blocks);
69 static int mrsas_tbolt_check_map_info(struct mrsas_instance *);
70 static int mrsas_tbolt_sync_map_info(struct mrsas_instance *);
71 static int mrsas_tbolt_prepare_pkt(struct scsa_cmd *);
72 static int mrsas_tbolt_ioc_init(struct mrsas_instance *, dma_obj_t *);
73 #ifdef PDSUPPORT
74 static void mrsas_tbolt_get_pd_info(struct mrsas_instance *,
75     struct mrsas_tbolt_pd_info *, int);
76 #endif /* PDSUPPORT */
77 
78 static int debug_tbolt_fw_faults_after_ocr_g = 0;
79 
80 /*
81  * destroy_mfi_mpi_frame_pool
82  */
83 void
84 destroy_mfi_mpi_frame_pool(struct mrsas_instance *instance)
85 {
86 	int	i;
87 
88 	struct mrsas_cmd	*cmd;
89 
90 	/* return all mfi frames to pool */
91 	for (i = 0; i < MRSAS_APP_RESERVED_CMDS; i++) {
92 		cmd = instance->cmd_list[i];
93 		if (cmd->frame_dma_obj_status == DMA_OBJ_ALLOCATED) {
94 			(void) mrsas_free_dma_obj(instance,
95 			    cmd->frame_dma_obj);
96 		}
97 		cmd->frame_dma_obj_status = DMA_OBJ_FREED;
98 	}
99 }
100 
101 /*
102  * destroy_mpi2_frame_pool
103  */
104 void
105 destroy_mpi2_frame_pool(struct mrsas_instance *instance)
106 {
107 
108 	if (instance->mpi2_frame_pool_dma_obj.status == DMA_OBJ_ALLOCATED) {
109 		(void) mrsas_free_dma_obj(instance,
110 		    instance->mpi2_frame_pool_dma_obj);
111 		instance->mpi2_frame_pool_dma_obj.status |= DMA_OBJ_FREED;
112 	}
113 }
114 
115 
116 /*
117  * mrsas_tbolt_free_additional_dma_buffer
118  */
119 void
120 mrsas_tbolt_free_additional_dma_buffer(struct mrsas_instance *instance)
121 {
122 	int i;
123 
124 	if (instance->mfi_internal_dma_obj.status == DMA_OBJ_ALLOCATED) {
125 		(void) mrsas_free_dma_obj(instance,
126 		    instance->mfi_internal_dma_obj);
127 		instance->mfi_internal_dma_obj.status = DMA_OBJ_FREED;
128 	}
129 	if (instance->mfi_evt_detail_obj.status == DMA_OBJ_ALLOCATED) {
130 		(void) mrsas_free_dma_obj(instance,
131 		    instance->mfi_evt_detail_obj);
132 		instance->mfi_evt_detail_obj.status = DMA_OBJ_FREED;
133 	}
134 
135 	for (i = 0; i < 2; i++) {
136 		if (instance->ld_map_obj[i].status == DMA_OBJ_ALLOCATED) {
137 			(void) mrsas_free_dma_obj(instance,
138 			    instance->ld_map_obj[i]);
139 			instance->ld_map_obj[i].status = DMA_OBJ_FREED;
140 		}
141 	}
142 }
143 
144 
145 /*
146  * free_req_desc_pool
147  */
148 void
149 free_req_rep_desc_pool(struct mrsas_instance *instance)
150 {
151 	if (instance->request_desc_dma_obj.status == DMA_OBJ_ALLOCATED) {
152 		(void) mrsas_free_dma_obj(instance,
153 		    instance->request_desc_dma_obj);
154 		instance->request_desc_dma_obj.status = DMA_OBJ_FREED;
155 	}
156 
157 	if (instance->reply_desc_dma_obj.status == DMA_OBJ_ALLOCATED) {
158 		(void) mrsas_free_dma_obj(instance,
159 		    instance->reply_desc_dma_obj);
160 		instance->reply_desc_dma_obj.status = DMA_OBJ_FREED;
161 	}
162 
163 
164 }
165 
166 
/*
 * create_mpi2_frame_pool - ThunderBolt(TB) Request Message Frame Pool.
 *
 * Allocates one contiguous DMA chunk laid out as:
 *	[ 1 message frame reserved for SMID 0 ]
 *	[ max_cmd RAID message frames ]
 *	[ max_cmd SGL chain buffers (1024 bytes each) ]
 *	[ max_cmd sense buffers (SENSE_LENGTH bytes each) ]
 * then carves it up, assigning each mrsas_cmd its message frame, chain
 * SGL buffer, sense buffer (virtual and physical addresses), and its
 * SMID (i + 1; SMID 0 is never handed out).
 *
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
int
create_mpi2_frame_pool(struct mrsas_instance *instance)
{
	int		i = 0;
	uint16_t	max_cmd;
	uint32_t	sgl_sz;
	uint32_t	raid_msg_size;
	uint32_t	total_size;
	uint32_t	offset;
	uint32_t	io_req_base_phys;
	uint8_t		*io_req_base;
	struct mrsas_cmd	*cmd;

	max_cmd = instance->max_fw_cmds;

	/* Per-command chain SGL buffer size and message frame size. */
	sgl_sz		= 1024;
	raid_msg_size	= MRSAS_THUNDERBOLT_MSG_SIZE;

	/* Allocating additional 256 bytes to accomodate SMID 0. */
	total_size = MRSAS_THUNDERBOLT_MSG_SIZE + (max_cmd * raid_msg_size) +
	    (max_cmd * sgl_sz) + (max_cmd * SENSE_LENGTH);

	con_log(CL_ANN1, (CE_NOTE, "create_mpi2_frame_pool: "
	    "max_cmd %x", max_cmd));

	con_log(CL_DLEVEL3, (CE_NOTE, "create_mpi2_frame_pool: "
	    "request message frame pool size %x", total_size));

	/*
	 * ThunderBolt(TB) We need to create a single chunk of DMA'ble memory
	 * and then split the memory to 1024 commands. Each command should be
	 * able to contain a RAID MESSAGE FRAME which will embed a MFI_FRAME
	 * within it. Further refer the "alloc_req_rep_desc" function where
	 * we allocate request/reply descriptors queues for a clue.
	 */

	/* 32-bit DMA address space; frames must be 256-byte aligned. */
	instance->mpi2_frame_pool_dma_obj.size = total_size;
	instance->mpi2_frame_pool_dma_obj.dma_attr = mrsas_generic_dma_attr;
	instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_addr_hi =
	    0xFFFFFFFFU;
	instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_count_max =
	    0xFFFFFFFFU;
	instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_sgllen = 1;
	instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_align = 256;

	if (mrsas_alloc_dma_obj(instance, &instance->mpi2_frame_pool_dma_obj,
	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
		dev_err(instance->dip, CE_WARN,
		    "could not alloc mpi2 frame pool");
		return (DDI_FAILURE);
	}

	bzero(instance->mpi2_frame_pool_dma_obj.buffer, total_size);
	instance->mpi2_frame_pool_dma_obj.status |= DMA_OBJ_ALLOCATED;

	instance->io_request_frames =
	    (uint8_t *)instance->mpi2_frame_pool_dma_obj.buffer;
	instance->io_request_frames_phy =
	    (uint32_t)
	    instance->mpi2_frame_pool_dma_obj.dma_cookie[0].dmac_address;

	con_log(CL_DLEVEL3, (CE_NOTE, "io_request_frames 0x%p",
	    (void *)instance->io_request_frames));

	con_log(CL_DLEVEL3, (CE_NOTE, "io_request_frames_phy 0x%x",
	    instance->io_request_frames_phy));

	/* Skip the first frame: it belongs to reserved SMID 0. */
	io_req_base = (uint8_t *)instance->io_request_frames +
	    MRSAS_THUNDERBOLT_MSG_SIZE;
	io_req_base_phys = instance->io_request_frames_phy +
	    MRSAS_THUNDERBOLT_MSG_SIZE;

	con_log(CL_DLEVEL3, (CE_NOTE,
	    "io req_base_phys 0x%x", io_req_base_phys));

	for (i = 0; i < max_cmd; i++) {
		cmd = instance->cmd_list[i];

		/* Message frame for this command. */
		offset = i * MRSAS_THUNDERBOLT_MSG_SIZE;

		cmd->scsi_io_request = (Mpi2RaidSCSIIORequest_t *)
		    ((uint8_t *)io_req_base + offset);
		cmd->scsi_io_request_phys_addr = io_req_base_phys + offset;

		/* Chain SGL buffer: region after all message frames. */
		cmd->sgl = (Mpi2SGEIOUnion_t *)((uint8_t *)io_req_base +
		    (max_cmd * raid_msg_size) + i * sgl_sz);

		cmd->sgl_phys_addr = (io_req_base_phys +
		    (max_cmd * raid_msg_size) + i * sgl_sz);

		/* Sense buffer: region after all chain SGL buffers. */
		cmd->sense1 = (uint8_t *)((uint8_t *)io_req_base +
		    (max_cmd * raid_msg_size) + (max_cmd * sgl_sz) +
		    (i * SENSE_LENGTH));

		cmd->sense_phys_addr1 = (io_req_base_phys +
		    (max_cmd * raid_msg_size) + (max_cmd * sgl_sz) +
		    (i * SENSE_LENGTH));

		/* SMIDs are 1-based; SMID 0 is the reserved frame above. */
		cmd->SMID = i + 1;

		con_log(CL_DLEVEL3, (CE_NOTE, "Frame Pool Addr [%x]0x%p",
		    cmd->index, (void *)cmd->scsi_io_request));

		con_log(CL_DLEVEL3, (CE_NOTE, "Frame Pool Phys Addr [%x]0x%x",
		    cmd->index, cmd->scsi_io_request_phys_addr));

		con_log(CL_DLEVEL3, (CE_NOTE, "Sense Addr [%x]0x%p",
		    cmd->index, (void *)cmd->sense1));

		con_log(CL_DLEVEL3, (CE_NOTE, "Sense Addr Phys [%x]0x%x",
		    cmd->index, cmd->sense_phys_addr1));

		con_log(CL_DLEVEL3, (CE_NOTE, "Sgl bufffers [%x]0x%p",
		    cmd->index, (void *)cmd->sgl));

		con_log(CL_DLEVEL3, (CE_NOTE, "Sgl bufffers phys [%x]0x%x",
		    cmd->index, cmd->sgl_phys_addr));
	}

	return (DDI_SUCCESS);

}
293 
294 
/*
 * mrsas_tbolt_alloc_additional_dma_buffer - alloc additional DMA
 * buffers (for AEN and RAID map handling).
 *
 * Allocates the miscellaneous DMA buffers used outside the main frame
 * pool: a two-page internal scratch buffer, the event-detail buffer,
 * and two firmware RAID map buffers (ld_map_obj[0]/[1]).  On failure,
 * everything allocated so far is released via
 * mrsas_tbolt_free_additional_dma_buffer().
 *
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
int
mrsas_tbolt_alloc_additional_dma_buffer(struct mrsas_instance *instance)
{
	uint32_t	internal_buf_size = PAGESIZE*2;
	int i;

	/* Initialize buffer status as free */
	instance->mfi_internal_dma_obj.status = DMA_OBJ_FREED;
	instance->mfi_evt_detail_obj.status = DMA_OBJ_FREED;
	instance->ld_map_obj[0].status = DMA_OBJ_FREED;
	instance->ld_map_obj[1].status = DMA_OBJ_FREED;

	/* Internal scratch buffer: 32-bit addressable, single cookie. */
	instance->mfi_internal_dma_obj.size = internal_buf_size;
	instance->mfi_internal_dma_obj.dma_attr = mrsas_generic_dma_attr;
	instance->mfi_internal_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
	instance->mfi_internal_dma_obj.dma_attr.dma_attr_count_max =
	    0xFFFFFFFFU;
	instance->mfi_internal_dma_obj.dma_attr.dma_attr_sgllen = 1;

	if (mrsas_alloc_dma_obj(instance, &instance->mfi_internal_dma_obj,
	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
		dev_err(instance->dip, CE_WARN,
		    "could not alloc reply queue");
		return (DDI_FAILURE);
	}

	bzero(instance->mfi_internal_dma_obj.buffer, internal_buf_size);

	instance->mfi_internal_dma_obj.status |= DMA_OBJ_ALLOCATED;
	instance->internal_buf =
	    (caddr_t)(((unsigned long)instance->mfi_internal_dma_obj.buffer));
	instance->internal_buf_dmac_add =
	    instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address;
	instance->internal_buf_size = internal_buf_size;

	/* allocate evt_detail */
	instance->mfi_evt_detail_obj.size = sizeof (struct mrsas_evt_detail);
	instance->mfi_evt_detail_obj.dma_attr = mrsas_generic_dma_attr;
	instance->mfi_evt_detail_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
	instance->mfi_evt_detail_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
	instance->mfi_evt_detail_obj.dma_attr.dma_attr_sgllen = 1;
	instance->mfi_evt_detail_obj.dma_attr.dma_attr_align = 8;

	if (mrsas_alloc_dma_obj(instance, &instance->mfi_evt_detail_obj,
	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
		dev_err(instance->dip, CE_WARN,
		    "mrsas_tbolt_alloc_additional_dma_buffer: "
		    "could not allocate data transfer buffer.");
		goto fail_tbolt_additional_buff;
	}

	bzero(instance->mfi_evt_detail_obj.buffer,
	    sizeof (struct mrsas_evt_detail));

	instance->mfi_evt_detail_obj.status |= DMA_OBJ_ALLOCATED;

	/* Size of the firmware RAID map: header plus per-LD span maps. */
	instance->size_map_info = sizeof (MR_FW_RAID_MAP) +
	    (sizeof (MR_LD_SPAN_MAP) * (MAX_LOGICAL_DRIVES - 1));

	for (i = 0; i < 2; i++) {
		/* allocate the data transfer buffer */
		instance->ld_map_obj[i].size = instance->size_map_info;
		instance->ld_map_obj[i].dma_attr = mrsas_generic_dma_attr;
		instance->ld_map_obj[i].dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
		instance->ld_map_obj[i].dma_attr.dma_attr_count_max =
		    0xFFFFFFFFU;
		instance->ld_map_obj[i].dma_attr.dma_attr_sgllen = 1;
		instance->ld_map_obj[i].dma_attr.dma_attr_align = 1;

		if (mrsas_alloc_dma_obj(instance, &instance->ld_map_obj[i],
		    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
			dev_err(instance->dip, CE_WARN,
			    "could not allocate data transfer buffer.");
			goto fail_tbolt_additional_buff;
		}

		instance->ld_map_obj[i].status |= DMA_OBJ_ALLOCATED;

		bzero(instance->ld_map_obj[i].buffer, instance->size_map_info);

		instance->ld_map[i] =
		    (MR_FW_RAID_MAP_ALL *)instance->ld_map_obj[i].buffer;
		instance->ld_map_phy[i] = (uint32_t)instance->
		    ld_map_obj[i].dma_cookie[0].dmac_address;

		con_log(CL_DLEVEL3, (CE_NOTE,
		    "ld_map Addr Phys 0x%x", instance->ld_map_phy[i]));

		con_log(CL_DLEVEL3, (CE_NOTE,
		    "size_map_info 0x%x", instance->size_map_info));
	}

	return (DDI_SUCCESS);

fail_tbolt_additional_buff:
	/* Unwind every buffer successfully allocated above. */
	mrsas_tbolt_free_additional_dma_buffer(instance);

	return (DDI_FAILURE);
}
398 
399 MRSAS_REQUEST_DESCRIPTOR_UNION *
400 mr_sas_get_request_descriptor(struct mrsas_instance *instance, uint16_t index)
401 {
402 	MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
403 
404 	if (index > instance->max_fw_cmds) {
405 		con_log(CL_ANN1, (CE_NOTE,
406 		    "Invalid SMID 0x%x request for descriptor", index));
407 		con_log(CL_ANN1, (CE_NOTE,
408 		    "max_fw_cmds : 0x%x", instance->max_fw_cmds));
409 		return (NULL);
410 	}
411 
412 	req_desc = (MRSAS_REQUEST_DESCRIPTOR_UNION *)
413 	    ((char *)instance->request_message_pool +
414 	    (sizeof (MRSAS_REQUEST_DESCRIPTOR_UNION) * index));
415 
416 	con_log(CL_ANN1, (CE_NOTE,
417 	    "request descriptor : 0x%08lx", (unsigned long)req_desc));
418 
419 	con_log(CL_ANN1, (CE_NOTE,
420 	    "request descriptor base phy : 0x%08lx",
421 	    (unsigned long)instance->request_message_pool_phy));
422 
423 	return ((MRSAS_REQUEST_DESCRIPTOR_UNION *)req_desc);
424 }
425 
426 
/*
 * alloc_req_rep_desc - Allocate Request and Reply Queue Descriptors.
 *
 * Sets up the reply descriptor queue (depth rounded up to a multiple
 * of 16, each 8-byte entry initialized to all ones, i.e. "unused")
 * and the request descriptor queue (one 8-byte descriptor per
 * supported command).  On request queue failure the already-allocated
 * reply queue is released.
 *
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
int
alloc_req_rep_desc(struct mrsas_instance *instance)
{
	uint32_t	request_q_sz, reply_q_sz;
	int		i, max_reply_q_sz;
	MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;

	/*
	 * ThunderBolt(TB) There's no longer producer consumer mechanism.
	 * Once we have an interrupt we are supposed to scan through the list of
	 * reply descriptors and process them accordingly. We would be needing
	 * to allocate memory for 1024 reply descriptors
	 */

	/* Allocate Reply Descriptors */
	con_log(CL_ANN1, (CE_NOTE, " reply q desc len = %x",
	    (uint_t)sizeof (MPI2_REPLY_DESCRIPTORS_UNION)));

	/* reply queue size should be multiple of 16 */
	max_reply_q_sz = ((instance->max_fw_cmds + 1 + 15)/16)*16;

	/* Each reply descriptor is 8 bytes. */
	reply_q_sz = 8 * max_reply_q_sz;

	con_log(CL_ANN1, (CE_NOTE, " reply q desc len = %x",
	    (uint_t)sizeof (MPI2_REPLY_DESCRIPTORS_UNION)));

	/* 32-bit addressable, single cookie, 16-byte aligned. */
	instance->reply_desc_dma_obj.size = reply_q_sz;
	instance->reply_desc_dma_obj.dma_attr = mrsas_generic_dma_attr;
	instance->reply_desc_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
	instance->reply_desc_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
	instance->reply_desc_dma_obj.dma_attr.dma_attr_sgllen = 1;
	instance->reply_desc_dma_obj.dma_attr.dma_attr_align = 16;

	if (mrsas_alloc_dma_obj(instance, &instance->reply_desc_dma_obj,
	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
		dev_err(instance->dip, CE_WARN, "could not alloc reply queue");
		return (DDI_FAILURE);
	}

	bzero(instance->reply_desc_dma_obj.buffer, reply_q_sz);
	instance->reply_desc_dma_obj.status |= DMA_OBJ_ALLOCATED;

	/* virtual address of  reply queue */
	instance->reply_frame_pool = (MPI2_REPLY_DESCRIPTORS_UNION *)(
	    instance->reply_desc_dma_obj.buffer);

	instance->reply_q_depth = max_reply_q_sz;

	con_log(CL_ANN1, (CE_NOTE, "[reply queue depth]0x%x",
	    instance->reply_q_depth));

	con_log(CL_ANN1, (CE_NOTE, "[reply queue virt addr]0x%p",
	    (void *)instance->reply_frame_pool));

	/* initializing reply address to 0xFFFFFFFF */
	reply_desc = instance->reply_frame_pool;

	for (i = 0; i < instance->reply_q_depth; i++) {
		reply_desc->Words = (uint64_t)~0;
		reply_desc++;
	}

	instance->reply_frame_pool_phy =
	    (uint32_t)instance->reply_desc_dma_obj.dma_cookie[0].dmac_address;

	con_log(CL_ANN1, (CE_NOTE,
	    "[reply queue phys addr]0x%x", instance->reply_frame_pool_phy));

	/* Physical address of the first byte past the reply queue. */
	instance->reply_pool_limit_phy = (instance->reply_frame_pool_phy +
	    reply_q_sz);

	con_log(CL_ANN1, (CE_NOTE, "[reply pool limit phys addr]0x%x",
	    instance->reply_pool_limit_phy));

	con_log(CL_ANN1, (CE_NOTE, " request q desc len = %x",
	    (int)sizeof (MRSAS_REQUEST_DESCRIPTOR_UNION)));

	/* Allocate Request Descriptors */
	con_log(CL_ANN1, (CE_NOTE, " request q desc len = %x",
	    (int)sizeof (MRSAS_REQUEST_DESCRIPTOR_UNION)));

	/* One 8-byte request descriptor per supported command. */
	request_q_sz = 8 *
	    (instance->max_fw_cmds);

	instance->request_desc_dma_obj.size = request_q_sz;
	instance->request_desc_dma_obj.dma_attr	= mrsas_generic_dma_attr;
	instance->request_desc_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
	instance->request_desc_dma_obj.dma_attr.dma_attr_count_max =
	    0xFFFFFFFFU;
	instance->request_desc_dma_obj.dma_attr.dma_attr_sgllen	= 1;
	instance->request_desc_dma_obj.dma_attr.dma_attr_align = 16;

	if (mrsas_alloc_dma_obj(instance, &instance->request_desc_dma_obj,
	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
		dev_err(instance->dip, CE_WARN,
		    "could not alloc request queue desc");
		goto fail_undo_reply_queue;
	}

	bzero(instance->request_desc_dma_obj.buffer, request_q_sz);
	instance->request_desc_dma_obj.status |= DMA_OBJ_ALLOCATED;

	/* virtual address of  request queue desc */
	instance->request_message_pool = (MRSAS_REQUEST_DESCRIPTOR_UNION *)
	    (instance->request_desc_dma_obj.buffer);

	instance->request_message_pool_phy =
	    (uint32_t)instance->request_desc_dma_obj.dma_cookie[0].dmac_address;

	return (DDI_SUCCESS);

fail_undo_reply_queue:
	/* Request queue failed; release the reply queue allocated above. */
	if (instance->reply_desc_dma_obj.status == DMA_OBJ_ALLOCATED) {
		(void) mrsas_free_dma_obj(instance,
		    instance->reply_desc_dma_obj);
		instance->reply_desc_dma_obj.status = DMA_OBJ_FREED;
	}

	return (DDI_FAILURE);
}
554 
555 /*
556  * mrsas_alloc_cmd_pool_tbolt
557  *
558  * TODO: merge tbolt-specific code into mrsas_alloc_cmd_pool() to have single
559  * routine
560  */
561 int
562 mrsas_alloc_cmd_pool_tbolt(struct mrsas_instance *instance)
563 {
564 	int		i;
565 	int		count;
566 	uint32_t	max_cmd;
567 	uint32_t	reserve_cmd;
568 	size_t		sz;
569 
570 	struct mrsas_cmd	*cmd;
571 
572 	max_cmd = instance->max_fw_cmds;
573 	con_log(CL_ANN1, (CE_NOTE, "mrsas_alloc_cmd_pool: "
574 	    "max_cmd %x", max_cmd));
575 
576 
577 	sz = sizeof (struct mrsas_cmd *) * max_cmd;
578 
579 	/*
580 	 * instance->cmd_list is an array of struct mrsas_cmd pointers.
581 	 * Allocate the dynamic array first and then allocate individual
582 	 * commands.
583 	 */
584 	instance->cmd_list = kmem_zalloc(sz, KM_SLEEP);
585 
586 	/* create a frame pool and assign one frame to each cmd */
587 	for (count = 0; count < max_cmd; count++) {
588 		instance->cmd_list[count] =
589 		    kmem_zalloc(sizeof (struct mrsas_cmd), KM_SLEEP);
590 	}
591 
592 	/* add all the commands to command pool */
593 
594 	INIT_LIST_HEAD(&instance->cmd_pool_list);
595 	INIT_LIST_HEAD(&instance->cmd_pend_list);
596 	INIT_LIST_HEAD(&instance->cmd_app_pool_list);
597 
598 	reserve_cmd = MRSAS_APP_RESERVED_CMDS;
599 
600 	/* cmd index 0 reservered for IOC INIT */
601 	for (i = 1; i < reserve_cmd; i++) {
602 		cmd		= instance->cmd_list[i];
603 		cmd->index	= i;
604 		mlist_add_tail(&cmd->list, &instance->cmd_app_pool_list);
605 	}
606 
607 
608 	for (i = reserve_cmd; i < max_cmd; i++) {
609 		cmd		= instance->cmd_list[i];
610 		cmd->index	= i;
611 		mlist_add_tail(&cmd->list, &instance->cmd_pool_list);
612 	}
613 
614 	return (DDI_SUCCESS);
615 
616 mrsas_undo_cmds:
617 	if (count > 0) {
618 		/* free each cmd */
619 		for (i = 0; i < count; i++) {
620 			if (instance->cmd_list[i] != NULL) {
621 				kmem_free(instance->cmd_list[i],
622 				    sizeof (struct mrsas_cmd));
623 			}
624 			instance->cmd_list[i] = NULL;
625 		}
626 	}
627 
628 mrsas_undo_cmd_list:
629 	if (instance->cmd_list != NULL)
630 		kmem_free(instance->cmd_list, sz);
631 	instance->cmd_list = NULL;
632 
633 	return (DDI_FAILURE);
634 }
635 
636 
637 /*
638  * free_space_for_mpi2
639  */
640 void
641 free_space_for_mpi2(struct mrsas_instance *instance)
642 {
643 	/* already freed */
644 	if (instance->cmd_list == NULL) {
645 		return;
646 	}
647 
648 	/* First free the additional DMA buffer */
649 	mrsas_tbolt_free_additional_dma_buffer(instance);
650 
651 	/* Free the request/reply descriptor pool */
652 	free_req_rep_desc_pool(instance);
653 
654 	/*  Free the MPI message pool */
655 	destroy_mpi2_frame_pool(instance);
656 
657 	/* Free the MFI frame pool */
658 	destroy_mfi_frame_pool(instance);
659 
660 	/* Free all the commands in the cmd_list */
661 	/* Free the cmd_list buffer itself */
662 	mrsas_free_cmd_pool(instance);
663 }
664 
665 
/*
 * alloc_space_for_mpi2 - ThunderBolt(TB) memory allocations for
 * commands/messages/frames.
 *
 * Allocation order: command pool, request/reply descriptor queues,
 * MFI frame pool, MPI2 message frame pool, additional DMA buffers.
 * On failure everything allocated so far is unwound in reverse order
 * via the goto chain at the bottom.
 *
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
int
alloc_space_for_mpi2(struct mrsas_instance *instance)
{
	/* Allocate command pool (memory for cmd_list & individual commands) */
	if (mrsas_alloc_cmd_pool_tbolt(instance)) {
		dev_err(instance->dip, CE_WARN, "Error creating cmd pool");
		return (DDI_FAILURE);
	}

	/* Initialize single reply size and Message size */
	instance->reply_size = MRSAS_THUNDERBOLT_REPLY_SIZE;
	instance->raid_io_msg_size = MRSAS_THUNDERBOLT_MSG_SIZE;

	/* SGEs that fit in the main frame vs. in a chain buffer. */
	instance->max_sge_in_main_msg = (MRSAS_THUNDERBOLT_MSG_SIZE -
	    (sizeof (MPI2_RAID_SCSI_IO_REQUEST) -
	    sizeof (MPI2_SGE_IO_UNION)))/ sizeof (MPI2_SGE_IO_UNION);
	instance->max_sge_in_chain = (MR_COMMAND_SIZE -
	    MRSAS_THUNDERBOLT_MSG_SIZE) / sizeof (MPI2_SGE_IO_UNION);

	/* Reduce SG count by 1 to take care of group cmds feature in FW */
	instance->max_num_sge = (instance->max_sge_in_main_msg +
	    instance->max_sge_in_chain - 2);
	/* Chain offsets are expressed in 16-byte units. */
	instance->chain_offset_mpt_msg =
	    offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 16;
	instance->chain_offset_io_req = (MRSAS_THUNDERBOLT_MSG_SIZE -
	    sizeof (MPI2_SGE_IO_UNION)) / 16;
	instance->reply_read_index = 0;

	/* Allocate Request and Reply descriptors Array */
	/* Make sure the buffer is aligned to 8 for req/rep  descriptor Pool */
	if (alloc_req_rep_desc(instance)) {
		dev_err(instance->dip, CE_WARN,
		    "Error, allocating memory for descripter-pool");
		goto mpi2_undo_cmd_pool;
	}
	con_log(CL_ANN1, (CE_NOTE, "[request message pool phys addr]0x%x",
	    instance->request_message_pool_phy));

	/* Allocate MFI Frame pool - for MPI-MFI passthru commands */
	if (create_mfi_frame_pool(instance)) {
		dev_err(instance->dip, CE_WARN,
		    "Error, allocating memory for MFI frame-pool");
		goto mpi2_undo_descripter_pool;
	}

	/* Allocate MPI2 Message pool */
	/*
	 * Make sure the buffer is alligned to 256 for raid message packet
	 * create a io request pool and assign one frame to each cmd
	 */
	if (create_mpi2_frame_pool(instance)) {
		dev_err(instance->dip, CE_WARN,
		    "Error, allocating memory for MPI2 Message-pool");
		goto mpi2_undo_mfi_frame_pool;
	}

#ifdef DEBUG
	con_log(CL_ANN1, (CE_CONT, "[max_sge_in_main_msg]0x%x",
	    instance->max_sge_in_main_msg));
	con_log(CL_ANN1, (CE_CONT, "[max_sge_in_chain]0x%x",
	    instance->max_sge_in_chain));
	con_log(CL_ANN1, (CE_CONT,
	    "[max_sge]0x%x", instance->max_num_sge));
	con_log(CL_ANN1, (CE_CONT, "[chain_offset_mpt_msg]0x%x",
	    instance->chain_offset_mpt_msg));
	con_log(CL_ANN1, (CE_CONT, "[chain_offset_io_req]0x%x",
	    instance->chain_offset_io_req));
#endif

	/* Allocate additional dma buffer */
	if (mrsas_tbolt_alloc_additional_dma_buffer(instance)) {
		dev_err(instance->dip, CE_WARN,
		    "Error, allocating tbolt additional DMA buffer");
		goto mpi2_undo_message_pool;
	}

	return (DDI_SUCCESS);

	/* Error unwind: free in the reverse order of allocation. */
mpi2_undo_message_pool:
	destroy_mpi2_frame_pool(instance);

mpi2_undo_mfi_frame_pool:
	destroy_mfi_frame_pool(instance);

mpi2_undo_descripter_pool:
	free_req_rep_desc_pool(instance);

mpi2_undo_cmd_pool:
	mrsas_free_cmd_pool(instance);

	return (DDI_FAILURE);
}
766 
767 
768 /*
769  * mrsas_init_adapter_tbolt - Initialize fusion interface adapter.
770  */
771 int
772 mrsas_init_adapter_tbolt(struct mrsas_instance *instance)
773 {
774 
775 	/*
776 	 * Reduce the max supported cmds by 1. This is to ensure that the
777 	 * reply_q_sz (1 more than the max cmd that driver may send)
778 	 * does not exceed max cmds that the FW can support
779 	 */
780 
781 	if (instance->max_fw_cmds > 1008) {
782 		instance->max_fw_cmds = 1008;
783 		instance->max_fw_cmds = instance->max_fw_cmds-1;
784 	}
785 
786 	con_log(CL_ANN, (CE_NOTE, "mrsas_init_adapter_tbolt: "
787 	    "instance->max_fw_cmds 0x%X.", instance->max_fw_cmds));
788 
789 
790 	/* create a pool of commands */
791 	if (alloc_space_for_mpi2(instance) != DDI_SUCCESS) {
792 		dev_err(instance->dip, CE_WARN,
793 		    "alloc_space_for_mpi2() failed.");
794 
795 		return (DDI_FAILURE);
796 	}
797 
798 	/* Send ioc init message */
799 	/* NOTE: the issue_init call does FMA checking already. */
800 	if (mrsas_issue_init_mpi2(instance) != DDI_SUCCESS) {
801 		dev_err(instance->dip, CE_WARN,
802 		    "mrsas_issue_init_mpi2() failed.");
803 
804 		goto fail_init_fusion;
805 	}
806 
807 	instance->unroll.alloc_space_mpi2 = 1;
808 
809 	con_log(CL_ANN, (CE_NOTE,
810 	    "mrsas_init_adapter_tbolt: SUCCESSFUL"));
811 
812 	return (DDI_SUCCESS);
813 
814 fail_init_fusion:
815 	free_space_for_mpi2(instance);
816 
817 	return (DDI_FAILURE);
818 }
819 
820 
821 
822 /*
823  * init_mpi2
824  */
825 int
826 mrsas_issue_init_mpi2(struct mrsas_instance *instance)
827 {
828 	dma_obj_t init2_dma_obj;
829 	int ret_val = DDI_SUCCESS;
830 
831 	/* allocate DMA buffer for IOC INIT message */
832 	init2_dma_obj.size = sizeof (Mpi2IOCInitRequest_t);
833 	init2_dma_obj.dma_attr = mrsas_generic_dma_attr;
834 	init2_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
835 	init2_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
836 	init2_dma_obj.dma_attr.dma_attr_sgllen = 1;
837 	init2_dma_obj.dma_attr.dma_attr_align = 256;
838 
839 	if (mrsas_alloc_dma_obj(instance, &init2_dma_obj,
840 	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
841 		dev_err(instance->dip, CE_WARN, "mr_sas_issue_init_mpi2 "
842 		    "could not allocate data transfer buffer.");
843 		return (DDI_FAILURE);
844 	}
845 	(void) memset(init2_dma_obj.buffer, 2, sizeof (Mpi2IOCInitRequest_t));
846 
847 	con_log(CL_ANN1, (CE_NOTE,
848 	    "mrsas_issue_init_mpi2 _phys adr: %x",
849 	    init2_dma_obj.dma_cookie[0].dmac_address));
850 
851 
852 	/* Initialize and send ioc init message */
853 	ret_val = mrsas_tbolt_ioc_init(instance, &init2_dma_obj);
854 	if (ret_val == DDI_FAILURE) {
855 		con_log(CL_ANN1, (CE_WARN,
856 		    "mrsas_issue_init_mpi2: Failed"));
857 		goto fail_init_mpi2;
858 	}
859 
860 	/* free IOC init DMA buffer */
861 	if (mrsas_free_dma_obj(instance, init2_dma_obj)
862 	    != DDI_SUCCESS) {
863 		con_log(CL_ANN1, (CE_WARN,
864 		    "mrsas_issue_init_mpi2: Free Failed"));
865 		return (DDI_FAILURE);
866 	}
867 
868 	/* Get/Check and sync ld_map info */
869 	instance->map_id = 0;
870 	if (mrsas_tbolt_check_map_info(instance) == DDI_SUCCESS)
871 		(void) mrsas_tbolt_sync_map_info(instance);
872 
873 
874 	/* No mrsas_cmd to send, so send NULL. */
875 	if (mrsas_common_check(instance, NULL) != DDI_SUCCESS)
876 		goto fail_init_mpi2;
877 
878 	con_log(CL_ANN, (CE_NOTE,
879 	    "mrsas_issue_init_mpi2: SUCCESSFUL"));
880 
881 	return (DDI_SUCCESS);
882 
883 fail_init_mpi2:
884 	(void) mrsas_free_dma_obj(instance, init2_dma_obj);
885 
886 	return (DDI_FAILURE);
887 }
888 
889 static int
890 mrsas_tbolt_ioc_init(struct mrsas_instance *instance, dma_obj_t *mpi2_dma_obj)
891 {
892 	int				numbytes;
893 	uint16_t			flags;
894 	struct mrsas_init_frame2	*mfiFrameInit2;
895 	struct mrsas_header		*frame_hdr;
896 	Mpi2IOCInitRequest_t		*init;
897 	struct mrsas_cmd		*cmd = NULL;
898 	struct mrsas_drv_ver		drv_ver_info;
899 	MRSAS_REQUEST_DESCRIPTOR_UNION	req_desc;
900 	uint32_t			timeout;
901 
902 	con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
903 
904 
905 #ifdef DEBUG
906 	con_log(CL_ANN1, (CE_CONT, " mfiFrameInit2 len = %x\n",
907 	    (int)sizeof (*mfiFrameInit2)));
908 	con_log(CL_ANN1, (CE_CONT, " MPI len = %x\n", (int)sizeof (*init)));
909 	con_log(CL_ANN1, (CE_CONT, " mfiFrameInit2 len = %x\n",
910 	    (int)sizeof (struct mrsas_init_frame2)));
911 	con_log(CL_ANN1, (CE_CONT, " MPI len = %x\n",
912 	    (int)sizeof (Mpi2IOCInitRequest_t)));
913 #endif
914 
915 	init = (Mpi2IOCInitRequest_t *)mpi2_dma_obj->buffer;
916 	numbytes = sizeof (*init);
917 	bzero(init, numbytes);
918 
919 	ddi_put8(mpi2_dma_obj->acc_handle, &init->Function,
920 	    MPI2_FUNCTION_IOC_INIT);
921 
922 	ddi_put8(mpi2_dma_obj->acc_handle, &init->WhoInit,
923 	    MPI2_WHOINIT_HOST_DRIVER);
924 
925 	/* set MsgVersion and HeaderVersion host driver was built with */
926 	ddi_put16(mpi2_dma_obj->acc_handle, &init->MsgVersion,
927 	    MPI2_VERSION);
928 
929 	ddi_put16(mpi2_dma_obj->acc_handle, &init->HeaderVersion,
930 	    MPI2_HEADER_VERSION);
931 
932 	ddi_put16(mpi2_dma_obj->acc_handle, &init->SystemRequestFrameSize,
933 	    instance->raid_io_msg_size / 4);
934 
935 	ddi_put16(mpi2_dma_obj->acc_handle, &init->ReplyFreeQueueDepth,
936 	    0);
937 
938 	ddi_put16(mpi2_dma_obj->acc_handle,
939 	    &init->ReplyDescriptorPostQueueDepth,
940 	    instance->reply_q_depth);
941 	/*
942 	 * These addresses are set using the DMA cookie addresses from when the
943 	 * memory was allocated.  Sense buffer hi address should be 0.
944 	 * ddi_put32(accessp, &init->SenseBufferAddressHigh, 0);
945 	 */
946 
947 	ddi_put32(mpi2_dma_obj->acc_handle,
948 	    &init->SenseBufferAddressHigh, 0);
949 
950 	ddi_put64(mpi2_dma_obj->acc_handle,
951 	    (uint64_t *)&init->SystemRequestFrameBaseAddress,
952 	    instance->io_request_frames_phy);
953 
954 	ddi_put64(mpi2_dma_obj->acc_handle,
955 	    &init->ReplyDescriptorPostQueueAddress,
956 	    instance->reply_frame_pool_phy);
957 
958 	ddi_put64(mpi2_dma_obj->acc_handle,
959 	    &init->ReplyFreeQueueAddress, 0);
960 
961 	cmd = instance->cmd_list[0];
962 	if (cmd == NULL) {
963 		return (DDI_FAILURE);
964 	}
965 	cmd->retry_count_for_ocr = 0;
966 	cmd->pkt = NULL;
967 	cmd->drv_pkt_time = 0;
968 
969 	mfiFrameInit2 = (struct mrsas_init_frame2 *)cmd->scsi_io_request;
970 	con_log(CL_ANN1, (CE_CONT, "[mfi vaddr]%p", (void *)mfiFrameInit2));
971 
972 	frame_hdr = &cmd->frame->hdr;
973 
974 	ddi_put8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status,
975 	    MFI_CMD_STATUS_POLL_MODE);
976 
977 	flags = ddi_get16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags);
978 
979 	flags	|= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
980 
981 	ddi_put16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags, flags);
982 
983 	con_log(CL_ANN, (CE_CONT,
984 	    "mrsas_tbolt_ioc_init: SMID:%x\n", cmd->SMID));
985 
986 	/* Init the MFI Header */
987 	ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
988 	    &mfiFrameInit2->cmd, MFI_CMD_OP_INIT);
989 
990 	con_log(CL_ANN1, (CE_CONT, "[CMD]%x", mfiFrameInit2->cmd));
991 
992 	ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
993 	    &mfiFrameInit2->cmd_status,
994 	    MFI_STAT_INVALID_STATUS);
995 
996 	con_log(CL_ANN1, (CE_CONT, "[Status]%x", mfiFrameInit2->cmd_status));
997 
998 	ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
999 	    &mfiFrameInit2->queue_info_new_phys_addr_lo,
1000 	    mpi2_dma_obj->dma_cookie[0].dmac_address);
1001 
1002 	ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
1003 	    &mfiFrameInit2->data_xfer_len,
1004 	    sizeof (Mpi2IOCInitRequest_t));
1005 
1006 	con_log(CL_ANN1, (CE_CONT, "[reply q desc addr]%x",
1007 	    (int)init->ReplyDescriptorPostQueueAddress));
1008 
1009 	/* fill driver version information */
1010 	fill_up_drv_ver(&drv_ver_info);
1011 
1012 	/* allocate the driver version data transfer buffer */
1013 	instance->drv_ver_dma_obj.size = sizeof (drv_ver_info.drv_ver);
1014 	instance->drv_ver_dma_obj.dma_attr = mrsas_generic_dma_attr;
1015 	instance->drv_ver_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
1016 	instance->drv_ver_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
1017 	instance->drv_ver_dma_obj.dma_attr.dma_attr_sgllen = 1;
1018 	instance->drv_ver_dma_obj.dma_attr.dma_attr_align = 1;
1019 
1020 	if (mrsas_alloc_dma_obj(instance, &instance->drv_ver_dma_obj,
1021 	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
1022 		dev_err(instance->dip, CE_WARN,
1023 		    "fusion init: Could not allocate driver version buffer.");
1024 		return (DDI_FAILURE);
1025 	}
1026 	/* copy driver version to dma buffer */
1027 	bzero(instance->drv_ver_dma_obj.buffer, sizeof (drv_ver_info.drv_ver));
1028 	ddi_rep_put8(cmd->frame_dma_obj.acc_handle,
1029 	    (uint8_t *)drv_ver_info.drv_ver,
1030 	    (uint8_t *)instance->drv_ver_dma_obj.buffer,
1031 	    sizeof (drv_ver_info.drv_ver), DDI_DEV_AUTOINCR);
1032 
1033 	/* send driver version physical address to firmware */
1034 	ddi_put64(cmd->frame_dma_obj.acc_handle, &mfiFrameInit2->driverversion,
1035 	    instance->drv_ver_dma_obj.dma_cookie[0].dmac_address);
1036 
1037 	con_log(CL_ANN1, (CE_CONT, "[MPIINIT2 frame Phys addr ]0x%x len = %x",
1038 	    mfiFrameInit2->queue_info_new_phys_addr_lo,
1039 	    (int)sizeof (Mpi2IOCInitRequest_t)));
1040 
1041 	con_log(CL_ANN1, (CE_CONT, "[Length]%x", mfiFrameInit2->data_xfer_len));
1042 
1043 	con_log(CL_ANN1, (CE_CONT, "[MFI frame Phys Address]%x len = %x",
1044 	    cmd->scsi_io_request_phys_addr,
1045 	    (int)sizeof (struct mrsas_init_frame2)));
1046 
1047 	/* disable interrupts before sending INIT2 frame */
1048 	instance->func_ptr->disable_intr(instance);
1049 
1050 	req_desc.Words = cmd->scsi_io_request_phys_addr;
1051 	req_desc.MFAIo.RequestFlags =
1052 	    (MPI2_REQ_DESCRIPT_FLAGS_MFA << MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1053 
1054 	cmd->request_desc = &req_desc;
1055 
1056 	/* issue the init frame */
1057 
1058 	mutex_enter(&instance->reg_write_mtx);
1059 	WR_IB_LOW_QPORT((uint32_t)(req_desc.Words), instance);
1060 	WR_IB_HIGH_QPORT((uint32_t)(req_desc.Words >> 32), instance);
1061 	mutex_exit(&instance->reg_write_mtx);
1062 
1063 	con_log(CL_ANN1, (CE_CONT, "[cmd = %d] ", frame_hdr->cmd));
1064 	con_log(CL_ANN1, (CE_CONT, "[cmd  Status= %x] ",
1065 	    frame_hdr->cmd_status));
1066 
1067 	timeout = drv_usectohz(MFI_POLL_TIMEOUT_SECS * MICROSEC);
1068 	do {
1069 		if (ddi_get8(cmd->frame_dma_obj.acc_handle,
1070 		    &mfiFrameInit2->cmd_status) != MFI_CMD_STATUS_POLL_MODE)
1071 			break;
1072 		delay(1);
1073 		timeout--;
1074 	} while (timeout > 0);
1075 
1076 	if (ddi_get8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1077 	    &mfiFrameInit2->cmd_status) == 0) {
1078 		con_log(CL_ANN, (CE_NOTE, "INIT2 Success"));
1079 	} else {
1080 		con_log(CL_ANN, (CE_WARN, "INIT2 Fail"));
1081 		mrsas_dump_reply_desc(instance);
1082 		goto fail_ioc_init;
1083 	}
1084 
1085 	mrsas_dump_reply_desc(instance);
1086 
1087 	instance->unroll.verBuff = 1;
1088 
1089 	con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_ioc_init: SUCCESSFUL"));
1090 
1091 	return (DDI_SUCCESS);
1092 
1093 
1094 fail_ioc_init:
1095 
1096 	(void) mrsas_free_dma_obj(instance, instance->drv_ver_dma_obj);
1097 
1098 	return (DDI_FAILURE);
1099 }
1100 
1101 int
1102 wait_for_outstanding_poll_io(struct mrsas_instance *instance)
1103 {
1104 	int i;
1105 	uint32_t wait_time = dump_io_wait_time;
1106 	for (i = 0; i < wait_time; i++) {
1107 		/*
1108 		 * Check For Outstanding poll Commands
1109 		 * except ldsync command and aen command
1110 		 */
1111 		if (instance->fw_outstanding <= 2) {
1112 			break;
1113 		}
1114 		drv_usecwait(10*MILLISEC);
1115 		/* complete commands from reply queue */
1116 		(void) mr_sas_tbolt_process_outstanding_cmd(instance);
1117 	}
1118 	if (instance->fw_outstanding > 2) {
1119 		return (1);
1120 	}
1121 	return (0);
1122 }
1123 /*
1124  * scsi_pkt handling
1125  *
1126  * Visible to the external world via the transport structure.
1127  */
1128 
/*
 * mrsas_tbolt_tran_start - SCSA tran_start(9E) entry point for
 * Thunderbolt/Invader controllers.
 *
 * Rejects I/O outright on a dead adapter (TRAN_FATAL_ERROR) and defers
 * it while an adapter reset is in progress (TRAN_BUSY).  Otherwise the
 * packet is (re)initialized, translated into a raid message frame by
 * mrsas_tbolt_build_cmd(), and either issued to the firmware (interrupt
 * mode) or issued and polled to completion (FLAG_NOINTR).
 */
int
mrsas_tbolt_tran_start(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	struct mrsas_instance	*instance = ADDR2MR(ap);
	struct scsa_cmd		*acmd = PKT2CMD(pkt);
	struct mrsas_cmd	*cmd = NULL;
	uchar_t			cmd_done = 0;

	con_log(CL_DLEVEL1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
	/* Adapter declared dead: fail all new I/O permanently. */
	if (instance->deadadapter == 1) {
		dev_err(instance->dip, CE_WARN,
		    "mrsas_tran_start:TBOLT return TRAN_FATAL_ERROR "
		    "for IO, as the HBA doesnt take any more IOs");
		if (pkt) {
			pkt->pkt_reason		= CMD_DEV_GONE;
			pkt->pkt_statistics	= STAT_DISCON;
		}
		return (TRAN_FATAL_ERROR);
	}
	/* Adapter reset underway: ask the target driver to retry later. */
	if (instance->adapterresetinprogress) {
		con_log(CL_ANN, (CE_NOTE, "Reset flag set, "
		    "returning mfi_pkt and setting TRAN_BUSY\n"));
		return (TRAN_BUSY);
	}
	(void) mrsas_tbolt_prepare_pkt(acmd);

	cmd = mrsas_tbolt_build_cmd(instance, ap, pkt, &cmd_done);

	/*
	 * Check if the command is already completed by the mrsas_build_cmd()
	 * routine. In which case the busy_flag would be clear and scb will be
	 * NULL and appropriate reason provided in pkt_reason field
	 */
	if (cmd_done) {
		pkt->pkt_reason = CMD_CMPLT;
		pkt->pkt_scbp[0] = STATUS_GOOD;
		pkt->pkt_state |= STATE_GOT_BUS | STATE_GOT_TARGET
		    | STATE_SENT_CMD;
		/* Emulated completion: invoke the callback directly. */
		if (((pkt->pkt_flags & FLAG_NOINTR) == 0) && pkt->pkt_comp) {
			(*pkt->pkt_comp)(pkt);
		}

		return (TRAN_ACCEPT);
	}

	/* No command could be allocated/built: transient, retry later. */
	if (cmd == NULL) {
		return (TRAN_BUSY);
	}


	if ((pkt->pkt_flags & FLAG_NOINTR) == 0) {
		if (instance->fw_outstanding > instance->max_fw_cmds) {
			dev_err(instance->dip, CE_WARN,
			    "Command Queue Full... Returning BUSY");
			DTRACE_PROBE2(tbolt_start_tran_err,
			    uint16_t, instance->fw_outstanding,
			    uint16_t, instance->max_fw_cmds);
			return_raid_msg_pkt(instance, cmd);
			return (TRAN_BUSY);
		}

		/* Synchronize the Cmd frame for the controller */
		(void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle, 0, 0,
		    DDI_DMA_SYNC_FORDEV);

		con_log(CL_ANN, (CE_CONT, "tbolt_issue_cmd: SCSI CDB[0]=0x%x "
		    "cmd->index:0x%x SMID 0x%x\n", pkt->pkt_cdbp[0],
		    cmd->index, cmd->SMID));

		instance->func_ptr->issue_cmd(cmd, instance);
	} else {
		/* Polled mode: issue, then spin until the queue drains. */
		instance->func_ptr->issue_cmd(cmd, instance);
		(void) wait_for_outstanding_poll_io(instance);
		(void) mrsas_common_check(instance, cmd);
		DTRACE_PROBE2(tbolt_start_nointr_done,
		    uint8_t, cmd->frame->hdr.cmd,
		    uint8_t, cmd->frame->hdr.cmd_status);
	}

	return (TRAN_ACCEPT);
}
1210 
1211 /*
1212  * prepare the pkt:
1213  * the pkt may have been resubmitted or just reused so
1214  * initialize some fields and do some checks.
1215  */
1216 static int
1217 mrsas_tbolt_prepare_pkt(struct scsa_cmd *acmd)
1218 {
1219 	struct scsi_pkt	*pkt = CMD2PKT(acmd);
1220 
1221 
1222 	/*
1223 	 * Reinitialize some fields that need it; the packet may
1224 	 * have been resubmitted
1225 	 */
1226 	pkt->pkt_reason = CMD_CMPLT;
1227 	pkt->pkt_state = 0;
1228 	pkt->pkt_statistics = 0;
1229 	pkt->pkt_resid = 0;
1230 
1231 	/*
1232 	 * zero status byte.
1233 	 */
1234 	*(pkt->pkt_scbp) = 0;
1235 
1236 	return (0);
1237 }
1238 
1239 
1240 int
1241 mr_sas_tbolt_build_sgl(struct mrsas_instance *instance,
1242     struct scsa_cmd *acmd,
1243     struct mrsas_cmd *cmd,
1244     Mpi2RaidSCSIIORequest_t *scsi_raid_io,
1245     uint32_t *datalen)
1246 {
1247 	uint32_t		MaxSGEs;
1248 	int			sg_to_process;
1249 	uint32_t		i, j;
1250 	uint32_t		numElements, endElement;
1251 	Mpi25IeeeSgeChain64_t	*ieeeChainElement = NULL;
1252 	Mpi25IeeeSgeChain64_t	*scsi_raid_io_sgl_ieee = NULL;
1253 	ddi_acc_handle_t acc_handle =
1254 	    instance->mpi2_frame_pool_dma_obj.acc_handle;
1255 
1256 	con_log(CL_ANN1, (CE_NOTE,
1257 	    "chkpnt: Building Chained SGL :%d", __LINE__));
1258 
1259 	/* Calulate SGE size in number of Words(32bit) */
1260 	/* Clear the datalen before updating it. */
1261 	*datalen = 0;
1262 
1263 	MaxSGEs = instance->max_sge_in_main_msg;
1264 
1265 	ddi_put16(acc_handle, &scsi_raid_io->SGLFlags,
1266 	    MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
1267 
1268 	/* set data transfer flag. */
1269 	if (acmd->cmd_flags & CFLAG_DMASEND) {
1270 		ddi_put32(acc_handle, &scsi_raid_io->Control,
1271 		    MPI2_SCSIIO_CONTROL_WRITE);
1272 	} else {
1273 		ddi_put32(acc_handle, &scsi_raid_io->Control,
1274 		    MPI2_SCSIIO_CONTROL_READ);
1275 	}
1276 
1277 
1278 	numElements = acmd->cmd_cookiecnt;
1279 
1280 	con_log(CL_DLEVEL1, (CE_NOTE, "[SGE Count]:%x", numElements));
1281 
1282 	if (numElements > instance->max_num_sge) {
1283 		con_log(CL_ANN, (CE_NOTE,
1284 		    "[Max SGE Count Exceeded]:%x", numElements));
1285 		return (numElements);
1286 	}
1287 
1288 	ddi_put8(acc_handle, &scsi_raid_io->RaidContext.numSGE,
1289 	    (uint8_t)numElements);
1290 
1291 	/* set end element in main message frame */
1292 	endElement = (numElements <= MaxSGEs) ? numElements : (MaxSGEs - 1);
1293 
1294 	/* prepare the scatter-gather list for the firmware */
1295 	scsi_raid_io_sgl_ieee =
1296 	    (Mpi25IeeeSgeChain64_t *)&scsi_raid_io->SGL.IeeeChain;
1297 
1298 	if (instance->gen3) {
1299 		Mpi25IeeeSgeChain64_t *sgl_ptr_end = scsi_raid_io_sgl_ieee;
1300 		sgl_ptr_end += instance->max_sge_in_main_msg - 1;
1301 
1302 		ddi_put8(acc_handle, &sgl_ptr_end->Flags, 0);
1303 	}
1304 
1305 	for (i = 0; i < endElement; i++, scsi_raid_io_sgl_ieee++) {
1306 		ddi_put64(acc_handle, &scsi_raid_io_sgl_ieee->Address,
1307 		    acmd->cmd_dmacookies[i].dmac_laddress);
1308 
1309 		ddi_put32(acc_handle, &scsi_raid_io_sgl_ieee->Length,
1310 		    acmd->cmd_dmacookies[i].dmac_size);
1311 
1312 		ddi_put8(acc_handle, &scsi_raid_io_sgl_ieee->Flags, 0);
1313 
1314 		if (instance->gen3) {
1315 			if (i == (numElements - 1)) {
1316 				ddi_put8(acc_handle,
1317 				    &scsi_raid_io_sgl_ieee->Flags,
1318 				    IEEE_SGE_FLAGS_END_OF_LIST);
1319 			}
1320 		}
1321 
1322 		*datalen += acmd->cmd_dmacookies[i].dmac_size;
1323 
1324 #ifdef DEBUG
1325 		con_log(CL_DLEVEL1, (CE_NOTE, "[SGL Address]: %" PRIx64,
1326 		    scsi_raid_io_sgl_ieee->Address));
1327 		con_log(CL_DLEVEL1, (CE_NOTE, "[SGL Length]:%x",
1328 		    scsi_raid_io_sgl_ieee->Length));
1329 		con_log(CL_DLEVEL1, (CE_NOTE, "[SGL Flags]:%x",
1330 		    scsi_raid_io_sgl_ieee->Flags));
1331 #endif
1332 
1333 	}
1334 
1335 	ddi_put8(acc_handle, &scsi_raid_io->ChainOffset, 0);
1336 
1337 	/* check if chained SGL required */
1338 	if (i < numElements) {
1339 
1340 		con_log(CL_ANN1, (CE_NOTE, "[Chain Element index]:%x", i));
1341 
1342 		if (instance->gen3) {
1343 			uint16_t ioFlags =
1344 			    ddi_get16(acc_handle, &scsi_raid_io->IoFlags);
1345 
1346 			if ((ioFlags &
1347 			    MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) !=
1348 			    MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) {
1349 				ddi_put8(acc_handle, &scsi_raid_io->ChainOffset,
1350 				    (U8)instance->chain_offset_io_req);
1351 			} else {
1352 				ddi_put8(acc_handle,
1353 				    &scsi_raid_io->ChainOffset, 0);
1354 			}
1355 		} else {
1356 			ddi_put8(acc_handle, &scsi_raid_io->ChainOffset,
1357 			    (U8)instance->chain_offset_io_req);
1358 		}
1359 
1360 		/* prepare physical chain element */
1361 		ieeeChainElement = scsi_raid_io_sgl_ieee;
1362 
1363 		ddi_put8(acc_handle, &ieeeChainElement->NextChainOffset, 0);
1364 
1365 		if (instance->gen3) {
1366 			ddi_put8(acc_handle, &ieeeChainElement->Flags,
1367 			    IEEE_SGE_FLAGS_CHAIN_ELEMENT);
1368 		} else {
1369 			ddi_put8(acc_handle, &ieeeChainElement->Flags,
1370 			    (IEEE_SGE_FLAGS_CHAIN_ELEMENT |
1371 			    MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR));
1372 		}
1373 
1374 		ddi_put32(acc_handle, &ieeeChainElement->Length,
1375 		    (sizeof (MPI2_SGE_IO_UNION) * (numElements - i)));
1376 
1377 		ddi_put64(acc_handle, &ieeeChainElement->Address,
1378 		    (U64)cmd->sgl_phys_addr);
1379 
1380 		sg_to_process = numElements - i;
1381 
1382 		con_log(CL_ANN1, (CE_NOTE,
1383 		    "[Additional SGE Count]:%x", endElement));
1384 
1385 		/* point to the chained SGL buffer */
1386 		scsi_raid_io_sgl_ieee = (Mpi25IeeeSgeChain64_t *)cmd->sgl;
1387 
1388 		/* build rest of the SGL in chained buffer */
1389 		for (j = 0; j < sg_to_process; j++, scsi_raid_io_sgl_ieee++) {
1390 			con_log(CL_DLEVEL3, (CE_NOTE, "[remaining SGL]:%x", i));
1391 
1392 			ddi_put64(acc_handle, &scsi_raid_io_sgl_ieee->Address,
1393 			    acmd->cmd_dmacookies[i].dmac_laddress);
1394 
1395 			ddi_put32(acc_handle, &scsi_raid_io_sgl_ieee->Length,
1396 			    acmd->cmd_dmacookies[i].dmac_size);
1397 
1398 			ddi_put8(acc_handle, &scsi_raid_io_sgl_ieee->Flags, 0);
1399 
1400 			if (instance->gen3) {
1401 				if (i == (numElements - 1)) {
1402 					ddi_put8(acc_handle,
1403 					    &scsi_raid_io_sgl_ieee->Flags,
1404 					    IEEE_SGE_FLAGS_END_OF_LIST);
1405 				}
1406 			}
1407 
1408 			*datalen += acmd->cmd_dmacookies[i].dmac_size;
1409 
1410 #if DEBUG
1411 			con_log(CL_DLEVEL1, (CE_NOTE,
1412 			    "[SGL Address]: %" PRIx64,
1413 			    scsi_raid_io_sgl_ieee->Address));
1414 			con_log(CL_DLEVEL1, (CE_NOTE,
1415 			    "[SGL Length]:%x", scsi_raid_io_sgl_ieee->Length));
1416 			con_log(CL_DLEVEL1, (CE_NOTE,
1417 			    "[SGL Flags]:%x", scsi_raid_io_sgl_ieee->Flags));
1418 #endif
1419 
1420 			i++;
1421 		}
1422 	}
1423 
1424 	return (0);
1425 } /*end of BuildScatterGather */
1426 
1427 
1428 /*
1429  * build_cmd
1430  */
1431 static struct mrsas_cmd *
1432 mrsas_tbolt_build_cmd(struct mrsas_instance *instance, struct scsi_address *ap,
1433     struct scsi_pkt *pkt, uchar_t *cmd_done)
1434 {
1435 	uint8_t		fp_possible = 0;
1436 	uint32_t	index;
1437 	uint32_t	lba_count = 0;
1438 	uint32_t	start_lba_hi = 0;
1439 	uint32_t	start_lba_lo = 0;
1440 	ddi_acc_handle_t acc_handle =
1441 	    instance->mpi2_frame_pool_dma_obj.acc_handle;
1442 	struct mrsas_cmd		*cmd = NULL;
1443 	struct scsa_cmd			*acmd = PKT2CMD(pkt);
1444 	MRSAS_REQUEST_DESCRIPTOR_UNION	*ReqDescUnion;
1445 	Mpi2RaidSCSIIORequest_t		*scsi_raid_io;
1446 	uint32_t			datalen;
1447 	struct IO_REQUEST_INFO io_info;
1448 	MR_FW_RAID_MAP_ALL *local_map_ptr;
1449 	uint16_t pd_cmd_cdblen;
1450 
1451 	con_log(CL_DLEVEL1, (CE_NOTE,
1452 	    "chkpnt: Entered mrsas_tbolt_build_cmd:%d", __LINE__));
1453 
1454 	/* find out if this is logical or physical drive command.  */
1455 	acmd->islogical = MRDRV_IS_LOGICAL(ap);
1456 	acmd->device_id = MAP_DEVICE_ID(instance, ap);
1457 
1458 	*cmd_done = 0;
1459 
1460 	/* get the command packet */
1461 	if (!(cmd = get_raid_msg_pkt(instance))) {
1462 		DTRACE_PROBE2(tbolt_build_cmd_mfi_err, uint16_t,
1463 		    instance->fw_outstanding, uint16_t, instance->max_fw_cmds);
1464 		return (NULL);
1465 	}
1466 
1467 	index = cmd->index;
1468 	ReqDescUnion =	mr_sas_get_request_descriptor(instance, index);
1469 	ReqDescUnion->Words = 0;
1470 	ReqDescUnion->SCSIIO.SMID = cmd->SMID;
1471 	ReqDescUnion->SCSIIO.RequestFlags =
1472 	    (MPI2_REQ_DESCRIPT_FLAGS_LD_IO <<
1473 	    MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1474 
1475 
1476 	cmd->request_desc = ReqDescUnion;
1477 	cmd->pkt = pkt;
1478 	cmd->cmd = acmd;
1479 
1480 	DTRACE_PROBE4(tbolt_build_cmd, uint8_t, pkt->pkt_cdbp[0],
1481 	    ulong_t, acmd->cmd_dmacount, ulong_t, acmd->cmd_dma_len,
1482 	    uint16_t, acmd->device_id);
1483 
1484 	/* lets get the command directions */
1485 	if (acmd->cmd_flags & CFLAG_DMASEND) {
1486 		if (acmd->cmd_flags & CFLAG_CONSISTENT) {
1487 			(void) ddi_dma_sync(acmd->cmd_dmahandle,
1488 			    acmd->cmd_dma_offset, acmd->cmd_dma_len,
1489 			    DDI_DMA_SYNC_FORDEV);
1490 		}
1491 	} else if (acmd->cmd_flags & ~CFLAG_DMASEND) {
1492 		if (acmd->cmd_flags & CFLAG_CONSISTENT) {
1493 			(void) ddi_dma_sync(acmd->cmd_dmahandle,
1494 			    acmd->cmd_dma_offset, acmd->cmd_dma_len,
1495 			    DDI_DMA_SYNC_FORCPU);
1496 		}
1497 	} else {
1498 		con_log(CL_ANN, (CE_NOTE, "NO DMA"));
1499 	}
1500 
1501 
1502 	/* get SCSI_IO raid message frame pointer */
1503 	scsi_raid_io = (Mpi2RaidSCSIIORequest_t *)cmd->scsi_io_request;
1504 
1505 	/* zero out SCSI_IO raid message frame */
1506 	bzero(scsi_raid_io, sizeof (Mpi2RaidSCSIIORequest_t));
1507 
1508 	/* Set the ldTargetId set by BuildRaidContext() */
1509 	ddi_put16(acc_handle, &scsi_raid_io->RaidContext.ldTargetId,
1510 	    acmd->device_id);
1511 
1512 	/*  Copy CDB to scsi_io_request message frame */
1513 	ddi_rep_put8(acc_handle,
1514 	    (uint8_t *)pkt->pkt_cdbp, (uint8_t *)scsi_raid_io->CDB.CDB32,
1515 	    acmd->cmd_cdblen, DDI_DEV_AUTOINCR);
1516 
1517 	/*
1518 	 * Just the CDB length, rest of the Flags are zero
1519 	 * This will be modified later.
1520 	 */
1521 	ddi_put16(acc_handle, &scsi_raid_io->IoFlags, acmd->cmd_cdblen);
1522 
1523 	pd_cmd_cdblen = acmd->cmd_cdblen;
1524 
1525 	if (acmd->islogical) {
1526 
1527 		switch (pkt->pkt_cdbp[0]) {
1528 		case SCMD_READ:
1529 		case SCMD_WRITE:
1530 		case SCMD_READ_G1:
1531 		case SCMD_WRITE_G1:
1532 		case SCMD_READ_G4:
1533 		case SCMD_WRITE_G4:
1534 		case SCMD_READ_G5:
1535 		case SCMD_WRITE_G5:
1536 
1537 			/* Initialize sense Information */
1538 			if (cmd->sense1 == NULL) {
1539 				con_log(CL_ANN, (CE_NOTE, "tbolt_build_cmd: "
1540 				    "Sense buffer ptr NULL "));
1541 			}
1542 			bzero(cmd->sense1, SENSE_LENGTH);
1543 			con_log(CL_DLEVEL2, (CE_NOTE, "tbolt_build_cmd "
1544 			    "CDB[0] = %x\n", pkt->pkt_cdbp[0]));
1545 
1546 			if (acmd->cmd_cdblen == CDB_GROUP0) {
1547 				/* 6-byte cdb */
1548 				lba_count = (uint16_t)(pkt->pkt_cdbp[4]);
1549 				start_lba_lo = ((uint32_t)(pkt->pkt_cdbp[3]) |
1550 				    ((uint32_t)(pkt->pkt_cdbp[2]) << 8) |
1551 				    ((uint32_t)((pkt->pkt_cdbp[1]) & 0x1F)
1552 				    << 16));
1553 			} else if (acmd->cmd_cdblen == CDB_GROUP1) {
1554 				/* 10-byte cdb */
1555 				lba_count =
1556 				    (((uint16_t)(pkt->pkt_cdbp[8])) |
1557 				    ((uint16_t)(pkt->pkt_cdbp[7]) << 8));
1558 
1559 				start_lba_lo =
1560 				    (((uint32_t)(pkt->pkt_cdbp[5])) |
1561 				    ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
1562 				    ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
1563 				    ((uint32_t)(pkt->pkt_cdbp[2]) << 24));
1564 
1565 			} else if (acmd->cmd_cdblen == CDB_GROUP5) {
1566 				/* 12-byte cdb */
1567 				lba_count = (
1568 				    ((uint32_t)(pkt->pkt_cdbp[9])) |
1569 				    ((uint32_t)(pkt->pkt_cdbp[8]) << 8) |
1570 				    ((uint32_t)(pkt->pkt_cdbp[7]) << 16) |
1571 				    ((uint32_t)(pkt->pkt_cdbp[6]) << 24));
1572 
1573 				start_lba_lo =
1574 				    (((uint32_t)(pkt->pkt_cdbp[5])) |
1575 				    ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
1576 				    ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
1577 				    ((uint32_t)(pkt->pkt_cdbp[2]) << 24));
1578 
1579 			} else if (acmd->cmd_cdblen == CDB_GROUP4) {
1580 				/* 16-byte cdb */
1581 				lba_count = (
1582 				    ((uint32_t)(pkt->pkt_cdbp[13])) |
1583 				    ((uint32_t)(pkt->pkt_cdbp[12]) << 8) |
1584 				    ((uint32_t)(pkt->pkt_cdbp[11]) << 16) |
1585 				    ((uint32_t)(pkt->pkt_cdbp[10]) << 24));
1586 
1587 				start_lba_lo = (
1588 				    ((uint32_t)(pkt->pkt_cdbp[9])) |
1589 				    ((uint32_t)(pkt->pkt_cdbp[8]) << 8) |
1590 				    ((uint32_t)(pkt->pkt_cdbp[7]) << 16) |
1591 				    ((uint32_t)(pkt->pkt_cdbp[6]) << 24));
1592 
1593 				start_lba_hi = (
1594 				    ((uint32_t)(pkt->pkt_cdbp[5])) |
1595 				    ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
1596 				    ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
1597 				    ((uint32_t)(pkt->pkt_cdbp[2]) << 24));
1598 			}
1599 
1600 			if (instance->tbolt &&
1601 			    ((lba_count * 512) > mrsas_tbolt_max_cap_maxxfer)) {
1602 				dev_err(instance->dip, CE_WARN,
1603 				    "IO SECTOR COUNT exceeds "
1604 				    "controller limit 0x%x sectors",
1605 				    lba_count);
1606 			}
1607 
1608 			bzero(&io_info, sizeof (struct IO_REQUEST_INFO));
1609 			io_info.ldStartBlock = ((uint64_t)start_lba_hi << 32) |
1610 			    start_lba_lo;
1611 			io_info.numBlocks = lba_count;
1612 			io_info.ldTgtId = acmd->device_id;
1613 
1614 			if (acmd->cmd_flags & CFLAG_DMASEND)
1615 				io_info.isRead = 0;
1616 			else
1617 				io_info.isRead = 1;
1618 
1619 
1620 			/* Acquire SYNC MAP UPDATE lock */
1621 			mutex_enter(&instance->sync_map_mtx);
1622 
1623 			local_map_ptr =
1624 			    instance->ld_map[(instance->map_id & 1)];
1625 
1626 			if ((MR_TargetIdToLdGet(
1627 			    acmd->device_id, local_map_ptr) >=
1628 			    MAX_LOGICAL_DRIVES) || !instance->fast_path_io) {
1629 				dev_err(instance->dip, CE_NOTE,
1630 				    "Fast Path NOT Possible, "
1631 				    "targetId >= MAX_LOGICAL_DRIVES || "
1632 				    "!instance->fast_path_io");
1633 				fp_possible = 0;
1634 				/* Set Regionlock flags to BYPASS */
1635 				/* io_request->RaidContext.regLockFlags  = 0; */
1636 				ddi_put8(acc_handle,
1637 				    &scsi_raid_io->RaidContext.regLockFlags, 0);
1638 			} else {
1639 				if (MR_BuildRaidContext(instance, &io_info,
1640 				    &scsi_raid_io->RaidContext, local_map_ptr))
1641 					fp_possible = io_info.fpOkForIo;
1642 			}
1643 
1644 			if (!enable_fp)
1645 				fp_possible = 0;
1646 
1647 			con_log(CL_ANN1, (CE_NOTE, "enable_fp %d  "
1648 			    "instance->fast_path_io %d fp_possible %d",
1649 			    enable_fp, instance->fast_path_io, fp_possible));
1650 
1651 		if (fp_possible) {
1652 
1653 			/* Check for DIF enabled LD */
1654 			if (MR_CheckDIF(acmd->device_id, local_map_ptr)) {
1655 				/* Prepare 32 Byte CDB for DIF capable Disk */
1656 				mrsas_tbolt_prepare_cdb(instance,
1657 				    scsi_raid_io->CDB.CDB32,
1658 				    &io_info, scsi_raid_io, start_lba_lo);
1659 			} else {
1660 				mrsas_tbolt_set_pd_lba(scsi_raid_io->CDB.CDB32,
1661 				    (uint8_t *)&pd_cmd_cdblen,
1662 				    io_info.pdBlock, io_info.numBlocks);
1663 				ddi_put16(acc_handle,
1664 				    &scsi_raid_io->IoFlags, pd_cmd_cdblen);
1665 			}
1666 
1667 			ddi_put8(acc_handle, &scsi_raid_io->Function,
1668 			    MPI2_FUNCTION_SCSI_IO_REQUEST);
1669 
1670 			ReqDescUnion->SCSIIO.RequestFlags =
1671 			    (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
1672 			    MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1673 
1674 			if (instance->gen3) {
1675 				uint8_t regLockFlags = ddi_get8(acc_handle,
1676 				    &scsi_raid_io->RaidContext.regLockFlags);
1677 				uint16_t IoFlags = ddi_get16(acc_handle,
1678 				    &scsi_raid_io->IoFlags);
1679 
1680 				if (regLockFlags == REGION_TYPE_UNUSED)
1681 					ReqDescUnion->SCSIIO.RequestFlags =
1682 					    (MPI2_REQ_DESCRIPT_FLAGS_NO_LOCK <<
1683 					    MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1684 
1685 				IoFlags |=
1686 				    MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH;
1687 				regLockFlags |=
1688 				    (MR_RL_FLAGS_GRANT_DESTINATION_CUDA |
1689 				    MR_RL_FLAGS_SEQ_NUM_ENABLE);
1690 
1691 				ddi_put8(acc_handle,
1692 				    &scsi_raid_io->ChainOffset, 0);
1693 				ddi_put8(acc_handle,
1694 				    &scsi_raid_io->RaidContext.nsegType,
1695 				    ((0x01 << MPI2_NSEG_FLAGS_SHIFT) |
1696 				    MPI2_TYPE_CUDA));
1697 				ddi_put8(acc_handle,
1698 				    &scsi_raid_io->RaidContext.regLockFlags,
1699 				    regLockFlags);
1700 				ddi_put16(acc_handle,
1701 				    &scsi_raid_io->IoFlags, IoFlags);
1702 			}
1703 
1704 			if ((instance->load_balance_info[
1705 			    acmd->device_id].loadBalanceFlag) &&
1706 			    (io_info.isRead)) {
1707 				io_info.devHandle =
1708 				    get_updated_dev_handle(&instance->
1709 				    load_balance_info[acmd->device_id],
1710 				    &io_info);
1711 				cmd->load_balance_flag |=
1712 				    MEGASAS_LOAD_BALANCE_FLAG;
1713 			} else {
1714 				cmd->load_balance_flag &=
1715 				    ~MEGASAS_LOAD_BALANCE_FLAG;
1716 			}
1717 
1718 			ReqDescUnion->SCSIIO.DevHandle = io_info.devHandle;
1719 			ddi_put16(acc_handle, &scsi_raid_io->DevHandle,
1720 			    io_info.devHandle);
1721 
1722 		} else { /* FP Not Possible */
1723 
1724 			ddi_put8(acc_handle, &scsi_raid_io->Function,
1725 			    MPI2_FUNCTION_LD_IO_REQUEST);
1726 
1727 			ddi_put16(acc_handle,
1728 			    &scsi_raid_io->DevHandle, acmd->device_id);
1729 
1730 			ReqDescUnion->SCSIIO.RequestFlags =
1731 			    (MPI2_REQ_DESCRIPT_FLAGS_LD_IO <<
1732 			    MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1733 
1734 			ddi_put16(acc_handle,
1735 			    &scsi_raid_io->RaidContext.timeoutValue,
1736 			    local_map_ptr->raidMap.fpPdIoTimeoutSec);
1737 
1738 			if (instance->gen3) {
1739 				uint8_t regLockFlags = ddi_get8(acc_handle,
1740 				    &scsi_raid_io->RaidContext.regLockFlags);
1741 
1742 				if (regLockFlags == REGION_TYPE_UNUSED) {
1743 					ReqDescUnion->SCSIIO.RequestFlags =
1744 					    (MPI2_REQ_DESCRIPT_FLAGS_NO_LOCK <<
1745 					    MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1746 				}
1747 
1748 				regLockFlags |=
1749 				    (MR_RL_FLAGS_GRANT_DESTINATION_CPU0 |
1750 				    MR_RL_FLAGS_SEQ_NUM_ENABLE);
1751 
1752 				ddi_put8(acc_handle,
1753 				    &scsi_raid_io->RaidContext.nsegType,
1754 				    ((0x01 << MPI2_NSEG_FLAGS_SHIFT) |
1755 				    MPI2_TYPE_CUDA));
1756 				ddi_put8(acc_handle,
1757 				    &scsi_raid_io->RaidContext.regLockFlags,
1758 				    regLockFlags);
1759 			}
1760 		} /* Not FP */
1761 
1762 		/* Release SYNC MAP UPDATE lock */
1763 		mutex_exit(&instance->sync_map_mtx);
1764 
1765 		break;
1766 
1767 		case 0x35: { /* SCMD_SYNCHRONIZE_CACHE */
1768 			return_raid_msg_pkt(instance, cmd);
1769 			*cmd_done = 1;
1770 			return (NULL);
1771 		}
1772 
1773 		case SCMD_MODE_SENSE:
1774 		case SCMD_MODE_SENSE_G1: {
1775 			union scsi_cdb	*cdbp;
1776 			uint16_t	page_code;
1777 
1778 			cdbp = (void *)pkt->pkt_cdbp;
1779 			page_code = (uint16_t)cdbp->cdb_un.sg.scsi[0];
1780 			switch (page_code) {
1781 			case 0x3:
1782 			case 0x4:
1783 				(void) mrsas_mode_sense_build(pkt);
1784 				return_raid_msg_pkt(instance, cmd);
1785 				*cmd_done = 1;
1786 				return (NULL);
1787 			}
1788 			return (cmd);
1789 		}
1790 
1791 		default:
1792 			/* Pass-through command to logical drive */
1793 			ddi_put8(acc_handle, &scsi_raid_io->Function,
1794 			    MPI2_FUNCTION_LD_IO_REQUEST);
1795 			ddi_put8(acc_handle, &scsi_raid_io->LUN[1], acmd->lun);
1796 			ddi_put16(acc_handle, &scsi_raid_io->DevHandle,
1797 			    acmd->device_id);
1798 			ReqDescUnion->SCSIIO.RequestFlags =
1799 			    (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
1800 			    MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1801 			break;
1802 		}
1803 	} else { /* Physical */
1804 #ifdef PDSUPPORT
1805 		/* Pass-through command to physical drive */
1806 
1807 		/* Acquire SYNC MAP UPDATE lock */
1808 		mutex_enter(&instance->sync_map_mtx);
1809 
1810 		local_map_ptr = instance->ld_map[instance->map_id & 1];
1811 
1812 		ddi_put8(acc_handle, &scsi_raid_io->Function,
1813 		    MPI2_FUNCTION_SCSI_IO_REQUEST);
1814 
1815 		ReqDescUnion->SCSIIO.RequestFlags =
1816 		    (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
1817 		    MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1818 
1819 		ddi_put16(acc_handle, &scsi_raid_io->DevHandle,
1820 		    local_map_ptr->raidMap.
1821 		    devHndlInfo[acmd->device_id].curDevHdl);
1822 
1823 		/* Set regLockFlasgs to REGION_TYPE_BYPASS */
1824 		ddi_put8(acc_handle,
1825 		    &scsi_raid_io->RaidContext.regLockFlags, 0);
1826 		ddi_put64(acc_handle,
1827 		    &scsi_raid_io->RaidContext.regLockRowLBA, 0);
1828 		ddi_put32(acc_handle,
1829 		    &scsi_raid_io->RaidContext.regLockLength, 0);
1830 		ddi_put8(acc_handle,
1831 		    &scsi_raid_io->RaidContext.RAIDFlags,
1832 		    MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD <<
1833 		    MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT);
1834 		ddi_put16(acc_handle,
1835 		    &scsi_raid_io->RaidContext.timeoutValue,
1836 		    local_map_ptr->raidMap.fpPdIoTimeoutSec);
1837 		ddi_put16(acc_handle,
1838 		    &scsi_raid_io->RaidContext.ldTargetId,
1839 		    acmd->device_id);
1840 		ddi_put8(acc_handle,
1841 		    &scsi_raid_io->LUN[1], acmd->lun);
1842 
1843 		if (instance->fast_path_io && instance->gen3) {
1844 			uint16_t IoFlags = ddi_get16(acc_handle,
1845 			    &scsi_raid_io->IoFlags);
1846 			IoFlags |= MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH;
1847 			ddi_put16(acc_handle, &scsi_raid_io->IoFlags, IoFlags);
1848 		}
1849 		ddi_put16(acc_handle, &ReqDescUnion->SCSIIO.DevHandle,
1850 		    local_map_ptr->raidMap.
1851 		    devHndlInfo[acmd->device_id].curDevHdl);
1852 
1853 		/* Release SYNC MAP UPDATE lock */
1854 		mutex_exit(&instance->sync_map_mtx);
1855 #else
1856 		/* If no PD support, return here. */
1857 		return (cmd);
1858 #endif
1859 	}
1860 
1861 	/* Set sense buffer physical address/length in scsi_io_request. */
1862 	ddi_put32(acc_handle, &scsi_raid_io->SenseBufferLowAddress,
1863 	    cmd->sense_phys_addr1);
1864 	ddi_put8(acc_handle, &scsi_raid_io->SenseBufferLength, SENSE_LENGTH);
1865 
1866 	/* Construct SGL */
1867 	ddi_put8(acc_handle, &scsi_raid_io->SGLOffset0,
1868 	    offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4);
1869 
1870 	(void) mr_sas_tbolt_build_sgl(instance, acmd, cmd,
1871 	    scsi_raid_io, &datalen);
1872 
1873 	ddi_put32(acc_handle, &scsi_raid_io->DataLength, datalen);
1874 
1875 	con_log(CL_ANN, (CE_CONT,
1876 	    "tbolt_build_cmd CDB[0] =%x, TargetID =%x\n",
1877 	    pkt->pkt_cdbp[0], acmd->device_id));
1878 	con_log(CL_DLEVEL1, (CE_CONT,
1879 	    "data length = %x\n",
1880 	    scsi_raid_io->DataLength));
1881 	con_log(CL_DLEVEL1, (CE_CONT,
1882 	    "cdb length = %x\n",
1883 	    acmd->cmd_cdblen));
1884 
1885 	return (cmd);
1886 }
1887 
1888 uint32_t
1889 tbolt_read_fw_status_reg(struct mrsas_instance *instance)
1890 {
1891 	return ((uint32_t)RD_OB_SCRATCH_PAD_0(instance));
1892 }
1893 
1894 void
1895 tbolt_issue_cmd(struct mrsas_cmd *cmd, struct mrsas_instance *instance)
1896 {
1897 	MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc = cmd->request_desc;
1898 	atomic_inc_16(&instance->fw_outstanding);
1899 
1900 	struct scsi_pkt *pkt;
1901 
1902 	con_log(CL_ANN1,
1903 	    (CE_NOTE, "tbolt_issue_cmd: cmd->[SMID]=0x%X", cmd->SMID));
1904 
1905 	con_log(CL_DLEVEL1, (CE_CONT,
1906 	    " [req desc Words] %" PRIx64 " \n", req_desc->Words));
1907 	con_log(CL_DLEVEL1, (CE_CONT,
1908 	    " [req desc low part] %x \n",
1909 	    (uint_t)(req_desc->Words & 0xffffffffff)));
1910 	con_log(CL_DLEVEL1, (CE_CONT,
1911 	    " [req desc high part] %x \n", (uint_t)(req_desc->Words >> 32)));
1912 	pkt = cmd->pkt;
1913 
1914 	if (pkt) {
1915 		con_log(CL_ANN1, (CE_CONT, "%llx :TBOLT issue_cmd_ppc:"
1916 		    "ISSUED CMD TO FW : called : cmd:"
1917 		    ": %p instance : %p pkt : %p pkt_time : %x\n",
1918 		    gethrtime(), (void *)cmd, (void *)instance,
1919 		    (void *)pkt, cmd->drv_pkt_time));
1920 		if (instance->adapterresetinprogress) {
1921 			cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
1922 			con_log(CL_ANN, (CE_NOTE,
1923 			    "TBOLT Reset the scsi_pkt timer"));
1924 		} else {
1925 			push_pending_mfi_pkt(instance, cmd);
1926 		}
1927 
1928 	} else {
1929 		con_log(CL_ANN1, (CE_CONT, "%llx :TBOLT issue_cmd_ppc:"
1930 		    "ISSUED CMD TO FW : called : cmd : %p, instance: %p"
1931 		    "(NO PKT)\n", gethrtime(), (void *)cmd, (void *)instance));
1932 	}
1933 
1934 	/* Issue the command to the FW */
1935 	mutex_enter(&instance->reg_write_mtx);
1936 	WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
1937 	WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
1938 	mutex_exit(&instance->reg_write_mtx);
1939 }
1940 
1941 /*
1942  * issue_cmd_in_sync_mode
1943  */
1944 int
1945 tbolt_issue_cmd_in_sync_mode(struct mrsas_instance *instance,
1946     struct mrsas_cmd *cmd)
1947 {
1948 	int		i;
1949 	uint32_t	msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC;
1950 	MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc = cmd->request_desc;
1951 
1952 	struct mrsas_header	*hdr;
1953 	hdr = (struct mrsas_header *)&cmd->frame->hdr;
1954 
1955 	con_log(CL_ANN,
1956 	    (CE_NOTE, "tbolt_issue_cmd_in_sync_mode: cmd->[SMID]=0x%X",
1957 	    cmd->SMID));
1958 
1959 
1960 	if (instance->adapterresetinprogress) {
1961 		cmd->drv_pkt_time = ddi_get16
1962 		    (cmd->frame_dma_obj.acc_handle, &hdr->timeout);
1963 		if (cmd->drv_pkt_time < debug_timeout_g)
1964 			cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
1965 		con_log(CL_ANN, (CE_NOTE, "tbolt_issue_cmd_in_sync_mode:"
1966 		    "RESET-IN-PROGRESS, issue cmd & return."));
1967 
1968 		mutex_enter(&instance->reg_write_mtx);
1969 		WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
1970 		WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
1971 		mutex_exit(&instance->reg_write_mtx);
1972 
1973 		return (DDI_SUCCESS);
1974 	} else {
1975 		con_log(CL_ANN1, (CE_NOTE,
1976 		    "tbolt_issue_cmd_in_sync_mode: pushing the pkt"));
1977 		push_pending_mfi_pkt(instance, cmd);
1978 	}
1979 
1980 	con_log(CL_DLEVEL2, (CE_NOTE,
1981 	    "HighQport offset :%p",
1982 	    (void *)((uintptr_t)(instance)->regmap + IB_HIGH_QPORT)));
1983 	con_log(CL_DLEVEL2, (CE_NOTE,
1984 	    "LowQport offset :%p",
1985 	    (void *)((uintptr_t)(instance)->regmap + IB_LOW_QPORT)));
1986 
1987 	cmd->sync_cmd = MRSAS_TRUE;
1988 	cmd->cmd_status =  ENODATA;
1989 
1990 
1991 	mutex_enter(&instance->reg_write_mtx);
1992 	WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
1993 	WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
1994 	mutex_exit(&instance->reg_write_mtx);
1995 
1996 	con_log(CL_ANN1, (CE_NOTE,
1997 	    " req desc high part %x", (uint_t)(req_desc->Words >> 32)));
1998 	con_log(CL_ANN1, (CE_NOTE, " req desc low part %x",
1999 	    (uint_t)(req_desc->Words & 0xffffffff)));
2000 
2001 	mutex_enter(&instance->int_cmd_mtx);
2002 	for (i = 0; i < msecs && (cmd->cmd_status == ENODATA); i++) {
2003 		cv_wait(&instance->int_cmd_cv, &instance->int_cmd_mtx);
2004 	}
2005 	mutex_exit(&instance->int_cmd_mtx);
2006 
2007 
2008 	if (i < (msecs -1)) {
2009 		return (DDI_SUCCESS);
2010 	} else {
2011 		return (DDI_FAILURE);
2012 	}
2013 }
2014 
2015 /*
2016  * issue_cmd_in_poll_mode
2017  */
2018 int
2019 tbolt_issue_cmd_in_poll_mode(struct mrsas_instance *instance,
2020     struct mrsas_cmd *cmd)
2021 {
2022 	int		i;
2023 	uint16_t	flags;
2024 	uint32_t	msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC;
2025 	struct mrsas_header *frame_hdr;
2026 
2027 	con_log(CL_ANN,
2028 	    (CE_NOTE, "tbolt_issue_cmd_in_poll_mode: cmd->[SMID]=0x%X",
2029 	    cmd->SMID));
2030 
2031 	MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc = cmd->request_desc;
2032 
2033 	frame_hdr = (struct mrsas_header *)&cmd->frame->hdr;
2034 	ddi_put8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status,
2035 	    MFI_CMD_STATUS_POLL_MODE);
2036 	flags = ddi_get16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags);
2037 	flags	|= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
2038 	ddi_put16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags, flags);
2039 
2040 	con_log(CL_ANN1, (CE_NOTE, " req desc low part %x",
2041 	    (uint_t)(req_desc->Words & 0xffffffff)));
2042 	con_log(CL_ANN1, (CE_NOTE,
2043 	    " req desc high part %x", (uint_t)(req_desc->Words >> 32)));
2044 
2045 	/* issue the frame using inbound queue port */
2046 	mutex_enter(&instance->reg_write_mtx);
2047 	WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
2048 	WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
2049 	mutex_exit(&instance->reg_write_mtx);
2050 
2051 	for (i = 0; i < msecs && (
2052 	    ddi_get8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status)
2053 	    == MFI_CMD_STATUS_POLL_MODE); i++) {
2054 		/* wait for cmd_status to change from 0xFF */
2055 		drv_usecwait(MILLISEC); /* wait for 1000 usecs */
2056 	}
2057 
2058 	DTRACE_PROBE1(tbolt_complete_poll_cmd, uint8_t, i);
2059 
2060 	if (ddi_get8(cmd->frame_dma_obj.acc_handle,
2061 	    &frame_hdr->cmd_status) == MFI_CMD_STATUS_POLL_MODE) {
2062 		con_log(CL_ANN1, (CE_NOTE,
2063 		    " cmd failed %" PRIx64, (req_desc->Words)));
2064 		return (DDI_FAILURE);
2065 	}
2066 
2067 	return (DDI_SUCCESS);
2068 }
2069 
2070 void
2071 tbolt_enable_intr(struct mrsas_instance *instance)
2072 {
2073 	/* TODO: For Thunderbolt/Invader also clear intr on enable */
2074 	/* writel(~0, &regs->outbound_intr_status); */
2075 	/* readl(&regs->outbound_intr_status); */
2076 
2077 	WR_OB_INTR_MASK(~(MFI_FUSION_ENABLE_INTERRUPT_MASK), instance);
2078 
2079 	/* dummy read to force PCI flush */
2080 	(void) RD_OB_INTR_MASK(instance);
2081 
2082 }
2083 
/*
 * tbolt_disable_intr - mask all outbound interrupts.
 */
void
tbolt_disable_intr(struct mrsas_instance *instance)
{
	WR_OB_INTR_MASK(0xFFFFFFFF, instance);

	/* Dummy read to force the mask write out to the PCI bus. */
	(void) RD_OB_INTR_MASK(instance);
}
2095 
2096 
2097 int
2098 tbolt_intr_ack(struct mrsas_instance *instance)
2099 {
2100 	uint32_t	status;
2101 
2102 	/* check if it is our interrupt */
2103 	status = RD_OB_INTR_STATUS(instance);
2104 	con_log(CL_ANN1, (CE_NOTE,
2105 	    "chkpnt: Entered tbolt_intr_ack status = %d", status));
2106 
2107 	if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK)) {
2108 		return (DDI_INTR_UNCLAIMED);
2109 	}
2110 
2111 	if (mrsas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) {
2112 		ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
2113 		return (DDI_INTR_UNCLAIMED);
2114 	}
2115 
2116 	if ((status & 1) || (status & MFI_FUSION_ENABLE_INTERRUPT_MASK)) {
2117 		/* clear the interrupt by writing back the same value */
2118 		WR_OB_INTR_STATUS(status, instance);
2119 		/* dummy READ */
2120 		(void) RD_OB_INTR_STATUS(instance);
2121 	}
2122 	return (DDI_INTR_CLAIMED);
2123 }
2124 
2125 /*
2126  * get_raid_msg_pkt : Get a command from the free pool
2127  * After successful allocation, the caller of this routine
2128  * must clear the frame buffer (memset to zero) before
2129  * using the packet further.
2130  *
2131  * ***** Note *****
2132  * After clearing the frame buffer the context id of the
2133  * frame buffer SHOULD be restored back.
2134  */
2135 
2136 struct mrsas_cmd *
2137 get_raid_msg_pkt(struct mrsas_instance *instance)
2138 {
2139 	mlist_t			*head = &instance->cmd_pool_list;
2140 	struct mrsas_cmd	*cmd = NULL;
2141 
2142 	mutex_enter(&instance->cmd_pool_mtx);
2143 	ASSERT(mutex_owned(&instance->cmd_pool_mtx));
2144 
2145 
2146 	if (!mlist_empty(head)) {
2147 		cmd = mlist_entry(head->next, struct mrsas_cmd, list);
2148 		mlist_del_init(head->next);
2149 	}
2150 	if (cmd != NULL) {
2151 		cmd->pkt = NULL;
2152 		cmd->retry_count_for_ocr = 0;
2153 		cmd->drv_pkt_time = 0;
2154 	}
2155 	mutex_exit(&instance->cmd_pool_mtx);
2156 
2157 	if (cmd != NULL)
2158 		bzero(cmd->scsi_io_request,
2159 		    sizeof (Mpi2RaidSCSIIORequest_t));
2160 	return (cmd);
2161 }
2162 
2163 struct mrsas_cmd *
2164 get_raid_msg_mfi_pkt(struct mrsas_instance *instance)
2165 {
2166 	mlist_t			*head = &instance->cmd_app_pool_list;
2167 	struct mrsas_cmd	*cmd = NULL;
2168 
2169 	mutex_enter(&instance->cmd_app_pool_mtx);
2170 	ASSERT(mutex_owned(&instance->cmd_app_pool_mtx));
2171 
2172 	if (!mlist_empty(head)) {
2173 		cmd = mlist_entry(head->next, struct mrsas_cmd, list);
2174 		mlist_del_init(head->next);
2175 	}
2176 	if (cmd != NULL) {
2177 		cmd->retry_count_for_ocr = 0;
2178 		cmd->drv_pkt_time = 0;
2179 		cmd->pkt = NULL;
2180 		cmd->request_desc = NULL;
2181 
2182 	}
2183 
2184 	mutex_exit(&instance->cmd_app_pool_mtx);
2185 
2186 	if (cmd != NULL) {
2187 		bzero(cmd->scsi_io_request,
2188 		    sizeof (Mpi2RaidSCSIIORequest_t));
2189 	}
2190 
2191 	return (cmd);
2192 }
2193 
2194 /*
2195  * return_raid_msg_pkt : Return a cmd to free command pool
2196  */
2197 void
2198 return_raid_msg_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
2199 {
2200 	mutex_enter(&instance->cmd_pool_mtx);
2201 	ASSERT(mutex_owned(&instance->cmd_pool_mtx));
2202 
2203 
2204 	mlist_add_tail(&cmd->list, &instance->cmd_pool_list);
2205 
2206 	mutex_exit(&instance->cmd_pool_mtx);
2207 }
2208 
2209 void
2210 return_raid_msg_mfi_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
2211 {
2212 	mutex_enter(&instance->cmd_app_pool_mtx);
2213 	ASSERT(mutex_owned(&instance->cmd_app_pool_mtx));
2214 
2215 	mlist_add_tail(&cmd->list, &instance->cmd_app_pool_list);
2216 
2217 	mutex_exit(&instance->cmd_app_pool_mtx);
2218 }
2219 
2220 
/*
 * mr_sas_tbolt_build_mfi_cmd - wrap a legacy MFI frame in an MPI2
 * pass-through request so it can be issued to a Thunderbolt/Invader
 * controller.
 *
 * Builds the request descriptor for cmd->SMID and fills the command's
 * MPI2 SCSI IO request with a single IEEE chain SGE pointing at the
 * MFI frame's physical address.  Returns silently (logging only) if
 * the adapter is not Thunderbolt-class or no descriptor is available.
 */
void
mr_sas_tbolt_build_mfi_cmd(struct mrsas_instance *instance,
    struct mrsas_cmd *cmd)
{
	Mpi2RaidSCSIIORequest_t		*scsi_raid_io;
	Mpi25IeeeSgeChain64_t		*scsi_raid_io_sgl_ieee;
	MRSAS_REQUEST_DESCRIPTOR_UNION	*ReqDescUnion;
	uint32_t			index;
	ddi_acc_handle_t acc_handle =
	    instance->mpi2_frame_pool_dma_obj.acc_handle;

	/* Pass-through wrapping only applies to Thunderbolt-class HW. */
	if (!instance->tbolt) {
		con_log(CL_ANN, (CE_NOTE, "Not MFA enabled."));
		return;
	}

	index = cmd->index;

	ReqDescUnion = mr_sas_get_request_descriptor(instance, index);

	if (!ReqDescUnion) {
		con_log(CL_ANN1, (CE_NOTE, "[NULL REQDESC]"));
		return;
	}

	con_log(CL_ANN1, (CE_NOTE, "[SMID]%x", cmd->SMID));

	ReqDescUnion->Words = 0;

	ReqDescUnion->SCSIIO.RequestFlags =
	    (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
	    MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);

	ReqDescUnion->SCSIIO.SMID = cmd->SMID;

	cmd->request_desc = ReqDescUnion;

	/* get raid message frame pointer */
	scsi_raid_io = (Mpi2RaidSCSIIORequest_t *)cmd->scsi_io_request;

	/* On gen3 adapters, clear the flags of the last main-frame SGE. */
	if (instance->gen3) {
		Mpi25IeeeSgeChain64_t *sgl_ptr_end = (Mpi25IeeeSgeChain64_t *)
		    &scsi_raid_io->SGL.IeeeChain;
		sgl_ptr_end += instance->max_sge_in_main_msg - 1;
		ddi_put8(acc_handle, &sgl_ptr_end->Flags, 0);
	}

	ddi_put8(acc_handle, &scsi_raid_io->Function,
	    MPI2_FUNCTION_PASSTHRU_IO_REQUEST);

	/* SGL offset is expressed in 4-byte words... */
	ddi_put8(acc_handle, &scsi_raid_io->SGLOffset0,
	    offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4);

	/* ...while the chain offset is in 16-byte units. */
	ddi_put8(acc_handle, &scsi_raid_io->ChainOffset,
	    (U8)offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 16);

	ddi_put32(acc_handle, &scsi_raid_io->SenseBufferLowAddress,
	    cmd->sense_phys_addr1);


	/* Single IEEE chain SGE pointing at the wrapped MFI frame. */
	scsi_raid_io_sgl_ieee =
	    (Mpi25IeeeSgeChain64_t *)&scsi_raid_io->SGL.IeeeChain;

	ddi_put64(acc_handle, &scsi_raid_io_sgl_ieee->Address,
	    (U64)cmd->frame_phys_addr);

	ddi_put8(acc_handle,
	    &scsi_raid_io_sgl_ieee->Flags, (IEEE_SGE_FLAGS_CHAIN_ELEMENT |
	    MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR));
	/* LSI put hardcoded 1024 instead of MEGASAS_MAX_SZ_CHAIN_FRAME. */
	ddi_put32(acc_handle, &scsi_raid_io_sgl_ieee->Length, 1024);

	/*
	 * NOTE(review): these debug logs read the DMA-mapped SGE fields
	 * directly rather than through ddi_get*() — harmless for logging
	 * but inconsistent with the access pattern above.
	 */
	con_log(CL_ANN1, (CE_NOTE,
	    "[MFI CMD PHY ADDRESS]:%" PRIx64,
	    scsi_raid_io_sgl_ieee->Address));
	con_log(CL_ANN1, (CE_NOTE,
	    "[SGL Length]:%x", scsi_raid_io_sgl_ieee->Length));
	con_log(CL_ANN1, (CE_NOTE, "[SGL Flags]:%x",
	    scsi_raid_io_sgl_ieee->Flags));
}
2301 
2302 
/*
 * tbolt_complete_cmd - process one completed command and route it by
 * request type.
 *
 * Fast-path (SCSI_IO) and regular (LD_IO) completions finish the
 * scsi_pkt: sync DMA for consistent mappings, map the MFI status onto
 * pkt_reason/pkt_scbp (building ARQ sense data where required), invoke
 * the completion callback and return the command to the free pool.
 * PASSTHRU completions handle the internal MFI commands: LD map sync,
 * AEN events, and sync-mode command responses.
 */
void
tbolt_complete_cmd(struct mrsas_instance *instance,
    struct mrsas_cmd *cmd)
{
	uint8_t				status;
	uint8_t				extStatus;
	uint8_t				function;
	uint8_t				arm;
	struct scsa_cmd			*acmd;
	struct scsi_pkt			*pkt;
	struct scsi_arq_status		*arqstat;
	Mpi2RaidSCSIIORequest_t		*scsi_raid_io;
	LD_LOAD_BALANCE_INFO		*lbinfo;
	ddi_acc_handle_t acc_handle =
	    instance->mpi2_frame_pool_dma_obj.acc_handle;

	scsi_raid_io = (Mpi2RaidSCSIIORequest_t *)cmd->scsi_io_request;

	/* Firmware wrote its completion status into the RAID context. */
	status = ddi_get8(acc_handle, &scsi_raid_io->RaidContext.status);
	extStatus = ddi_get8(acc_handle, &scsi_raid_io->RaidContext.extStatus);

	con_log(CL_DLEVEL3, (CE_NOTE, "status %x", status));
	con_log(CL_DLEVEL3, (CE_NOTE, "extStatus %x", extStatus));

	if (status != MFI_STAT_OK) {
		con_log(CL_ANN, (CE_WARN,
		    "IO Cmd Failed SMID %x", cmd->SMID));
	} else {
		con_log(CL_ANN, (CE_NOTE,
		    "IO Cmd Success  SMID %x", cmd->SMID));
	}

	/* regular commands */

	function = ddi_get8(acc_handle, &scsi_raid_io->Function);
	DTRACE_PROBE3(tbolt_complete_cmd, uint8_t, function,
	    uint8_t, status, uint8_t, extStatus);

	switch (function) {

	case MPI2_FUNCTION_SCSI_IO_REQUEST :  /* Fast Path IO. */
		acmd =	(struct scsa_cmd *)cmd->cmd;
		lbinfo = &instance->load_balance_info[acmd->device_id];

		/*
		 * If this I/O was load-balanced across a RAID1 pair,
		 * decrement the pending count on the arm it was sent to.
		 * NOTE(review): DevHandle is read directly from the
		 * DMA-mapped frame here, not via ddi_get16 — confirm.
		 */
		if (cmd->load_balance_flag & MEGASAS_LOAD_BALANCE_FLAG) {
			arm = lbinfo->raid1DevHandle[0] ==
			    scsi_raid_io->DevHandle ? 0 : 1;

			lbinfo->scsi_pending_cmds[arm]--;
			cmd->load_balance_flag &= ~MEGASAS_LOAD_BALANCE_FLAG;
		}
		con_log(CL_DLEVEL3, (CE_NOTE,
		    "FastPath IO Completion Success "));
		/* FALLTHRU */

	case MPI2_FUNCTION_LD_IO_REQUEST :   { /* Regular Path IO. */
		acmd =	(struct scsa_cmd *)cmd->cmd;
		pkt =	(struct scsi_pkt *)CMD2PKT(acmd);

		/* Make DMA'd data visible to the CPU before completion. */
		if (acmd->cmd_flags & CFLAG_DMAVALID) {
			if (acmd->cmd_flags & CFLAG_CONSISTENT) {
				(void) ddi_dma_sync(acmd->cmd_dmahandle,
				    acmd->cmd_dma_offset, acmd->cmd_dma_len,
				    DDI_DMA_SYNC_FORCPU);
			}
		}

		/* Assume a full, clean completion; the switch below
		 * downgrades pkt_reason/pkt_scbp as needed. */
		pkt->pkt_reason		= CMD_CMPLT;
		pkt->pkt_statistics	= 0;
		pkt->pkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
		    STATE_SENT_CMD | STATE_XFERRED_DATA | STATE_GOT_STATUS;

		con_log(CL_ANN, (CE_CONT, " CDB[0] = %x completed for %s: "
		    "size %lx SMID %x cmd_status %x", pkt->pkt_cdbp[0],
		    ((acmd->islogical) ? "LD" : "PD"),
		    acmd->cmd_dmacount, cmd->SMID, status));

		/*
		 * INQUIRY filtering: hide devices the OS should not see
		 * by faking MFI_STAT_DEVICE_NOT_FOUND.
		 */
		if (pkt->pkt_cdbp[0] == SCMD_INQUIRY) {
			struct scsi_inquiry	*inq;

			if (acmd->cmd_dmacount != 0) {
				bp_mapin(acmd->cmd_buf);
				inq = (struct scsi_inquiry *)
				    acmd->cmd_buf->b_un.b_addr;

				/* don't expose physical drives to OS */
				if (acmd->islogical &&
				    (status == MFI_STAT_OK)) {
					display_scsi_inquiry((caddr_t)inq);
#ifdef PDSUPPORT
				} else if ((status == MFI_STAT_OK) &&
				    inq->inq_dtype == DTYPE_DIRECT) {
					display_scsi_inquiry((caddr_t)inq);
#endif
				} else {
					/* for physical disk */
					status = MFI_STAT_DEVICE_NOT_FOUND;
				}
			}
		}

		/* Map the MFI completion status onto the scsi_pkt. */
		switch (status) {
		case MFI_STAT_OK:
			pkt->pkt_scbp[0] = STATUS_GOOD;
			break;
		case MFI_STAT_LD_CC_IN_PROGRESS:
		case MFI_STAT_LD_RECON_IN_PROGRESS:
			pkt->pkt_scbp[0] = STATUS_GOOD;
			break;
		case MFI_STAT_LD_INIT_IN_PROGRESS:
			pkt->pkt_reason	= CMD_TRAN_ERR;
			break;
		case MFI_STAT_SCSI_IO_FAILED:
			dev_err(instance->dip, CE_WARN,
			    "tbolt_complete_cmd: scsi_io failed");
			pkt->pkt_reason	= CMD_TRAN_ERR;
			break;
		case MFI_STAT_SCSI_DONE_WITH_ERROR:
			con_log(CL_ANN, (CE_WARN,
			    "tbolt_complete_cmd: scsi_done with error"));

			pkt->pkt_reason	= CMD_CMPLT;
			((struct scsi_status *)pkt->pkt_scbp)->sts_chk = 1;

			if (pkt->pkt_cdbp[0] == SCMD_TEST_UNIT_READY) {
				con_log(CL_ANN,
				    (CE_WARN, "TEST_UNIT_READY fail"));
			} else {
				/*
				 * Build an auto-request-sense status and
				 * copy the sense bytes from the command's
				 * sense buffer into the ARQ structure.
				 */
				pkt->pkt_state |= STATE_ARQ_DONE;
				arqstat = (void *)(pkt->pkt_scbp);
				arqstat->sts_rqpkt_reason = CMD_CMPLT;
				arqstat->sts_rqpkt_resid = 0;
				arqstat->sts_rqpkt_state |=
				    STATE_GOT_BUS | STATE_GOT_TARGET
				    | STATE_SENT_CMD
				    | STATE_XFERRED_DATA;
				*(uint8_t *)&arqstat->sts_rqpkt_status =
				    STATUS_GOOD;
				con_log(CL_ANN1,
				    (CE_NOTE, "Copying Sense data %x",
				    cmd->SMID));

				ddi_rep_get8(acc_handle,
				    (uint8_t *)&(arqstat->sts_sensedata),
				    cmd->sense1,
				    sizeof (struct scsi_extended_sense),
				    DDI_DEV_AUTOINCR);

			}
			break;
		case MFI_STAT_LD_OFFLINE:
			dev_err(instance->dip, CE_WARN,
			    "tbolt_complete_cmd: ld offline "
			    "CDB[0]=0x%x targetId=0x%x devhandle=0x%x",
			    /* UNDO: */
			    ddi_get8(acc_handle, &scsi_raid_io->CDB.CDB32[0]),

			    ddi_get16(acc_handle,
			    &scsi_raid_io->RaidContext.ldTargetId),

			    ddi_get16(acc_handle, &scsi_raid_io->DevHandle));

			pkt->pkt_reason	= CMD_DEV_GONE;
			pkt->pkt_statistics  = STAT_DISCON;
			break;
		case MFI_STAT_DEVICE_NOT_FOUND:
			con_log(CL_ANN, (CE_CONT,
			    "tbolt_complete_cmd: device not found error"));
			pkt->pkt_reason	= CMD_DEV_GONE;
			pkt->pkt_statistics  = STAT_DISCON;
			break;

		case MFI_STAT_LD_LBA_OUT_OF_RANGE:
			/* Synthesize ILLEGAL REQUEST sense data. */
			pkt->pkt_state |= STATE_ARQ_DONE;
			pkt->pkt_reason	= CMD_CMPLT;
			((struct scsi_status *)pkt->pkt_scbp)->sts_chk = 1;

			arqstat = (void *)(pkt->pkt_scbp);
			arqstat->sts_rqpkt_reason = CMD_CMPLT;
			arqstat->sts_rqpkt_resid = 0;
			arqstat->sts_rqpkt_state |= STATE_GOT_BUS
			    | STATE_GOT_TARGET | STATE_SENT_CMD
			    | STATE_XFERRED_DATA;
			*(uint8_t *)&arqstat->sts_rqpkt_status = STATUS_GOOD;

			arqstat->sts_sensedata.es_valid = 1;
			arqstat->sts_sensedata.es_key = KEY_ILLEGAL_REQUEST;
			arqstat->sts_sensedata.es_class = CLASS_EXTENDED_SENSE;

			/*
			 * LOGICAL BLOCK ADDRESS OUT OF RANGE:
			 * ASC: 0x21h; ASCQ: 0x00h;
			 */
			arqstat->sts_sensedata.es_add_code = 0x21;
			arqstat->sts_sensedata.es_qual_code = 0x00;
			break;
		case MFI_STAT_INVALID_CMD:
		case MFI_STAT_INVALID_DCMD:
		case MFI_STAT_INVALID_PARAMETER:
		case MFI_STAT_INVALID_SEQUENCE_NUMBER:
		default:
			dev_err(instance->dip, CE_WARN,
			    "tbolt_complete_cmd: Unknown status!");
			pkt->pkt_reason	= CMD_TRAN_ERR;

			break;
		}

		atomic_add_16(&instance->fw_outstanding, (-1));

		(void) mrsas_common_check(instance, cmd);
		/* FMA: a bad DMA handle downgrades the completion. */
		if (acmd->cmd_dmahandle) {
			if (mrsas_check_dma_handle(acmd->cmd_dmahandle) !=
			    DDI_SUCCESS) {
				ddi_fm_service_impact(instance->dip,
				    DDI_SERVICE_UNAFFECTED);
				pkt->pkt_reason = CMD_TRAN_ERR;
				pkt->pkt_statistics = 0;
			}
		}

		/* Call the callback routine */
		if (((pkt->pkt_flags & FLAG_NOINTR) == 0) && pkt->pkt_comp)
			(*pkt->pkt_comp)(pkt);

		con_log(CL_ANN1, (CE_NOTE, "Free smid %x", cmd->SMID));

		/* Clear the RAID context status for the next use. */
		ddi_put8(acc_handle, &scsi_raid_io->RaidContext.status, 0);

		ddi_put8(acc_handle, &scsi_raid_io->RaidContext.extStatus, 0);

		return_raid_msg_pkt(instance, cmd);
		break;
	}
	case MPI2_FUNCTION_PASSTHRU_IO_REQUEST:	 /* MFA command. */

		/*
		 * LD map sync completion (mbox.b[1] == 1 marks the
		 * sync-map variant of MR_DCMD_LD_MAP_GET_INFO).
		 */
		if (cmd->frame->dcmd.opcode == MR_DCMD_LD_MAP_GET_INFO &&
		    cmd->frame->dcmd.mbox.b[1] == 1) {

			mutex_enter(&instance->sync_map_mtx);

			con_log(CL_ANN, (CE_NOTE,
			    "LDMAP sync command	SMID RECEIVED 0x%X",
			    cmd->SMID));
			if (cmd->frame->hdr.cmd_status != 0) {
				dev_err(instance->dip, CE_WARN,
				    "map sync failed, status = 0x%x.",
				    cmd->frame->hdr.cmd_status);
			} else {
				/* Flip to the newly delivered map. */
				instance->map_id++;
				con_log(CL_ANN1, (CE_NOTE,
				    "map sync received, switched map_id to %"
				    PRIu64, instance->map_id));
			}

			/* Fast-path I/O is only safe with a valid map. */
			if (MR_ValidateMapInfo(
			    instance->ld_map[instance->map_id & 1],
			    instance->load_balance_info)) {
				instance->fast_path_io = 1;
			} else {
				instance->fast_path_io = 0;
			}

			con_log(CL_ANN, (CE_NOTE,
			    "instance->fast_path_io %d",
			    instance->fast_path_io));

			instance->unroll.syncCmd = 0;

			/* Re-arm the map sync command for the next event. */
			if (instance->map_update_cmd == cmd) {
				return_raid_msg_pkt(instance, cmd);
				atomic_add_16(&instance->fw_outstanding, (-1));
				(void) mrsas_tbolt_sync_map_info(instance);
			}

			con_log(CL_ANN1, (CE_NOTE,
			    "LDMAP sync completed, ldcount=%d",
			    instance->ld_map[instance->map_id & 1]
			    ->raidMap.ldCount));
			mutex_exit(&instance->sync_map_mtx);
			break;
		}

		/* Asynchronous event notification completion. */
		if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_WAIT) {
			con_log(CL_ANN1, (CE_CONT,
			    "AEN command SMID RECEIVED 0x%X",
			    cmd->SMID));
			if ((instance->aen_cmd == cmd) &&
			    (instance->aen_cmd->abort_aen)) {
				con_log(CL_ANN, (CE_WARN, "mrsas_softintr: "
				    "aborted_aen returned"));
			} else {
				atomic_add_16(&instance->fw_outstanding, (-1));
				service_mfi_aen(instance, cmd);
			}
		}

		/* Wake up a thread blocked in issue_cmd_in_sync_mode. */
		if (cmd->sync_cmd == MRSAS_TRUE) {
			con_log(CL_ANN1, (CE_CONT,
			    "Sync-mode Command Response SMID RECEIVED 0x%X",
			    cmd->SMID));

			tbolt_complete_cmd_in_sync_mode(instance, cmd);
		} else {
			con_log(CL_ANN, (CE_CONT,
			    "tbolt_complete_cmd: Wrong SMID RECEIVED 0x%X",
			    cmd->SMID));
		}
		break;
	default:
		mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
		ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);

		/* free message */
		con_log(CL_ANN,
		    (CE_NOTE, "tbolt_complete_cmd: Unknown Type!!!!!!!!"));
		break;
	}
}
2622 
/*
 * mr_sas_tbolt_process_outstanding_cmd - drain the reply descriptor
 * ring and complete every command the firmware has finished.
 *
 * Walks reply descriptors starting at reply_read_index until an unused
 * (all-ones / UNUSED-type) descriptor is found, completing each command
 * via tbolt_complete_cmd(), consuming descriptors (reset to all ones)
 * and wrapping the index at reply_q_depth.  Finally posts the updated
 * read index back to the firmware.  Returns DDI_INTR_CLAIMED if any
 * work was visible, DDI_INTR_UNCLAIMED otherwise.
 */
uint_t
mr_sas_tbolt_process_outstanding_cmd(struct mrsas_instance *instance)
{
	uint8_t				replyType;
	Mpi2SCSIIOSuccessReplyDescriptor_t *replyDesc;
	Mpi2ReplyDescriptorsUnion_t	*desc;
	uint16_t			smid;
	union desc_value		d_val;
	struct mrsas_cmd		*cmd;

	struct mrsas_header	*hdr;
	struct scsi_pkt		*pkt;

	/*
	 * NOTE(review): a FORDEV sync immediately followed by FORCPU on
	 * the same object looks redundant — only the FORCPU sync should
	 * be needed before reading the ring; confirm intent.
	 */
	(void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
	    0, 0, DDI_DMA_SYNC_FORDEV);

	(void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
	    0, 0, DDI_DMA_SYNC_FORCPU);

	/* Position at the next descriptor the driver has not consumed. */
	desc = instance->reply_frame_pool;
	desc += instance->reply_read_index;

	replyDesc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
	replyType = replyDesc->ReplyFlags &
	    MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;

	/* An UNUSED descriptor means the firmware posted nothing new. */
	if (replyType == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
		return (DDI_INTR_UNCLAIMED);

	if (mrsas_check_dma_handle(instance->mfi_internal_dma_obj.dma_handle)
	    != DDI_SUCCESS) {
		mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
		ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
		con_log(CL_ANN1,
		    (CE_WARN, "mr_sas_tbolt_process_outstanding_cmd(): "
		    "FMA check, returning DDI_INTR_UNCLAIMED"));
		return (DDI_INTR_CLAIMED);
	}

	con_log(CL_ANN1, (CE_NOTE, "Reply Desc	= %p  Words = %" PRIx64,
	    (void *)desc, desc->Words));

	d_val.word = desc->Words;


	/* Read Reply descriptor */
	while ((d_val.u1.low != 0xffffffff) &&
	    (d_val.u1.high != 0xffffffff)) {

		(void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
		    0, 0, DDI_DMA_SYNC_FORCPU);

		smid = replyDesc->SMID;

		/* SMIDs are 1-based; reject zero or out-of-range values. */
		if (!smid || smid > instance->max_fw_cmds + 1) {
			con_log(CL_ANN1, (CE_NOTE,
			    "Reply Desc at Break  = %p	Words = %" PRIx64,
			    (void *)desc, desc->Words));
			break;
		}

		cmd	= instance->cmd_list[smid - 1];
		if (!cmd) {
			con_log(CL_ANN1, (CE_NOTE, "mr_sas_tbolt_process_"
			    "outstanding_cmd: Invalid command "
			    " or Poll commad Received in completion path"));
		} else {
			/*
			 * Remove the command from the pending list (for
			 * both sync-mode and packet-based commands)
			 * before completing it.
			 */
			mutex_enter(&instance->cmd_pend_mtx);
			if (cmd->sync_cmd == MRSAS_TRUE) {
				hdr = (struct mrsas_header *)&cmd->frame->hdr;
				if (hdr) {
					con_log(CL_ANN1, (CE_NOTE, "mr_sas_"
					    "tbolt_process_outstanding_cmd:"
					    " mlist_del_init(&cmd->list)."));
					mlist_del_init(&cmd->list);
				}
			} else {
				pkt = cmd->pkt;
				if (pkt) {
					con_log(CL_ANN1, (CE_NOTE, "mr_sas_"
					    "tbolt_process_outstanding_cmd:"
					    "mlist_del_init(&cmd->list)."));
					mlist_del_init(&cmd->list);
				}
			}

			mutex_exit(&instance->cmd_pend_mtx);

			tbolt_complete_cmd(instance, cmd);
		}
		/* set it back to all 1s. */
		desc->Words = -1LL;

		instance->reply_read_index++;

		/* Wrap the ring index at the reply queue depth. */
		if (instance->reply_read_index >= (instance->reply_q_depth)) {
			con_log(CL_ANN1, (CE_NOTE, "wrap around"));
			instance->reply_read_index = 0;
		}

		/* Get the next reply descriptor */
		if (!instance->reply_read_index)
			desc = instance->reply_frame_pool;
		else
			desc++;

		replyDesc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;

		d_val.word = desc->Words;

		con_log(CL_ANN1, (CE_NOTE,
		    "Next Reply Desc  = %p Words = %" PRIx64,
		    (void *)desc, desc->Words));

		replyType = replyDesc->ReplyFlags &
		    MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;

		if (replyType == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
			break;

	} /* End of while loop. */

	/* update replyIndex to FW */
	WR_MPI2_REPLY_POST_INDEX(instance->reply_read_index, instance);


	(void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
	    0, 0, DDI_DMA_SYNC_FORDEV);

	(void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
	    0, 0, DDI_DMA_SYNC_FORCPU);
	return (DDI_INTR_CLAIMED);
}
2756 
2757 
2758 
2759 
2760 /*
2761  * complete_cmd_in_sync_mode -	Completes an internal command
2762  * @instance:			Adapter soft state
2763  * @cmd:			Command to be completed
2764  *
2765  * The issue_cmd_in_sync_mode() function waits for a command to complete
2766  * after it issues a command. This function wakes up that waiting routine by
2767  * calling wake_up() on the wait queue.
2768  */
2769 void
2770 tbolt_complete_cmd_in_sync_mode(struct mrsas_instance *instance,
2771     struct mrsas_cmd *cmd)
2772 {
2773 
2774 	cmd->cmd_status = ddi_get8(cmd->frame_dma_obj.acc_handle,
2775 	    &cmd->frame->io.cmd_status);
2776 
2777 	cmd->sync_cmd = MRSAS_FALSE;
2778 
2779 	mutex_enter(&instance->int_cmd_mtx);
2780 	if (cmd->cmd_status == ENODATA) {
2781 		cmd->cmd_status = 0;
2782 	}
2783 	cv_broadcast(&instance->int_cmd_cv);
2784 	mutex_exit(&instance->int_cmd_mtx);
2785 
2786 }
2787 
2788 /*
2789  * mrsas_tbolt_get_ld_map_info -	Returns	 ld_map structure
2790  * instance:				Adapter soft state
2791  *
2792  * Issues an internal command (DCMD) to get the FW's controller PD
2793  * list structure.  This information is mainly used to find out SYSTEM
2794  * supported by the FW.
2795  */
2796 int
2797 mrsas_tbolt_get_ld_map_info(struct mrsas_instance *instance)
2798 {
2799 	int ret = 0;
2800 	struct mrsas_cmd	*cmd = NULL;
2801 	struct mrsas_dcmd_frame	*dcmd;
2802 	MR_FW_RAID_MAP_ALL *ci;
2803 	uint32_t ci_h = 0;
2804 	U32 size_map_info;
2805 
2806 	cmd = get_raid_msg_pkt(instance);
2807 
2808 	if (cmd == NULL) {
2809 		dev_err(instance->dip, CE_WARN,
2810 		    "Failed to get a cmd from free-pool in get_ld_map_info()");
2811 		return (DDI_FAILURE);
2812 	}
2813 
2814 	dcmd = &cmd->frame->dcmd;
2815 
2816 	size_map_info =	sizeof (MR_FW_RAID_MAP) +
2817 	    (sizeof (MR_LD_SPAN_MAP) *
2818 	    (MAX_LOGICAL_DRIVES - 1));
2819 
2820 	con_log(CL_ANN, (CE_NOTE,
2821 	    "size_map_info : 0x%x", size_map_info));
2822 
2823 	ci = instance->ld_map[instance->map_id & 1];
2824 	ci_h = instance->ld_map_phy[instance->map_id & 1];
2825 
2826 	if (!ci) {
2827 		dev_err(instance->dip, CE_WARN,
2828 		    "Failed to alloc mem for ld_map_info");
2829 		return_raid_msg_pkt(instance, cmd);
2830 		return (-1);
2831 	}
2832 
2833 	bzero(ci, sizeof (*ci));
2834 	bzero(dcmd->mbox.b, DCMD_MBOX_SZ);
2835 
2836 	dcmd->cmd = MFI_CMD_OP_DCMD;
2837 	dcmd->cmd_status = 0xFF;
2838 	dcmd->sge_count = 1;
2839 	dcmd->flags = MFI_FRAME_DIR_READ;
2840 	dcmd->timeout = 0;
2841 	dcmd->pad_0 = 0;
2842 	dcmd->data_xfer_len = size_map_info;
2843 	dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
2844 	dcmd->sgl.sge32[0].phys_addr = ci_h;
2845 	dcmd->sgl.sge32[0].length = size_map_info;
2846 
2847 
2848 	mr_sas_tbolt_build_mfi_cmd(instance, cmd);
2849 
2850 	if (!instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
2851 		ret = 0;
2852 		con_log(CL_ANN1, (CE_NOTE, "Get LD Map Info success"));
2853 	} else {
2854 		dev_err(instance->dip, CE_WARN, "Get LD Map Info failed");
2855 		ret = -1;
2856 	}
2857 
2858 	return_raid_msg_pkt(instance, cmd);
2859 
2860 	return (ret);
2861 }
2862 
2863 void
2864 mrsas_dump_reply_desc(struct mrsas_instance *instance)
2865 {
2866 	uint32_t i;
2867 	MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
2868 	union desc_value d_val;
2869 
2870 	reply_desc = instance->reply_frame_pool;
2871 
2872 	for (i = 0; i < instance->reply_q_depth; i++, reply_desc++) {
2873 		d_val.word = reply_desc->Words;
2874 		con_log(CL_DLEVEL3, (CE_NOTE,
2875 		    "i=%d, %x:%x",
2876 		    i, d_val.u1.high, d_val.u1.low));
2877 	}
2878 }
2879 
2880 /*
2881  * mrsas_tbolt_command_create -	Create command for fast path.
2882  * @io_info:	MegaRAID IO request packet pointer.
2883  * @ref_tag:	Reference tag for RD/WRPROTECT
2884  *
2885  * Create the command for fast path.
2886  */
2887 void
2888 mrsas_tbolt_prepare_cdb(struct mrsas_instance *instance, U8 cdb[],
2889     struct IO_REQUEST_INFO *io_info, Mpi2RaidSCSIIORequest_t *scsi_io_request,
2890     U32 ref_tag)
2891 {
2892 	uint16_t		EEDPFlags;
2893 	uint32_t		Control;
2894 	ddi_acc_handle_t acc_handle =
2895 	    instance->mpi2_frame_pool_dma_obj.acc_handle;
2896 
2897 	/* Prepare 32-byte CDB if DIF is supported on this device */
2898 	con_log(CL_ANN, (CE_NOTE, "Prepare DIF CDB"));
2899 
2900 	bzero(cdb, 32);
2901 
2902 	cdb[0] =  MRSAS_SCSI_VARIABLE_LENGTH_CMD;
2903 
2904 
2905 	cdb[7] =  MRSAS_SCSI_ADDL_CDB_LEN;
2906 
2907 	if (io_info->isRead)
2908 		cdb[9] = MRSAS_SCSI_SERVICE_ACTION_READ32;
2909 	else
2910 		cdb[9] = MRSAS_SCSI_SERVICE_ACTION_WRITE32;
2911 
2912 	/* Verify within linux driver, set to MEGASAS_RD_WR_PROTECT_CHECK_ALL */
2913 	cdb[10] = MRSAS_RD_WR_PROTECT;
2914 
2915 	/* LOGICAL BLOCK ADDRESS */
2916 	cdb[12] = (U8)(((io_info->pdBlock) >> 56) & 0xff);
2917 	cdb[13] = (U8)(((io_info->pdBlock) >> 48) & 0xff);
2918 	cdb[14] = (U8)(((io_info->pdBlock) >> 40) & 0xff);
2919 	cdb[15] = (U8)(((io_info->pdBlock) >> 32) & 0xff);
2920 	cdb[16] = (U8)(((io_info->pdBlock) >> 24) & 0xff);
2921 	cdb[17] = (U8)(((io_info->pdBlock) >> 16) & 0xff);
2922 	cdb[18] = (U8)(((io_info->pdBlock) >> 8) & 0xff);
2923 	cdb[19] = (U8)((io_info->pdBlock) & 0xff);
2924 
2925 	/* Logical block reference tag */
2926 	ddi_put32(acc_handle, &scsi_io_request->CDB.EEDP32.PrimaryReferenceTag,
2927 	    BE_32(ref_tag));
2928 
2929 	ddi_put16(acc_handle,
2930 	    &scsi_io_request->CDB.EEDP32.PrimaryApplicationTagMask, 0xffff);
2931 
2932 	ddi_put32(acc_handle, &scsi_io_request->DataLength,
2933 	    ((io_info->numBlocks)*512));
2934 	/* Specify 32-byte cdb */
2935 	ddi_put16(acc_handle, &scsi_io_request->IoFlags, 32);
2936 
2937 	/* Transfer length */
2938 	cdb[28] = (U8)(((io_info->numBlocks) >> 24) & 0xff);
2939 	cdb[29] = (U8)(((io_info->numBlocks) >> 16) & 0xff);
2940 	cdb[30] = (U8)(((io_info->numBlocks) >> 8) & 0xff);
2941 	cdb[31] = (U8)((io_info->numBlocks) & 0xff);
2942 
2943 	/* set SCSI IO EEDPFlags */
2944 	EEDPFlags = ddi_get16(acc_handle, &scsi_io_request->EEDPFlags);
2945 	Control = ddi_get32(acc_handle, &scsi_io_request->Control);
2946 
2947 	/* set SCSI IO EEDPFlags bits */
2948 	if (io_info->isRead) {
2949 		/*
2950 		 * For READ commands, the EEDPFlags shall be set to specify to
2951 		 * Increment the Primary Reference Tag, to Check the Reference
2952 		 * Tag, and to Check and Remove the Protection Information
2953 		 * fields.
2954 		 */
2955 		EEDPFlags = MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG	|
2956 		    MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG	|
2957 		    MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP	|
2958 		    MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG	|
2959 		    MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
2960 	} else {
2961 		/*
2962 		 * For WRITE commands, the EEDPFlags shall be set to specify to
2963 		 * Increment the Primary Reference Tag, and to Insert
2964 		 * Protection Information fields.
2965 		 */
2966 		EEDPFlags = MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG	|
2967 		    MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
2968 	}
2969 	Control |= (0x4 << 26);
2970 
2971 	ddi_put16(acc_handle, &scsi_io_request->EEDPFlags, EEDPFlags);
2972 	ddi_put32(acc_handle, &scsi_io_request->Control, Control);
2973 	ddi_put32(acc_handle,
2974 	    &scsi_io_request->EEDPBlockSize, MRSAS_EEDPBLOCKSIZE);
2975 }
2976 
2977 
2978 /*
2979  * mrsas_tbolt_set_pd_lba -	Sets PD LBA
2980  * @cdb:		CDB
2981  * @cdb_len:		cdb length
2982  * @start_blk:		Start block of IO
2983  *
2984  * Used to set the PD LBA in CDB for FP IOs
2985  */
2986 static void
2987 mrsas_tbolt_set_pd_lba(U8 cdb[], uint8_t *cdb_len_ptr, U64 start_blk,
2988     U32 num_blocks)
2989 {
2990 	U8 cdb_len = *cdb_len_ptr;
2991 	U8 flagvals = 0, opcode = 0, groupnum = 0, control = 0;
2992 
2993 	/* Some drives don't support 16/12 byte CDB's, convert to 10 */
2994 	if (((cdb_len == 12) || (cdb_len == 16)) &&
2995 	    (start_blk <= 0xffffffff)) {
2996 		if (cdb_len == 16) {
2997 			con_log(CL_ANN,
2998 			    (CE_NOTE, "Converting READ/WRITE(16) to READ10"));
2999 			opcode = cdb[0] == READ_16 ? READ_10 : WRITE_10;
3000 			flagvals = cdb[1];
3001 			groupnum = cdb[14];
3002 			control = cdb[15];
3003 		} else {
3004 			con_log(CL_ANN,
3005 			    (CE_NOTE, "Converting READ/WRITE(12) to READ10"));
3006 			opcode = cdb[0] == READ_12 ? READ_10 : WRITE_10;
3007 			flagvals = cdb[1];
3008 			groupnum = cdb[10];
3009 			control = cdb[11];
3010 		}
3011 
3012 		bzero(cdb, sizeof (cdb));
3013 
3014 		cdb[0] = opcode;
3015 		cdb[1] = flagvals;
3016 		cdb[6] = groupnum;
3017 		cdb[9] = control;
3018 		/* Set transfer length */
3019 		cdb[8] = (U8)(num_blocks & 0xff);
3020 		cdb[7] = (U8)((num_blocks >> 8) & 0xff);
3021 		cdb_len = 10;
3022 	} else if ((cdb_len < 16) && (start_blk > 0xffffffff)) {
3023 		/* Convert to 16 byte CDB for large LBA's */
3024 		con_log(CL_ANN,
3025 		    (CE_NOTE, "Converting 6/10/12 CDB to 16 byte CDB"));
3026 		switch (cdb_len) {
3027 		case 6:
3028 			opcode = cdb[0] == READ_6 ? READ_16 : WRITE_16;
3029 			control = cdb[5];
3030 			break;
3031 		case 10:
3032 			opcode = cdb[0] == READ_10 ? READ_16 : WRITE_16;
3033 			flagvals = cdb[1];
3034 			groupnum = cdb[6];
3035 			control = cdb[9];
3036 			break;
3037 		case 12:
3038 			opcode = cdb[0] == READ_12 ? READ_16 : WRITE_16;
3039 			flagvals = cdb[1];
3040 			groupnum = cdb[10];
3041 			control = cdb[11];
3042 			break;
3043 		}
3044 
3045 		bzero(cdb, sizeof (cdb));
3046 
3047 		cdb[0] = opcode;
3048 		cdb[1] = flagvals;
3049 		cdb[14] = groupnum;
3050 		cdb[15] = control;
3051 
3052 		/* Transfer length */
3053 		cdb[13] = (U8)(num_blocks & 0xff);
3054 		cdb[12] = (U8)((num_blocks >> 8) & 0xff);
3055 		cdb[11] = (U8)((num_blocks >> 16) & 0xff);
3056 		cdb[10] = (U8)((num_blocks >> 24) & 0xff);
3057 
3058 		/* Specify 16-byte cdb */
3059 		cdb_len = 16;
3060 	} else if ((cdb_len == 6) && (start_blk > 0x1fffff)) {
3061 		/* convert to 10 byte CDB */
3062 		opcode = cdb[0] == READ_6 ? READ_10 : WRITE_10;
3063 		control = cdb[5];
3064 
3065 		bzero(cdb, sizeof (cdb));
3066 		cdb[0] = opcode;
3067 		cdb[9] = control;
3068 
3069 		/* Set transfer length */
3070 		cdb[8] = (U8)(num_blocks & 0xff);
3071 		cdb[7] = (U8)((num_blocks >> 8) & 0xff);
3072 
3073 		/* Specify 10-byte cdb */
3074 		cdb_len = 10;
3075 	}
3076 
3077 
3078 	/* Fall through Normal case, just load LBA here */
3079 	switch (cdb_len) {
3080 	case 6:
3081 	{
3082 		U8 val = cdb[1] & 0xE0;
3083 		cdb[3] = (U8)(start_blk & 0xff);
3084 		cdb[2] = (U8)((start_blk >> 8) & 0xff);
3085 		cdb[1] = val | ((U8)(start_blk >> 16) & 0x1f);
3086 		break;
3087 	}
3088 	case 10:
3089 		cdb[5] = (U8)(start_blk & 0xff);
3090 		cdb[4] = (U8)((start_blk >> 8) & 0xff);
3091 		cdb[3] = (U8)((start_blk >> 16) & 0xff);
3092 		cdb[2] = (U8)((start_blk >> 24) & 0xff);
3093 		break;
3094 	case 12:
3095 		cdb[5]	  = (U8)(start_blk & 0xff);
3096 		cdb[4]	  = (U8)((start_blk >> 8) & 0xff);
3097 		cdb[3]	  = (U8)((start_blk >> 16) & 0xff);
3098 		cdb[2]	  = (U8)((start_blk >> 24) & 0xff);
3099 		break;
3100 
3101 	case 16:
3102 		cdb[9]	= (U8)(start_blk & 0xff);
3103 		cdb[8]	= (U8)((start_blk >> 8) & 0xff);
3104 		cdb[7]	= (U8)((start_blk >> 16) & 0xff);
3105 		cdb[6]	= (U8)((start_blk >> 24) & 0xff);
3106 		cdb[5]	= (U8)((start_blk >> 32) & 0xff);
3107 		cdb[4]	= (U8)((start_blk >> 40) & 0xff);
3108 		cdb[3]	= (U8)((start_blk >> 48) & 0xff);
3109 		cdb[2]	= (U8)((start_blk >> 56) & 0xff);
3110 		break;
3111 	}
3112 
3113 	*cdb_len_ptr = cdb_len;
3114 }
3115 
3116 
3117 static int
3118 mrsas_tbolt_check_map_info(struct mrsas_instance *instance)
3119 {
3120 	MR_FW_RAID_MAP_ALL *ld_map;
3121 
3122 	if (!mrsas_tbolt_get_ld_map_info(instance)) {
3123 
3124 		ld_map = instance->ld_map[instance->map_id & 1];
3125 
3126 		con_log(CL_ANN1, (CE_NOTE, "ldCount=%d, map size=%d",
3127 		    ld_map->raidMap.ldCount, ld_map->raidMap.totalSize));
3128 
3129 		if (MR_ValidateMapInfo(
3130 		    instance->ld_map[instance->map_id & 1],
3131 		    instance->load_balance_info)) {
3132 			con_log(CL_ANN,
3133 			    (CE_CONT, "MR_ValidateMapInfo success"));
3134 
3135 			instance->fast_path_io = 1;
3136 			con_log(CL_ANN,
3137 			    (CE_NOTE, "instance->fast_path_io %d",
3138 			    instance->fast_path_io));
3139 
3140 			return (DDI_SUCCESS);
3141 		}
3142 
3143 	}
3144 
3145 	instance->fast_path_io = 0;
3146 	dev_err(instance->dip, CE_WARN, "MR_ValidateMapInfo failed");
3147 	con_log(CL_ANN, (CE_NOTE,
3148 	    "instance->fast_path_io %d", instance->fast_path_io));
3149 
3150 	return (DDI_FAILURE);
3151 }
3152 
3153 /*
3154  * Marks HBA as bad. This will be called either when an
3155  * IO packet times out even after 3 FW resets
3156  * or FW is found to be fault even after 3 continuous resets.
3157  */
3158 
void
mrsas_tbolt_kill_adapter(struct mrsas_instance *instance)
{
	dev_err(instance->dip, CE_NOTE, "TBOLT Kill adapter called");

	/* Already marked dead: nothing more to do. */
	if (instance->deadadapter == 1)
		return;

	con_log(CL_ANN1, (CE_NOTE, "tbolt_kill_adapter: "
	    "Writing to doorbell with MFI_STOP_ADP "));
	/* Mark dead under ocr_flags_mtx so reset paths see a stable flag. */
	mutex_enter(&instance->ocr_flags_mtx);
	instance->deadadapter = 1;
	mutex_exit(&instance->ocr_flags_mtx);
	instance->func_ptr->disable_intr(instance);
	/* Tell firmware to stop via the reserved doorbell register. */
	WR_RESERVED0_REGISTER(MFI_STOP_ADP, instance);
	/* Flush */
	(void) RD_RESERVED0_REGISTER(instance);

	/* Fail every outstanding command back to the upper layers. */
	(void) mrsas_print_pending_cmds(instance);
	(void) mrsas_complete_pending_cmds(instance);
}
3180 
3181 void
3182 mrsas_reset_reply_desc(struct mrsas_instance *instance)
3183 {
3184 	int i;
3185 	MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
3186 	instance->reply_read_index = 0;
3187 
3188 	/* initializing reply address to 0xFFFFFFFF */
3189 	reply_desc = instance->reply_frame_pool;
3190 
3191 	for (i = 0; i < instance->reply_q_depth; i++) {
3192 		reply_desc->Words = (uint64_t)~0;
3193 		reply_desc++;
3194 	}
3195 }
3196 
/*
 * mrsas_tbolt_reset_ppc - Online Controller Reset (OCR) for Thunderbolt.
 *
 * Resets the controller through the host-diagnostic register, waits for
 * the firmware to become ready, re-initializes the MPI2 interface and
 * then replays all pending commands.  Returns DDI_SUCCESS once the
 * adapter is back online, DDI_FAILURE if the reset cannot complete
 * (after repeated firmware faults the HBA is killed via
 * mrsas_tbolt_kill_adapter()).
 */
int
mrsas_tbolt_reset_ppc(struct mrsas_instance *instance)
{
	uint32_t status = 0x00;
	uint32_t retry = 0;
	uint32_t cur_abs_reg_val;
	uint32_t fw_state;
	uint32_t abs_state;
	uint32_t i;

	con_log(CL_ANN, (CE_NOTE,
	    "mrsas_tbolt_reset_ppc entered"));

	/* A dead adapter is never reset again. */
	if (instance->deadadapter == 1) {
		dev_err(instance->dip, CE_WARN, "mrsas_tbolt_reset_ppc: "
		    "no more resets as HBA has been marked dead ");
		return (DDI_FAILURE);
	}

	/* Flag the OCR so the I/O submission paths hold off. */
	mutex_enter(&instance->ocr_flags_mtx);
	instance->adapterresetinprogress = 1;
	con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc:"
	    "adpterresetinprogress flag set, time %llx", gethrtime()));
	mutex_exit(&instance->ocr_flags_mtx);

	instance->func_ptr->disable_intr(instance);

	/* Add delay inorder to complete the ioctl & io cmds in-flight */
	for (i = 0; i < 3000; i++) {
		drv_usecwait(MILLISEC); /* wait for 1000 usecs */
	}

	instance->reply_read_index = 0;

retry_reset:
	con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
	    ":Resetting TBOLT "));

	/* Unlock the host-diagnostic register with the magic key sequence. */
	WR_TBOLT_IB_WRITE_SEQ(0xF, instance);
	WR_TBOLT_IB_WRITE_SEQ(4, instance);
	WR_TBOLT_IB_WRITE_SEQ(0xb, instance);
	WR_TBOLT_IB_WRITE_SEQ(2, instance);
	WR_TBOLT_IB_WRITE_SEQ(7, instance);
	WR_TBOLT_IB_WRITE_SEQ(0xd, instance);
	con_log(CL_ANN1, (CE_NOTE,
	    "mrsas_tbolt_reset_ppc: magic number written "
	    "to write sequence register"));
	delay(100 * drv_usectohz(MILLISEC));
	status = RD_TBOLT_HOST_DIAG(instance);
	con_log(CL_ANN1, (CE_NOTE,
	    "mrsas_tbolt_reset_ppc: READ HOSTDIAG SUCCESS "
	    "to write sequence register"));

	/* Wait for any previously requested reset to clear (~10s max). */
	while (status & DIAG_TBOLT_RESET_ADAPTER) {
		delay(100 * drv_usectohz(MILLISEC));
		status = RD_TBOLT_HOST_DIAG(instance);
		if (retry++ == 100) {
			dev_err(instance->dip, CE_WARN,
			    "mrsas_tbolt_reset_ppc:"
			    "resetadapter bit is set already "
			    "check retry count %d", retry);
			return (DDI_FAILURE);
		}
	}

	/* Request the adapter reset through the host-diag register. */
	WR_TBOLT_HOST_DIAG(status | DIAG_TBOLT_RESET_ADAPTER, instance);
	delay(100 * drv_usectohz(MILLISEC));

	/* Poll until firmware clears the reset-adapter bit. */
	ddi_rep_get8((instance)->regmap_handle, (uint8_t *)&status,
	    (uint8_t *)((uintptr_t)(instance)->regmap +
	    RESET_TBOLT_STATUS_OFF), 4, DDI_DEV_AUTOINCR);

	while ((status & DIAG_TBOLT_RESET_ADAPTER)) {
		delay(100 * drv_usectohz(MILLISEC));
		ddi_rep_get8((instance)->regmap_handle, (uint8_t *)&status,
		    (uint8_t *)((uintptr_t)(instance)->regmap +
		    RESET_TBOLT_STATUS_OFF), 4, DDI_DEV_AUTOINCR);
		if (retry++ == 100) {
			/* Dont call kill adapter here */
			/* RESET BIT ADAPTER is cleared by firmare */
			/* mrsas_tbolt_kill_adapter(instance); */
			dev_err(instance->dip, CE_WARN,
			    "%s(): RESET FAILED; return failure!!!", __func__);
			return (DDI_FAILURE);
		}
	}

	con_log(CL_ANN,
	    (CE_NOTE, "mrsas_tbolt_reset_ppc: Adapter reset complete"));
	con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
	    "Calling mfi_state_transition_to_ready"));

	/* Wait (up to ~100s) for firmware to pass the FW_INIT state. */
	abs_state = instance->func_ptr->read_fw_status_reg(instance);
	retry = 0;
	while ((abs_state <= MFI_STATE_FW_INIT) && (retry++ < 1000)) {
		delay(100 * drv_usectohz(MILLISEC));
		abs_state = instance->func_ptr->read_fw_status_reg(instance);
	}
	if (abs_state <= MFI_STATE_FW_INIT) {
		dev_err(instance->dip, CE_WARN,
		    "mrsas_tbolt_reset_ppc: firmware state < MFI_STATE_FW_INIT"
		    "state = 0x%x, RETRY RESET.", abs_state);
		goto retry_reset;
	}

	/* Mark HBA as bad, if FW is fault after 3 continuous resets */
	if (mfi_state_transition_to_ready(instance) ||
	    debug_tbolt_fw_faults_after_ocr_g == 1) {
		cur_abs_reg_val =
		    instance->func_ptr->read_fw_status_reg(instance);
		fw_state	= cur_abs_reg_val & MFI_STATE_MASK;

		con_log(CL_ANN1, (CE_NOTE,
		    "mrsas_tbolt_reset_ppc :before fake: FW is not ready "
		    "FW state = 0x%x", fw_state));
		/* Debug knob to exercise the fault path during testing. */
		if (debug_tbolt_fw_faults_after_ocr_g == 1)
			fw_state = MFI_STATE_FAULT;

		con_log(CL_ANN,
		    (CE_NOTE,  "mrsas_tbolt_reset_ppc : FW is not ready "
		    "FW state = 0x%x", fw_state));

		if (fw_state == MFI_STATE_FAULT) {
			/* increment the count */
			instance->fw_fault_count_after_ocr++;
			if (instance->fw_fault_count_after_ocr
			    < MAX_FW_RESET_COUNT) {
				dev_err(instance->dip, CE_WARN,
				    "mrsas_tbolt_reset_ppc: "
				    "FW is in fault after OCR count %d "
				    "Retry Reset",
				    instance->fw_fault_count_after_ocr);
				goto retry_reset;

			} else {
				dev_err(instance->dip, CE_WARN, "%s:"
				    "Max Reset Count exceeded >%d"
				    "Mark HBA as bad, KILL adapter",
				    __func__, MAX_FW_RESET_COUNT);

				mrsas_tbolt_kill_adapter(instance);
				return (DDI_FAILURE);
			}
		}
	}

	/* reset the counter as FW is up after OCR */
	instance->fw_fault_count_after_ocr = 0;

	/* Re-arm the reply queue before re-initializing the interface. */
	mrsas_reset_reply_desc(instance);


	con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
	    "Calling mrsas_issue_init_mpi2"));
	abs_state = mrsas_issue_init_mpi2(instance);
	if (abs_state == (uint32_t)DDI_FAILURE) {
		dev_err(instance->dip, CE_WARN, "mrsas_tbolt_reset_ppc: "
		    "INIT failed Retrying Reset");
		goto retry_reset;
	}
	con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
	    "mrsas_issue_init_mpi2 Done"));

	con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
	    "Calling mrsas_print_pending_cmd"));
	(void) mrsas_print_pending_cmds(instance);
	con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
	    "mrsas_print_pending_cmd done"));

	instance->func_ptr->enable_intr(instance);
	instance->fw_outstanding = 0;

	/* Replay everything that was outstanding when the reset began. */
	con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
	    "Calling mrsas_issue_pending_cmds"));
	(void) mrsas_issue_pending_cmds(instance);
	con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
	"issue_pending_cmds done."));

	con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
	    "Calling aen registration"));

	/* Re-issue the async event notification (AEN) command. */
	instance->aen_cmd->retry_count_for_ocr = 0;
	instance->aen_cmd->drv_pkt_time = 0;

	instance->func_ptr->issue_cmd(instance->aen_cmd, instance);

	con_log(CL_ANN1, (CE_NOTE, "Unsetting adpresetinprogress flag."));
	mutex_enter(&instance->ocr_flags_mtx);
	instance->adapterresetinprogress = 0;
	mutex_exit(&instance->ocr_flags_mtx);
	con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
	    "adpterresetinprogress flag unset"));

	con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc done"));
	return (DDI_SUCCESS);

}
3394 
3395 
3396 /*
3397  * mrsas_sync_map_info -	Returns FW's ld_map structure
3398  * @instance:				Adapter soft state
3399  *
3400  * Issues an internal command (DCMD) to get the FW's controller PD
3401  * list structure.  This information is mainly used to find out SYSTEM
3402  * supported by the FW.
3403  */
3404 
static int
mrsas_tbolt_sync_map_info(struct mrsas_instance *instance)
{
	int			ret = 0, i;
	struct mrsas_cmd	*cmd = NULL;
	struct mrsas_dcmd_frame	*dcmd;
	uint32_t size_sync_info, num_lds;
	LD_TARGET_SYNC *ci = NULL;
	MR_FW_RAID_MAP_ALL *map;
	MR_LD_RAID  *raid;
	LD_TARGET_SYNC *ld_sync;
	uint32_t ci_h = 0;
	uint32_t size_map_info;

	cmd = get_raid_msg_pkt(instance);

	if (cmd == NULL) {
		dev_err(instance->dip, CE_WARN,
		    "Failed to get a cmd from free-pool in "
		    "mrsas_tbolt_sync_map_info().");
		return (DDI_FAILURE);
	}

	/* Clear the frame buffer and assign back the context id */
	bzero((char *)&cmd->frame[0], sizeof (union mrsas_frame));
	ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
	    cmd->index);
	bzero(cmd->scsi_io_request, sizeof (Mpi2RaidSCSIIORequest_t));


	/* Active map is selected by the low bit of map_id. */
	map = instance->ld_map[instance->map_id & 1];

	num_lds = map->raidMap.ldCount;

	dcmd = &cmd->frame->dcmd;

	size_sync_info = sizeof (LD_TARGET_SYNC) * num_lds;

	con_log(CL_ANN, (CE_NOTE, "size_sync_info =0x%x ; ld count = 0x%x",
	    size_sync_info, num_lds));

	/*
	 * The inactive map buffer (and its physical address) is reused as
	 * the DMA space that carries the sync list to the firmware.
	 */
	ci = (LD_TARGET_SYNC *)instance->ld_map[(instance->map_id - 1) & 1];

	bzero(ci, sizeof (MR_FW_RAID_MAP_ALL));
	ci_h = instance->ld_map_phy[(instance->map_id - 1) & 1];

	bzero(dcmd->mbox.b, DCMD_MBOX_SZ);

	ld_sync = (LD_TARGET_SYNC *)ci;

	/* One LD_TARGET_SYNC entry (target id + sequence number) per LD. */
	for (i = 0; i < num_lds; i++, ld_sync++) {
		raid = MR_LdRaidGet(i, map);

		con_log(CL_ANN1,
		    (CE_NOTE, "i : 0x%x, Seq Num : 0x%x, Sync Reqd : 0x%x",
		    i, raid->seqNum, raid->flags.ldSyncRequired));

		ld_sync->ldTargetId = MR_GetLDTgtId(i, map);

		con_log(CL_ANN1, (CE_NOTE, "i : 0x%x, tgt : 0x%x",
		    i, ld_sync->ldTargetId));

		ld_sync->seqNum = raid->seqNum;
	}


	size_map_info = sizeof (MR_FW_RAID_MAP) +
	    (sizeof (MR_LD_SPAN_MAP) * (MAX_LOGICAL_DRIVES - 1));

	/* WRITE direction: the driver is sending data to the firmware. */
	dcmd->cmd = MFI_CMD_OP_DCMD;
	dcmd->cmd_status = 0xFF;
	dcmd->sge_count = 1;
	dcmd->flags = MFI_FRAME_DIR_WRITE;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = size_map_info;
	ASSERT(num_lds <= 255);
	dcmd->mbox.b[0] = (U8)num_lds;
	dcmd->mbox.b[1] = 1; /* Pend */
	dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
	dcmd->sgl.sge32[0].phys_addr = ci_h;
	dcmd->sgl.sge32[0].length = size_map_info;


	/*
	 * Issue asynchronously; the firmware completes this command when
	 * the map changes.  'ret' stays 0: issue itself cannot fail here.
	 */
	instance->map_update_cmd = cmd;
	mr_sas_tbolt_build_mfi_cmd(instance, cmd);

	instance->func_ptr->issue_cmd(cmd, instance);

	instance->unroll.syncCmd = 1;
	con_log(CL_ANN1, (CE_NOTE, "sync cmd issued. [SMID]:%x", cmd->SMID));

	return (ret);
}
3499 
3500 /*
3501  * abort_syncmap_cmd
3502  */
3503 int
3504 abort_syncmap_cmd(struct mrsas_instance *instance,
3505     struct mrsas_cmd *cmd_to_abort)
3506 {
3507 	int	ret = 0;
3508 
3509 	struct mrsas_cmd		*cmd;
3510 	struct mrsas_abort_frame	*abort_fr;
3511 
3512 	con_log(CL_ANN1, (CE_NOTE, "chkpnt: abort_ldsync:%d", __LINE__));
3513 
3514 	cmd = get_raid_msg_mfi_pkt(instance);
3515 
3516 	if (!cmd) {
3517 		dev_err(instance->dip, CE_WARN,
3518 		    "Failed to get a cmd from free-pool abort_syncmap_cmd().");
3519 		return (DDI_FAILURE);
3520 	}
3521 	/* Clear the frame buffer and assign back the context id */
3522 	bzero((char *)&cmd->frame[0], sizeof (union mrsas_frame));
3523 	ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3524 	    cmd->index);
3525 
3526 	abort_fr = &cmd->frame->abort;
3527 
3528 	/* prepare and issue the abort frame */
3529 	ddi_put8(cmd->frame_dma_obj.acc_handle,
3530 	    &abort_fr->cmd, MFI_CMD_OP_ABORT);
3531 	ddi_put8(cmd->frame_dma_obj.acc_handle, &abort_fr->cmd_status,
3532 	    MFI_CMD_STATUS_SYNC_MODE);
3533 	ddi_put16(cmd->frame_dma_obj.acc_handle, &abort_fr->flags, 0);
3534 	ddi_put32(cmd->frame_dma_obj.acc_handle, &abort_fr->abort_context,
3535 	    cmd_to_abort->index);
3536 	ddi_put32(cmd->frame_dma_obj.acc_handle,
3537 	    &abort_fr->abort_mfi_phys_addr_lo, cmd_to_abort->frame_phys_addr);
3538 	ddi_put32(cmd->frame_dma_obj.acc_handle,
3539 	    &abort_fr->abort_mfi_phys_addr_hi, 0);
3540 
3541 	cmd->frame_count = 1;
3542 
3543 	mr_sas_tbolt_build_mfi_cmd(instance, cmd);
3544 
3545 	if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
3546 		con_log(CL_ANN1, (CE_WARN,
3547 		    "abort_ldsync_cmd: issue_cmd_in_poll_mode failed"));
3548 		ret = -1;
3549 	} else {
3550 		ret = 0;
3551 	}
3552 
3553 	return_raid_msg_mfi_pkt(instance, cmd);
3554 
3555 	atomic_add_16(&instance->fw_outstanding, (-1));
3556 
3557 	return (ret);
3558 }
3559 
3560 
3561 #ifdef PDSUPPORT
3562 /*
3563  * Even though these functions were originally intended for 2208 only, it
3564  * turns out they're useful for "Skinny" support as well.  In a perfect world,
3565  * these two functions would be either in mr_sas.c, or in their own new source
3566  * file.  Since this driver needs some cleanup anyway, keep this portion in
3567  * mind as well.
3568  */
3569 
3570 int
3571 mrsas_tbolt_config_pd(struct mrsas_instance *instance, uint16_t tgt,
3572     uint8_t lun, dev_info_t **ldip)
3573 {
3574 	struct scsi_device *sd;
3575 	dev_info_t *child;
3576 	int rval, dtype;
3577 	struct mrsas_tbolt_pd_info *pds = NULL;
3578 
3579 	con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_config_pd: t = %d l = %d",
3580 	    tgt, lun));
3581 
3582 	if ((child = mrsas_find_child(instance, tgt, lun)) != NULL) {
3583 		if (ldip) {
3584 			*ldip = child;
3585 		}
3586 		if (instance->mr_tbolt_pd_list[tgt].flag != MRDRV_TGT_VALID) {
3587 			rval = mrsas_service_evt(instance, tgt, 1,
3588 			    MRSAS_EVT_UNCONFIG_TGT, NULL);
3589 			con_log(CL_ANN1, (CE_WARN,
3590 			    "mr_sas:DELETING STALE ENTRY  rval = %d "
3591 			    "tgt id = %d", rval, tgt));
3592 			return (NDI_FAILURE);
3593 		}
3594 		return (NDI_SUCCESS);
3595 	}
3596 
3597 	pds = (struct mrsas_tbolt_pd_info *)
3598 	    kmem_zalloc(sizeof (struct mrsas_tbolt_pd_info), KM_SLEEP);
3599 	mrsas_tbolt_get_pd_info(instance, pds, tgt);
3600 	dtype = pds->scsiDevType;
3601 
3602 	/* Check for Disk */
3603 	if ((dtype == DTYPE_DIRECT)) {
3604 		if ((dtype == DTYPE_DIRECT) &&
3605 		    (LE_16(pds->fwState) != PD_SYSTEM)) {
3606 			kmem_free(pds, sizeof (struct mrsas_tbolt_pd_info));
3607 			return (NDI_FAILURE);
3608 		}
3609 		sd = kmem_zalloc(sizeof (struct scsi_device), KM_SLEEP);
3610 		sd->sd_address.a_hba_tran = instance->tran;
3611 		sd->sd_address.a_target = (uint16_t)tgt;
3612 		sd->sd_address.a_lun = (uint8_t)lun;
3613 
3614 		if (scsi_hba_probe(sd, NULL) == SCSIPROBE_EXISTS) {
3615 			rval = mrsas_config_scsi_device(instance, sd, ldip);
3616 			dev_err(instance->dip, CE_CONT,
3617 			    "?Phys. device found: tgt %d dtype %d: %s\n",
3618 			    tgt, dtype, sd->sd_inq->inq_vid);
3619 		} else {
3620 			rval = NDI_FAILURE;
3621 			con_log(CL_DLEVEL1, (CE_NOTE, "Phys. device Not found "
3622 			    "scsi_hba_probe Failed: tgt %d dtype %d: %s",
3623 			    tgt, dtype, sd->sd_inq->inq_vid));
3624 		}
3625 
3626 		/* sd_unprobe is blank now. Free buffer manually */
3627 		if (sd->sd_inq) {
3628 			kmem_free(sd->sd_inq, SUN_INQSIZE);
3629 			sd->sd_inq = (struct scsi_inquiry *)NULL;
3630 		}
3631 		kmem_free(sd, sizeof (struct scsi_device));
3632 	} else {
3633 		con_log(CL_ANN1, (CE_NOTE,
3634 		    "?Device not supported: tgt %d lun %d dtype %d",
3635 		    tgt, lun, dtype));
3636 		rval = NDI_FAILURE;
3637 	}
3638 
3639 	kmem_free(pds, sizeof (struct mrsas_tbolt_pd_info));
3640 	con_log(CL_ANN1, (CE_NOTE, "mrsas_config_pd: return rval = %d",
3641 	    rval));
3642 	return (rval);
3643 }
3644 
/*
 * mrsas_tbolt_get_pd_info - fetch physical drive info from firmware.
 * @instance:	Adapter soft state (Thunderbolt or Skinny).
 * @pds:	Caller-supplied buffer receiving the PD info.
 * @tgt:	Target (device) id of the physical drive to query.
 *
 * Issues MR_DCMD_PD_GET_INFO synchronously through a DMA bounce buffer
 * and copies the result into 'pds'.  If no command packet is available
 * the function returns silently, leaving 'pds' untouched (the caller in
 * mrsas_tbolt_config_pd() zero-allocates the buffer).
 */
static void
mrsas_tbolt_get_pd_info(struct mrsas_instance *instance,
    struct mrsas_tbolt_pd_info *pds, int tgt)
{
	struct mrsas_cmd	*cmd;
	struct mrsas_dcmd_frame	*dcmd;
	dma_obj_t		dcmd_dma_obj;

	ASSERT(instance->tbolt || instance->skinny);

	/* Thunderbolt and Skinny draw commands from different pools. */
	if (instance->tbolt)
		cmd = get_raid_msg_pkt(instance);
	else
		cmd = mrsas_get_mfi_pkt(instance);

	if (!cmd) {
		con_log(CL_ANN1,
		    (CE_WARN, "Failed to get a cmd for get pd info"));
		return;
	}

	/* Clear the frame buffer and assign back the context id */
	bzero((char *)&cmd->frame[0], sizeof (union mrsas_frame));
	ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
	    cmd->index);


	/* Single-cookie, byte-aligned DMA buffer for the returned info. */
	dcmd = &cmd->frame->dcmd;
	dcmd_dma_obj.size = sizeof (struct mrsas_tbolt_pd_info);
	dcmd_dma_obj.dma_attr = mrsas_generic_dma_attr;
	dcmd_dma_obj.dma_attr.dma_attr_addr_hi = 0xffffffff;
	dcmd_dma_obj.dma_attr.dma_attr_count_max = 0xffffffff;
	dcmd_dma_obj.dma_attr.dma_attr_sgllen = 1;
	dcmd_dma_obj.dma_attr.dma_attr_align = 1;

	/*
	 * NOTE(review): the allocation result is discarded; if it ever
	 * failed, the bzero below would touch an invalid buffer — confirm
	 * whether failure is possible here and add handling if so.
	 */
	(void) mrsas_alloc_dma_obj(instance, &dcmd_dma_obj,
	    DDI_STRUCTURE_LE_ACC);
	bzero(dcmd_dma_obj.buffer, sizeof (struct mrsas_tbolt_pd_info));
	bzero(dcmd->mbox.b, 12);
	/* Build the DCMD frame: MR_DCMD_PD_GET_INFO for target 'tgt'. */
	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD);
	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status, 0);
	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 1);
	ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags,
	    MFI_FRAME_DIR_READ);
	ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len,
	    sizeof (struct mrsas_tbolt_pd_info));
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode,
	    MR_DCMD_PD_GET_INFO);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->mbox.w[0], tgt);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].length,
	    sizeof (struct mrsas_tbolt_pd_info));
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].phys_addr,
	    dcmd_dma_obj.dma_cookie[0].dmac_address);

	cmd->sync_cmd = MRSAS_TRUE;
	cmd->frame_count = 1;

	if (instance->tbolt)
		mr_sas_tbolt_build_mfi_cmd(instance, cmd);

	/* Issue synchronously; blocks until the firmware completes. */
	instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd);

	/* Copy the firmware's answer out of the DMA buffer. */
	ddi_rep_get8(cmd->frame_dma_obj.acc_handle, (uint8_t *)pds,
	    (uint8_t *)dcmd_dma_obj.buffer, sizeof (struct mrsas_tbolt_pd_info),
	    DDI_DEV_AUTOINCR);
	(void) mrsas_free_dma_obj(instance, dcmd_dma_obj);

	if (instance->tbolt)
		return_raid_msg_pkt(instance, cmd);
	else
		mrsas_return_mfi_pkt(instance, cmd);
}
3718 #endif
3719