xref: /illumos-gate/usr/src/uts/common/io/mr_sas/mr_sas_tbolt.c (revision 33efde4275d24731ef87927237b0ffb0630b6b2d)
1 /*
2  * mr_sas_tbolt.c: source for the mr_sas driver for new-generation
3  * controllers, i.e. Thunderbolt and Invader.
4  *
5  * Solaris MegaRAID device driver for SAS2.0 controllers
6  * Copyright (c) 2008-2012, LSI Logic Corporation.
7  * All rights reserved.
8  *
9  * Version:
10  * Author:
11  *		Swaminathan K S
12  *		Arun Chandrashekhar
13  *		Manju R
14  *		Rasheed
15  *		Shakeel Bukhari
16  */
17 
18 /*
19  * Copyright 2018 Nexenta Systems, Inc.
20  * Copyright 2015, 2017 Citrus IT Limited. All rights reserved.
21  * Copyright 2015 Garrett D'Amore <garrett@damore.org>
22  */
23 
24 
25 #include <sys/types.h>
26 #include <sys/file.h>
27 #include <sys/atomic.h>
28 #include <sys/scsi/scsi.h>
29 #include <sys/byteorder.h>
30 #include <sys/sdt.h>
31 #include "ld_pd_map.h"
32 #include "mr_sas.h"
33 #include "fusion.h"
34 
35 /*
36  * FMA header files
37  */
38 #include <sys/ddifm.h>
39 #include <sys/fm/protocol.h>
40 #include <sys/fm/util.h>
41 #include <sys/fm/io/ddi.h>
42 
43 
44 /* TB command size: 20 pre-TB (64-byte MFI) frames. */
45 #define	MR_COMMAND_SIZE (64*20)	/* 1280 bytes */
46 MR_LD_RAID *MR_LdRaidGet(U32 ld, MR_FW_RAID_MAP_ALL *map);
47 U16 MR_TargetIdToLdGet(U32 ldTgtId, MR_FW_RAID_MAP_ALL *map);
48 U16 MR_GetLDTgtId(U32 ld, MR_FW_RAID_MAP_ALL *map);
49 U16 get_updated_dev_handle(PLD_LOAD_BALANCE_INFO, struct IO_REQUEST_INFO *);
50 extern ddi_dma_attr_t mrsas_generic_dma_attr;
51 extern uint32_t mrsas_tbolt_max_cap_maxxfer;
52 extern struct ddi_device_acc_attr endian_attr;
53 extern int	debug_level_g;
54 extern unsigned int	enable_fp;
55 volatile int dump_io_wait_time = 900;
56 extern volatile int  debug_timeout_g;
57 extern int	mrsas_issue_pending_cmds(struct mrsas_instance *);
58 extern int mrsas_complete_pending_cmds(struct mrsas_instance *instance);
59 extern void	push_pending_mfi_pkt(struct mrsas_instance *,
60 			struct mrsas_cmd *);
61 extern U8 MR_BuildRaidContext(struct mrsas_instance *, struct IO_REQUEST_INFO *,
62 	    MPI2_SCSI_IO_VENDOR_UNIQUE *, MR_FW_RAID_MAP_ALL *);
63 
64 /* Local static prototypes. */
65 static struct mrsas_cmd *mrsas_tbolt_build_cmd(struct mrsas_instance *,
66     struct scsi_address *, struct scsi_pkt *, uchar_t *);
67 static void mrsas_tbolt_set_pd_lba(U8 *, size_t, uint8_t *, U64, U32);
68 static int mrsas_tbolt_check_map_info(struct mrsas_instance *);
69 static int mrsas_tbolt_sync_map_info(struct mrsas_instance *);
70 static int mrsas_tbolt_prepare_pkt(struct scsa_cmd *);
71 static int mrsas_tbolt_ioc_init(struct mrsas_instance *, dma_obj_t *);
72 static void mrsas_tbolt_get_pd_info(struct mrsas_instance *,
73     struct mrsas_tbolt_pd_info *, int);
74 
75 static int mrsas_debug_tbolt_fw_faults_after_ocr = 0;
76 
77 /*
78  * destroy_mfi_mpi_frame_pool
79  */
80 void
81 destroy_mfi_mpi_frame_pool(struct mrsas_instance *instance)
82 {
83 	int	i;
84 
85 	struct mrsas_cmd	*cmd;
86 
87 	/* return all mfi frames to pool */
88 	for (i = 0; i < MRSAS_APP_RESERVED_CMDS; i++) {
89 		cmd = instance->cmd_list[i];
90 		if (cmd->frame_dma_obj_status == DMA_OBJ_ALLOCATED) {
91 			(void) mrsas_free_dma_obj(instance,
92 			    cmd->frame_dma_obj);
93 		}
94 		cmd->frame_dma_obj_status = DMA_OBJ_FREED;
95 	}
96 }
97 
98 /*
99  * destroy_mpi2_frame_pool
100  */
101 void
102 destroy_mpi2_frame_pool(struct mrsas_instance *instance)
103 {
104 
105 	if (instance->mpi2_frame_pool_dma_obj.status == DMA_OBJ_ALLOCATED) {
106 		(void) mrsas_free_dma_obj(instance,
107 		    instance->mpi2_frame_pool_dma_obj);
108 		instance->mpi2_frame_pool_dma_obj.status |= DMA_OBJ_FREED;
109 	}
110 }
111 
112 
113 /*
114  * mrsas_tbolt_free_additional_dma_buffer
115  */
116 void
117 mrsas_tbolt_free_additional_dma_buffer(struct mrsas_instance *instance)
118 {
119 	int i;
120 
121 	if (instance->mfi_internal_dma_obj.status == DMA_OBJ_ALLOCATED) {
122 		(void) mrsas_free_dma_obj(instance,
123 		    instance->mfi_internal_dma_obj);
124 		instance->mfi_internal_dma_obj.status = DMA_OBJ_FREED;
125 	}
126 	if (instance->mfi_evt_detail_obj.status == DMA_OBJ_ALLOCATED) {
127 		(void) mrsas_free_dma_obj(instance,
128 		    instance->mfi_evt_detail_obj);
129 		instance->mfi_evt_detail_obj.status = DMA_OBJ_FREED;
130 	}
131 
132 	for (i = 0; i < 2; i++) {
133 		if (instance->ld_map_obj[i].status == DMA_OBJ_ALLOCATED) {
134 			(void) mrsas_free_dma_obj(instance,
135 			    instance->ld_map_obj[i]);
136 			instance->ld_map_obj[i].status = DMA_OBJ_FREED;
137 		}
138 	}
139 }
140 
141 
142 /*
143  * free_req_desc_pool
144  */
145 void
146 free_req_rep_desc_pool(struct mrsas_instance *instance)
147 {
148 	if (instance->request_desc_dma_obj.status == DMA_OBJ_ALLOCATED) {
149 		(void) mrsas_free_dma_obj(instance,
150 		    instance->request_desc_dma_obj);
151 		instance->request_desc_dma_obj.status = DMA_OBJ_FREED;
152 	}
153 
154 	if (instance->reply_desc_dma_obj.status == DMA_OBJ_ALLOCATED) {
155 		(void) mrsas_free_dma_obj(instance,
156 		    instance->reply_desc_dma_obj);
157 		instance->reply_desc_dma_obj.status = DMA_OBJ_FREED;
158 	}
159 
160 
161 }
162 
163 
164 /*
165  * ThunderBolt(TB) Request Message Frame Pool
166  */
167 int
168 create_mpi2_frame_pool(struct mrsas_instance *instance)
169 {
170 	int		i = 0;
171 	uint16_t	max_cmd;
172 	uint32_t	sgl_sz;
173 	uint32_t	raid_msg_size;
174 	uint32_t	total_size;
175 	uint32_t	offset;
176 	uint32_t	io_req_base_phys;
177 	uint8_t		*io_req_base;
178 	struct mrsas_cmd	*cmd;
179 
180 	max_cmd = instance->max_fw_cmds;
181 
182 	sgl_sz		= 1024;
183 	raid_msg_size	= MRSAS_THUNDERBOLT_MSG_SIZE;
184 
185 	/* Allocate an additional 256 bytes to accommodate SMID 0. */
186 	total_size = MRSAS_THUNDERBOLT_MSG_SIZE + (max_cmd * raid_msg_size) +
187 	    (max_cmd * sgl_sz) + (max_cmd * SENSE_LENGTH);
188 
189 	con_log(CL_ANN1, (CE_NOTE, "create_mpi2_frame_pool: "
190 	    "max_cmd %x", max_cmd));
191 
192 	con_log(CL_DLEVEL3, (CE_NOTE, "create_mpi2_frame_pool: "
193 	    "request message frame pool size %x", total_size));
194 
195 	/*
196 	 * ThunderBolt(TB): create a single chunk of DMA-able memory and
197 	 * split it into per-command regions. Each command must be able to
198 	 * hold a RAID MESSAGE FRAME, which embeds an MFI_FRAME within it.
199 	 * See the alloc_req_rep_desc() function, where the request/reply
200 	 * descriptor queues are allocated, for the corresponding setup.
201 	 */
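	/*
	 * Illustrative layout of the pool carved up below, assuming
	 * MRSAS_THUNDERBOLT_MSG_SIZE is 256 (the real value comes from the
	 * headers); sgl_sz is the literal 1024 set above:
	 *
	 *   [0, 256)                             frame reserved for SMID 0
	 *   [256, 256 + max_cmd * 256)           per-command message frames
	 *   [..., ... + max_cmd * 1024)          per-command chained SGL bufs
	 *   [..., ... + max_cmd * SENSE_LENGTH)  per-command sense buffers
	 *
	 * The loop further down derives cmd->scsi_io_request, cmd->sgl and
	 * cmd->sense1 as offsets into this single allocation.
	 */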
202 
203 	instance->mpi2_frame_pool_dma_obj.size = total_size;
204 	instance->mpi2_frame_pool_dma_obj.dma_attr = mrsas_generic_dma_attr;
205 	instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_addr_hi =
206 	    0xFFFFFFFFU;
207 	instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_count_max =
208 	    0xFFFFFFFFU;
209 	instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_sgllen = 1;
210 	instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_align = 256;
211 
212 	if (mrsas_alloc_dma_obj(instance, &instance->mpi2_frame_pool_dma_obj,
213 	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
214 		dev_err(instance->dip, CE_WARN,
215 		    "could not alloc mpi2 frame pool");
216 		return (DDI_FAILURE);
217 	}
218 
219 	bzero(instance->mpi2_frame_pool_dma_obj.buffer, total_size);
220 	instance->mpi2_frame_pool_dma_obj.status |= DMA_OBJ_ALLOCATED;
221 
222 	instance->io_request_frames =
223 	    (uint8_t *)instance->mpi2_frame_pool_dma_obj.buffer;
224 	instance->io_request_frames_phy =
225 	    (uint32_t)
226 	    instance->mpi2_frame_pool_dma_obj.dma_cookie[0].dmac_address;
227 
228 	con_log(CL_DLEVEL3, (CE_NOTE, "io_request_frames 0x%p",
229 	    (void *)instance->io_request_frames));
230 
231 	con_log(CL_DLEVEL3, (CE_NOTE, "io_request_frames_phy 0x%x",
232 	    instance->io_request_frames_phy));
233 
234 	io_req_base = (uint8_t *)instance->io_request_frames +
235 	    MRSAS_THUNDERBOLT_MSG_SIZE;
236 	io_req_base_phys = instance->io_request_frames_phy +
237 	    MRSAS_THUNDERBOLT_MSG_SIZE;
238 
239 	con_log(CL_DLEVEL3, (CE_NOTE,
240 	    "io req_base_phys 0x%x", io_req_base_phys));
241 
242 	for (i = 0; i < max_cmd; i++) {
243 		cmd = instance->cmd_list[i];
244 
245 		offset = i * MRSAS_THUNDERBOLT_MSG_SIZE;
246 
247 		cmd->scsi_io_request = (Mpi2RaidSCSIIORequest_t *)
248 		    ((uint8_t *)io_req_base + offset);
249 		cmd->scsi_io_request_phys_addr = io_req_base_phys + offset;
250 
251 		cmd->sgl = (Mpi2SGEIOUnion_t *)((uint8_t *)io_req_base +
252 		    (max_cmd * raid_msg_size) + i * sgl_sz);
253 
254 		cmd->sgl_phys_addr = (io_req_base_phys +
255 		    (max_cmd * raid_msg_size) + i * sgl_sz);
256 
257 		cmd->sense1 = (uint8_t *)((uint8_t *)io_req_base +
258 		    (max_cmd * raid_msg_size) + (max_cmd * sgl_sz) +
259 		    (i * SENSE_LENGTH));
260 
261 		cmd->sense_phys_addr1 = (io_req_base_phys +
262 		    (max_cmd * raid_msg_size) + (max_cmd * sgl_sz) +
263 		    (i * SENSE_LENGTH));
264 
265 
266 		cmd->SMID = i + 1;
267 
268 		con_log(CL_DLEVEL3, (CE_NOTE, "Frame Pool Addr [%x]0x%p",
269 		    cmd->index, (void *)cmd->scsi_io_request));
270 
271 		con_log(CL_DLEVEL3, (CE_NOTE, "Frame Pool Phys Addr [%x]0x%x",
272 		    cmd->index, cmd->scsi_io_request_phys_addr));
273 
274 		con_log(CL_DLEVEL3, (CE_NOTE, "Sense Addr [%x]0x%p",
275 		    cmd->index, (void *)cmd->sense1));
276 
277 		con_log(CL_DLEVEL3, (CE_NOTE, "Sense Addr Phys [%x]0x%x",
278 		    cmd->index, cmd->sense_phys_addr1));
279 
280 		con_log(CL_DLEVEL3, (CE_NOTE, "SGL buffers [%x]0x%p",
281 		    cmd->index, (void *)cmd->sgl));
282 
283 		con_log(CL_DLEVEL3, (CE_NOTE, "SGL buffers phys [%x]0x%x",
284 		    cmd->index, cmd->sgl_phys_addr));
285 	}
286 
287 	return (DDI_SUCCESS);
288 
289 }
290 
291 
292 /*
293  * alloc_additional_dma_buffer for AEN
294  */
295 int
296 mrsas_tbolt_alloc_additional_dma_buffer(struct mrsas_instance *instance)
297 {
298 	uint32_t	internal_buf_size = PAGESIZE*2;
299 	int i;
300 
301 	/* Initialize buffer status as free */
302 	instance->mfi_internal_dma_obj.status = DMA_OBJ_FREED;
303 	instance->mfi_evt_detail_obj.status = DMA_OBJ_FREED;
304 	instance->ld_map_obj[0].status = DMA_OBJ_FREED;
305 	instance->ld_map_obj[1].status = DMA_OBJ_FREED;
306 
307 
308 	instance->mfi_internal_dma_obj.size = internal_buf_size;
309 	instance->mfi_internal_dma_obj.dma_attr = mrsas_generic_dma_attr;
310 	instance->mfi_internal_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
311 	instance->mfi_internal_dma_obj.dma_attr.dma_attr_count_max =
312 	    0xFFFFFFFFU;
313 	instance->mfi_internal_dma_obj.dma_attr.dma_attr_sgllen = 1;
314 
315 	if (mrsas_alloc_dma_obj(instance, &instance->mfi_internal_dma_obj,
316 	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
317 		dev_err(instance->dip, CE_WARN,
318 		    "could not alloc reply queue");
319 		return (DDI_FAILURE);
320 	}
321 
322 	bzero(instance->mfi_internal_dma_obj.buffer, internal_buf_size);
323 
324 	instance->mfi_internal_dma_obj.status |= DMA_OBJ_ALLOCATED;
325 	instance->internal_buf =
326 	    (caddr_t)(((unsigned long)instance->mfi_internal_dma_obj.buffer));
327 	instance->internal_buf_dmac_add =
328 	    instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address;
329 	instance->internal_buf_size = internal_buf_size;
330 
331 	/* allocate evt_detail */
332 	instance->mfi_evt_detail_obj.size = sizeof (struct mrsas_evt_detail);
333 	instance->mfi_evt_detail_obj.dma_attr = mrsas_generic_dma_attr;
334 	instance->mfi_evt_detail_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
335 	instance->mfi_evt_detail_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
336 	instance->mfi_evt_detail_obj.dma_attr.dma_attr_sgllen = 1;
337 	instance->mfi_evt_detail_obj.dma_attr.dma_attr_align = 8;
338 
339 	if (mrsas_alloc_dma_obj(instance, &instance->mfi_evt_detail_obj,
340 	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
341 		dev_err(instance->dip, CE_WARN,
342 		    "mrsas_tbolt_alloc_additional_dma_buffer: "
343 		    "could not allocate data transfer buffer.");
344 		goto fail_tbolt_additional_buff;
345 	}
346 
347 	bzero(instance->mfi_evt_detail_obj.buffer,
348 	    sizeof (struct mrsas_evt_detail));
349 
350 	instance->mfi_evt_detail_obj.status |= DMA_OBJ_ALLOCATED;
351 
352 	instance->size_map_info = sizeof (MR_FW_RAID_MAP) +
353 	    (sizeof (MR_LD_SPAN_MAP) * (MAX_LOGICAL_DRIVES - 1));
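	/*
	 * MR_FW_RAID_MAP is declared (in the LSI headers) with a one-element
	 * ldSpanMap[] tail, so adding (MAX_LOGICAL_DRIVES - 1) more span map
	 * entries sizes the buffer for every possible logical drive.
	 */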
354 
355 	for (i = 0; i < 2; i++) {
356 		/* allocate the data transfer buffer */
357 		instance->ld_map_obj[i].size = instance->size_map_info;
358 		instance->ld_map_obj[i].dma_attr = mrsas_generic_dma_attr;
359 		instance->ld_map_obj[i].dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
360 		instance->ld_map_obj[i].dma_attr.dma_attr_count_max =
361 		    0xFFFFFFFFU;
362 		instance->ld_map_obj[i].dma_attr.dma_attr_sgllen = 1;
363 		instance->ld_map_obj[i].dma_attr.dma_attr_align = 1;
364 
365 		if (mrsas_alloc_dma_obj(instance, &instance->ld_map_obj[i],
366 		    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
367 			dev_err(instance->dip, CE_WARN,
368 			    "could not allocate data transfer buffer.");
369 			goto fail_tbolt_additional_buff;
370 		}
371 
372 		instance->ld_map_obj[i].status |= DMA_OBJ_ALLOCATED;
373 
374 		bzero(instance->ld_map_obj[i].buffer, instance->size_map_info);
375 
376 		instance->ld_map[i] =
377 		    (MR_FW_RAID_MAP_ALL *)instance->ld_map_obj[i].buffer;
378 		instance->ld_map_phy[i] = (uint32_t)instance->
379 		    ld_map_obj[i].dma_cookie[0].dmac_address;
380 
381 		con_log(CL_DLEVEL3, (CE_NOTE,
382 		    "ld_map Addr Phys 0x%x", instance->ld_map_phy[i]));
383 
384 		con_log(CL_DLEVEL3, (CE_NOTE,
385 		    "size_map_info 0x%x", instance->size_map_info));
386 	}
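	/*
	 * Two map buffers are kept so RAID map updates can be double
	 * buffered: (instance->map_id & 1) selects the map currently in use
	 * (see mrsas_tbolt_build_cmd()), while the other buffer receives the
	 * next map from firmware during a sync.
	 */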
387 
388 	return (DDI_SUCCESS);
389 
390 fail_tbolt_additional_buff:
391 	mrsas_tbolt_free_additional_dma_buffer(instance);
392 
393 	return (DDI_FAILURE);
394 }
395 
396 MRSAS_REQUEST_DESCRIPTOR_UNION *
397 mr_sas_get_request_descriptor(struct mrsas_instance *instance, uint16_t index)
398 {
399 	MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
400 
401 	if (index > instance->max_fw_cmds) {
402 		con_log(CL_ANN1, (CE_NOTE,
403 		    "Invalid SMID 0x%x request for descriptor", index));
404 		con_log(CL_ANN1, (CE_NOTE,
405 		    "max_fw_cmds : 0x%x", instance->max_fw_cmds));
406 		return (NULL);
407 	}
408 
409 	req_desc = (MRSAS_REQUEST_DESCRIPTOR_UNION *)
410 	    ((char *)instance->request_message_pool +
411 	    (sizeof (MRSAS_REQUEST_DESCRIPTOR_UNION) * index));
412 
413 	con_log(CL_ANN1, (CE_NOTE,
414 	    "request descriptor : 0x%08lx", (unsigned long)req_desc));
415 
416 	con_log(CL_ANN1, (CE_NOTE,
417 	    "request descriptor base phy : 0x%08lx",
418 	    (unsigned long)instance->request_message_pool_phy));
419 
420 	return ((MRSAS_REQUEST_DESCRIPTOR_UNION *)req_desc);
421 }
422 
423 
424 /*
425  * Allocate Request and Reply Queue Descriptors.
426  */
427 int
428 alloc_req_rep_desc(struct mrsas_instance *instance)
429 {
430 	uint32_t	request_q_sz, reply_q_sz;
431 	int		i, max_reply_q_sz;
432 	MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
433 
434 	/*
435 	 * ThunderBolt(TB): there is no longer a producer/consumer mechanism.
436 	 * On an interrupt the driver scans the list of reply descriptors and
437 	 * processes each one, so memory must be allocated for the full set
438 	 * of reply descriptors.
439 	 */
440 
441 	/* Allocate Reply Descriptors */
442 	con_log(CL_ANN1, (CE_NOTE, " reply q desc len = %x",
443 	    (uint_t)sizeof (MPI2_REPLY_DESCRIPTORS_UNION)));
444 
445 	/* The reply queue size must be a multiple of 16. */
446 	max_reply_q_sz = ((instance->max_fw_cmds + 1 + 15)/16)*16;
447 
448 	reply_q_sz = 8 * max_reply_q_sz;
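	/*
	 * Worked example, assuming the 1007-command cap applied in
	 * mrsas_init_adapter_tbolt(): (1007 + 1 + 15) / 16 * 16 = 1008
	 * descriptors; at 8 bytes per reply descriptor, reply_q_sz is
	 * 8064 bytes.
	 */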
449 
450 
451 	con_log(CL_ANN1, (CE_NOTE, " reply q desc len = %x",
452 	    (uint_t)sizeof (MPI2_REPLY_DESCRIPTORS_UNION)));
453 
454 	instance->reply_desc_dma_obj.size = reply_q_sz;
455 	instance->reply_desc_dma_obj.dma_attr = mrsas_generic_dma_attr;
456 	instance->reply_desc_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
457 	instance->reply_desc_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
458 	instance->reply_desc_dma_obj.dma_attr.dma_attr_sgllen = 1;
459 	instance->reply_desc_dma_obj.dma_attr.dma_attr_align = 16;
460 
461 	if (mrsas_alloc_dma_obj(instance, &instance->reply_desc_dma_obj,
462 	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
463 		dev_err(instance->dip, CE_WARN, "could not alloc reply queue");
464 		return (DDI_FAILURE);
465 	}
466 
467 	bzero(instance->reply_desc_dma_obj.buffer, reply_q_sz);
468 	instance->reply_desc_dma_obj.status |= DMA_OBJ_ALLOCATED;
469 
470 	/* virtual address of  reply queue */
471 	instance->reply_frame_pool = (MPI2_REPLY_DESCRIPTORS_UNION *)(
472 	    instance->reply_desc_dma_obj.buffer);
473 
474 	instance->reply_q_depth = max_reply_q_sz;
475 
476 	con_log(CL_ANN1, (CE_NOTE, "[reply queue depth]0x%x",
477 	    instance->reply_q_depth));
478 
479 	con_log(CL_ANN1, (CE_NOTE, "[reply queue virt addr]0x%p",
480 	    (void *)instance->reply_frame_pool));
481 
482 	/* Initialize each reply descriptor to all ones (the unused marker). */
483 	reply_desc = instance->reply_frame_pool;
484 
485 	for (i = 0; i < instance->reply_q_depth; i++) {
486 		reply_desc->Words = (uint64_t)~0;
487 		reply_desc++;
488 	}
489 
490 
491 	instance->reply_frame_pool_phy =
492 	    (uint32_t)instance->reply_desc_dma_obj.dma_cookie[0].dmac_address;
493 
494 	con_log(CL_ANN1, (CE_NOTE,
495 	    "[reply queue phys addr]0x%x", instance->reply_frame_pool_phy));
496 
497 
498 	instance->reply_pool_limit_phy = (instance->reply_frame_pool_phy +
499 	    reply_q_sz);
500 
501 	con_log(CL_ANN1, (CE_NOTE, "[reply pool limit phys addr]0x%x",
502 	    instance->reply_pool_limit_phy));
503 
504 
505 	con_log(CL_ANN1, (CE_NOTE, " request q desc len = %x",
506 	    (int)sizeof (MRSAS_REQUEST_DESCRIPTOR_UNION)));
507 
508 	/* Allocate Request Descriptors */
509 	con_log(CL_ANN1, (CE_NOTE, " request q desc len = %x",
510 	    (int)sizeof (MRSAS_REQUEST_DESCRIPTOR_UNION)));
511 
512 	request_q_sz = 8 *
513 	    (instance->max_fw_cmds);
514 
515 	instance->request_desc_dma_obj.size = request_q_sz;
516 	instance->request_desc_dma_obj.dma_attr	= mrsas_generic_dma_attr;
517 	instance->request_desc_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
518 	instance->request_desc_dma_obj.dma_attr.dma_attr_count_max =
519 	    0xFFFFFFFFU;
520 	instance->request_desc_dma_obj.dma_attr.dma_attr_sgllen	= 1;
521 	instance->request_desc_dma_obj.dma_attr.dma_attr_align = 16;
522 
523 	if (mrsas_alloc_dma_obj(instance, &instance->request_desc_dma_obj,
524 	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
525 		dev_err(instance->dip, CE_WARN,
526 		    "could not alloc request queue desc");
527 		goto fail_undo_reply_queue;
528 	}
529 
530 	bzero(instance->request_desc_dma_obj.buffer, request_q_sz);
531 	instance->request_desc_dma_obj.status |= DMA_OBJ_ALLOCATED;
532 
533 	/* virtual address of  request queue desc */
534 	instance->request_message_pool = (MRSAS_REQUEST_DESCRIPTOR_UNION *)
535 	    (instance->request_desc_dma_obj.buffer);
536 
537 	instance->request_message_pool_phy =
538 	    (uint32_t)instance->request_desc_dma_obj.dma_cookie[0].dmac_address;
539 
540 	return (DDI_SUCCESS);
541 
542 fail_undo_reply_queue:
543 	if (instance->reply_desc_dma_obj.status == DMA_OBJ_ALLOCATED) {
544 		(void) mrsas_free_dma_obj(instance,
545 		    instance->reply_desc_dma_obj);
546 		instance->reply_desc_dma_obj.status = DMA_OBJ_FREED;
547 	}
548 
549 	return (DDI_FAILURE);
550 }
551 
552 /*
553  * mrsas_alloc_cmd_pool_tbolt
554  *
555  * TODO: merge tbolt-specific code into mrsas_alloc_cmd_pool() to have single
556  * routine
557  */
558 int
559 mrsas_alloc_cmd_pool_tbolt(struct mrsas_instance *instance)
560 {
561 	int		i;
562 	int		count;
563 	uint32_t	max_cmd;
564 	uint32_t	reserve_cmd;
565 	size_t		sz;
566 
567 	struct mrsas_cmd	*cmd;
568 
569 	max_cmd = instance->max_fw_cmds;
570 	con_log(CL_ANN1, (CE_NOTE, "mrsas_alloc_cmd_pool_tbolt: "
571 	    "max_cmd %x", max_cmd));
572 
573 
574 	sz = sizeof (struct mrsas_cmd *) * max_cmd;
575 
576 	/*
577 	 * instance->cmd_list is an array of struct mrsas_cmd pointers.
578 	 * Allocate the dynamic array first and then allocate individual
579 	 * commands.
580 	 */
581 	instance->cmd_list = kmem_zalloc(sz, KM_SLEEP);
582 
583 	/* create a frame pool and assign one frame to each cmd */
584 	for (count = 0; count < max_cmd; count++) {
585 		instance->cmd_list[count] =
586 		    kmem_zalloc(sizeof (struct mrsas_cmd), KM_SLEEP);
587 	}
588 
589 	/* add all the commands to command pool */
590 
591 	INIT_LIST_HEAD(&instance->cmd_pool_list);
592 	INIT_LIST_HEAD(&instance->cmd_pend_list);
593 	INIT_LIST_HEAD(&instance->cmd_app_pool_list);
594 
595 	reserve_cmd = MRSAS_APP_RESERVED_CMDS;
596 
597 	/* cmd index 0 is reserved for IOC INIT */
598 	for (i = 1; i < reserve_cmd; i++) {
599 		cmd		= instance->cmd_list[i];
600 		cmd->index	= i;
601 		mlist_add_tail(&cmd->list, &instance->cmd_app_pool_list);
602 	}
603 
604 
605 	for (i = reserve_cmd; i < max_cmd; i++) {
606 		cmd		= instance->cmd_list[i];
607 		cmd->index	= i;
608 		mlist_add_tail(&cmd->list, &instance->cmd_pool_list);
609 	}
610 
611 	return (DDI_SUCCESS);
612 }
613 
614 
615 /*
616  * free_space_for_mpi2
617  */
618 void
619 free_space_for_mpi2(struct mrsas_instance *instance)
620 {
621 	/* already freed */
622 	if (instance->cmd_list == NULL) {
623 		return;
624 	}
625 
626 	/* First free the additional DMA buffer */
627 	mrsas_tbolt_free_additional_dma_buffer(instance);
628 
629 	/* Free the request/reply descriptor pool */
630 	free_req_rep_desc_pool(instance);
631 
632 	/*  Free the MPI message pool */
633 	destroy_mpi2_frame_pool(instance);
634 
635 	/* Free the MFI frame pool */
636 	destroy_mfi_frame_pool(instance);
637 
638 	/* Free all the commands in the cmd_list */
639 	/* Free the cmd_list buffer itself */
640 	mrsas_free_cmd_pool(instance);
641 }
642 
643 
644 /*
645  * ThunderBolt(TB) memory allocations for commands/messages/frames.
646  */
647 int
648 alloc_space_for_mpi2(struct mrsas_instance *instance)
649 {
650 	/* Allocate command pool (memory for cmd_list & individual commands) */
651 	if (mrsas_alloc_cmd_pool_tbolt(instance)) {
652 		dev_err(instance->dip, CE_WARN, "Error creating cmd pool");
653 		return (DDI_FAILURE);
654 	}
655 
656 	/* Initialize single reply size and Message size */
657 	instance->reply_size = MRSAS_THUNDERBOLT_REPLY_SIZE;
658 	instance->raid_io_msg_size = MRSAS_THUNDERBOLT_MSG_SIZE;
659 
660 	instance->max_sge_in_main_msg = (MRSAS_THUNDERBOLT_MSG_SIZE -
661 	    (sizeof (MPI2_RAID_SCSI_IO_REQUEST) -
662 	    sizeof (MPI2_SGE_IO_UNION)))/ sizeof (MPI2_SGE_IO_UNION);
663 	instance->max_sge_in_chain = (MR_COMMAND_SIZE -
664 	    MRSAS_THUNDERBOLT_MSG_SIZE) / sizeof (MPI2_SGE_IO_UNION);
665 
666 	/* Reduce SG count by 2 (chain element slot plus the group cmds feature in FW) */
667 	instance->max_num_sge = (instance->max_sge_in_main_msg +
668 	    instance->max_sge_in_chain - 2);
669 	instance->chain_offset_mpt_msg =
670 	    offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 16;
671 	instance->chain_offset_io_req = (MRSAS_THUNDERBOLT_MSG_SIZE -
672 	    sizeof (MPI2_SGE_IO_UNION)) / 16;
673 	instance->reply_read_index = 0;
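	/*
	 * Worked example with illustrative sizes (the real ones come from
	 * the MPI2 headers): if sizeof (MPI2_RAID_SCSI_IO_REQUEST) is 192
	 * and sizeof (MPI2_SGE_IO_UNION) is 16, then max_sge_in_main_msg is
	 * (256 - (192 - 16)) / 16 = 5, max_sge_in_chain is
	 * (1280 - 256) / 16 = 64, max_num_sge is 5 + 64 - 2 = 67, and
	 * chain_offset_io_req is (256 - 16) / 16 = 15 (in 16-byte units).
	 */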
674 
675 
676 	/* Allocate Request and Reply descriptors Array */
677 	/* Make sure the buffer is aligned to 8 for req/rep  descriptor Pool */
678 	if (alloc_req_rep_desc(instance)) {
679 		dev_err(instance->dip, CE_WARN,
680 		    "Error, allocating memory for descriptor-pool");
681 		goto mpi2_undo_cmd_pool;
682 	}
683 	con_log(CL_ANN1, (CE_NOTE, "[request message pool phys addr]0x%x",
684 	    instance->request_message_pool_phy));
685 
686 
687 	/* Allocate MFI Frame pool - for MPI-MFI passthru commands */
688 	if (create_mfi_frame_pool(instance)) {
689 		dev_err(instance->dip, CE_WARN,
690 		    "Error, allocating memory for MFI frame-pool");
691 		goto mpi2_undo_descriptor_pool;
692 	}
693 
694 
695 	/* Allocate MPI2 Message pool */
696 	/*
697 	 * Make sure the buffer is aligned to 256 for the raid message packet.
698 	 * Create an io request pool and assign one frame to each cmd.
699 	 */
700 
701 	if (create_mpi2_frame_pool(instance)) {
702 		dev_err(instance->dip, CE_WARN,
703 		    "Error, allocating memory for MPI2 Message-pool");
704 		goto mpi2_undo_mfi_frame_pool;
705 	}
706 
707 #ifdef DEBUG
708 	con_log(CL_ANN1, (CE_CONT, "[max_sge_in_main_msg]0x%x",
709 	    instance->max_sge_in_main_msg));
710 	con_log(CL_ANN1, (CE_CONT, "[max_sge_in_chain]0x%x",
711 	    instance->max_sge_in_chain));
712 	con_log(CL_ANN1, (CE_CONT,
713 	    "[max_sge]0x%x", instance->max_num_sge));
714 	con_log(CL_ANN1, (CE_CONT, "[chain_offset_mpt_msg]0x%x",
715 	    instance->chain_offset_mpt_msg));
716 	con_log(CL_ANN1, (CE_CONT, "[chain_offset_io_req]0x%x",
717 	    instance->chain_offset_io_req));
718 #endif
719 
720 
721 	/* Allocate additional dma buffer */
722 	if (mrsas_tbolt_alloc_additional_dma_buffer(instance)) {
723 		dev_err(instance->dip, CE_WARN,
724 		    "Error, allocating tbolt additional DMA buffer");
725 		goto mpi2_undo_message_pool;
726 	}
727 
728 	return (DDI_SUCCESS);
729 
730 mpi2_undo_message_pool:
731 	destroy_mpi2_frame_pool(instance);
732 
733 mpi2_undo_mfi_frame_pool:
734 	destroy_mfi_frame_pool(instance);
735 
736 mpi2_undo_descriptor_pool:
737 	free_req_rep_desc_pool(instance);
738 
739 mpi2_undo_cmd_pool:
740 	mrsas_free_cmd_pool(instance);
741 
742 	return (DDI_FAILURE);
743 }
744 
745 
746 /*
747  * mrsas_init_adapter_tbolt - Initialize fusion interface adapter.
748  */
749 int
750 mrsas_init_adapter_tbolt(struct mrsas_instance *instance)
751 {
752 
753 	/*
754 	 * Reduce the max supported cmds by 1, to ensure that reply_q_sz
755 	 * (one more than the max number of cmds the driver may send)
756 	 * does not exceed the max cmds that the FW can support.
757 	 */
758 
759 	if (instance->max_fw_cmds > 1008) {
760 		instance->max_fw_cmds = 1008;
761 		instance->max_fw_cmds = instance->max_fw_cmds-1;
762 	}
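	/*
	 * With the cap above, max_fw_cmds becomes 1007, so the reply queue
	 * depth computed in alloc_req_rep_desc(), (1007 + 1) rounded up to a
	 * multiple of 16, stays at 1008 and within the firmware limit.
	 */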
763 
764 	con_log(CL_ANN, (CE_NOTE, "mrsas_init_adapter_tbolt: "
765 	    "instance->max_fw_cmds 0x%X.", instance->max_fw_cmds));
766 
767 
768 	/* create a pool of commands */
769 	if (alloc_space_for_mpi2(instance) != DDI_SUCCESS) {
770 		dev_err(instance->dip, CE_WARN,
771 		    "alloc_space_for_mpi2() failed.");
772 
773 		return (DDI_FAILURE);
774 	}
775 
776 	/* Send ioc init message */
777 	/* NOTE: the issue_init call does FMA checking already. */
778 	if (mrsas_issue_init_mpi2(instance) != DDI_SUCCESS) {
779 		dev_err(instance->dip, CE_WARN,
780 		    "mrsas_issue_init_mpi2() failed.");
781 
782 		goto fail_init_fusion;
783 	}
784 
785 	instance->unroll.alloc_space_mpi2 = 1;
786 
787 	con_log(CL_ANN, (CE_NOTE,
788 	    "mrsas_init_adapter_tbolt: SUCCESSFUL"));
789 
790 	return (DDI_SUCCESS);
791 
792 fail_init_fusion:
793 	free_space_for_mpi2(instance);
794 
795 	return (DDI_FAILURE);
796 }
797 
798 
799 
800 /*
801  * init_mpi2
802  */
803 int
804 mrsas_issue_init_mpi2(struct mrsas_instance *instance)
805 {
806 	dma_obj_t init2_dma_obj;
807 	int ret_val = DDI_SUCCESS;
808 
809 	/* allocate DMA buffer for IOC INIT message */
810 	init2_dma_obj.size = sizeof (Mpi2IOCInitRequest_t);
811 	init2_dma_obj.dma_attr = mrsas_generic_dma_attr;
812 	init2_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
813 	init2_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
814 	init2_dma_obj.dma_attr.dma_attr_sgllen = 1;
815 	init2_dma_obj.dma_attr.dma_attr_align = 256;
816 
817 	if (mrsas_alloc_dma_obj(instance, &init2_dma_obj,
818 	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
819 		dev_err(instance->dip, CE_WARN, "mrsas_issue_init_mpi2: "
820 		    "could not allocate data transfer buffer.");
821 		return (DDI_FAILURE);
822 	}
823 	(void) memset(init2_dma_obj.buffer, 2, sizeof (Mpi2IOCInitRequest_t));
824 
825 	con_log(CL_ANN1, (CE_NOTE,
826 	    "mrsas_issue_init_mpi2 _phys adr: %x",
827 	    init2_dma_obj.dma_cookie[0].dmac_address));
828 
829 
830 	/* Initialize and send ioc init message */
831 	ret_val = mrsas_tbolt_ioc_init(instance, &init2_dma_obj);
832 	if (ret_val == DDI_FAILURE) {
833 		con_log(CL_ANN1, (CE_WARN,
834 		    "mrsas_issue_init_mpi2: Failed"));
835 		goto fail_init_mpi2;
836 	}
837 
838 	/* free IOC init DMA buffer */
839 	if (mrsas_free_dma_obj(instance, init2_dma_obj)
840 	    != DDI_SUCCESS) {
841 		con_log(CL_ANN1, (CE_WARN,
842 		    "mrsas_issue_init_mpi2: Free Failed"));
843 		return (DDI_FAILURE);
844 	}
845 
846 	/* Get/Check and sync ld_map info */
847 	instance->map_id = 0;
848 	if (mrsas_tbolt_check_map_info(instance) == DDI_SUCCESS)
849 		(void) mrsas_tbolt_sync_map_info(instance);
850 
851 
852 	/* No mrsas_cmd to send, so send NULL. */
853 	if (mrsas_common_check(instance, NULL) != DDI_SUCCESS)
854 		goto fail_init_mpi2;
855 
856 	con_log(CL_ANN, (CE_NOTE,
857 	    "mrsas_issue_init_mpi2: SUCCESSFUL"));
858 
859 	return (DDI_SUCCESS);
860 
861 fail_init_mpi2:
862 	(void) mrsas_free_dma_obj(instance, init2_dma_obj);
863 
864 	return (DDI_FAILURE);
865 }
866 
867 static int
868 mrsas_tbolt_ioc_init(struct mrsas_instance *instance, dma_obj_t *mpi2_dma_obj)
869 {
870 	int				numbytes;
871 	uint16_t			flags;
872 	struct mrsas_init_frame2	*mfiFrameInit2;
873 	struct mrsas_header		*frame_hdr;
874 	Mpi2IOCInitRequest_t		*init;
875 	struct mrsas_cmd		*cmd = NULL;
876 	struct mrsas_drv_ver		drv_ver_info;
877 	MRSAS_REQUEST_DESCRIPTOR_UNION	req_desc;
878 	uint32_t			timeout;
879 
880 	con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
881 
882 
883 #ifdef DEBUG
884 	con_log(CL_ANN1, (CE_CONT, " mfiFrameInit2 len = %x\n",
885 	    (int)sizeof (*mfiFrameInit2)));
886 	con_log(CL_ANN1, (CE_CONT, " MPI len = %x\n", (int)sizeof (*init)));
887 	con_log(CL_ANN1, (CE_CONT, " mfiFrameInit2 len = %x\n",
888 	    (int)sizeof (struct mrsas_init_frame2)));
889 	con_log(CL_ANN1, (CE_CONT, " MPI len = %x\n",
890 	    (int)sizeof (Mpi2IOCInitRequest_t)));
891 #endif
892 
893 	init = (Mpi2IOCInitRequest_t *)mpi2_dma_obj->buffer;
894 	numbytes = sizeof (*init);
895 	bzero(init, numbytes);
896 
897 	ddi_put8(mpi2_dma_obj->acc_handle, &init->Function,
898 	    MPI2_FUNCTION_IOC_INIT);
899 
900 	ddi_put8(mpi2_dma_obj->acc_handle, &init->WhoInit,
901 	    MPI2_WHOINIT_HOST_DRIVER);
902 
903 	/* set MsgVersion and HeaderVersion host driver was built with */
904 	ddi_put16(mpi2_dma_obj->acc_handle, &init->MsgVersion,
905 	    MPI2_VERSION);
906 
907 	ddi_put16(mpi2_dma_obj->acc_handle, &init->HeaderVersion,
908 	    MPI2_HEADER_VERSION);
909 
910 	ddi_put16(mpi2_dma_obj->acc_handle, &init->SystemRequestFrameSize,
911 	    instance->raid_io_msg_size / 4);
912 
913 	ddi_put16(mpi2_dma_obj->acc_handle, &init->ReplyFreeQueueDepth,
914 	    0);
915 
916 	ddi_put16(mpi2_dma_obj->acc_handle,
917 	    &init->ReplyDescriptorPostQueueDepth,
918 	    instance->reply_q_depth);
919 	/*
920 	 * These addresses are set using the DMA cookie addresses from when the
921 	 * memory was allocated.  Sense buffer hi address should be 0.
922 	 * ddi_put32(accessp, &init->SenseBufferAddressHigh, 0);
923 	 */
924 
925 	ddi_put32(mpi2_dma_obj->acc_handle,
926 	    &init->SenseBufferAddressHigh, 0);
927 
928 	ddi_put64(mpi2_dma_obj->acc_handle,
929 	    (uint64_t *)&init->SystemRequestFrameBaseAddress,
930 	    instance->io_request_frames_phy);
931 
932 	ddi_put64(mpi2_dma_obj->acc_handle,
933 	    &init->ReplyDescriptorPostQueueAddress,
934 	    instance->reply_frame_pool_phy);
935 
936 	ddi_put64(mpi2_dma_obj->acc_handle,
937 	    &init->ReplyFreeQueueAddress, 0);
938 
939 	cmd = instance->cmd_list[0];
940 	if (cmd == NULL) {
941 		return (DDI_FAILURE);
942 	}
943 	cmd->retry_count_for_ocr = 0;
944 	cmd->pkt = NULL;
945 	cmd->drv_pkt_time = 0;
946 
947 	mfiFrameInit2 = (struct mrsas_init_frame2 *)cmd->scsi_io_request;
948 	con_log(CL_ANN1, (CE_CONT, "[mfi vaddr]%p", (void *)mfiFrameInit2));
949 
950 	frame_hdr = &cmd->frame->hdr;
951 
952 	ddi_put8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status,
953 	    MFI_CMD_STATUS_POLL_MODE);
954 
955 	flags = ddi_get16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags);
956 
957 	flags	|= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
958 
959 	ddi_put16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags, flags);
960 
961 	con_log(CL_ANN, (CE_CONT,
962 	    "mrsas_tbolt_ioc_init: SMID:%x\n", cmd->SMID));
963 
964 	/* Init the MFI Header */
965 	ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
966 	    &mfiFrameInit2->cmd, MFI_CMD_OP_INIT);
967 
968 	con_log(CL_ANN1, (CE_CONT, "[CMD]%x", mfiFrameInit2->cmd));
969 
970 	ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
971 	    &mfiFrameInit2->cmd_status,
972 	    MFI_STAT_INVALID_STATUS);
973 
974 	con_log(CL_ANN1, (CE_CONT, "[Status]%x", mfiFrameInit2->cmd_status));
975 
976 	ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
977 	    &mfiFrameInit2->queue_info_new_phys_addr_lo,
978 	    mpi2_dma_obj->dma_cookie[0].dmac_address);
979 
980 	ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
981 	    &mfiFrameInit2->data_xfer_len,
982 	    sizeof (Mpi2IOCInitRequest_t));
983 
984 	con_log(CL_ANN1, (CE_CONT, "[reply q desc addr]%x",
985 	    (int)init->ReplyDescriptorPostQueueAddress));
986 
987 	/* fill driver version information */
988 	fill_up_drv_ver(&drv_ver_info);
989 
990 	/* allocate the driver version data transfer buffer */
991 	instance->drv_ver_dma_obj.size = sizeof (drv_ver_info.drv_ver);
992 	instance->drv_ver_dma_obj.dma_attr = mrsas_generic_dma_attr;
993 	instance->drv_ver_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
994 	instance->drv_ver_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
995 	instance->drv_ver_dma_obj.dma_attr.dma_attr_sgllen = 1;
996 	instance->drv_ver_dma_obj.dma_attr.dma_attr_align = 1;
997 
998 	if (mrsas_alloc_dma_obj(instance, &instance->drv_ver_dma_obj,
999 	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
1000 		dev_err(instance->dip, CE_WARN,
1001 		    "fusion init: Could not allocate driver version buffer.");
1002 		return (DDI_FAILURE);
1003 	}
1004 	/* copy driver version to dma buffer */
1005 	bzero(instance->drv_ver_dma_obj.buffer, sizeof (drv_ver_info.drv_ver));
1006 	ddi_rep_put8(cmd->frame_dma_obj.acc_handle,
1007 	    (uint8_t *)drv_ver_info.drv_ver,
1008 	    (uint8_t *)instance->drv_ver_dma_obj.buffer,
1009 	    sizeof (drv_ver_info.drv_ver), DDI_DEV_AUTOINCR);
1010 
1011 	/* send driver version physical address to firmware */
1012 	ddi_put64(cmd->frame_dma_obj.acc_handle, &mfiFrameInit2->driverversion,
1013 	    instance->drv_ver_dma_obj.dma_cookie[0].dmac_address);
1014 
1015 	con_log(CL_ANN1, (CE_CONT, "[MPIINIT2 frame Phys addr ]0x%x len = %x",
1016 	    mfiFrameInit2->queue_info_new_phys_addr_lo,
1017 	    (int)sizeof (Mpi2IOCInitRequest_t)));
1018 
1019 	con_log(CL_ANN1, (CE_CONT, "[Length]%x", mfiFrameInit2->data_xfer_len));
1020 
1021 	con_log(CL_ANN1, (CE_CONT, "[MFI frame Phys Address]%x len = %x",
1022 	    cmd->scsi_io_request_phys_addr,
1023 	    (int)sizeof (struct mrsas_init_frame2)));
1024 
1025 	/* disable interrupts before sending INIT2 frame */
1026 	instance->func_ptr->disable_intr(instance);
1027 
1028 	req_desc.Words = cmd->scsi_io_request_phys_addr;
1029 	req_desc.MFAIo.RequestFlags =
1030 	    (MPI2_REQ_DESCRIPT_FLAGS_MFA << MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
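	/*
	 * For this MFI pass-through the 64-bit descriptor carries the
	 * physical address of the frame itself rather than an SMID, and the
	 * MFA type in the low request-flags bits tells the firmware to
	 * interpret it as a legacy MFI frame.
	 */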
1031 
1032 	cmd->request_desc = &req_desc;
1033 
1034 	/* issue the init frame */
1035 
1036 	mutex_enter(&instance->reg_write_mtx);
1037 	WR_IB_LOW_QPORT((uint32_t)(req_desc.Words), instance);
1038 	WR_IB_HIGH_QPORT((uint32_t)(req_desc.Words >> 32), instance);
1039 	mutex_exit(&instance->reg_write_mtx);
1040 
1041 	con_log(CL_ANN1, (CE_CONT, "[cmd = %d] ", frame_hdr->cmd));
1042 	con_log(CL_ANN1, (CE_CONT, "[cmd  Status= %x] ",
1043 	    frame_hdr->cmd_status));
1044 
1045 	timeout = drv_usectohz(MFI_POLL_TIMEOUT_SECS * MICROSEC);
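	/*
	 * Each iteration below sleeps one clock tick via delay(1), so the
	 * tick count from drv_usectohz() bounds the poll at roughly
	 * MFI_POLL_TIMEOUT_SECS seconds.
	 */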
1046 	do {
1047 		if (ddi_get8(cmd->frame_dma_obj.acc_handle,
1048 		    &mfiFrameInit2->cmd_status) != MFI_CMD_STATUS_POLL_MODE)
1049 			break;
1050 		delay(1);
1051 		timeout--;
1052 	} while (timeout > 0);
1053 
1054 	if (ddi_get8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1055 	    &mfiFrameInit2->cmd_status) == 0) {
1056 		con_log(CL_ANN, (CE_NOTE, "INIT2 Success"));
1057 	} else {
1058 		con_log(CL_ANN, (CE_WARN, "INIT2 Fail"));
1059 		mrsas_dump_reply_desc(instance);
1060 		goto fail_ioc_init;
1061 	}
1062 
1063 	mrsas_dump_reply_desc(instance);
1064 
1065 	instance->unroll.verBuff = 1;
1066 
1067 	con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_ioc_init: SUCCESSFUL"));
1068 
1069 	return (DDI_SUCCESS);
1070 
1071 
1072 fail_ioc_init:
1073 
1074 	(void) mrsas_free_dma_obj(instance, instance->drv_ver_dma_obj);
1075 
1076 	return (DDI_FAILURE);
1077 }
1078 
1079 int
1080 wait_for_outstanding_poll_io(struct mrsas_instance *instance)
1081 {
1082 	int i;
1083 	uint32_t wait_time = dump_io_wait_time;
1084 	for (i = 0; i < wait_time; i++) {
1085 		/*
1086 		 * Check for outstanding polled commands; a count of 2 is
1087 		 * treated as idle since ldsync and AEN stay outstanding.
1088 		 */
1089 		if (instance->fw_outstanding <= 2) {
1090 			break;
1091 		}
1092 		drv_usecwait(MILLISEC);
1093 		/* complete commands from reply queue */
1094 		(void) mr_sas_tbolt_process_outstanding_cmd(instance);
1095 	}
1096 	if (instance->fw_outstanding > 2) {
1097 		return (1);
1098 	}
1099 	return (0);
1100 }
1101 /*
1102  * scsi_pkt handling
1103  *
1104  * Visible to the external world via the transport structure.
1105  */
1106 
1107 int
1108 mrsas_tbolt_tran_start(struct scsi_address *ap, struct scsi_pkt *pkt)
1109 {
1110 	struct mrsas_instance	*instance = ADDR2MR(ap);
1111 	struct scsa_cmd		*acmd = PKT2CMD(pkt);
1112 	struct mrsas_cmd	*cmd = NULL;
1113 	uchar_t			cmd_done = 0;
1114 
1115 	con_log(CL_DLEVEL1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1116 	if (instance->deadadapter == 1) {
1117 		dev_err(instance->dip, CE_WARN,
1118 		    "mrsas_tbolt_tran_start: returning TRAN_FATAL_ERROR "
1119 		    "for IO, as the HBA doesn't accept any more IOs");
1120 		if (pkt) {
1121 			pkt->pkt_reason		= CMD_DEV_GONE;
1122 			pkt->pkt_statistics	= STAT_DISCON;
1123 		}
1124 		return (TRAN_FATAL_ERROR);
1125 	}
1126 	if (instance->adapterresetinprogress) {
1127 		con_log(CL_ANN, (CE_NOTE, "Reset flag set, "
1128 		    "returning mfi_pkt and setting TRAN_BUSY\n"));
1129 		return (TRAN_BUSY);
1130 	}
1131 	(void) mrsas_tbolt_prepare_pkt(acmd);
1132 
1133 	cmd = mrsas_tbolt_build_cmd(instance, ap, pkt, &cmd_done);
1134 
1135 	/*
1136 	 * Check if the command was already completed by the
1137 	 * mrsas_tbolt_build_cmd() routine, in which case busy_flag is clear,
1138 	 * scb is NULL, and an appropriate reason is set in pkt_reason.
1139 	 */
1140 	if (cmd_done) {
1141 		pkt->pkt_reason = CMD_CMPLT;
1142 		pkt->pkt_scbp[0] = STATUS_GOOD;
1143 		pkt->pkt_state |= STATE_GOT_BUS | STATE_GOT_TARGET
1144 		    | STATE_SENT_CMD;
1145 		if (((pkt->pkt_flags & FLAG_NOINTR) == 0) && pkt->pkt_comp) {
1146 			(*pkt->pkt_comp)(pkt);
1147 		}
1148 
1149 		return (TRAN_ACCEPT);
1150 	}
1151 
1152 	if (cmd == NULL) {
1153 		return (TRAN_BUSY);
1154 	}
1155 
1156 
1157 	if ((pkt->pkt_flags & FLAG_NOINTR) == 0) {
1158 		if (instance->fw_outstanding > instance->max_fw_cmds) {
1159 			dev_err(instance->dip, CE_WARN,
1160 			    "Command Queue Full... Returning BUSY");
1161 			DTRACE_PROBE2(tbolt_start_tran_err,
1162 			    uint16_t, instance->fw_outstanding,
1163 			    uint16_t, instance->max_fw_cmds);
1164 			return_raid_msg_pkt(instance, cmd);
1165 			return (TRAN_BUSY);
1166 		}
1167 
1168 		/* Synchronize the Cmd frame for the controller */
1169 		(void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle, 0, 0,
1170 		    DDI_DMA_SYNC_FORDEV);
1171 
1172 		con_log(CL_ANN, (CE_CONT, "tbolt_issue_cmd: SCSI CDB[0]=0x%x "
1173 		    "cmd->index:0x%x SMID 0x%x\n", pkt->pkt_cdbp[0],
1174 		    cmd->index, cmd->SMID));
1175 
1176 		instance->func_ptr->issue_cmd(cmd, instance);
1177 	} else {
1178 		instance->func_ptr->issue_cmd(cmd, instance);
1179 		(void) wait_for_outstanding_poll_io(instance);
1180 		(void) mrsas_common_check(instance, cmd);
1181 		DTRACE_PROBE2(tbolt_start_nointr_done,
1182 		    uint8_t, cmd->frame->hdr.cmd,
1183 		    uint8_t, cmd->frame->hdr.cmd_status);
1184 	}
1185 
1186 	return (TRAN_ACCEPT);
1187 }
1188 
1189 /*
1190  * prepare the pkt:
1191  * the pkt may have been resubmitted or just reused so
1192  * initialize some fields and do some checks.
1193  */
1194 static int
1195 mrsas_tbolt_prepare_pkt(struct scsa_cmd *acmd)
1196 {
1197 	struct scsi_pkt	*pkt = CMD2PKT(acmd);
1198 
1199 
1200 	/*
1201 	 * Reinitialize some fields that need it; the packet may
1202 	 * have been resubmitted
1203 	 */
1204 	pkt->pkt_reason = CMD_CMPLT;
1205 	pkt->pkt_state = 0;
1206 	pkt->pkt_statistics = 0;
1207 	pkt->pkt_resid = 0;
1208 
1209 	/*
1210 	 * zero status byte.
1211 	 */
1212 	*(pkt->pkt_scbp) = 0;
1213 
1214 	return (0);
1215 }
1216 
1217 
1218 int
1219 mr_sas_tbolt_build_sgl(struct mrsas_instance *instance,
1220     struct scsa_cmd *acmd,
1221     struct mrsas_cmd *cmd,
1222     Mpi2RaidSCSIIORequest_t *scsi_raid_io,
1223     uint32_t *datalen)
1224 {
1225 	uint32_t		MaxSGEs;
1226 	int			sg_to_process;
1227 	uint32_t		i, j;
1228 	uint32_t		numElements, endElement;
1229 	Mpi25IeeeSgeChain64_t	*ieeeChainElement = NULL;
1230 	Mpi25IeeeSgeChain64_t	*scsi_raid_io_sgl_ieee = NULL;
1231 	ddi_acc_handle_t acc_handle =
1232 	    instance->mpi2_frame_pool_dma_obj.acc_handle;
1233 
1234 	con_log(CL_ANN1, (CE_NOTE,
1235 	    "chkpnt: Building Chained SGL :%d", __LINE__));
1236 
1237 	/* Calculate SGE size in number of 32-bit words. */
1238 	/* Clear the datalen before updating it. */
1239 	*datalen = 0;
1240 
1241 	MaxSGEs = instance->max_sge_in_main_msg;
1242 
1243 	ddi_put16(acc_handle, &scsi_raid_io->SGLFlags,
1244 	    MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
1245 
1246 	/* set data transfer flag. */
1247 	if (acmd->cmd_flags & CFLAG_DMASEND) {
1248 		ddi_put32(acc_handle, &scsi_raid_io->Control,
1249 		    MPI2_SCSIIO_CONTROL_WRITE);
1250 	} else {
1251 		ddi_put32(acc_handle, &scsi_raid_io->Control,
1252 		    MPI2_SCSIIO_CONTROL_READ);
1253 	}
1254 
1255 
1256 	numElements = acmd->cmd_cookiecnt;
1257 
1258 	con_log(CL_DLEVEL1, (CE_NOTE, "[SGE Count]:%x", numElements));
1259 
1260 	if (numElements > instance->max_num_sge) {
1261 		con_log(CL_ANN, (CE_NOTE,
1262 		    "[Max SGE Count Exceeded]:%x", numElements));
1263 		return (numElements);
1264 	}
1265 
1266 	ddi_put8(acc_handle, &scsi_raid_io->RaidContext.numSGE,
1267 	    (uint8_t)numElements);
1268 
1269 	/* set end element in main message frame */
1270 	endElement = (numElements <= MaxSGEs) ? numElements : (MaxSGEs - 1);
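	/*
	 * Illustrative example: with max_sge_in_main_msg == 5 and
	 * numElements == 10, endElement is 4; four SGEs go in the main
	 * frame, the fifth slot holds the chain element, and the remaining
	 * six SGEs land in the chained buffer at cmd->sgl.
	 */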
1271 
1272 	/* prepare the scatter-gather list for the firmware */
1273 	scsi_raid_io_sgl_ieee =
1274 	    (Mpi25IeeeSgeChain64_t *)&scsi_raid_io->SGL.IeeeChain;
1275 
1276 	if (instance->gen3) {
1277 		Mpi25IeeeSgeChain64_t *sgl_ptr_end = scsi_raid_io_sgl_ieee;
1278 		sgl_ptr_end += instance->max_sge_in_main_msg - 1;
1279 
1280 		ddi_put8(acc_handle, &sgl_ptr_end->Flags, 0);
1281 	}
1282 
1283 	for (i = 0; i < endElement; i++, scsi_raid_io_sgl_ieee++) {
1284 		ddi_put64(acc_handle, &scsi_raid_io_sgl_ieee->Address,
1285 		    acmd->cmd_dmacookies[i].dmac_laddress);
1286 
1287 		ddi_put32(acc_handle, &scsi_raid_io_sgl_ieee->Length,
1288 		    acmd->cmd_dmacookies[i].dmac_size);
1289 
1290 		ddi_put8(acc_handle, &scsi_raid_io_sgl_ieee->Flags, 0);
1291 
1292 		if (instance->gen3) {
1293 			if (i == (numElements - 1)) {
1294 				ddi_put8(acc_handle,
1295 				    &scsi_raid_io_sgl_ieee->Flags,
1296 				    IEEE_SGE_FLAGS_END_OF_LIST);
1297 			}
1298 		}
1299 
1300 		*datalen += acmd->cmd_dmacookies[i].dmac_size;
1301 
1302 #ifdef DEBUG
1303 		con_log(CL_DLEVEL1, (CE_NOTE, "[SGL Address]: %" PRIx64,
1304 		    scsi_raid_io_sgl_ieee->Address));
1305 		con_log(CL_DLEVEL1, (CE_NOTE, "[SGL Length]:%x",
1306 		    scsi_raid_io_sgl_ieee->Length));
1307 		con_log(CL_DLEVEL1, (CE_NOTE, "[SGL Flags]:%x",
1308 		    scsi_raid_io_sgl_ieee->Flags));
1309 #endif
1310 
1311 	}
1312 
1313 	ddi_put8(acc_handle, &scsi_raid_io->ChainOffset, 0);
1314 
1315 	/* check if chained SGL required */
1316 	if (i < numElements) {
1317 
1318 		con_log(CL_ANN1, (CE_NOTE, "[Chain Element index]:%x", i));
1319 
1320 		if (instance->gen3) {
1321 			uint16_t ioFlags =
1322 			    ddi_get16(acc_handle, &scsi_raid_io->IoFlags);
1323 
1324 			if ((ioFlags &
1325 			    MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) !=
1326 			    MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) {
1327 				ddi_put8(acc_handle, &scsi_raid_io->ChainOffset,
1328 				    (U8)instance->chain_offset_io_req);
1329 			} else {
1330 				ddi_put8(acc_handle,
1331 				    &scsi_raid_io->ChainOffset, 0);
1332 			}
1333 		} else {
1334 			ddi_put8(acc_handle, &scsi_raid_io->ChainOffset,
1335 			    (U8)instance->chain_offset_io_req);
1336 		}
1337 
1338 		/* prepare physical chain element */
1339 		ieeeChainElement = scsi_raid_io_sgl_ieee;
1340 
1341 		ddi_put8(acc_handle, &ieeeChainElement->NextChainOffset, 0);
1342 
1343 		if (instance->gen3) {
1344 			ddi_put8(acc_handle, &ieeeChainElement->Flags,
1345 			    IEEE_SGE_FLAGS_CHAIN_ELEMENT);
1346 		} else {
1347 			ddi_put8(acc_handle, &ieeeChainElement->Flags,
1348 			    (IEEE_SGE_FLAGS_CHAIN_ELEMENT |
1349 			    MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR));
1350 		}
1351 
1352 		ddi_put32(acc_handle, &ieeeChainElement->Length,
1353 		    (sizeof (MPI2_SGE_IO_UNION) * (numElements - i)));
1354 
1355 		ddi_put64(acc_handle, &ieeeChainElement->Address,
1356 		    (U64)cmd->sgl_phys_addr);
1357 
1358 		sg_to_process = numElements - i;
1359 
1360 		con_log(CL_ANN1, (CE_NOTE,
1361 		    "[Additional SGE Count]:%x", sg_to_process));
1362 
1363 		/* point to the chained SGL buffer */
1364 		scsi_raid_io_sgl_ieee = (Mpi25IeeeSgeChain64_t *)cmd->sgl;
1365 
1366 		/* build rest of the SGL in chained buffer */
1367 		for (j = 0; j < sg_to_process; j++, scsi_raid_io_sgl_ieee++) {
1368 			con_log(CL_DLEVEL3, (CE_NOTE, "[remaining SGL]:%x", i));
1369 
1370 			ddi_put64(acc_handle, &scsi_raid_io_sgl_ieee->Address,
1371 			    acmd->cmd_dmacookies[i].dmac_laddress);
1372 
1373 			ddi_put32(acc_handle, &scsi_raid_io_sgl_ieee->Length,
1374 			    acmd->cmd_dmacookies[i].dmac_size);
1375 
1376 			ddi_put8(acc_handle, &scsi_raid_io_sgl_ieee->Flags, 0);
1377 
1378 			if (instance->gen3) {
1379 				if (i == (numElements - 1)) {
1380 					ddi_put8(acc_handle,
1381 					    &scsi_raid_io_sgl_ieee->Flags,
1382 					    IEEE_SGE_FLAGS_END_OF_LIST);
1383 				}
1384 			}
1385 
1386 			*datalen += acmd->cmd_dmacookies[i].dmac_size;
1387 
1388 #if DEBUG
1389 			con_log(CL_DLEVEL1, (CE_NOTE,
1390 			    "[SGL Address]: %" PRIx64,
1391 			    scsi_raid_io_sgl_ieee->Address));
1392 			con_log(CL_DLEVEL1, (CE_NOTE,
1393 			    "[SGL Length]:%x", scsi_raid_io_sgl_ieee->Length));
1394 			con_log(CL_DLEVEL1, (CE_NOTE,
1395 			    "[SGL Flags]:%x", scsi_raid_io_sgl_ieee->Flags));
1396 #endif
1397 
1398 			i++;
1399 		}
1400 	}
1401 
1402 	return (0);
1403 } /* end of BuildScatterGather */
1404 
1405 
1406 /*
1407  * build_cmd
1408  */
1409 static struct mrsas_cmd *
1410 mrsas_tbolt_build_cmd(struct mrsas_instance *instance, struct scsi_address *ap,
1411     struct scsi_pkt *pkt, uchar_t *cmd_done)
1412 {
1413 	uint8_t		fp_possible = 0;
1414 	uint32_t	index;
1415 	uint32_t	lba_count = 0;
1416 	uint32_t	start_lba_hi = 0;
1417 	uint32_t	start_lba_lo = 0;
1418 	ddi_acc_handle_t acc_handle =
1419 	    instance->mpi2_frame_pool_dma_obj.acc_handle;
1420 	struct mrsas_cmd		*cmd = NULL;
1421 	struct scsa_cmd			*acmd = PKT2CMD(pkt);
1422 	MRSAS_REQUEST_DESCRIPTOR_UNION	*ReqDescUnion;
1423 	Mpi2RaidSCSIIORequest_t		*scsi_raid_io;
1424 	uint32_t			datalen;
1425 	struct IO_REQUEST_INFO io_info;
1426 	MR_FW_RAID_MAP_ALL *local_map_ptr;
1427 	uint16_t pd_cmd_cdblen;
1428 
1429 	con_log(CL_DLEVEL1, (CE_NOTE,
1430 	    "chkpnt: Entered mrsas_tbolt_build_cmd:%d", __LINE__));
1431 
1432 	/* Find out if this is a logical or physical drive command. */
1433 	acmd->islogical = MRDRV_IS_LOGICAL(ap);
1434 	acmd->device_id = MAP_DEVICE_ID(instance, ap);
1435 
1436 	*cmd_done = 0;
1437 
1438 	/* get the command packet */
1439 	if (!(cmd = get_raid_msg_pkt(instance))) {
1440 		DTRACE_PROBE2(tbolt_build_cmd_mfi_err, uint16_t,
1441 		    instance->fw_outstanding, uint16_t, instance->max_fw_cmds);
1442 		return (NULL);
1443 	}
1444 
1445 	index = cmd->index;
1446 	ReqDescUnion =	mr_sas_get_request_descriptor(instance, index);
1447 	ReqDescUnion->Words = 0;
1448 	ReqDescUnion->SCSIIO.SMID = cmd->SMID;
1449 	ReqDescUnion->SCSIIO.RequestFlags =
1450 	    (MPI2_REQ_DESCRIPT_FLAGS_LD_IO <<
1451 	    MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1452 
1453 
1454 	cmd->request_desc = ReqDescUnion;
1455 	cmd->pkt = pkt;
1456 	cmd->cmd = acmd;
1457 
1458 	DTRACE_PROBE4(tbolt_build_cmd, uint8_t, pkt->pkt_cdbp[0],
1459 	    ulong_t, acmd->cmd_dmacount, ulong_t, acmd->cmd_dma_len,
1460 	    uint16_t, acmd->device_id);
1461 
1462 	/* determine the command direction */
1463 	if (acmd->cmd_flags & CFLAG_DMASEND) {
1464 		if (acmd->cmd_flags & CFLAG_CONSISTENT) {
1465 			(void) ddi_dma_sync(acmd->cmd_dmahandle,
1466 			    acmd->cmd_dma_offset, acmd->cmd_dma_len,
1467 			    DDI_DMA_SYNC_FORDEV);
1468 		}
1469 	} else if (acmd->cmd_flags & ~CFLAG_DMASEND) {
1470 		if (acmd->cmd_flags & CFLAG_CONSISTENT) {
1471 			(void) ddi_dma_sync(acmd->cmd_dmahandle,
1472 			    acmd->cmd_dma_offset, acmd->cmd_dma_len,
1473 			    DDI_DMA_SYNC_FORCPU);
1474 		}
1475 	} else {
1476 		con_log(CL_ANN, (CE_NOTE, "NO DMA"));
1477 	}
1478 
1479 
1480 	/* get SCSI_IO raid message frame pointer */
1481 	scsi_raid_io = (Mpi2RaidSCSIIORequest_t *)cmd->scsi_io_request;
1482 
1483 	/* zero out SCSI_IO raid message frame */
1484 	bzero(scsi_raid_io, sizeof (Mpi2RaidSCSIIORequest_t));
1485 
1486 	/* Set the ldTargetId set by BuildRaidContext() */
1487 	ddi_put16(acc_handle, &scsi_raid_io->RaidContext.ldTargetId,
1488 	    acmd->device_id);
1489 
1490 	/*  Copy CDB to scsi_io_request message frame */
1491 	ddi_rep_put8(acc_handle,
1492 	    (uint8_t *)pkt->pkt_cdbp, (uint8_t *)scsi_raid_io->CDB.CDB32,
1493 	    acmd->cmd_cdblen, DDI_DEV_AUTOINCR);
1494 
1495 	/*
1496 	 * Just the CDB length; the rest of the flags are zero.
1497 	 * This will be modified later.
1498 	 */
1499 	ddi_put16(acc_handle, &scsi_raid_io->IoFlags, acmd->cmd_cdblen);
1500 
1501 	pd_cmd_cdblen = acmd->cmd_cdblen;
1502 
1503 	if (acmd->islogical) {
1504 
1505 		switch (pkt->pkt_cdbp[0]) {
1506 		case SCMD_READ:
1507 		case SCMD_WRITE:
1508 		case SCMD_READ_G1:
1509 		case SCMD_WRITE_G1:
1510 		case SCMD_READ_G4:
1511 		case SCMD_WRITE_G4:
1512 		case SCMD_READ_G5:
1513 		case SCMD_WRITE_G5:
1514 
1515 			/* Initialize sense Information */
1516 			if (cmd->sense1 == NULL) {
1517 				con_log(CL_ANN, (CE_NOTE, "tbolt_build_cmd: "
1518 				    "Sense buffer ptr NULL "));
1519 			}
1520 			bzero(cmd->sense1, SENSE_LENGTH);
1521 			con_log(CL_DLEVEL2, (CE_NOTE, "tbolt_build_cmd "
1522 			    "CDB[0] = %x\n", pkt->pkt_cdbp[0]));
1523 
1524 			if (acmd->cmd_cdblen == CDB_GROUP0) {
1525 				/* 6-byte cdb */
1526 				lba_count = (uint16_t)(pkt->pkt_cdbp[4]);
1527 				start_lba_lo = ((uint32_t)(pkt->pkt_cdbp[3]) |
1528 				    ((uint32_t)(pkt->pkt_cdbp[2]) << 8) |
1529 				    ((uint32_t)((pkt->pkt_cdbp[1]) & 0x1F)
1530 				    << 16));
1531 			} else if (acmd->cmd_cdblen == CDB_GROUP1) {
1532 				/* 10-byte cdb */
1533 				lba_count =
1534 				    (((uint16_t)(pkt->pkt_cdbp[8])) |
1535 				    ((uint16_t)(pkt->pkt_cdbp[7]) << 8));
1536 
1537 				start_lba_lo =
1538 				    (((uint32_t)(pkt->pkt_cdbp[5])) |
1539 				    ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
1540 				    ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
1541 				    ((uint32_t)(pkt->pkt_cdbp[2]) << 24));
1542 
1543 			} else if (acmd->cmd_cdblen == CDB_GROUP5) {
1544 				/* 12-byte cdb */
1545 				lba_count = (
1546 				    ((uint32_t)(pkt->pkt_cdbp[9])) |
1547 				    ((uint32_t)(pkt->pkt_cdbp[8]) << 8) |
1548 				    ((uint32_t)(pkt->pkt_cdbp[7]) << 16) |
1549 				    ((uint32_t)(pkt->pkt_cdbp[6]) << 24));
1550 
1551 				start_lba_lo =
1552 				    (((uint32_t)(pkt->pkt_cdbp[5])) |
1553 				    ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
1554 				    ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
1555 				    ((uint32_t)(pkt->pkt_cdbp[2]) << 24));
1556 
1557 			} else if (acmd->cmd_cdblen == CDB_GROUP4) {
1558 				/* 16-byte cdb */
1559 				lba_count = (
1560 				    ((uint32_t)(pkt->pkt_cdbp[13])) |
1561 				    ((uint32_t)(pkt->pkt_cdbp[12]) << 8) |
1562 				    ((uint32_t)(pkt->pkt_cdbp[11]) << 16) |
1563 				    ((uint32_t)(pkt->pkt_cdbp[10]) << 24));
1564 
1565 				start_lba_lo = (
1566 				    ((uint32_t)(pkt->pkt_cdbp[9])) |
1567 				    ((uint32_t)(pkt->pkt_cdbp[8]) << 8) |
1568 				    ((uint32_t)(pkt->pkt_cdbp[7]) << 16) |
1569 				    ((uint32_t)(pkt->pkt_cdbp[6]) << 24));
1570 
1571 				start_lba_hi = (
1572 				    ((uint32_t)(pkt->pkt_cdbp[5])) |
1573 				    ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
1574 				    ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
1575 				    ((uint32_t)(pkt->pkt_cdbp[2]) << 24));
1576 			}
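			/*
			 * Illustrative example: a READ(10) whose CDB bytes
			 * 2..5 are 0x00 0x12 0x34 0x56 yields start_lba_lo
			 * 0x00123456, and bytes 7..8 of 0x00 0x80 give
			 * lba_count 0x80 (128 blocks).
			 */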
1577 
1578 			if (instance->tbolt &&
1579 			    ((lba_count * 512) > mrsas_tbolt_max_cap_maxxfer)) {
1580 				dev_err(instance->dip, CE_WARN,
1581 				    "IO SECTOR COUNT exceeds "
1582 				    "controller limit 0x%x sectors",
1583 				    lba_count);
1584 			}
1585 
1586 			bzero(&io_info, sizeof (struct IO_REQUEST_INFO));
1587 			io_info.ldStartBlock = ((uint64_t)start_lba_hi << 32) |
1588 			    start_lba_lo;
1589 			io_info.numBlocks = lba_count;
1590 			io_info.ldTgtId = acmd->device_id;
1591 
1592 			if (acmd->cmd_flags & CFLAG_DMASEND)
1593 				io_info.isRead = 0;
1594 			else
1595 				io_info.isRead = 1;
1596 
1597 
1598 			/* Acquire SYNC MAP UPDATE lock */
1599 			mutex_enter(&instance->sync_map_mtx);
1600 
1601 			local_map_ptr =
1602 			    instance->ld_map[(instance->map_id & 1)];
1603 
1604 			if ((MR_TargetIdToLdGet(
1605 			    acmd->device_id, local_map_ptr) >=
1606 			    MAX_LOGICAL_DRIVES) || !instance->fast_path_io) {
1607 				dev_err(instance->dip, CE_NOTE,
1608 				    "Fast Path NOT Possible, "
1609 				    "targetId >= MAX_LOGICAL_DRIVES || "
1610 				    "!instance->fast_path_io");
1611 				fp_possible = 0;
1612 				/* Set Regionlock flags to BYPASS */
1613 				/* io_request->RaidContext.regLockFlags  = 0; */
1614 				ddi_put8(acc_handle,
1615 				    &scsi_raid_io->RaidContext.regLockFlags, 0);
1616 			} else {
1617 				if (MR_BuildRaidContext(instance, &io_info,
1618 				    &scsi_raid_io->RaidContext, local_map_ptr))
1619 					fp_possible = io_info.fpOkForIo;
1620 			}
1621 
1622 			if (!enable_fp)
1623 				fp_possible = 0;
1624 
1625 			con_log(CL_ANN1, (CE_NOTE, "enable_fp %d  "
1626 			    "instance->fast_path_io %d fp_possible %d",
1627 			    enable_fp, instance->fast_path_io, fp_possible));
1628 
1629 		if (fp_possible) {
1630 
1631 			/* Check for DIF enabled LD */
1632 			if (MR_CheckDIF(acmd->device_id, local_map_ptr)) {
1633 				/* Prepare 32 Byte CDB for DIF capable Disk */
1634 				mrsas_tbolt_prepare_cdb(instance,
1635 				    scsi_raid_io->CDB.CDB32,
1636 				    &io_info, scsi_raid_io, start_lba_lo);
1637 			} else {
1638 				mrsas_tbolt_set_pd_lba(scsi_raid_io->CDB.CDB32,
1639 				    sizeof (scsi_raid_io->CDB.CDB32),
1640 				    (uint8_t *)&pd_cmd_cdblen,
1641 				    io_info.pdBlock, io_info.numBlocks);
1642 				ddi_put16(acc_handle,
1643 				    &scsi_raid_io->IoFlags, pd_cmd_cdblen);
1644 			}
1645 
1646 			ddi_put8(acc_handle, &scsi_raid_io->Function,
1647 			    MPI2_FUNCTION_SCSI_IO_REQUEST);
1648 
1649 			ReqDescUnion->SCSIIO.RequestFlags =
1650 			    (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
1651 			    MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1652 
1653 			if (instance->gen3) {
1654 				uint8_t regLockFlags = ddi_get8(acc_handle,
1655 				    &scsi_raid_io->RaidContext.regLockFlags);
1656 				uint16_t IoFlags = ddi_get16(acc_handle,
1657 				    &scsi_raid_io->IoFlags);
1658 
1659 				if (regLockFlags == REGION_TYPE_UNUSED)
1660 					ReqDescUnion->SCSIIO.RequestFlags =
1661 					    (MPI2_REQ_DESCRIPT_FLAGS_NO_LOCK <<
1662 					    MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1663 
1664 				IoFlags |=
1665 				    MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH;
1666 				regLockFlags |=
1667 				    (MR_RL_FLAGS_GRANT_DESTINATION_CUDA |
1668 				    MR_RL_FLAGS_SEQ_NUM_ENABLE);
1669 
1670 				ddi_put8(acc_handle,
1671 				    &scsi_raid_io->ChainOffset, 0);
1672 				ddi_put8(acc_handle,
1673 				    &scsi_raid_io->RaidContext.nsegType,
1674 				    ((0x01 << MPI2_NSEG_FLAGS_SHIFT) |
1675 				    MPI2_TYPE_CUDA));
1676 				ddi_put8(acc_handle,
1677 				    &scsi_raid_io->RaidContext.regLockFlags,
1678 				    regLockFlags);
1679 				ddi_put16(acc_handle,
1680 				    &scsi_raid_io->IoFlags, IoFlags);
1681 			}
1682 
1683 			if ((instance->load_balance_info[
1684 			    acmd->device_id].loadBalanceFlag) &&
1685 			    (io_info.isRead)) {
1686 				io_info.devHandle =
1687 				    get_updated_dev_handle(&instance->
1688 				    load_balance_info[acmd->device_id],
1689 				    &io_info);
1690 				cmd->load_balance_flag |=
1691 				    MEGASAS_LOAD_BALANCE_FLAG;
1692 			} else {
1693 				cmd->load_balance_flag &=
1694 				    ~MEGASAS_LOAD_BALANCE_FLAG;
1695 			}
1696 
1697 			ReqDescUnion->SCSIIO.DevHandle = io_info.devHandle;
1698 			ddi_put16(acc_handle, &scsi_raid_io->DevHandle,
1699 			    io_info.devHandle);
1700 
1701 		} else { /* FP Not Possible */
1702 
1703 			ddi_put8(acc_handle, &scsi_raid_io->Function,
1704 			    MPI2_FUNCTION_LD_IO_REQUEST);
1705 
1706 			ddi_put16(acc_handle,
1707 			    &scsi_raid_io->DevHandle, acmd->device_id);
1708 
1709 			ReqDescUnion->SCSIIO.RequestFlags =
1710 			    (MPI2_REQ_DESCRIPT_FLAGS_LD_IO <<
1711 			    MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1712 
1713 			ddi_put16(acc_handle,
1714 			    &scsi_raid_io->RaidContext.timeoutValue,
1715 			    local_map_ptr->raidMap.fpPdIoTimeoutSec);
1716 
1717 			if (instance->gen3) {
1718 				uint8_t regLockFlags = ddi_get8(acc_handle,
1719 				    &scsi_raid_io->RaidContext.regLockFlags);
1720 
1721 				if (regLockFlags == REGION_TYPE_UNUSED) {
1722 					ReqDescUnion->SCSIIO.RequestFlags =
1723 					    (MPI2_REQ_DESCRIPT_FLAGS_NO_LOCK <<
1724 					    MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1725 				}
1726 
1727 				regLockFlags |=
1728 				    (MR_RL_FLAGS_GRANT_DESTINATION_CPU0 |
1729 				    MR_RL_FLAGS_SEQ_NUM_ENABLE);
1730 
1731 				ddi_put8(acc_handle,
1732 				    &scsi_raid_io->RaidContext.nsegType,
1733 				    ((0x01 << MPI2_NSEG_FLAGS_SHIFT) |
1734 				    MPI2_TYPE_CUDA));
1735 				ddi_put8(acc_handle,
1736 				    &scsi_raid_io->RaidContext.regLockFlags,
1737 				    regLockFlags);
1738 			}
1739 		} /* Not FP */
1740 
1741 		/* Release SYNC MAP UPDATE lock */
1742 		mutex_exit(&instance->sync_map_mtx);
1743 
1744 		break;
1745 
1746 		case SCMD_SYNCHRONIZE_CACHE: { /* 0x35 */
1747 			return_raid_msg_pkt(instance, cmd);
1748 			*cmd_done = 1;
1749 			return (NULL);
1750 		}
1751 
1752 		case SCMD_MODE_SENSE:
1753 		case SCMD_MODE_SENSE_G1: {
1754 			union scsi_cdb	*cdbp;
1755 			uint16_t	page_code;
1756 
1757 			cdbp = (void *)pkt->pkt_cdbp;
1758 			page_code = (uint16_t)cdbp->cdb_un.sg.scsi[0];
1759 			switch (page_code) {
1760 			case 0x3:
1761 			case 0x4:
1762 				(void) mrsas_mode_sense_build(pkt);
1763 				return_raid_msg_pkt(instance, cmd);
1764 				*cmd_done = 1;
1765 				return (NULL);
1766 			}
1767 			return (cmd);
1768 		}
1769 
1770 		default:
1771 			/* Pass-through command to logical drive */
1772 			ddi_put8(acc_handle, &scsi_raid_io->Function,
1773 			    MPI2_FUNCTION_LD_IO_REQUEST);
1774 			ddi_put8(acc_handle, &scsi_raid_io->LUN[1], acmd->lun);
1775 			ddi_put16(acc_handle, &scsi_raid_io->DevHandle,
1776 			    acmd->device_id);
1777 			ReqDescUnion->SCSIIO.RequestFlags =
1778 			    (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
1779 			    MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1780 			break;
1781 		}
1782 	} else { /* Physical */
1783 		/* Pass-through command to physical drive */
1784 
1785 		/* Acquire SYNC MAP UPDATE lock */
1786 		mutex_enter(&instance->sync_map_mtx);
1787 
1788 		local_map_ptr = instance->ld_map[instance->map_id & 1];
1789 
1790 		ddi_put8(acc_handle, &scsi_raid_io->Function,
1791 		    MPI2_FUNCTION_SCSI_IO_REQUEST);
1792 
1793 		ReqDescUnion->SCSIIO.RequestFlags =
1794 		    (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
1795 		    MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1796 
1797 		ddi_put16(acc_handle, &scsi_raid_io->DevHandle,
1798 		    local_map_ptr->raidMap.
1799 		    devHndlInfo[acmd->device_id].curDevHdl);
1800 
1801 		/* Set regLockFlasgs to REGION_TYPE_BYPASS */
1802 		ddi_put8(acc_handle,
1803 		    &scsi_raid_io->RaidContext.regLockFlags, 0);
1804 		ddi_put64(acc_handle,
1805 		    &scsi_raid_io->RaidContext.regLockRowLBA, 0);
1806 		ddi_put32(acc_handle,
1807 		    &scsi_raid_io->RaidContext.regLockLength, 0);
1808 		ddi_put8(acc_handle,
1809 		    &scsi_raid_io->RaidContext.RAIDFlags,
1810 		    MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD <<
1811 		    MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT);
1812 		ddi_put16(acc_handle,
1813 		    &scsi_raid_io->RaidContext.timeoutValue,
1814 		    local_map_ptr->raidMap.fpPdIoTimeoutSec);
1815 		ddi_put16(acc_handle,
1816 		    &scsi_raid_io->RaidContext.ldTargetId,
1817 		    acmd->device_id);
1818 		ddi_put8(acc_handle,
1819 		    &scsi_raid_io->LUN[1], acmd->lun);
1820 
1821 		if (instance->fast_path_io && instance->gen3) {
1822 			uint16_t IoFlags = ddi_get16(acc_handle,
1823 			    &scsi_raid_io->IoFlags);
1824 			IoFlags |= MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH;
1825 			ddi_put16(acc_handle, &scsi_raid_io->IoFlags, IoFlags);
1826 		}
1827 		ddi_put16(acc_handle, &ReqDescUnion->SCSIIO.DevHandle,
1828 		    local_map_ptr->raidMap.
1829 		    devHndlInfo[acmd->device_id].curDevHdl);
1830 
1831 		/* Release SYNC MAP UPDATE lock */
1832 		mutex_exit(&instance->sync_map_mtx);
1833 	}
1834 
1835 	/* Set sense buffer physical address/length in scsi_io_request. */
1836 	ddi_put32(acc_handle, &scsi_raid_io->SenseBufferLowAddress,
1837 	    cmd->sense_phys_addr1);
1838 	ddi_put8(acc_handle, &scsi_raid_io->SenseBufferLength, SENSE_LENGTH);
1839 
1840 	/* Construct SGL */
1841 	ddi_put8(acc_handle, &scsi_raid_io->SGLOffset0,
1842 	    offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4);
1843 
1844 	(void) mr_sas_tbolt_build_sgl(instance, acmd, cmd,
1845 	    scsi_raid_io, &datalen);
1846 
1847 	ddi_put32(acc_handle, &scsi_raid_io->DataLength, datalen);
1848 
1849 	con_log(CL_ANN, (CE_CONT,
1850 	    "tbolt_build_cmd CDB[0] =%x, TargetID =%x\n",
1851 	    pkt->pkt_cdbp[0], acmd->device_id));
1852 	con_log(CL_DLEVEL1, (CE_CONT,
1853 	    "data length = %x\n",
1854 	    scsi_raid_io->DataLength));
1855 	con_log(CL_DLEVEL1, (CE_CONT,
1856 	    "cdb length = %x\n",
1857 	    acmd->cmd_cdblen));
1858 
1859 	return (cmd);
1860 }
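
/*
 * Illustrative sketch (not driver code): the READ/WRITE CDB decodes in
 * mrsas_tbolt_build_cmd() above assemble big-endian CDB bytes into
 * host-order values.  For a 10-byte CDB, bytes 2-5 carry the 32-bit
 * LBA and bytes 7-8 the 16-bit transfer length:
 *
 *	start_lba_lo = ((uint32_t)pkt->pkt_cdbp[2] << 24) |
 *	    ((uint32_t)pkt->pkt_cdbp[3] << 16) |
 *	    ((uint32_t)pkt->pkt_cdbp[4] << 8) |
 *	    (uint32_t)pkt->pkt_cdbp[5];
 *	lba_count = ((uint32_t)pkt->pkt_cdbp[7] << 8) |
 *	    (uint32_t)pkt->pkt_cdbp[8];
 */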
1861 
1862 uint32_t
1863 tbolt_read_fw_status_reg(struct mrsas_instance *instance)
1864 {
1865 	return ((uint32_t)RD_OB_SCRATCH_PAD_0(instance));
1866 }
1867 
1868 void
1869 tbolt_issue_cmd(struct mrsas_cmd *cmd, struct mrsas_instance *instance)
1870 {
1871 	MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc = cmd->request_desc;
1872 	atomic_inc_16(&instance->fw_outstanding);
1873 
1874 	struct scsi_pkt *pkt;
1875 
1876 	con_log(CL_ANN1,
1877 	    (CE_NOTE, "tbolt_issue_cmd: cmd->[SMID]=0x%X", cmd->SMID));
1878 
1879 	con_log(CL_DLEVEL1, (CE_CONT,
1880 	    " [req desc Words] %" PRIx64 " \n", req_desc->Words));
1881 	con_log(CL_DLEVEL1, (CE_CONT,
1882 	    " [req desc low part] %x \n",
1883 	    (uint_t)(req_desc->Words & 0xffffffff)));
1884 	con_log(CL_DLEVEL1, (CE_CONT,
1885 	    " [req desc high part] %x \n", (uint_t)(req_desc->Words >> 32)));
1886 	pkt = cmd->pkt;
1887 
1888 	if (pkt) {
1889 		con_log(CL_ANN1, (CE_CONT, "%llx : tbolt_issue_cmd: "
1890 		    "ISSUED CMD TO FW : cmd"
1891 		    " : %p instance : %p pkt : %p pkt_time : %x\n",
1892 		    gethrtime(), (void *)cmd, (void *)instance,
1893 		    (void *)pkt, cmd->drv_pkt_time));
1894 		if (instance->adapterresetinprogress) {
1895 			cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
1896 			con_log(CL_ANN, (CE_NOTE,
1897 			    "TBOLT Reset the scsi_pkt timer"));
1898 		} else {
1899 			push_pending_mfi_pkt(instance, cmd);
1900 		}
1901 
1902 	} else {
1903 		con_log(CL_ANN1, (CE_CONT, "%llx : tbolt_issue_cmd: "
1904 		    "ISSUED CMD TO FW : cmd : %p, instance: %p "
1905 		    "(NO PKT)\n", gethrtime(), (void *)cmd, (void *)instance));
1906 	}
1907 
1908 	/* Issue the command to the FW */
1909 	mutex_enter(&instance->reg_write_mtx);
1910 	WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
1911 	WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
1912 	mutex_exit(&instance->reg_write_mtx);
1913 }
1914 
1915 /*
1916  * issue_cmd_in_sync_mode
1917  */
1918 int
1919 tbolt_issue_cmd_in_sync_mode(struct mrsas_instance *instance,
1920     struct mrsas_cmd *cmd)
1921 {
1922 	int		i;
1923 	uint32_t	msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC;
1924 	MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc = cmd->request_desc;
1925 
1926 	struct mrsas_header	*hdr;
1927 	hdr = (struct mrsas_header *)&cmd->frame->hdr;
1928 
1929 	con_log(CL_ANN,
1930 	    (CE_NOTE, "tbolt_issue_cmd_in_sync_mode: cmd->[SMID]=0x%X",
1931 	    cmd->SMID));
1932 
1933 
1934 	if (instance->adapterresetinprogress) {
1935 		cmd->drv_pkt_time = ddi_get16(
1936 		    cmd->frame_dma_obj.acc_handle, &hdr->timeout);
1937 		if (cmd->drv_pkt_time < debug_timeout_g)
1938 			cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
1939 		con_log(CL_ANN, (CE_NOTE, "tbolt_issue_cmd_in_sync_mode: "
1940 		    "RESET-IN-PROGRESS, issue cmd & return."));
1941 
1942 		mutex_enter(&instance->reg_write_mtx);
1943 		WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
1944 		WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
1945 		mutex_exit(&instance->reg_write_mtx);
1946 
1947 		return (DDI_SUCCESS);
1948 	} else {
1949 		con_log(CL_ANN1, (CE_NOTE,
1950 		    "tbolt_issue_cmd_in_sync_mode: pushing the pkt"));
1951 		push_pending_mfi_pkt(instance, cmd);
1952 	}
1953 
1954 	con_log(CL_DLEVEL2, (CE_NOTE,
1955 	    "HighQport offset :%p",
1956 	    (void *)((uintptr_t)(instance)->regmap + IB_HIGH_QPORT)));
1957 	con_log(CL_DLEVEL2, (CE_NOTE,
1958 	    "LowQport offset :%p",
1959 	    (void *)((uintptr_t)(instance)->regmap + IB_LOW_QPORT)));
1960 
1961 	cmd->sync_cmd = MRSAS_TRUE;
1962 	cmd->cmd_status = ENODATA;
1963 
1964 
1965 	mutex_enter(&instance->reg_write_mtx);
1966 	WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
1967 	WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
1968 	mutex_exit(&instance->reg_write_mtx);
1969 
1970 	con_log(CL_ANN1, (CE_NOTE,
1971 	    " req desc high part %x", (uint_t)(req_desc->Words >> 32)));
1972 	con_log(CL_ANN1, (CE_NOTE, " req desc low part %x",
1973 	    (uint_t)(req_desc->Words & 0xffffffff)));
1974 
1975 	mutex_enter(&instance->int_cmd_mtx);
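	/*
	 * Wait for tbolt_complete_cmd_in_sync_mode() to broadcast on
	 * int_cmd_cv; each cv_wait() blocks until signalled, so i
	 * bounds the number of wakeups tolerated, not elapsed msecs.
	 */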
1976 	for (i = 0; i < msecs && (cmd->cmd_status == ENODATA); i++) {
1977 		cv_wait(&instance->int_cmd_cv, &instance->int_cmd_mtx);
1978 	}
1979 	mutex_exit(&instance->int_cmd_mtx);
1980 
1981 
1982 	if (i < (msecs - 1)) {
1983 		return (DDI_SUCCESS);
1984 	} else {
1985 		return (DDI_FAILURE);
1986 	}
1987 }
1988 
1989 /*
1990  * issue_cmd_in_poll_mode
1991  */
1992 int
1993 tbolt_issue_cmd_in_poll_mode(struct mrsas_instance *instance,
1994     struct mrsas_cmd *cmd)
1995 {
1996 	int		i;
1997 	uint16_t	flags;
1998 	uint32_t	msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC;
1999 	struct mrsas_header *frame_hdr;
2000 
2001 	con_log(CL_ANN,
2002 	    (CE_NOTE, "tbolt_issue_cmd_in_poll_mode: cmd->[SMID]=0x%X",
2003 	    cmd->SMID));
2004 
2005 	MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc = cmd->request_desc;
2006 
2007 	frame_hdr = (struct mrsas_header *)&cmd->frame->hdr;
2008 	ddi_put8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status,
2009 	    MFI_CMD_STATUS_POLL_MODE);
2010 	flags = ddi_get16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags);
2011 	flags	|= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
2012 	ddi_put16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags, flags);
2013 
2014 	con_log(CL_ANN1, (CE_NOTE, " req desc low part %x",
2015 	    (uint_t)(req_desc->Words & 0xffffffff)));
2016 	con_log(CL_ANN1, (CE_NOTE,
2017 	    " req desc high part %x", (uint_t)(req_desc->Words >> 32)));
2018 
2019 	/* issue the frame using inbound queue port */
2020 	mutex_enter(&instance->reg_write_mtx);
2021 	WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
2022 	WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
2023 	mutex_exit(&instance->reg_write_mtx);
2024 
2025 	for (i = 0; i < msecs && (
2026 	    ddi_get8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status)
2027 	    == MFI_CMD_STATUS_POLL_MODE); i++) {
2028 		/* wait for cmd_status to change from 0xFF */
2029 		drv_usecwait(MILLISEC); /* wait for 1000 usecs */
2030 	}
2031 
2032 	DTRACE_PROBE1(tbolt_complete_poll_cmd, uint8_t, i);
2033 
2034 	if (ddi_get8(cmd->frame_dma_obj.acc_handle,
2035 	    &frame_hdr->cmd_status) == MFI_CMD_STATUS_POLL_MODE) {
2036 		con_log(CL_ANN1, (CE_NOTE,
2037 		    " cmd failed %" PRIx64, (req_desc->Words)));
2038 		return (DDI_FAILURE);
2039 	}
2040 
2041 	return (DDI_SUCCESS);
2042 }
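
/*
 * Illustrative summary (not driver code) of the poll-mode protocol used
 * above: pre-set the frame's status byte to the poll sentinel, tell the
 * FW not to post a reply descriptor, then spin on the status byte:
 *
 *	hdr->cmd_status = MFI_CMD_STATUS_POLL_MODE;
 *	hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
 *	(issue the request descriptor via the inbound queue ports)
 *	while (hdr->cmd_status == MFI_CMD_STATUS_POLL_MODE)
 *		drv_usecwait(MILLISEC);		(bounded by msecs)
 */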
2043 
2044 void
2045 tbolt_enable_intr(struct mrsas_instance *instance)
2046 {
2047 	/* TODO: For Thunderbolt/Invader also clear intr on enable */
2048 	/* writel(~0, &regs->outbound_intr_status); */
2049 	/* readl(&regs->outbound_intr_status); */
2050 
2051 	WR_OB_INTR_MASK(~(MFI_FUSION_ENABLE_INTERRUPT_MASK), instance);
2052 
2053 	/* dummy read to force PCI flush */
2054 	(void) RD_OB_INTR_MASK(instance);
2055 
2056 }
2057 
2058 void
2059 tbolt_disable_intr(struct mrsas_instance *instance)
2060 {
2061 	uint32_t mask = 0xFFFFFFFF;
2062 
2063 	WR_OB_INTR_MASK(mask, instance);
2064 
2065 	/* Dummy readl to force pci flush */
2066 
2067 	(void) RD_OB_INTR_MASK(instance);
2068 }
2069 
2070 
2071 int
2072 tbolt_intr_ack(struct mrsas_instance *instance)
2073 {
2074 	uint32_t	status;
2075 
2076 	/* check if it is our interrupt */
2077 	status = RD_OB_INTR_STATUS(instance);
2078 	con_log(CL_ANN1, (CE_NOTE,
2079 	    "chkpnt: Entered tbolt_intr_ack status = %d", status));
2080 
2081 	if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK)) {
2082 		return (DDI_INTR_UNCLAIMED);
2083 	}
2084 
2085 	if (mrsas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) {
2086 		ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
2087 		return (DDI_INTR_UNCLAIMED);
2088 	}
2089 
2090 	if ((status & 1) || (status & MFI_FUSION_ENABLE_INTERRUPT_MASK)) {
2091 		/* clear the interrupt by writing back the same value */
2092 		WR_OB_INTR_STATUS(status, instance);
2093 		/* dummy READ */
2094 		(void) RD_OB_INTR_STATUS(instance);
2095 	}
2096 	return (DDI_INTR_CLAIMED);
2097 }
2098 
2099 /*
2100  * get_raid_msg_pkt : Get a command from the free pool.
2101  * After a successful allocation, the caller of this routine
2102  * must clear the frame buffer (bzero) before using the
2103  * packet further.
2104  *
2105  * ***** Note *****
2106  * After clearing the frame buffer, the context id of the
2107  * frame buffer MUST be restored; see the usage sketch below.
2108  */
2109 
2110 struct mrsas_cmd *
2111 get_raid_msg_pkt(struct mrsas_instance *instance)
2112 {
2113 	mlist_t			*head = &instance->cmd_pool_list;
2114 	struct mrsas_cmd	*cmd = NULL;
2115 
2116 	mutex_enter(&instance->cmd_pool_mtx);
2117 	ASSERT(mutex_owned(&instance->cmd_pool_mtx));
2118 
2119 
2120 	if (!mlist_empty(head)) {
2121 		cmd = mlist_entry(head->next, struct mrsas_cmd, list);
2122 		mlist_del_init(head->next);
2123 	}
2124 	if (cmd != NULL) {
2125 		cmd->pkt = NULL;
2126 		cmd->retry_count_for_ocr = 0;
2127 		cmd->drv_pkt_time = 0;
2128 	}
2129 	mutex_exit(&instance->cmd_pool_mtx);
2130 
2131 	if (cmd != NULL)
2132 		bzero(cmd->scsi_io_request,
2133 		    sizeof (Mpi2RaidSCSIIORequest_t));
2134 	return (cmd);
2135 }
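
/*
 * Usage sketch (illustrative only; mrsas_tbolt_sync_map_info() is a
 * real caller): clear the frame and restore the context id before
 * using the packet.
 *
 *	struct mrsas_cmd *cmd = get_raid_msg_pkt(instance);
 *	if (cmd != NULL) {
 *		bzero((char *)&cmd->frame[0], sizeof (union mrsas_frame));
 *		ddi_put32(cmd->frame_dma_obj.acc_handle,
 *		    &cmd->frame->hdr.context, cmd->index);
 *	}
 */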
2136 
2137 struct mrsas_cmd *
2138 get_raid_msg_mfi_pkt(struct mrsas_instance *instance)
2139 {
2140 	mlist_t			*head = &instance->cmd_app_pool_list;
2141 	struct mrsas_cmd	*cmd = NULL;
2142 
2143 	mutex_enter(&instance->cmd_app_pool_mtx);
2144 	ASSERT(mutex_owned(&instance->cmd_app_pool_mtx));
2145 
2146 	if (!mlist_empty(head)) {
2147 		cmd = mlist_entry(head->next, struct mrsas_cmd, list);
2148 		mlist_del_init(head->next);
2149 	}
2150 	if (cmd != NULL) {
2151 		cmd->retry_count_for_ocr = 0;
2152 		cmd->drv_pkt_time = 0;
2153 		cmd->pkt = NULL;
2154 		cmd->request_desc = NULL;
2155 
2156 	}
2157 
2158 	mutex_exit(&instance->cmd_app_pool_mtx);
2159 
2160 	if (cmd != NULL) {
2161 		bzero(cmd->scsi_io_request,
2162 		    sizeof (Mpi2RaidSCSIIORequest_t));
2163 	}
2164 
2165 	return (cmd);
2166 }
2167 
2168 /*
2169  * return_raid_msg_pkt : Return a cmd to free command pool
2170  */
2171 void
2172 return_raid_msg_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
2173 {
2174 	mutex_enter(&instance->cmd_pool_mtx);
2175 	ASSERT(mutex_owned(&instance->cmd_pool_mtx));
2176 
2177 
2178 	mlist_add_tail(&cmd->list, &instance->cmd_pool_list);
2179 
2180 	mutex_exit(&instance->cmd_pool_mtx);
2181 }
2182 
2183 void
2184 return_raid_msg_mfi_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
2185 {
2186 	mutex_enter(&instance->cmd_app_pool_mtx);
2187 	ASSERT(mutex_owned(&instance->cmd_app_pool_mtx));
2188 
2189 	mlist_add_tail(&cmd->list, &instance->cmd_app_pool_list);
2190 
2191 	mutex_exit(&instance->cmd_app_pool_mtx);
2192 }
2193 
2194 
2195 void
2196 mr_sas_tbolt_build_mfi_cmd(struct mrsas_instance *instance,
2197     struct mrsas_cmd *cmd)
2198 {
2199 	Mpi2RaidSCSIIORequest_t		*scsi_raid_io;
2200 	Mpi25IeeeSgeChain64_t		*scsi_raid_io_sgl_ieee;
2201 	MRSAS_REQUEST_DESCRIPTOR_UNION	*ReqDescUnion;
2202 	uint32_t			index;
2203 	ddi_acc_handle_t acc_handle =
2204 	    instance->mpi2_frame_pool_dma_obj.acc_handle;
2205 
2206 	if (!instance->tbolt) {
2207 		con_log(CL_ANN, (CE_NOTE, "Not MFA enabled."));
2208 		return;
2209 	}
2210 
2211 	index = cmd->index;
2212 
2213 	ReqDescUnion = mr_sas_get_request_descriptor(instance, index);
2214 
2215 	if (!ReqDescUnion) {
2216 		con_log(CL_ANN1, (CE_NOTE, "[NULL REQDESC]"));
2217 		return;
2218 	}
2219 
2220 	con_log(CL_ANN1, (CE_NOTE, "[SMID]%x", cmd->SMID));
2221 
2222 	ReqDescUnion->Words = 0;
2223 
2224 	ReqDescUnion->SCSIIO.RequestFlags =
2225 	    (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
2226 	    MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
2227 
2228 	ReqDescUnion->SCSIIO.SMID = cmd->SMID;
2229 
2230 	cmd->request_desc = ReqDescUnion;
2231 
2232 	/* get raid message frame pointer */
2233 	scsi_raid_io = (Mpi2RaidSCSIIORequest_t *)cmd->scsi_io_request;
2234 
2235 	if (instance->gen3) {
2236 		Mpi25IeeeSgeChain64_t *sgl_ptr_end = (Mpi25IeeeSgeChain64_t *)
2237 		    &scsi_raid_io->SGL.IeeeChain;
2238 		sgl_ptr_end += instance->max_sge_in_main_msg - 1;
2239 		ddi_put8(acc_handle, &sgl_ptr_end->Flags, 0);
2240 	}
2241 
2242 	ddi_put8(acc_handle, &scsi_raid_io->Function,
2243 	    MPI2_FUNCTION_PASSTHRU_IO_REQUEST);
2244 
2245 	ddi_put8(acc_handle, &scsi_raid_io->SGLOffset0,
2246 	    offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4);
2247 
2248 	ddi_put8(acc_handle, &scsi_raid_io->ChainOffset,
2249 	    (U8)offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 16);
2250 
2251 	ddi_put32(acc_handle, &scsi_raid_io->SenseBufferLowAddress,
2252 	    cmd->sense_phys_addr1);
2253 
2254 
2255 	scsi_raid_io_sgl_ieee =
2256 	    (Mpi25IeeeSgeChain64_t *)&scsi_raid_io->SGL.IeeeChain;
2257 
2258 	ddi_put64(acc_handle, &scsi_raid_io_sgl_ieee->Address,
2259 	    (U64)cmd->frame_phys_addr);
2260 
2261 	ddi_put8(acc_handle,
2262 	    &scsi_raid_io_sgl_ieee->Flags, (IEEE_SGE_FLAGS_CHAIN_ELEMENT |
2263 	    MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR));
2264 	/* LSI put hardcoded 1024 instead of MEGASAS_MAX_SZ_CHAIN_FRAME. */
2265 	ddi_put32(acc_handle, &scsi_raid_io_sgl_ieee->Length, 1024);
2266 
2267 	con_log(CL_ANN1, (CE_NOTE,
2268 	    "[MFI CMD PHY ADDRESS]:%" PRIx64,
2269 	    scsi_raid_io_sgl_ieee->Address));
2270 	con_log(CL_ANN1, (CE_NOTE,
2271 	    "[SGL Length]:%x", scsi_raid_io_sgl_ieee->Length));
2272 	con_log(CL_ANN1, (CE_NOTE, "[SGL Flags]:%x",
2273 	    scsi_raid_io_sgl_ieee->Flags));
2274 }
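
/*
 * Illustrative layout (not driver code) of what
 * mr_sas_tbolt_build_mfi_cmd() constructs: a legacy MFI frame is
 * tunnelled through the MPI2 interface as a pass-through request whose
 * single IEEE chain SGE points at the MFI frame's physical address:
 *
 *	request descriptor (RequestFlags = SCSI_IO, SMID = cmd->SMID)
 *	  -> Mpi2RaidSCSIIORequest_t
 *	       Function = MPI2_FUNCTION_PASSTHRU_IO_REQUEST
 *	       SGL.IeeeChain.Address = cmd->frame_phys_addr
 *	       SGL.IeeeChain.Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT |
 *	           MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR
 *	       SGL.IeeeChain.Length = 1024
 */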
2275 
2276 
2277 void
2278 tbolt_complete_cmd(struct mrsas_instance *instance,
2279     struct mrsas_cmd *cmd)
2280 {
2281 	uint8_t				status;
2282 	uint8_t				extStatus;
2283 	uint8_t				function;
2284 	uint8_t				arm;
2285 	struct scsa_cmd			*acmd;
2286 	struct scsi_pkt			*pkt;
2287 	struct scsi_arq_status		*arqstat;
2288 	Mpi2RaidSCSIIORequest_t		*scsi_raid_io;
2289 	LD_LOAD_BALANCE_INFO		*lbinfo;
2290 	ddi_acc_handle_t acc_handle =
2291 	    instance->mpi2_frame_pool_dma_obj.acc_handle;
2292 
2293 	scsi_raid_io = (Mpi2RaidSCSIIORequest_t *)cmd->scsi_io_request;
2294 
2295 	status = ddi_get8(acc_handle, &scsi_raid_io->RaidContext.status);
2296 	extStatus = ddi_get8(acc_handle, &scsi_raid_io->RaidContext.extStatus);
2297 
2298 	con_log(CL_DLEVEL3, (CE_NOTE, "status %x", status));
2299 	con_log(CL_DLEVEL3, (CE_NOTE, "extStatus %x", extStatus));
2300 
2301 	if (status != MFI_STAT_OK) {
2302 		con_log(CL_ANN, (CE_WARN,
2303 		    "IO Cmd Failed SMID %x", cmd->SMID));
2304 	} else {
2305 		con_log(CL_ANN, (CE_NOTE,
2306 		    "IO Cmd Success  SMID %x", cmd->SMID));
2307 	}
2308 
2309 	/* regular commands */
2310 
2311 	function = ddi_get8(acc_handle, &scsi_raid_io->Function);
2312 	DTRACE_PROBE3(tbolt_complete_cmd, uint8_t, function,
2313 	    uint8_t, status, uint8_t, extStatus);
2314 
2315 	switch (function) {
2316 
2317 	case MPI2_FUNCTION_SCSI_IO_REQUEST :  /* Fast Path IO. */
2318 		acmd =	(struct scsa_cmd *)cmd->cmd;
2319 		lbinfo = &instance->load_balance_info[acmd->device_id];
2320 
2321 		if (cmd->load_balance_flag & MEGASAS_LOAD_BALANCE_FLAG) {
2322 			arm = lbinfo->raid1DevHandle[0] ==
2323 			    scsi_raid_io->DevHandle ? 0 : 1;
2324 
2325 			lbinfo->scsi_pending_cmds[arm]--;
2326 			cmd->load_balance_flag &= ~MEGASAS_LOAD_BALANCE_FLAG;
2327 		}
2328 		con_log(CL_DLEVEL3, (CE_NOTE,
2329 		    "FastPath IO Completion Success "));
2330 		/* FALLTHRU */
2331 
2332 	case MPI2_FUNCTION_LD_IO_REQUEST :   { /* Regular Path IO. */
2333 		acmd =	(struct scsa_cmd *)cmd->cmd;
2334 		pkt =	(struct scsi_pkt *)CMD2PKT(acmd);
2335 
2336 		if (acmd->cmd_flags & CFLAG_DMAVALID) {
2337 			if (acmd->cmd_flags & CFLAG_CONSISTENT) {
2338 				(void) ddi_dma_sync(acmd->cmd_dmahandle,
2339 				    acmd->cmd_dma_offset, acmd->cmd_dma_len,
2340 				    DDI_DMA_SYNC_FORCPU);
2341 			}
2342 		}
2343 
2344 		pkt->pkt_reason		= CMD_CMPLT;
2345 		pkt->pkt_statistics	= 0;
2346 		pkt->pkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
2347 		    STATE_SENT_CMD | STATE_XFERRED_DATA | STATE_GOT_STATUS;
2348 
2349 		con_log(CL_ANN, (CE_CONT, " CDB[0] = %x completed for %s: "
2350 		    "size %lx SMID %x cmd_status %x", pkt->pkt_cdbp[0],
2351 		    ((acmd->islogical) ? "LD" : "PD"),
2352 		    acmd->cmd_dmacount, cmd->SMID, status));
2353 
2354 		if (pkt->pkt_cdbp[0] == SCMD_INQUIRY) {
2355 			struct scsi_inquiry	*inq;
2356 
2357 			if (acmd->cmd_dmacount != 0) {
2358 				bp_mapin(acmd->cmd_buf);
2359 				inq = (struct scsi_inquiry *)
2360 				    acmd->cmd_buf->b_un.b_addr;
2361 
2362 				/* don't expose physical drives to OS */
2363 				if (acmd->islogical &&
2364 				    (status == MFI_STAT_OK)) {
2365 					display_scsi_inquiry((caddr_t)inq);
2366 				} else if ((status == MFI_STAT_OK) &&
2367 				    inq->inq_dtype == DTYPE_DIRECT) {
2368 					display_scsi_inquiry((caddr_t)inq);
2369 				} else {
2370 					/* for physical disk */
2371 					status = MFI_STAT_DEVICE_NOT_FOUND;
2372 				}
2373 			}
2374 		}
2375 
2376 		switch (status) {
2377 		case MFI_STAT_OK:
2378 			pkt->pkt_scbp[0] = STATUS_GOOD;
2379 			break;
2380 		case MFI_STAT_LD_CC_IN_PROGRESS:
2381 		case MFI_STAT_LD_RECON_IN_PROGRESS:
2382 			pkt->pkt_scbp[0] = STATUS_GOOD;
2383 			break;
2384 		case MFI_STAT_LD_INIT_IN_PROGRESS:
2385 			pkt->pkt_reason	= CMD_TRAN_ERR;
2386 			break;
2387 		case MFI_STAT_SCSI_IO_FAILED:
2388 			dev_err(instance->dip, CE_WARN,
2389 			    "tbolt_complete_cmd: scsi_io failed");
2390 			pkt->pkt_reason	= CMD_TRAN_ERR;
2391 			break;
2392 		case MFI_STAT_SCSI_DONE_WITH_ERROR:
2393 			con_log(CL_ANN, (CE_WARN,
2394 			    "tbolt_complete_cmd: scsi_done with error"));
2395 
2396 			pkt->pkt_reason	= CMD_CMPLT;
2397 			((struct scsi_status *)pkt->pkt_scbp)->sts_chk = 1;
2398 
2399 			if (pkt->pkt_cdbp[0] == SCMD_TEST_UNIT_READY) {
2400 				con_log(CL_ANN,
2401 				    (CE_WARN, "TEST_UNIT_READY fail"));
2402 			} else {
2403 				pkt->pkt_state |= STATE_ARQ_DONE;
2404 				arqstat = (void *)(pkt->pkt_scbp);
2405 				arqstat->sts_rqpkt_reason = CMD_CMPLT;
2406 				arqstat->sts_rqpkt_resid = 0;
2407 				arqstat->sts_rqpkt_state |=
2408 				    STATE_GOT_BUS | STATE_GOT_TARGET
2409 				    | STATE_SENT_CMD
2410 				    | STATE_XFERRED_DATA;
2411 				*(uint8_t *)&arqstat->sts_rqpkt_status =
2412 				    STATUS_GOOD;
2413 				con_log(CL_ANN1,
2414 				    (CE_NOTE, "Copying Sense data %x",
2415 				    cmd->SMID));
2416 
2417 				ddi_rep_get8(acc_handle,
2418 				    (uint8_t *)&(arqstat->sts_sensedata),
2419 				    cmd->sense1,
2420 				    sizeof (struct scsi_extended_sense),
2421 				    DDI_DEV_AUTOINCR);
2422 
2423 			}
2424 			break;
2425 		case MFI_STAT_LD_OFFLINE:
2426 			dev_err(instance->dip, CE_WARN,
2427 			    "tbolt_complete_cmd: ld offline "
2428 			    "CDB[0]=0x%x targetId=0x%x devhandle=0x%x",
2429 			    /* UNDO: */
2430 			    ddi_get8(acc_handle, &scsi_raid_io->CDB.CDB32[0]),
2431 
2432 			    ddi_get16(acc_handle,
2433 			    &scsi_raid_io->RaidContext.ldTargetId),
2434 
2435 			    ddi_get16(acc_handle, &scsi_raid_io->DevHandle));
2436 
2437 			pkt->pkt_reason	= CMD_DEV_GONE;
2438 			pkt->pkt_statistics  = STAT_DISCON;
2439 			break;
2440 		case MFI_STAT_DEVICE_NOT_FOUND:
2441 			con_log(CL_ANN, (CE_CONT,
2442 			    "tbolt_complete_cmd: device not found error"));
2443 			pkt->pkt_reason	= CMD_DEV_GONE;
2444 			pkt->pkt_statistics  = STAT_DISCON;
2445 			break;
2446 
2447 		case MFI_STAT_LD_LBA_OUT_OF_RANGE:
2448 			pkt->pkt_state |= STATE_ARQ_DONE;
2449 			pkt->pkt_reason	= CMD_CMPLT;
2450 			((struct scsi_status *)pkt->pkt_scbp)->sts_chk = 1;
2451 
2452 			arqstat = (void *)(pkt->pkt_scbp);
2453 			arqstat->sts_rqpkt_reason = CMD_CMPLT;
2454 			arqstat->sts_rqpkt_resid = 0;
2455 			arqstat->sts_rqpkt_state |= STATE_GOT_BUS
2456 			    | STATE_GOT_TARGET | STATE_SENT_CMD
2457 			    | STATE_XFERRED_DATA;
2458 			*(uint8_t *)&arqstat->sts_rqpkt_status = STATUS_GOOD;
2459 
2460 			arqstat->sts_sensedata.es_valid = 1;
2461 			arqstat->sts_sensedata.es_key = KEY_ILLEGAL_REQUEST;
2462 			arqstat->sts_sensedata.es_class = CLASS_EXTENDED_SENSE;
2463 
2464 			/*
2465 			 * LOGICAL BLOCK ADDRESS OUT OF RANGE:
2466 			 * ASC: 0x21h; ASCQ: 0x00h;
2467 			 */
2468 			arqstat->sts_sensedata.es_add_code = 0x21;
2469 			arqstat->sts_sensedata.es_qual_code = 0x00;
2470 			break;
2471 		case MFI_STAT_INVALID_CMD:
2472 		case MFI_STAT_INVALID_DCMD:
2473 		case MFI_STAT_INVALID_PARAMETER:
2474 		case MFI_STAT_INVALID_SEQUENCE_NUMBER:
2475 		default:
2476 			dev_err(instance->dip, CE_WARN,
2477 			    "tbolt_complete_cmd: Unknown status!");
2478 			pkt->pkt_reason	= CMD_TRAN_ERR;
2479 
2480 			break;
2481 		}
2482 
2483 		atomic_add_16(&instance->fw_outstanding, (-1));
2484 
2485 		(void) mrsas_common_check(instance, cmd);
2486 		if (acmd->cmd_dmahandle) {
2487 			if (mrsas_check_dma_handle(acmd->cmd_dmahandle) !=
2488 			    DDI_SUCCESS) {
2489 				ddi_fm_service_impact(instance->dip,
2490 				    DDI_SERVICE_UNAFFECTED);
2491 				pkt->pkt_reason = CMD_TRAN_ERR;
2492 				pkt->pkt_statistics = 0;
2493 			}
2494 		}
2495 
2496 		/* Call the callback routine */
2497 		if (((pkt->pkt_flags & FLAG_NOINTR) == 0) && pkt->pkt_comp)
2498 			(*pkt->pkt_comp)(pkt);
2499 
2500 		con_log(CL_ANN1, (CE_NOTE, "Free smid %x", cmd->SMID));
2501 
2502 		ddi_put8(acc_handle, &scsi_raid_io->RaidContext.status, 0);
2503 
2504 		ddi_put8(acc_handle, &scsi_raid_io->RaidContext.extStatus, 0);
2505 
2506 		return_raid_msg_pkt(instance, cmd);
2507 		break;
2508 	}
2509 	case MPI2_FUNCTION_PASSTHRU_IO_REQUEST:	 /* MFA command. */
2510 
2511 		if (cmd->frame->dcmd.opcode == MR_DCMD_LD_MAP_GET_INFO &&
2512 		    cmd->frame->dcmd.mbox.b[1] == 1) {
2513 
2514 			mutex_enter(&instance->sync_map_mtx);
2515 
2516 			con_log(CL_ANN, (CE_NOTE,
2517 			    "LDMAP sync command	SMID RECEIVED 0x%X",
2518 			    cmd->SMID));
2519 			if (cmd->frame->hdr.cmd_status != 0) {
2520 				dev_err(instance->dip, CE_WARN,
2521 				    "map sync failed, status = 0x%x.",
2522 				    cmd->frame->hdr.cmd_status);
2523 			} else {
2524 				instance->map_id++;
2525 				con_log(CL_ANN1, (CE_NOTE,
2526 				    "map sync received, switched map_id to %"
2527 				    PRIu64, instance->map_id));
2528 			}
2529 
2530 			if (MR_ValidateMapInfo(
2531 			    instance->ld_map[instance->map_id & 1],
2532 			    instance->load_balance_info)) {
2533 				instance->fast_path_io = 1;
2534 			} else {
2535 				instance->fast_path_io = 0;
2536 			}
2537 
2538 			con_log(CL_ANN, (CE_NOTE,
2539 			    "instance->fast_path_io %d",
2540 			    instance->fast_path_io));
2541 
2542 			instance->unroll.syncCmd = 0;
2543 
2544 			if (instance->map_update_cmd == cmd) {
2545 				return_raid_msg_pkt(instance, cmd);
2546 				atomic_add_16(&instance->fw_outstanding, (-1));
2547 				(void) mrsas_tbolt_sync_map_info(instance);
2548 			}
2549 
2550 			con_log(CL_ANN1, (CE_NOTE,
2551 			    "LDMAP sync completed, ldcount=%d",
2552 			    instance->ld_map[instance->map_id & 1]
2553 			    ->raidMap.ldCount));
2554 			mutex_exit(&instance->sync_map_mtx);
2555 			break;
2556 		}
2557 
2558 		if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_WAIT) {
2559 			con_log(CL_ANN1, (CE_CONT,
2560 			    "AEN command SMID RECEIVED 0x%X",
2561 			    cmd->SMID));
2562 			if ((instance->aen_cmd == cmd) &&
2563 			    (instance->aen_cmd->abort_aen)) {
2564 				con_log(CL_ANN, (CE_WARN, "tbolt_complete_cmd: "
2565 				    "aborted_aen returned"));
2566 			} else {
2567 				atomic_add_16(&instance->fw_outstanding, (-1));
2568 				service_mfi_aen(instance, cmd);
2569 			}
2570 		}
2571 
2572 		if (cmd->sync_cmd == MRSAS_TRUE) {
2573 			con_log(CL_ANN1, (CE_CONT,
2574 			    "Sync-mode Command Response SMID RECEIVED 0x%X",
2575 			    cmd->SMID));
2576 
2577 			tbolt_complete_cmd_in_sync_mode(instance, cmd);
2578 		} else {
2579 			con_log(CL_ANN, (CE_CONT,
2580 			    "tbolt_complete_cmd: Wrong SMID RECEIVED 0x%X",
2581 			    cmd->SMID));
2582 		}
2583 		break;
2584 	default:
2585 		mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
2586 		ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
2587 
2588 		/* free message */
2589 		con_log(CL_ANN,
2590 		    (CE_NOTE, "tbolt_complete_cmd: unknown function type"));
2591 		break;
2592 	}
2593 }
2594 
2595 uint_t
2596 mr_sas_tbolt_process_outstanding_cmd(struct mrsas_instance *instance)
2597 {
2598 	uint8_t				replyType;
2599 	Mpi2SCSIIOSuccessReplyDescriptor_t *replyDesc;
2600 	Mpi2ReplyDescriptorsUnion_t	*desc;
2601 	uint16_t			smid;
2602 	union desc_value		d_val;
2603 	struct mrsas_cmd		*cmd;
2604 
2605 	struct mrsas_header	*hdr;
2606 	struct scsi_pkt		*pkt;
2607 
2608 	(void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
2609 	    0, 0, DDI_DMA_SYNC_FORDEV);
2610 
2611 	(void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
2612 	    0, 0, DDI_DMA_SYNC_FORCPU);
2613 
2614 	desc = instance->reply_frame_pool;
2615 	desc += instance->reply_read_index;
2616 
2617 	replyDesc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
2618 	replyType = replyDesc->ReplyFlags &
2619 	    MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
2620 
2621 	if (replyType == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
2622 		return (DDI_INTR_UNCLAIMED);
2623 
2624 	if (mrsas_check_dma_handle(instance->mfi_internal_dma_obj.dma_handle)
2625 	    != DDI_SUCCESS) {
2626 		mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
2627 		ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
2628 		con_log(CL_ANN1,
2629 		    (CE_WARN, "mr_sas_tbolt_process_outstanding_cmd(): "
2630 		    "FMA check failed, returning DDI_INTR_CLAIMED"));
2631 		return (DDI_INTR_CLAIMED);
2632 	}
2633 
2634 	con_log(CL_ANN1, (CE_NOTE, "Reply Desc	= %p  Words = %" PRIx64,
2635 	    (void *)desc, desc->Words));
2636 
2637 	d_val.word = desc->Words;
2638 
2639 
2640 	/* Read Reply descriptor */
2641 	while ((d_val.u1.low != 0xffffffff) &&
2642 	    (d_val.u1.high != 0xffffffff)) {
2643 
2644 		(void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
2645 		    0, 0, DDI_DMA_SYNC_FORCPU);
2646 
2647 		smid = replyDesc->SMID;
2648 
2649 		if (!smid || smid > instance->max_fw_cmds + 1) {
2650 			con_log(CL_ANN1, (CE_NOTE,
2651 			    "Reply Desc at Break  = %p	Words = %" PRIx64,
2652 			    (void *)desc, desc->Words));
2653 			break;
2654 		}
2655 
2656 		cmd	= instance->cmd_list[smid - 1];
2657 		if (!cmd) {
2658 			con_log(CL_ANN1, (CE_NOTE, "mr_sas_tbolt_process_"
2659 			    "outstanding_cmd: Invalid command "
2660 			    "or poll command received in completion path"));
2661 		} else {
2662 			mutex_enter(&instance->cmd_pend_mtx);
2663 			if (cmd->sync_cmd == MRSAS_TRUE) {
2664 				hdr = (struct mrsas_header *)&cmd->frame->hdr;
2665 				if (hdr) {
2666 					con_log(CL_ANN1, (CE_NOTE, "mr_sas_"
2667 					    "tbolt_process_outstanding_cmd:"
2668 					    " mlist_del_init(&cmd->list)."));
2669 					mlist_del_init(&cmd->list);
2670 				}
2671 			} else {
2672 				pkt = cmd->pkt;
2673 				if (pkt) {
2674 					con_log(CL_ANN1, (CE_NOTE, "mr_sas_"
2675 					    "tbolt_process_outstanding_cmd:"
2676 					    "mlist_del_init(&cmd->list)."));
2677 					mlist_del_init(&cmd->list);
2678 				}
2679 			}
2680 
2681 			mutex_exit(&instance->cmd_pend_mtx);
2682 
2683 			tbolt_complete_cmd(instance, cmd);
2684 		}
2685 		/* set it back to all 1s. */
2686 		desc->Words = -1LL;
2687 
2688 		instance->reply_read_index++;
2689 
2690 		if (instance->reply_read_index >= (instance->reply_q_depth)) {
2691 			con_log(CL_ANN1, (CE_NOTE, "wrap around"));
2692 			instance->reply_read_index = 0;
2693 		}
2694 
2695 		/* Get the next reply descriptor */
2696 		if (!instance->reply_read_index)
2697 			desc = instance->reply_frame_pool;
2698 		else
2699 			desc++;
2700 
2701 		replyDesc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
2702 
2703 		d_val.word = desc->Words;
2704 
2705 		con_log(CL_ANN1, (CE_NOTE,
2706 		    "Next Reply Desc  = %p Words = %" PRIx64,
2707 		    (void *)desc, desc->Words));
2708 
2709 		replyType = replyDesc->ReplyFlags &
2710 		    MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
2711 
2712 		if (replyType == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
2713 			break;
2714 
2715 	} /* End of while loop. */
2716 
2717 	/* update replyIndex to FW */
2718 	WR_MPI2_REPLY_POST_INDEX(instance->reply_read_index, instance);
2719 
2720 
2721 	(void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
2722 	    0, 0, DDI_DMA_SYNC_FORDEV);
2723 
2724 	(void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
2725 	    0, 0, DDI_DMA_SYNC_FORCPU);
2726 	return (DDI_INTR_CLAIMED);
2727 }
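
/*
 * Illustrative sketch (not driver code) of the reply-ring consumption
 * pattern implemented above; a descriptor of all ones is unused:
 *
 *	desc = reply_frame_pool + reply_read_index;
 *	while (desc->Words != ~0ULL) {
 *		(complete instance->cmd_list[desc->SMID - 1])
 *		desc->Words = -1LL;			mark consumed
 *		if (++reply_read_index >= reply_q_depth)
 *			reply_read_index = 0;		wrap around
 *		desc = reply_frame_pool + reply_read_index;
 *	}
 *	WR_MPI2_REPLY_POST_INDEX(reply_read_index, instance);
 */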
2728 
2729 
2730 
2731 
2732 /*
2733  * complete_cmd_in_sync_mode -	Completes an internal command
2734  * @instance:			Adapter soft state
2735  * @cmd:			Command to be completed
2736  *
2737  * The issue_cmd_in_sync_mode() function waits for a command to complete
2738  * after it issues a command. This function wakes up that waiting routine by
2739  * calling wake_up() on the wait queue.
2740  */
2741 void
2742 tbolt_complete_cmd_in_sync_mode(struct mrsas_instance *instance,
2743     struct mrsas_cmd *cmd)
2744 {
2745 
2746 	cmd->cmd_status = ddi_get8(cmd->frame_dma_obj.acc_handle,
2747 	    &cmd->frame->io.cmd_status);
2748 
2749 	cmd->sync_cmd = MRSAS_FALSE;
2750 
2751 	mutex_enter(&instance->int_cmd_mtx);
2752 	if (cmd->cmd_status == ENODATA) {
2753 		cmd->cmd_status = 0;
2754 	}
2755 	cv_broadcast(&instance->int_cmd_cv);
2756 	mutex_exit(&instance->int_cmd_mtx);
2757 
2758 }
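
/*
 * Illustrative pairing (not driver code) of the synchronous-command
 * handshake; the waiting side lives in tbolt_issue_cmd_in_sync_mode():
 *
 *	issuer				completer
 *	------				---------
 *	cmd->cmd_status = ENODATA;
 *	(issue to FW)			cmd->cmd_status = (FW status);
 *	mutex_enter(&int_cmd_mtx);	mutex_enter(&int_cmd_mtx);
 *	while (status == ENODATA)	cv_broadcast(&int_cmd_cv);
 *		cv_wait(&int_cmd_cv,	mutex_exit(&int_cmd_mtx);
 *		    &int_cmd_mtx);
 *	mutex_exit(&int_cmd_mtx);
 */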
2759 
2760 /*
2761  * mrsas_tbolt_get_ld_map_info -	Returns ld_map structure
2762  * instance:				Adapter soft state
2763  *
2764  * Issues an internal command (DCMD) to read the FW's RAID (LD) map
2765  * structure.  This information is mainly used to decide whether
2766  * fast-path I/O can be used.
2767  */
2768 int
2769 mrsas_tbolt_get_ld_map_info(struct mrsas_instance *instance)
2770 {
2771 	int ret = 0;
2772 	struct mrsas_cmd	*cmd = NULL;
2773 	struct mrsas_dcmd_frame	*dcmd;
2774 	MR_FW_RAID_MAP_ALL *ci;
2775 	uint32_t ci_h = 0;
2776 	U32 size_map_info;
2777 
2778 	cmd = get_raid_msg_pkt(instance);
2779 
2780 	if (cmd == NULL) {
2781 		dev_err(instance->dip, CE_WARN,
2782 		    "Failed to get a cmd from free-pool in get_ld_map_info()");
2783 		return (DDI_FAILURE);
2784 	}
2785 
2786 	dcmd = &cmd->frame->dcmd;
2787 
2788 	size_map_info =	sizeof (MR_FW_RAID_MAP) +
2789 	    (sizeof (MR_LD_SPAN_MAP) *
2790 	    (MAX_LOGICAL_DRIVES - 1));
2791 
2792 	con_log(CL_ANN, (CE_NOTE,
2793 	    "size_map_info : 0x%x", size_map_info));
2794 
2795 	ci = instance->ld_map[instance->map_id & 1];
2796 	ci_h = instance->ld_map_phy[instance->map_id & 1];
2797 
2798 	if (!ci) {
2799 		dev_err(instance->dip, CE_WARN,
2800 		    "Failed to alloc mem for ld_map_info");
2801 		return_raid_msg_pkt(instance, cmd);
2802 		return (-1);
2803 	}
2804 
2805 	bzero(ci, sizeof (*ci));
2806 	bzero(dcmd->mbox.b, DCMD_MBOX_SZ);
2807 
2808 	dcmd->cmd = MFI_CMD_OP_DCMD;
2809 	dcmd->cmd_status = 0xFF;
2810 	dcmd->sge_count = 1;
2811 	dcmd->flags = MFI_FRAME_DIR_READ;
2812 	dcmd->timeout = 0;
2813 	dcmd->pad_0 = 0;
2814 	dcmd->data_xfer_len = size_map_info;
2815 	dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
2816 	dcmd->sgl.sge32[0].phys_addr = ci_h;
2817 	dcmd->sgl.sge32[0].length = size_map_info;
2818 
2819 
2820 	mr_sas_tbolt_build_mfi_cmd(instance, cmd);
2821 
2822 	if (!instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
2823 		ret = 0;
2824 		con_log(CL_ANN1, (CE_NOTE, "Get LD Map Info success"));
2825 	} else {
2826 		dev_err(instance->dip, CE_WARN, "Get LD Map Info failed");
2827 		ret = -1;
2828 	}
2829 
2830 	return_raid_msg_pkt(instance, cmd);
2831 
2832 	return (ret);
2833 }
2834 
2835 void
2836 mrsas_dump_reply_desc(struct mrsas_instance *instance)
2837 {
2838 	uint32_t i;
2839 	MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
2840 	union desc_value d_val;
2841 
2842 	reply_desc = instance->reply_frame_pool;
2843 
2844 	for (i = 0; i < instance->reply_q_depth; i++, reply_desc++) {
2845 		d_val.word = reply_desc->Words;
2846 		con_log(CL_DLEVEL3, (CE_NOTE,
2847 		    "i=%d, %x:%x",
2848 		    i, d_val.u1.high, d_val.u1.low));
2849 	}
2850 }
2851 
2852 /*
2853  * mrsas_tbolt_prepare_cdb -	Prepare a 32-byte CDB for fast path.
2854  * @io_info:	MegaRAID IO request packet pointer.
2855  * @ref_tag:	Reference tag for RD/WRPROTECT
2856  *
2857  * Build the 32-byte variable-length CDB used for DIF fast-path I/O.
2858  */
2859 void
2860 mrsas_tbolt_prepare_cdb(struct mrsas_instance *instance, U8 cdb[],
2861     struct IO_REQUEST_INFO *io_info, Mpi2RaidSCSIIORequest_t *scsi_io_request,
2862     U32 ref_tag)
2863 {
2864 	uint16_t		EEDPFlags;
2865 	uint32_t		Control;
2866 	ddi_acc_handle_t acc_handle =
2867 	    instance->mpi2_frame_pool_dma_obj.acc_handle;
2868 
2869 	/* Prepare 32-byte CDB if DIF is supported on this device */
2870 	con_log(CL_ANN, (CE_NOTE, "Prepare DIF CDB"));
2871 
2872 	bzero(cdb, 32);
2873 
2874 	cdb[0] =  MRSAS_SCSI_VARIABLE_LENGTH_CMD;
2875 
2876 
2877 	cdb[7] =  MRSAS_SCSI_ADDL_CDB_LEN;
2878 
2879 	if (io_info->isRead)
2880 		cdb[9] = MRSAS_SCSI_SERVICE_ACTION_READ32;
2881 	else
2882 		cdb[9] = MRSAS_SCSI_SERVICE_ACTION_WRITE32;
2883 
2884 	/* Linux driver sets this to MEGASAS_RD_WR_PROTECT_CHECK_ALL. */
2885 	cdb[10] = MRSAS_RD_WR_PROTECT;
2886 
2887 	/* LOGICAL BLOCK ADDRESS */
2888 	cdb[12] = (U8)(((io_info->pdBlock) >> 56) & 0xff);
2889 	cdb[13] = (U8)(((io_info->pdBlock) >> 48) & 0xff);
2890 	cdb[14] = (U8)(((io_info->pdBlock) >> 40) & 0xff);
2891 	cdb[15] = (U8)(((io_info->pdBlock) >> 32) & 0xff);
2892 	cdb[16] = (U8)(((io_info->pdBlock) >> 24) & 0xff);
2893 	cdb[17] = (U8)(((io_info->pdBlock) >> 16) & 0xff);
2894 	cdb[18] = (U8)(((io_info->pdBlock) >> 8) & 0xff);
2895 	cdb[19] = (U8)((io_info->pdBlock) & 0xff);
2896 
2897 	/* Logical block reference tag */
2898 	ddi_put32(acc_handle, &scsi_io_request->CDB.EEDP32.PrimaryReferenceTag,
2899 	    BE_32(ref_tag));
2900 
2901 	ddi_put16(acc_handle,
2902 	    &scsi_io_request->CDB.EEDP32.PrimaryApplicationTagMask, 0xffff);
2903 
2904 	ddi_put32(acc_handle, &scsi_io_request->DataLength,
2905 	    ((io_info->numBlocks)*512));
2906 	/* Specify 32-byte cdb */
2907 	ddi_put16(acc_handle, &scsi_io_request->IoFlags, 32);
2908 
2909 	/* Transfer length */
2910 	cdb[28] = (U8)(((io_info->numBlocks) >> 24) & 0xff);
2911 	cdb[29] = (U8)(((io_info->numBlocks) >> 16) & 0xff);
2912 	cdb[30] = (U8)(((io_info->numBlocks) >> 8) & 0xff);
2913 	cdb[31] = (U8)((io_info->numBlocks) & 0xff);
2914 
2915 	/* set SCSI IO EEDPFlags */
2916 	EEDPFlags = ddi_get16(acc_handle, &scsi_io_request->EEDPFlags);
2917 	Control = ddi_get32(acc_handle, &scsi_io_request->Control);
2918 
2919 	/* set SCSI IO EEDPFlags bits */
2920 	if (io_info->isRead) {
2921 		/*
2922 		 * For READ commands, the EEDPFlags shall be set to specify to
2923 		 * Increment the Primary Reference Tag, to Check the Reference
2924 		 * Tag, and to Check and Remove the Protection Information
2925 		 * fields.
2926 		 */
2927 		EEDPFlags = MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG	|
2928 		    MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG	|
2929 		    MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP	|
2930 		    MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG	|
2931 		    MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
2932 	} else {
2933 		/*
2934 		 * For WRITE commands, the EEDPFlags shall be set to specify to
2935 		 * Increment the Primary Reference Tag, and to Insert
2936 		 * Protection Information fields.
2937 		 */
2938 		EEDPFlags = MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG	|
2939 		    MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
2940 	}
2941 	Control |= (0x4 << 26);
2942 
2943 	ddi_put16(acc_handle, &scsi_io_request->EEDPFlags, EEDPFlags);
2944 	ddi_put32(acc_handle, &scsi_io_request->Control, Control);
2945 	ddi_put32(acc_handle,
2946 	    &scsi_io_request->EEDPBlockSize, MRSAS_EEDPBLOCKSIZE);
2947 }
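
/*
 * Resulting 32-byte CDB layout (derived from the assignments above):
 *
 *	byte 0		MRSAS_SCSI_VARIABLE_LENGTH_CMD
 *	byte 7		MRSAS_SCSI_ADDL_CDB_LEN
 *	byte 9		service action (READ32 or WRITE32)
 *	byte 10		MRSAS_RD_WR_PROTECT
 *	bytes 12-19	64-bit LBA, big-endian
 *	bytes 28-31	32-bit transfer length, big-endian
 *
 * The reference tag, application tag mask and EEDP flags are carried
 * in the MPI2 request itself rather than in the CDB.
 */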
2948 
2949 
2950 /*
2951  * mrsas_tbolt_set_pd_lba -	Sets PD LBA
2952  * @cdb:		CDB
2953  * @cdb_size:		CDB size
2954  * @cdb_len_ptr:	cdb length
2955  * @start_blk:		Start block of IO
2956  * @num_blocks:		Number of blocks
2957  *
2958  * Used to set the PD LBA in CDB for FP IOs
2959  */
2960 static void
2961 mrsas_tbolt_set_pd_lba(U8 *cdb, size_t cdb_size, uint8_t *cdb_len_ptr,
2962     U64 start_blk, U32 num_blocks)
2963 {
2964 	U8 cdb_len = *cdb_len_ptr;
2965 	U8 flagvals = 0, opcode = 0, groupnum = 0, control = 0;
2966 
2967 	/* Some drives don't support 16/12 byte CDB's, convert to 10 */
2968 	if (((cdb_len == 12) || (cdb_len == 16)) &&
2969 	    (start_blk <= 0xffffffff)) {
2970 		if (cdb_len == 16) {
2971 			con_log(CL_ANN,
2972 			    (CE_NOTE, "Converting READ/WRITE(16) to READ/WRITE(10)"));
2973 			opcode = cdb[0] == READ_16 ? READ_10 : WRITE_10;
2974 			flagvals = cdb[1];
2975 			groupnum = cdb[14];
2976 			control = cdb[15];
2977 		} else {
2978 			con_log(CL_ANN,
2979 			    (CE_NOTE, "Converting READ/WRITE(12) to READ/WRITE(10)"));
2980 			opcode = cdb[0] == READ_12 ? READ_10 : WRITE_10;
2981 			flagvals = cdb[1];
2982 			groupnum = cdb[10];
2983 			control = cdb[11];
2984 		}
2985 
2986 		bzero(cdb, cdb_size);
2987 
2988 		cdb[0] = opcode;
2989 		cdb[1] = flagvals;
2990 		cdb[6] = groupnum;
2991 		cdb[9] = control;
2992 		/* Set transfer length */
2993 		cdb[8] = (U8)(num_blocks & 0xff);
2994 		cdb[7] = (U8)((num_blocks >> 8) & 0xff);
2995 		cdb_len = 10;
2996 	} else if ((cdb_len < 16) && (start_blk > 0xffffffff)) {
2997 		/* Convert to 16 byte CDB for large LBA's */
2998 		con_log(CL_ANN,
2999 		    (CE_NOTE, "Converting 6/10/12 CDB to 16 byte CDB"));
3000 		switch (cdb_len) {
3001 		case 6:
3002 			opcode = cdb[0] == READ_6 ? READ_16 : WRITE_16;
3003 			control = cdb[5];
3004 			break;
3005 		case 10:
3006 			opcode = cdb[0] == READ_10 ? READ_16 : WRITE_16;
3007 			flagvals = cdb[1];
3008 			groupnum = cdb[6];
3009 			control = cdb[9];
3010 			break;
3011 		case 12:
3012 			opcode = cdb[0] == READ_12 ? READ_16 : WRITE_16;
3013 			flagvals = cdb[1];
3014 			groupnum = cdb[10];
3015 			control = cdb[11];
3016 			break;
3017 		}
3018 
3019 		bzero(cdb, cdb_size);
3020 
3021 		cdb[0] = opcode;
3022 		cdb[1] = flagvals;
3023 		cdb[14] = groupnum;
3024 		cdb[15] = control;
3025 
3026 		/* Transfer length */
3027 		cdb[13] = (U8)(num_blocks & 0xff);
3028 		cdb[12] = (U8)((num_blocks >> 8) & 0xff);
3029 		cdb[11] = (U8)((num_blocks >> 16) & 0xff);
3030 		cdb[10] = (U8)((num_blocks >> 24) & 0xff);
3031 
3032 		/* Specify 16-byte cdb */
3033 		cdb_len = 16;
3034 	} else if ((cdb_len == 6) && (start_blk > 0x1fffff)) {
3035 		/* convert to 10 byte CDB */
3036 		opcode = cdb[0] == READ_6 ? READ_10 : WRITE_10;
3037 		control = cdb[5];
3038 
3039 		bzero(cdb, cdb_size);
3040 		cdb[0] = opcode;
3041 		cdb[9] = control;
3042 
3043 		/* Set transfer length */
3044 		cdb[8] = (U8)(num_blocks & 0xff);
3045 		cdb[7] = (U8)((num_blocks >> 8) & 0xff);
3046 
3047 		/* Specify 10-byte cdb */
3048 		cdb_len = 10;
3049 	}
3050 
3051 
3052 	/* Fall through to the normal case and load the LBA here. */
3053 	switch (cdb_len) {
3054 	case 6:
3055 	{
3056 		U8 val = cdb[1] & 0xE0;
3057 		cdb[3] = (U8)(start_blk & 0xff);
3058 		cdb[2] = (U8)((start_blk >> 8) & 0xff);
3059 		cdb[1] = val | ((U8)(start_blk >> 16) & 0x1f);
3060 		break;
3061 	}
3062 	case 10:
3063 		cdb[5] = (U8)(start_blk & 0xff);
3064 		cdb[4] = (U8)((start_blk >> 8) & 0xff);
3065 		cdb[3] = (U8)((start_blk >> 16) & 0xff);
3066 		cdb[2] = (U8)((start_blk >> 24) & 0xff);
3067 		break;
3068 	case 12:
3069 		cdb[5]	  = (U8)(start_blk & 0xff);
3070 		cdb[4]	  = (U8)((start_blk >> 8) & 0xff);
3071 		cdb[3]	  = (U8)((start_blk >> 16) & 0xff);
3072 		cdb[2]	  = (U8)((start_blk >> 24) & 0xff);
3073 		break;
3074 
3075 	case 16:
3076 		cdb[9]	= (U8)(start_blk & 0xff);
3077 		cdb[8]	= (U8)((start_blk >> 8) & 0xff);
3078 		cdb[7]	= (U8)((start_blk >> 16) & 0xff);
3079 		cdb[6]	= (U8)((start_blk >> 24) & 0xff);
3080 		cdb[5]	= (U8)((start_blk >> 32) & 0xff);
3081 		cdb[4]	= (U8)((start_blk >> 40) & 0xff);
3082 		cdb[3]	= (U8)((start_blk >> 48) & 0xff);
3083 		cdb[2]	= (U8)((start_blk >> 56) & 0xff);
3084 		break;
3085 	}
3086 
3087 	*cdb_len_ptr = cdb_len;
3088 }
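
/*
 * Worked example (illustrative): a READ(16) for 8 blocks at LBA
 * 0x12345678 fits in 32 bits, so it is rewritten as a READ(10):
 *
 *	in:  cdb[0] = READ_16, LBA in bytes 2-9, length in bytes 10-13
 *	out: cdb[0] = READ_10
 *	     cdb[2..5] = 0x12 0x34 0x56 0x78	(LBA, big-endian)
 *	     cdb[7..8] = 0x00 0x08		(transfer length)
 *	     *cdb_len_ptr = 10
 */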
3089 
3090 
3091 static int
3092 mrsas_tbolt_check_map_info(struct mrsas_instance *instance)
3093 {
3094 	MR_FW_RAID_MAP_ALL *ld_map;
3095 
3096 	if (!mrsas_tbolt_get_ld_map_info(instance)) {
3097 
3098 		ld_map = instance->ld_map[instance->map_id & 1];
3099 
3100 		con_log(CL_ANN1, (CE_NOTE, "ldCount=%d, map size=%d",
3101 		    ld_map->raidMap.ldCount, ld_map->raidMap.totalSize));
3102 
3103 		if (MR_ValidateMapInfo(
3104 		    instance->ld_map[instance->map_id & 1],
3105 		    instance->load_balance_info)) {
3106 			con_log(CL_ANN,
3107 			    (CE_CONT, "MR_ValidateMapInfo success"));
3108 
3109 			instance->fast_path_io = 1;
3110 			con_log(CL_ANN,
3111 			    (CE_NOTE, "instance->fast_path_io %d",
3112 			    instance->fast_path_io));
3113 
3114 			return (DDI_SUCCESS);
3115 		}
3116 
3117 	}
3118 
3119 	instance->fast_path_io = 0;
3120 	dev_err(instance->dip, CE_WARN, "MR_ValidateMapInfo failed");
3121 	con_log(CL_ANN, (CE_NOTE,
3122 	    "instance->fast_path_io %d", instance->fast_path_io));
3123 
3124 	return (DDI_FAILURE);
3125 }
3126 
3127 /*
3128  * Marks HBA as bad. This will be called either when an
3129  * IO packet times out even after 3 FW resets
3130  * or FW is found to be fault even after 3 continuous resets.
3131  */
3132 
3133 void
3134 mrsas_tbolt_kill_adapter(struct mrsas_instance *instance)
3135 {
3136 	dev_err(instance->dip, CE_NOTE, "TBOLT Kill adapter called");
3137 
3138 	if (instance->deadadapter == 1)
3139 		return;
3140 
3141 	con_log(CL_ANN1, (CE_NOTE, "tbolt_kill_adapter: "
3142 	    "Writing to doorbell with MFI_STOP_ADP "));
3143 	mutex_enter(&instance->ocr_flags_mtx);
3144 	instance->deadadapter = 1;
3145 	mutex_exit(&instance->ocr_flags_mtx);
3146 	instance->func_ptr->disable_intr(instance);
3147 	WR_RESERVED0_REGISTER(MFI_STOP_ADP, instance);
3148 	/* Flush */
3149 	(void) RD_RESERVED0_REGISTER(instance);
3150 
3151 	(void) mrsas_print_pending_cmds(instance);
3152 	(void) mrsas_complete_pending_cmds(instance);
3153 }
3154 
3155 void
3156 mrsas_reset_reply_desc(struct mrsas_instance *instance)
3157 {
3158 	int i;
3159 	MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
3160 	instance->reply_read_index = 0;
3161 
3162 	/* initialize each reply descriptor's Words to all ones */
3163 	reply_desc = instance->reply_frame_pool;
3164 
3165 	for (i = 0; i < instance->reply_q_depth; i++) {
3166 		reply_desc->Words = (uint64_t)~0;
3167 		reply_desc++;
3168 	}
3169 }
3170 
3171 int
3172 mrsas_tbolt_reset_ppc(struct mrsas_instance *instance)
3173 {
3174 	uint32_t status = 0x00;
3175 	uint32_t retry = 0;
3176 	uint32_t cur_abs_reg_val;
3177 	uint32_t fw_state;
3178 	uint32_t abs_state;
3179 	uint32_t i;
3180 
3181 	if (instance->deadadapter == 1) {
3182 		dev_err(instance->dip, CE_WARN, "mrsas_tbolt_reset_ppc: "
3183 		    "no more resets as HBA has been marked dead");
3184 		return (DDI_FAILURE);
3185 	}
3186 
3187 	mutex_enter(&instance->ocr_flags_mtx);
3188 	instance->adapterresetinprogress = 1;
3189 	mutex_exit(&instance->ocr_flags_mtx);
3190 
3191 	instance->func_ptr->disable_intr(instance);
3192 
3193 	/* Add delay in order to complete the ioctl & io cmds in-flight */
3194 	for (i = 0; i < 3000; i++)
3195 		drv_usecwait(MILLISEC); /* wait for 1000 usecs */
3196 
3197 	instance->reply_read_index = 0;
3198 
3199 retry_reset:
3200 	con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: Resetting TBOLT"));
3201 
3202 	/* Flush */
3203 	WR_TBOLT_IB_WRITE_SEQ(0x0, instance);
3204 	/* Write magic number */
3205 	WR_TBOLT_IB_WRITE_SEQ(0xF, instance);
3206 	WR_TBOLT_IB_WRITE_SEQ(0x4, instance);
3207 	WR_TBOLT_IB_WRITE_SEQ(0xb, instance);
3208 	WR_TBOLT_IB_WRITE_SEQ(0x2, instance);
3209 	WR_TBOLT_IB_WRITE_SEQ(0x7, instance);
3210 	WR_TBOLT_IB_WRITE_SEQ(0xd, instance);
3211 
3212 	con_log(CL_ANN1, (CE_NOTE,
3213 	    "mrsas_tbolt_reset_ppc: magic number written "
3214 	    "to write sequence register"));
3215 
3216 	/* Wait for the diag write enable (DRWE) bit to be set */
3217 	retry = 0;
3218 	status = RD_TBOLT_HOST_DIAG(instance);
3219 	while (!(status & DIAG_WRITE_ENABLE)) {
3220 		delay(100 * drv_usectohz(MILLISEC));
3221 		status = RD_TBOLT_HOST_DIAG(instance);
3222 		if (retry++ >= 100) {
3223 			dev_err(instance->dip, CE_WARN,
3224 			    "%s(): timeout waiting for DRWE.", __func__);
3225 			return (DDI_FAILURE);
3226 		}
3227 	}
3228 
3229 	/* Send reset command */
3230 	WR_TBOLT_HOST_DIAG(status | DIAG_TBOLT_RESET_ADAPTER, instance);
3231 	delay(100 * drv_usectohz(MILLISEC));
3232 
3233 	/* Wait for reset bit to clear */
3234 	retry = 0;
3235 	status = RD_TBOLT_HOST_DIAG(instance);
3236 	while ((status & DIAG_TBOLT_RESET_ADAPTER)) {
3237 		delay(100 * drv_usectohz(MILLISEC));
3238 		status = RD_TBOLT_HOST_DIAG(instance);
3239 		if (retry++ == 100) {
3240 			/* Don't call kill adapter here; */
3241 			/* the RESET ADAPTER bit is cleared by firmware. */
3242 			/* mrsas_tbolt_kill_adapter(instance); */
3243 			dev_err(instance->dip, CE_WARN,
3244 			    "%s(): RESET FAILED; return failure!!!", __func__);
3245 			return (DDI_FAILURE);
3246 		}
3247 	}
3248 
3249 	con_log(CL_ANN,
3250 	    (CE_NOTE, "mrsas_tbolt_reset_ppc: Adapter reset complete"));
3251 
3252 	abs_state = instance->func_ptr->read_fw_status_reg(instance);
3253 	retry = 0;
3254 	while ((abs_state <= MFI_STATE_FW_INIT) && (retry++ < 1000)) {
3255 		delay(100 * drv_usectohz(MILLISEC));
3256 		abs_state = instance->func_ptr->read_fw_status_reg(instance);
3257 	}
3258 	if (abs_state <= MFI_STATE_FW_INIT) {
3259 		dev_err(instance->dip, CE_WARN,
3260 		    "mrsas_tbolt_reset_ppc: firmware state < MFI_STATE_FW_INIT, "
3261 		    "state = 0x%x, RETRY RESET.", abs_state);
3262 		goto retry_reset;
3263 	}
3264 
3265 	/* Mark HBA as bad, if FW is fault after 3 continuous resets */
3266 	if (mfi_state_transition_to_ready(instance) ||
3267 	    mrsas_debug_tbolt_fw_faults_after_ocr == 1) {
3268 		cur_abs_reg_val =
3269 		    instance->func_ptr->read_fw_status_reg(instance);
3270 		fw_state	= cur_abs_reg_val & MFI_STATE_MASK;
3271 
3272 		con_log(CL_ANN1, (CE_NOTE,
3273 		    "mrsas_tbolt_reset_ppc: before fault injection: FW is not ready "
3274 		    "FW state = 0x%x", fw_state));
3275 		if (mrsas_debug_tbolt_fw_faults_after_ocr == 1)
3276 			fw_state = MFI_STATE_FAULT;
3277 
3278 		con_log(CL_ANN,
3279 		    (CE_NOTE,  "mrsas_tbolt_reset_ppc : FW is not ready "
3280 		    "FW state = 0x%x", fw_state));
3281 
3282 		if (fw_state == MFI_STATE_FAULT) {
3283 			/* increment the count */
3284 			instance->fw_fault_count_after_ocr++;
3285 			if (instance->fw_fault_count_after_ocr
3286 			    < MAX_FW_RESET_COUNT) {
3287 				dev_err(instance->dip, CE_WARN,
3288 				    "mrsas_tbolt_reset_ppc: "
3289 				    "FW is in fault after OCR count %d "
3290 				    "Retry Reset",
3291 				    instance->fw_fault_count_after_ocr);
3292 				goto retry_reset;
3293 
3294 			} else {
3295 				dev_err(instance->dip, CE_WARN, "%s: "
3296 				    "Max Reset Count exceeded (>%d); "
3297 				    "Mark HBA as bad, KILL adapter",
3298 				    __func__, MAX_FW_RESET_COUNT);
3299 
3300 				mrsas_tbolt_kill_adapter(instance);
3301 				return (DDI_FAILURE);
3302 			}
3303 		}
3304 	}
3305 
3306 	/* reset the counter as FW is up after OCR */
3307 	instance->fw_fault_count_after_ocr = 0;
3308 
3309 	mrsas_reset_reply_desc(instance);
3310 
3311 	abs_state = mrsas_issue_init_mpi2(instance);
3312 	if (abs_state == (uint32_t)DDI_FAILURE) {
3313 		dev_err(instance->dip, CE_WARN, "mrsas_tbolt_reset_ppc: "
3314 		    "INIT failed Retrying Reset");
3315 		goto retry_reset;
3316 	}
3317 
3318 	(void) mrsas_print_pending_cmds(instance);
3319 
3320 	instance->func_ptr->enable_intr(instance);
3321 	instance->fw_outstanding = 0;
3322 
3323 	(void) mrsas_issue_pending_cmds(instance);
3324 
3325 	instance->aen_cmd->retry_count_for_ocr = 0;
3326 	instance->aen_cmd->drv_pkt_time = 0;
3327 
3328 	instance->func_ptr->issue_cmd(instance->aen_cmd, instance);
3329 
3330 	mutex_enter(&instance->ocr_flags_mtx);
3331 	instance->adapterresetinprogress = 0;
3332 	mutex_exit(&instance->ocr_flags_mtx);
3333 
3334 	dev_err(instance->dip, CE_NOTE, "TBOLT adapter reset successfully");
3335 
3336 	return (DDI_SUCCESS);
3337 }
3338 
3339 /*
3340  * mrsas_tbolt_sync_map_info -	Keep the driver's LD map in sync
3341  * @instance:				Adapter soft state
3342  *
3343  * Issues an internal command (DCMD) that sends the per-LD sequence
3344  * numbers to the FW with the "pend" mailbox flag set, so the FW
3345  * completes it only when the RAID map changes and a resync is needed.
3346  */
3347 
3348 static int
3349 mrsas_tbolt_sync_map_info(struct mrsas_instance *instance)
3350 {
3351 	int			ret = 0, i;
3352 	struct mrsas_cmd	*cmd = NULL;
3353 	struct mrsas_dcmd_frame	*dcmd;
3354 	uint32_t size_sync_info, num_lds;
3355 	LD_TARGET_SYNC *ci = NULL;
3356 	MR_FW_RAID_MAP_ALL *map;
3357 	MR_LD_RAID  *raid;
3358 	LD_TARGET_SYNC *ld_sync;
3359 	uint32_t ci_h = 0;
3360 	uint32_t size_map_info;
3361 
3362 	cmd = get_raid_msg_pkt(instance);
3363 
3364 	if (cmd == NULL) {
3365 		dev_err(instance->dip, CE_WARN,
3366 		    "Failed to get a cmd from free-pool in "
3367 		    "mrsas_tbolt_sync_map_info().");
3368 		return (DDI_FAILURE);
3369 	}
3370 
3371 	/* Clear the frame buffer and assign back the context id */
3372 	bzero((char *)&cmd->frame[0], sizeof (union mrsas_frame));
3373 	ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3374 	    cmd->index);
3375 	bzero(cmd->scsi_io_request, sizeof (Mpi2RaidSCSIIORequest_t));
3376 
3377 
3378 	map = instance->ld_map[instance->map_id & 1];
3379 
3380 	num_lds = map->raidMap.ldCount;
3381 
3382 	dcmd = &cmd->frame->dcmd;
3383 
3384 	size_sync_info = sizeof (LD_TARGET_SYNC) * num_lds;
3385 
3386 	con_log(CL_ANN, (CE_NOTE, "size_sync_info = 0x%x; ld count = 0x%x",
3387 	    size_sync_info, num_lds));
3388 
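	/*
	 * The RAID map is double-buffered; reuse the currently inactive
	 * half as the DMA buffer for the sync payload.
	 */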
3389 	ci = (LD_TARGET_SYNC *)instance->ld_map[(instance->map_id - 1) & 1];
3390 
3391 	bzero(ci, sizeof (MR_FW_RAID_MAP_ALL));
3392 	ci_h = instance->ld_map_phy[(instance->map_id - 1) & 1];
3393 
3394 	bzero(dcmd->mbox.b, DCMD_MBOX_SZ);
3395 
3396 	ld_sync = (LD_TARGET_SYNC *)ci;
3397 
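	/* Build one LD_TARGET_SYNC entry (target id + seq number) per LD. */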
3398 	for (i = 0; i < num_lds; i++, ld_sync++) {
3399 		raid = MR_LdRaidGet(i, map);
3400 
3401 		con_log(CL_ANN1,
3402 		    (CE_NOTE, "i : 0x%x, Seq Num : 0x%x, Sync Reqd : 0x%x",
3403 		    i, raid->seqNum, raid->flags.ldSyncRequired));
3404 
3405 		ld_sync->ldTargetId = MR_GetLDTgtId(i, map);
3406 
3407 		con_log(CL_ANN1, (CE_NOTE, "i : 0x%x, tgt : 0x%x",
3408 		    i, ld_sync->ldTargetId));
3409 
3410 		ld_sync->seqNum = raid->seqNum;
3411 	}
3412 
3413 
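	/*
	 * The transfer length covers the full map: MR_FW_RAID_MAP embeds
	 * one MR_LD_SPAN_MAP, so add one more for each remaining possible
	 * logical drive.
	 */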
3414 	size_map_info = sizeof (MR_FW_RAID_MAP) +
3415 	    (sizeof (MR_LD_SPAN_MAP) * (MAX_LOGICAL_DRIVES - 1));
3416 
3417 	dcmd->cmd = MFI_CMD_OP_DCMD;
3418 	dcmd->cmd_status = 0xFF;
3419 	dcmd->sge_count = 1;
3420 	dcmd->flags = MFI_FRAME_DIR_WRITE;
3421 	dcmd->timeout = 0;
3422 	dcmd->pad_0 = 0;
3423 	dcmd->data_xfer_len = size_map_info;
3424 	ASSERT(num_lds <= 255);
3425 	dcmd->mbox.b[0] = (U8)num_lds;
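	/*
	 * The pend flag asks FW to hold this DCMD open and complete it
	 * only when the RAID map changes, turning the completion into a
	 * map-update notification.
	 */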
3426 	dcmd->mbox.b[1] = 1; /* Pend */
3427 	dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
3428 	dcmd->sgl.sge32[0].phys_addr = ci_h;
3429 	dcmd->sgl.sge32[0].length = size_map_info;
3430 
3431 
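	/*
	 * Remember the pending sync command so it can be aborted later
	 * (see abort_syncmap_cmd()) if the map must be re-synced.
	 */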
3432 	instance->map_update_cmd = cmd;
3433 	mr_sas_tbolt_build_mfi_cmd(instance, cmd);
3434 
3435 	instance->func_ptr->issue_cmd(cmd, instance);
3436 
3437 	instance->unroll.syncCmd = 1;
3438 	con_log(CL_ANN1, (CE_NOTE, "sync cmd issued. [SMID]:%x", cmd->SMID));
3439 
3440 	return (ret);
3441 }
3442 
3443 /*
3444  * abort_syncmap_cmd
3445  */
3446 int
3447 abort_syncmap_cmd(struct mrsas_instance *instance,
3448     struct mrsas_cmd *cmd_to_abort)
3449 {
3450 	int	ret = 0;
3451 
3452 	struct mrsas_cmd		*cmd;
3453 	struct mrsas_abort_frame	*abort_fr;
3454 
3455 	con_log(CL_ANN1, (CE_NOTE, "chkpnt: abort_ldsync:%d", __LINE__));
3456 
3457 	cmd = get_raid_msg_mfi_pkt(instance);
3458 
3459 	if (!cmd) {
3460 		dev_err(instance->dip, CE_WARN,
3461 		    "Failed to get a cmd from free-pool in abort_syncmap_cmd().");
3462 		return (DDI_FAILURE);
3463 	}
3464 	/* Clear the frame buffer and assign back the context id */
3465 	bzero((char *)&cmd->frame[0], sizeof (union mrsas_frame));
3466 	ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3467 	    cmd->index);
3468 
3469 	abort_fr = &cmd->frame->abort;
3470 
3471 	/* prepare and issue the abort frame */
3472 	ddi_put8(cmd->frame_dma_obj.acc_handle,
3473 	    &abort_fr->cmd, MFI_CMD_OP_ABORT);
3474 	ddi_put8(cmd->frame_dma_obj.acc_handle, &abort_fr->cmd_status,
3475 	    MFI_CMD_STATUS_SYNC_MODE);
3476 	ddi_put16(cmd->frame_dma_obj.acc_handle, &abort_fr->flags, 0);
3477 	ddi_put32(cmd->frame_dma_obj.acc_handle, &abort_fr->abort_context,
3478 	    cmd_to_abort->index);
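	/*
	 * The MFI frame pool is assumed to be 32-bit DMA addressable,
	 * so only the low half of the frame address is programmed.
	 */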
3479 	ddi_put32(cmd->frame_dma_obj.acc_handle,
3480 	    &abort_fr->abort_mfi_phys_addr_lo, cmd_to_abort->frame_phys_addr);
3481 	ddi_put32(cmd->frame_dma_obj.acc_handle,
3482 	    &abort_fr->abort_mfi_phys_addr_hi, 0);
3483 
3484 	cmd->frame_count = 1;
3485 
3486 	mr_sas_tbolt_build_mfi_cmd(instance, cmd);
3487 
3488 	if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
3489 		con_log(CL_ANN1, (CE_WARN,
3490 		    "abort_ldsync_cmd: issue_cmd_in_poll_mode failed"));
3491 		ret = -1;
3492 	} else {
3493 		ret = 0;
3494 	}
3495 
3496 	return_raid_msg_mfi_pkt(instance, cmd);
3497 
3498 	atomic_add_16(&instance->fw_outstanding, (-1));
3499 
3500 	return (ret);
3501 }
3502 
3503 /*
3504  * Even though these functions were originally intended for 2208 only, it
3505  * turns out they're useful for "Skinny" support as well.  In a perfect world,
3506  * these two functions would be either in mr_sas.c, or in their own new source
3507  * file.  Since this driver needs some cleanup anyway, keep this portion in
3508  * mind as well.
3509  */
3510 
3511 int
3512 mrsas_tbolt_config_pd(struct mrsas_instance *instance, uint16_t tgt,
3513     uint8_t lun, dev_info_t **ldip)
3514 {
3515 	struct scsi_device *sd;
3516 	dev_info_t *child;
3517 	int rval, dtype;
3518 	struct mrsas_tbolt_pd_info *pds = NULL;
3519 
3520 	con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_config_pd: t = %d l = %d",
3521 	    tgt, lun));
3522 
3523 	if ((child = mrsas_find_child(instance, tgt, lun)) != NULL) {
3524 		if (ldip) {
3525 			*ldip = child;
3526 		}
3527 		if (instance->mr_tbolt_pd_list[tgt].flag != MRDRV_TGT_VALID) {
3528 			rval = mrsas_service_evt(instance, tgt, 1,
3529 			    MRSAS_EVT_UNCONFIG_TGT, 0);
3530 			con_log(CL_ANN1, (CE_WARN,
3531 			    "mr_sas: DELETING STALE ENTRY rval = %d "
3532 			    "tgt id = %d", rval, tgt));
3533 			return (NDI_FAILURE);
3534 		}
3535 		return (NDI_SUCCESS);
3536 	}
3537 
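	/* No existing node; query FW for this target's PD state first. */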
3538 	pds = (struct mrsas_tbolt_pd_info *)
3539 	    kmem_zalloc(sizeof (struct mrsas_tbolt_pd_info), KM_SLEEP);
3540 	mrsas_tbolt_get_pd_info(instance, pds, tgt);
3541 	dtype = pds->scsiDevType;
3542 
3543 	/* Check for Disk */
3544 	if (dtype == DTYPE_DIRECT) {
3545 		if (LE_16(pds->fwState) != PD_SYSTEM) {
3547 			kmem_free(pds, sizeof (struct mrsas_tbolt_pd_info));
3548 			return (NDI_FAILURE);
3549 		}
3550 		sd = kmem_zalloc(sizeof (struct scsi_device), KM_SLEEP);
3551 		sd->sd_address.a_hba_tran = instance->tran;
3552 		sd->sd_address.a_target = (uint16_t)tgt;
3553 		sd->sd_address.a_lun = (uint8_t)lun;
3554 
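		/* Probe the target; only responding devices get a child node. */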
3555 		if (scsi_hba_probe(sd, NULL) == SCSIPROBE_EXISTS) {
3556 			rval = mrsas_config_scsi_device(instance, sd, ldip);
3557 			dev_err(instance->dip, CE_CONT,
3558 			    "?Phys. device found: tgt %d dtype %d: %s\n",
3559 			    tgt, dtype, sd->sd_inq->inq_vid);
3560 		} else {
3561 			rval = NDI_FAILURE;
3562 			con_log(CL_DLEVEL1, (CE_NOTE, "Phys. device Not found "
3563 			    "scsi_hba_probe Failed: tgt %d dtype %d: %s",
3564 			    tgt, dtype, sd->sd_inq->inq_vid));
3565 		}
3566 
3567 		/* scsi_unprobe() is a no-op; free the inquiry data manually. */
3568 		if (sd->sd_inq) {
3569 			kmem_free(sd->sd_inq, SUN_INQSIZE);
3570 			sd->sd_inq = (struct scsi_inquiry *)NULL;
3571 		}
3572 		kmem_free(sd, sizeof (struct scsi_device));
3573 	} else {
3574 		con_log(CL_ANN1, (CE_NOTE,
3575 		    "?Device not supported: tgt %d lun %d dtype %d",
3576 		    tgt, lun, dtype));
3577 		rval = NDI_FAILURE;
3578 	}
3579 
3580 	kmem_free(pds, sizeof (struct mrsas_tbolt_pd_info));
3581 	con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_config_pd: return rval = %d",
3582 	    rval));
3583 	return (rval);
3584 }
3585 
3586 static void
3587 mrsas_tbolt_get_pd_info(struct mrsas_instance *instance,
3588     struct mrsas_tbolt_pd_info *pds, int tgt)
3589 {
3590 	struct mrsas_cmd	*cmd;
3591 	struct mrsas_dcmd_frame	*dcmd;
3592 	dma_obj_t		dcmd_dma_obj;
3593 
3594 	ASSERT(instance->tbolt || instance->skinny);
3595 
3596 	if (instance->tbolt)
3597 		cmd = get_raid_msg_pkt(instance);
3598 	else
3599 		cmd = mrsas_get_mfi_pkt(instance);
3600 
3601 	if (!cmd) {
3602 		con_log(CL_ANN1,
3603 		    (CE_WARN, "Failed to get a cmd for get pd info"));
3604 		return;
3605 	}
3606 
3607 	/* Clear the frame buffer and assign back the context id */
3608 	bzero((char *)&cmd->frame[0], sizeof (union mrsas_frame));
3609 	ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3610 	    cmd->index);
3611 
3612 
3613 	dcmd = &cmd->frame->dcmd;
3614 	dcmd_dma_obj.size = sizeof (struct mrsas_tbolt_pd_info);
3615 	dcmd_dma_obj.dma_attr = mrsas_generic_dma_attr;
3616 	dcmd_dma_obj.dma_attr.dma_attr_addr_hi = 0xffffffff;
3617 	dcmd_dma_obj.dma_attr.dma_attr_count_max = 0xffffffff;
3618 	dcmd_dma_obj.dma_attr.dma_attr_sgllen = 1;
3619 	dcmd_dma_obj.dma_attr.dma_attr_align = 1;
3620 
3621 	(void) mrsas_alloc_dma_obj(instance, &dcmd_dma_obj,
3622 	    DDI_STRUCTURE_LE_ACC);
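	/*
	 * Note: the allocation result is not checked; if it failed, the
	 * bzero() below would write to an unallocated buffer.
	 */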
3623 	bzero(dcmd_dma_obj.buffer, sizeof (struct mrsas_tbolt_pd_info));
3624 	bzero(dcmd->mbox.b, DCMD_MBOX_SZ);
3625 	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD);
3626 	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status, 0);
3627 	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 1);
3628 	ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags,
3629 	    MFI_FRAME_DIR_READ);
3630 	ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0);
3631 	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len,
3632 	    sizeof (struct mrsas_tbolt_pd_info));
3633 	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode,
3634 	    MR_DCMD_PD_GET_INFO);
3635 	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->mbox.w[0], tgt);
3636 	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].length,
3637 	    sizeof (struct mrsas_tbolt_pd_info));
3638 	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].phys_addr,
3639 	    dcmd_dma_obj.dma_cookie[0].dmac_address);
3640 
3641 	cmd->sync_cmd = MRSAS_TRUE;
3642 	cmd->frame_count = 1;
3643 
3644 	if (instance->tbolt)
3645 		mr_sas_tbolt_build_mfi_cmd(instance, cmd);
3646 
3647 	instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd);
3648 
3649 	ddi_rep_get8(cmd->frame_dma_obj.acc_handle, (uint8_t *)pds,
3650 	    (uint8_t *)dcmd_dma_obj.buffer, sizeof (struct mrsas_tbolt_pd_info),
3651 	    DDI_DEV_AUTOINCR);
3652 	(void) mrsas_free_dma_obj(instance, dcmd_dma_obj);
3653 
3654 	if (instance->tbolt)
3655 		return_raid_msg_pkt(instance, cmd);
3656 	else
3657 		mrsas_return_mfi_pkt(instance, cmd);
3658 }
3659