/*
 * mr_sas_tbolt.c: source for the mr_sas driver for the new generation of
 * controllers, i.e. Thunderbolt and Invader.
 *
 * Solaris MegaRAID device driver for SAS2.0 controllers
 * Copyright (c) 2008-2012, LSI Logic Corporation.
 * All rights reserved.
 *
 * Version:
 * Author:
 *	Swaminathan K S
 *	Arun Chandrashekhar
 *	Manju R
 *	Rasheed
 *	Shakeel Bukhari
 */

/*
 * Copyright 2013 Nexenta Systems, Inc. All rights reserved.
 * Copyright 2015 Citrus IT Limited. All rights reserved.
 */


#include <sys/types.h>
#include <sys/file.h>
#include <sys/atomic.h>
#include <sys/scsi/scsi.h>
#include <sys/byteorder.h>
#include <sys/sdt.h>
#include "ld_pd_map.h"
#include "mr_sas.h"
#include "fusion.h"

/*
 * FMA header files
 */
#include <sys/ddifm.h>
#include <sys/fm/protocol.h>
#include <sys/fm/util.h>
#include <sys/fm/io/ddi.h>


/* Pre-TB command size and TB command size. */
#define	MR_COMMAND_SIZE	(64*20)	/* 1280 bytes */
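/*
 * Note: MR_COMMAND_SIZE bounds the per-command chain SGL buffer; the space
 * left after the main message frame (MR_COMMAND_SIZE -
 * MRSAS_THUNDERBOLT_MSG_SIZE) is what alloc_space_for_mpi2() divides by
 * sizeof (MPI2_SGE_IO_UNION) to derive max_sge_in_chain.
 */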
MR_LD_RAID *MR_LdRaidGet(U32 ld, MR_FW_RAID_MAP_ALL *map);
U16 MR_TargetIdToLdGet(U32 ldTgtId, MR_FW_RAID_MAP_ALL *map);
U16 MR_GetLDTgtId(U32 ld, MR_FW_RAID_MAP_ALL *map);
U16 get_updated_dev_handle(PLD_LOAD_BALANCE_INFO, struct IO_REQUEST_INFO *);
extern ddi_dma_attr_t mrsas_generic_dma_attr;
extern uint32_t mrsas_tbolt_max_cap_maxxfer;
extern struct ddi_device_acc_attr endian_attr;
extern int debug_level_g;
extern unsigned int enable_fp;
volatile int dump_io_wait_time = 90;
extern volatile int debug_timeout_g;
extern int mrsas_issue_pending_cmds(struct mrsas_instance *);
extern int mrsas_complete_pending_cmds(struct mrsas_instance *instance);
extern void push_pending_mfi_pkt(struct mrsas_instance *,
	struct mrsas_cmd *);
extern U8 MR_BuildRaidContext(struct mrsas_instance *, struct IO_REQUEST_INFO *,
	MPI2_SCSI_IO_VENDOR_UNIQUE *, MR_FW_RAID_MAP_ALL *);

/* Local static prototypes. */
static struct mrsas_cmd *mrsas_tbolt_build_cmd(struct mrsas_instance *,
	struct scsi_address *, struct scsi_pkt *, uchar_t *);
static void mrsas_tbolt_set_pd_lba(U8 cdb[], uint8_t *cdb_len_ptr,
	U64 start_blk, U32 num_blocks);
static int mrsas_tbolt_check_map_info(struct mrsas_instance *);
static int mrsas_tbolt_sync_map_info(struct mrsas_instance *);
static int mrsas_tbolt_prepare_pkt(struct scsa_cmd *);
static int mrsas_tbolt_ioc_init(struct mrsas_instance *, dma_obj_t *);
#ifdef PDSUPPORT
static void mrsas_tbolt_get_pd_info(struct mrsas_instance *,
	struct mrsas_tbolt_pd_info *, int);
#endif /* PDSUPPORT */

static int debug_tbolt_fw_faults_after_ocr_g = 0;

/*
 * destroy_mfi_mpi_frame_pool
 */
void
destroy_mfi_mpi_frame_pool(struct mrsas_instance *instance)
{
	int i;

	struct mrsas_cmd *cmd;

	/* free the MFI frames of all application-reserved commands */
	for (i = 0; i < MRSAS_APP_RESERVED_CMDS; i++) {
		cmd = instance->cmd_list[i];
		if (cmd->frame_dma_obj_status == DMA_OBJ_ALLOCATED) {
			(void) mrsas_free_dma_obj(instance,
			    cmd->frame_dma_obj);
		}
		cmd->frame_dma_obj_status = DMA_OBJ_FREED;
	}
}

/*
 * destroy_mpi2_frame_pool
 */
void
destroy_mpi2_frame_pool(struct mrsas_instance *instance)
{
	if (instance->mpi2_frame_pool_dma_obj.status == DMA_OBJ_ALLOCATED) {
		(void) mrsas_free_dma_obj(instance,
		    instance->mpi2_frame_pool_dma_obj);
		instance->mpi2_frame_pool_dma_obj.status = DMA_OBJ_FREED;
	}
}


/*
 * mrsas_tbolt_free_additional_dma_buffer
 */
void
mrsas_tbolt_free_additional_dma_buffer(struct mrsas_instance *instance)
{
	int i;

	if (instance->mfi_internal_dma_obj.status == DMA_OBJ_ALLOCATED) {
		(void) mrsas_free_dma_obj(instance,
		    instance->mfi_internal_dma_obj);
		instance->mfi_internal_dma_obj.status = DMA_OBJ_FREED;
	}
	if (instance->mfi_evt_detail_obj.status == DMA_OBJ_ALLOCATED) {
		(void) mrsas_free_dma_obj(instance,
		    instance->mfi_evt_detail_obj);
		instance->mfi_evt_detail_obj.status = DMA_OBJ_FREED;
	}

	for (i = 0; i < 2; i++) {
		if (instance->ld_map_obj[i].status == DMA_OBJ_ALLOCATED) {
			(void) mrsas_free_dma_obj(instance,
			    instance->ld_map_obj[i]);
			instance->ld_map_obj[i].status = DMA_OBJ_FREED;
		}
	}
}


/*
 * free_req_rep_desc_pool
 */
void
free_req_rep_desc_pool(struct mrsas_instance *instance)
{
	if (instance->request_desc_dma_obj.status == DMA_OBJ_ALLOCATED) {
		(void) mrsas_free_dma_obj(instance,
		    instance->request_desc_dma_obj);
		instance->request_desc_dma_obj.status = DMA_OBJ_FREED;
	}

	if (instance->reply_desc_dma_obj.status == DMA_OBJ_ALLOCATED) {
		(void) mrsas_free_dma_obj(instance,
		    instance->reply_desc_dma_obj);
		instance->reply_desc_dma_obj.status = DMA_OBJ_FREED;
	}
}


/*
 * ThunderBolt(TB) Request Message Frame Pool
 */
int
create_mpi2_frame_pool(struct mrsas_instance *instance)
{
	int i = 0;
	uint16_t max_cmd;
	uint32_t sgl_sz;
	uint32_t raid_msg_size;
	uint32_t total_size;
	uint32_t offset;
	uint32_t io_req_base_phys;
	uint8_t *io_req_base;
	struct mrsas_cmd *cmd;

	max_cmd = instance->max_fw_cmds;

	sgl_sz = 1024;
	raid_msg_size = MRSAS_THUNDERBOLT_MSG_SIZE;

	/* Allocate an additional 256 bytes to accommodate SMID 0. */
	total_size = MRSAS_THUNDERBOLT_MSG_SIZE + (max_cmd * raid_msg_size) +
	    (max_cmd * sgl_sz) + (max_cmd * SENSE_LENGTH);

	con_log(CL_ANN1, (CE_NOTE, "create_mpi2_frame_pool: "
	    "max_cmd %x", max_cmd));

	con_log(CL_DLEVEL3, (CE_NOTE, "create_mpi2_frame_pool: "
	    "request message frame pool size %x", total_size));

	/*
	 * For ThunderBolt (TB) we need to create a single chunk of DMA-able
	 * memory and then split it up amongst the commands.  Each command
	 * must be able to contain a RAID MESSAGE FRAME, which in turn embeds
	 * an MFI_FRAME.  See alloc_req_rep_desc(), where we allocate the
	 * request/reply descriptor queues, for a related layout.
	 */
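	/*
	 * Resulting pool layout (offsets from the single DMA chunk); this
	 * is a summary of the address math performed in the loop below:
	 *
	 *   [0]                       one message frame, reserved for SMID 0
	 *   [+MSG_SIZE]               max_cmd RAID message frames (SMID 1..N)
	 *   [+MSG_SIZE*(max_cmd+1)]   max_cmd chain SGL buffers, 1024B each
	 *   [... + 1024*max_cmd]      max_cmd sense buffers, SENSE_LENGTH each
	 */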

	instance->mpi2_frame_pool_dma_obj.size = total_size;
	instance->mpi2_frame_pool_dma_obj.dma_attr = mrsas_generic_dma_attr;
	instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_addr_hi =
	    0xFFFFFFFFU;
	instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_count_max =
	    0xFFFFFFFFU;
	instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_sgllen = 1;
	instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_align = 256;

	if (mrsas_alloc_dma_obj(instance, &instance->mpi2_frame_pool_dma_obj,
	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
		dev_err(instance->dip, CE_WARN,
		    "could not alloc mpi2 frame pool");
		return (DDI_FAILURE);
	}

	bzero(instance->mpi2_frame_pool_dma_obj.buffer, total_size);
	instance->mpi2_frame_pool_dma_obj.status |= DMA_OBJ_ALLOCATED;

	instance->io_request_frames =
	    (uint8_t *)instance->mpi2_frame_pool_dma_obj.buffer;
	instance->io_request_frames_phy =
	    (uint32_t)
	    instance->mpi2_frame_pool_dma_obj.dma_cookie[0].dmac_address;

	con_log(CL_DLEVEL3, (CE_NOTE, "io_request_frames 0x%p",
	    (void *)instance->io_request_frames));

	con_log(CL_DLEVEL3, (CE_NOTE, "io_request_frames_phy 0x%x",
	    instance->io_request_frames_phy));

	io_req_base = (uint8_t *)instance->io_request_frames +
	    MRSAS_THUNDERBOLT_MSG_SIZE;
	io_req_base_phys = instance->io_request_frames_phy +
	    MRSAS_THUNDERBOLT_MSG_SIZE;

	con_log(CL_DLEVEL3, (CE_NOTE,
	    "io req_base_phys 0x%x", io_req_base_phys));

	for (i = 0; i < max_cmd; i++) {
		cmd = instance->cmd_list[i];

		offset = i * MRSAS_THUNDERBOLT_MSG_SIZE;

		cmd->scsi_io_request = (Mpi2RaidSCSIIORequest_t *)
		    ((uint8_t *)io_req_base + offset);
		cmd->scsi_io_request_phys_addr = io_req_base_phys + offset;

		cmd->sgl = (Mpi2SGEIOUnion_t *)((uint8_t *)io_req_base +
		    (max_cmd * raid_msg_size) + i * sgl_sz);

		cmd->sgl_phys_addr = (io_req_base_phys +
		    (max_cmd * raid_msg_size) + i * sgl_sz);

		cmd->sense1 = (uint8_t *)((uint8_t *)io_req_base +
		    (max_cmd * raid_msg_size) + (max_cmd * sgl_sz) +
		    (i * SENSE_LENGTH));

		cmd->sense_phys_addr1 = (io_req_base_phys +
		    (max_cmd * raid_msg_size) + (max_cmd * sgl_sz) +
		    (i * SENSE_LENGTH));

		cmd->SMID = i + 1;

		con_log(CL_DLEVEL3, (CE_NOTE, "Frame Pool Addr [%x]0x%p",
		    cmd->index, (void *)cmd->scsi_io_request));

		con_log(CL_DLEVEL3, (CE_NOTE, "Frame Pool Phys Addr [%x]0x%x",
		    cmd->index, cmd->scsi_io_request_phys_addr));

		con_log(CL_DLEVEL3, (CE_NOTE, "Sense Addr [%x]0x%p",
		    cmd->index, (void *)cmd->sense1));

		con_log(CL_DLEVEL3, (CE_NOTE, "Sense Addr Phys [%x]0x%x",
		    cmd->index, cmd->sense_phys_addr1));

		con_log(CL_DLEVEL3, (CE_NOTE, "Sgl buffers [%x]0x%p",
		    cmd->index, (void *)cmd->sgl));

		con_log(CL_DLEVEL3, (CE_NOTE, "Sgl buffers phys [%x]0x%x",
		    cmd->index, cmd->sgl_phys_addr));
	}

	return (DDI_SUCCESS);
}


/*
 * mrsas_tbolt_alloc_additional_dma_buffer - event/AEN and LD map buffers
 */
int
mrsas_tbolt_alloc_additional_dma_buffer(struct mrsas_instance *instance)
{
	uint32_t internal_buf_size = PAGESIZE * 2;
	int i;

	/* Initialize buffer status as free */
	instance->mfi_internal_dma_obj.status = DMA_OBJ_FREED;
	instance->mfi_evt_detail_obj.status = DMA_OBJ_FREED;
	instance->ld_map_obj[0].status = DMA_OBJ_FREED;
	instance->ld_map_obj[1].status = DMA_OBJ_FREED;

	instance->mfi_internal_dma_obj.size = internal_buf_size;
	instance->mfi_internal_dma_obj.dma_attr = mrsas_generic_dma_attr;
	instance->mfi_internal_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
	instance->mfi_internal_dma_obj.dma_attr.dma_attr_count_max =
	    0xFFFFFFFFU;
	instance->mfi_internal_dma_obj.dma_attr.dma_attr_sgllen = 1;

	if (mrsas_alloc_dma_obj(instance, &instance->mfi_internal_dma_obj,
	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
		dev_err(instance->dip, CE_WARN,
		    "could not alloc internal buffer");
		return (DDI_FAILURE);
	}

	bzero(instance->mfi_internal_dma_obj.buffer, internal_buf_size);

	instance->mfi_internal_dma_obj.status |= DMA_OBJ_ALLOCATED;
	instance->internal_buf =
	    (caddr_t)(((unsigned long)instance->mfi_internal_dma_obj.buffer));
	instance->internal_buf_dmac_add =
	    instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address;
	instance->internal_buf_size = internal_buf_size;

	/* allocate evt_detail */
	instance->mfi_evt_detail_obj.size = sizeof (struct mrsas_evt_detail);
	instance->mfi_evt_detail_obj.dma_attr = mrsas_generic_dma_attr;
	instance->mfi_evt_detail_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
	instance->mfi_evt_detail_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
	instance->mfi_evt_detail_obj.dma_attr.dma_attr_sgllen = 1;
	instance->mfi_evt_detail_obj.dma_attr.dma_attr_align = 8;

	if (mrsas_alloc_dma_obj(instance, &instance->mfi_evt_detail_obj,
	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
		dev_err(instance->dip, CE_WARN,
		    "mrsas_tbolt_alloc_additional_dma_buffer: "
		    "could not allocate data transfer buffer.");
		goto fail_tbolt_additional_buff;
	}

	bzero(instance->mfi_evt_detail_obj.buffer,
	    sizeof (struct mrsas_evt_detail));

	instance->mfi_evt_detail_obj.status |= DMA_OBJ_ALLOCATED;

	instance->size_map_info = sizeof (MR_FW_RAID_MAP) +
	    (sizeof (MR_LD_SPAN_MAP) * (MAX_LOGICAL_DRIVES - 1));
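	/*
	 * Sizing note: MR_FW_RAID_MAP already ends in one MR_LD_SPAN_MAP
	 * entry, so only (MAX_LOGICAL_DRIVES - 1) additional entries are
	 * added to cover the largest map the firmware may return.
	 */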

	for (i = 0; i < 2; i++) {
		/* allocate the data transfer buffer */
		instance->ld_map_obj[i].size = instance->size_map_info;
		instance->ld_map_obj[i].dma_attr = mrsas_generic_dma_attr;
		instance->ld_map_obj[i].dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
		instance->ld_map_obj[i].dma_attr.dma_attr_count_max =
		    0xFFFFFFFFU;
		instance->ld_map_obj[i].dma_attr.dma_attr_sgllen = 1;
		instance->ld_map_obj[i].dma_attr.dma_attr_align = 1;

		if (mrsas_alloc_dma_obj(instance, &instance->ld_map_obj[i],
		    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
			dev_err(instance->dip, CE_WARN,
			    "could not allocate data transfer buffer.");
			goto fail_tbolt_additional_buff;
		}

		instance->ld_map_obj[i].status |= DMA_OBJ_ALLOCATED;

		bzero(instance->ld_map_obj[i].buffer, instance->size_map_info);

		instance->ld_map[i] =
		    (MR_FW_RAID_MAP_ALL *)instance->ld_map_obj[i].buffer;
		instance->ld_map_phy[i] = (uint32_t)instance->
		    ld_map_obj[i].dma_cookie[0].dmac_address;

		con_log(CL_DLEVEL3, (CE_NOTE,
		    "ld_map Addr Phys 0x%x", instance->ld_map_phy[i]));

		con_log(CL_DLEVEL3, (CE_NOTE,
		    "size_map_info 0x%x", instance->size_map_info));
	}

	return (DDI_SUCCESS);

fail_tbolt_additional_buff:
	mrsas_tbolt_free_additional_dma_buffer(instance);

	return (DDI_FAILURE);
}

MRSAS_REQUEST_DESCRIPTOR_UNION *
mr_sas_get_request_descriptor(struct mrsas_instance *instance, uint16_t index)
{
	MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;

	/* valid indices are 0 .. max_fw_cmds - 1 */
	if (index >= instance->max_fw_cmds) {
		con_log(CL_ANN1, (CE_NOTE,
		    "Invalid SMID 0x%x request for descriptor", index));
		con_log(CL_ANN1, (CE_NOTE,
		    "max_fw_cmds : 0x%x", instance->max_fw_cmds));
		return (NULL);
	}

	req_desc = (MRSAS_REQUEST_DESCRIPTOR_UNION *)
	    ((char *)instance->request_message_pool +
	    (sizeof (MRSAS_REQUEST_DESCRIPTOR_UNION) * index));

	con_log(CL_ANN1, (CE_NOTE,
	    "request descriptor : 0x%08lx", (unsigned long)req_desc));

	con_log(CL_ANN1, (CE_NOTE,
	    "request descriptor base phy : 0x%08lx",
	    (unsigned long)instance->request_message_pool_phy));

	return (req_desc);
}


/*
 * Allocate Request and Reply Queue Descriptors.
 */
int
alloc_req_rep_desc(struct mrsas_instance *instance)
{
	uint32_t request_q_sz, reply_q_sz;
	int i, max_reply_q_sz;
	MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;

	/*
	 * ThunderBolt(TB) There is no longer a producer/consumer mechanism.
	 * Once we get an interrupt we are supposed to scan through the list
	 * of reply descriptors and process them accordingly, so the memory
	 * for the reply descriptors must be allocated up front.
	 */

	/* Allocate Reply Descriptors */
	con_log(CL_ANN1, (CE_NOTE, " reply q desc len = %x",
	    (uint_t)sizeof (MPI2_REPLY_DESCRIPTORS_UNION)));

	/* reply queue size should be a multiple of 16 */
	max_reply_q_sz = ((instance->max_fw_cmds + 1 + 15) / 16) * 16;

	reply_q_sz = 8 * max_reply_q_sz;
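	/*
	 * Each MPI2 reply descriptor is 8 bytes (two 32-bit words), hence
	 * the byte size above is 8 * the rounded-up queue depth.
	 */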
	instance->reply_desc_dma_obj.size = reply_q_sz;
	instance->reply_desc_dma_obj.dma_attr = mrsas_generic_dma_attr;
	instance->reply_desc_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
	instance->reply_desc_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
	instance->reply_desc_dma_obj.dma_attr.dma_attr_sgllen = 1;
	instance->reply_desc_dma_obj.dma_attr.dma_attr_align = 16;

	if (mrsas_alloc_dma_obj(instance, &instance->reply_desc_dma_obj,
	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
		dev_err(instance->dip, CE_WARN, "could not alloc reply queue");
		return (DDI_FAILURE);
	}

	bzero(instance->reply_desc_dma_obj.buffer, reply_q_sz);
	instance->reply_desc_dma_obj.status |= DMA_OBJ_ALLOCATED;

	/* virtual address of reply queue */
	instance->reply_frame_pool = (MPI2_REPLY_DESCRIPTORS_UNION *)(
	    instance->reply_desc_dma_obj.buffer);

	instance->reply_q_depth = max_reply_q_sz;

	con_log(CL_ANN1, (CE_NOTE, "[reply queue depth]0x%x",
	    instance->reply_q_depth));

	con_log(CL_ANN1, (CE_NOTE, "[reply queue virt addr]0x%p",
	    (void *)instance->reply_frame_pool));

	/* initializing reply address to 0xFFFFFFFF */
	reply_desc = instance->reply_frame_pool;

	for (i = 0; i < instance->reply_q_depth; i++) {
		reply_desc->Words = (uint64_t)~0;
		reply_desc++;
	}
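	/*
	 * The all-ones pattern marks a descriptor slot as unused; when
	 * scanning the post queue, the driver can treat any entry that
	 * still reads all ones as "no completion posted here yet".
	 */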

	instance->reply_frame_pool_phy =
	    (uint32_t)instance->reply_desc_dma_obj.dma_cookie[0].dmac_address;

	con_log(CL_ANN1, (CE_NOTE,
	    "[reply queue phys addr]0x%x", instance->reply_frame_pool_phy));

	instance->reply_pool_limit_phy = (instance->reply_frame_pool_phy +
	    reply_q_sz);

	con_log(CL_ANN1, (CE_NOTE, "[reply pool limit phys addr]0x%x",
	    instance->reply_pool_limit_phy));
	/* Allocate Request Descriptors */
	con_log(CL_ANN1, (CE_NOTE, " request q desc len = %x",
	    (int)sizeof (MRSAS_REQUEST_DESCRIPTOR_UNION)));

	request_q_sz = 8 * instance->max_fw_cmds;

	instance->request_desc_dma_obj.size = request_q_sz;
	instance->request_desc_dma_obj.dma_attr = mrsas_generic_dma_attr;
	instance->request_desc_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
	instance->request_desc_dma_obj.dma_attr.dma_attr_count_max =
	    0xFFFFFFFFU;
	instance->request_desc_dma_obj.dma_attr.dma_attr_sgllen = 1;
	instance->request_desc_dma_obj.dma_attr.dma_attr_align = 16;

	if (mrsas_alloc_dma_obj(instance, &instance->request_desc_dma_obj,
	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
		dev_err(instance->dip, CE_WARN,
		    "could not alloc request queue desc");
		goto fail_undo_reply_queue;
	}

	bzero(instance->request_desc_dma_obj.buffer, request_q_sz);
	instance->request_desc_dma_obj.status |= DMA_OBJ_ALLOCATED;

	/* virtual address of request queue desc */
	instance->request_message_pool = (MRSAS_REQUEST_DESCRIPTOR_UNION *)
	    (instance->request_desc_dma_obj.buffer);

	instance->request_message_pool_phy =
	    (uint32_t)instance->request_desc_dma_obj.dma_cookie[0].dmac_address;

	return (DDI_SUCCESS);

fail_undo_reply_queue:
	if (instance->reply_desc_dma_obj.status == DMA_OBJ_ALLOCATED) {
		(void) mrsas_free_dma_obj(instance,
		    instance->reply_desc_dma_obj);
		instance->reply_desc_dma_obj.status = DMA_OBJ_FREED;
	}

	return (DDI_FAILURE);
}

/*
 * mrsas_alloc_cmd_pool_tbolt
 *
 * TODO: merge tbolt-specific code into mrsas_alloc_cmd_pool() to have a
 * single routine
 */
int
mrsas_alloc_cmd_pool_tbolt(struct mrsas_instance *instance)
{
	int i;
	int count;
	uint32_t max_cmd;
	uint32_t reserve_cmd;
	size_t sz;

	struct mrsas_cmd *cmd;

	max_cmd = instance->max_fw_cmds;
	con_log(CL_ANN1, (CE_NOTE, "mrsas_alloc_cmd_pool_tbolt: "
	    "max_cmd %x", max_cmd));

	sz = sizeof (struct mrsas_cmd *) * max_cmd;

	/*
	 * instance->cmd_list is an array of struct mrsas_cmd pointers.
	 * Allocate the dynamic array first and then allocate individual
	 * commands.
	 */
	instance->cmd_list = kmem_zalloc(sz, KM_SLEEP);

	/* allocate the individual command structures */
	for (count = 0; count < max_cmd; count++) {
		instance->cmd_list[count] =
		    kmem_zalloc(sizeof (struct mrsas_cmd), KM_SLEEP);
	}

	/* add all the commands to the command pool */

	INIT_LIST_HEAD(&instance->cmd_pool_list);
	INIT_LIST_HEAD(&instance->cmd_pend_list);
	INIT_LIST_HEAD(&instance->cmd_app_pool_list);

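	/*
	 * Pool partitioning (as implemented by the two loops below):
	 * cmd_list[0] is kept off both free lists and used for IOC INIT;
	 * indices 1 .. reserve_cmd - 1 form the application (ioctl/AEN)
	 * pool; the remaining indices form the regular I/O pool.
	 */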
	reserve_cmd = MRSAS_APP_RESERVED_CMDS;

	/* cmd index 0 reserved for IOC INIT */
	for (i = 1; i < reserve_cmd; i++) {
		cmd = instance->cmd_list[i];
		cmd->index = i;
		mlist_add_tail(&cmd->list, &instance->cmd_app_pool_list);
	}

	for (i = reserve_cmd; i < max_cmd; i++) {
		cmd = instance->cmd_list[i];
		cmd->index = i;
		mlist_add_tail(&cmd->list, &instance->cmd_pool_list);
	}

	return (DDI_SUCCESS);

mrsas_undo_cmds:
	if (count > 0) {
		/* free each cmd */
		for (i = 0; i < count; i++) {
			if (instance->cmd_list[i] != NULL) {
				kmem_free(instance->cmd_list[i],
				    sizeof (struct mrsas_cmd));
			}
			instance->cmd_list[i] = NULL;
		}
	}

mrsas_undo_cmd_list:
	if (instance->cmd_list != NULL)
		kmem_free(instance->cmd_list, sz);
	instance->cmd_list = NULL;

	return (DDI_FAILURE);
}


/*
 * free_space_for_mpi2
 */
void
free_space_for_mpi2(struct mrsas_instance *instance)
{
	/* already freed */
	if (instance->cmd_list == NULL) {
		return;
	}

	/* First free the additional DMA buffer */
	mrsas_tbolt_free_additional_dma_buffer(instance);

	/* Free the request/reply descriptor pool */
	free_req_rep_desc_pool(instance);

	/* Free the MPI message pool */
	destroy_mpi2_frame_pool(instance);

	/* Free the MFI frame pool */
	destroy_mfi_frame_pool(instance);

	/* Free all the commands in the cmd_list */
	/* Free the cmd_list buffer itself */
	mrsas_free_cmd_pool(instance);
}


/*
 * ThunderBolt(TB) memory allocations for commands/messages/frames.
 */
int
alloc_space_for_mpi2(struct mrsas_instance *instance)
{
	/* Allocate command pool (memory for cmd_list & individual commands) */
	if (mrsas_alloc_cmd_pool_tbolt(instance)) {
		dev_err(instance->dip, CE_WARN, "Error creating cmd pool");
		return (DDI_FAILURE);
	}

	/* Initialize single reply size and Message size */
	instance->reply_size = MRSAS_THUNDERBOLT_REPLY_SIZE;
	instance->raid_io_msg_size = MRSAS_THUNDERBOLT_MSG_SIZE;

	instance->max_sge_in_main_msg = (MRSAS_THUNDERBOLT_MSG_SIZE -
	    (sizeof (MPI2_RAID_SCSI_IO_REQUEST) -
	    sizeof (MPI2_SGE_IO_UNION))) / sizeof (MPI2_SGE_IO_UNION);
	instance->max_sge_in_chain = (MR_COMMAND_SIZE -
	    MRSAS_THUNDERBOLT_MSG_SIZE) / sizeof (MPI2_SGE_IO_UNION);

	/*
	 * Reduce the SG count by 2: one SGE slot in the main message is
	 * consumed by the chain element itself, and one more is kept back
	 * for the group cmds feature in the FW.
	 */
	instance->max_num_sge = (instance->max_sge_in_main_msg +
	    instance->max_sge_in_chain - 2);
	instance->chain_offset_mpt_msg =
	    offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 16;
	instance->chain_offset_io_req = (MRSAS_THUNDERBOLT_MSG_SIZE -
	    sizeof (MPI2_SGE_IO_UNION)) / 16;
	instance->reply_read_index = 0;
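	/*
	 * Chain offsets are expressed in 16-byte units.  As a worked
	 * example, assuming MRSAS_THUNDERBOLT_MSG_SIZE is 256 bytes and a
	 * 16-byte SGE union, chain_offset_io_req works out to
	 * (256 - 16) / 16 = 15, i.e. the last SGE slot of the main frame.
	 */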

	/* Allocate Request and Reply descriptors Array */
	/* Make sure the buffer is aligned to 8 for req/rep descriptor Pool */
	if (alloc_req_rep_desc(instance)) {
		dev_err(instance->dip, CE_WARN,
		    "Error, allocating memory for descriptor pool");
		goto mpi2_undo_cmd_pool;
	}
	con_log(CL_ANN1, (CE_NOTE, "[request message pool phys addr]0x%x",
	    instance->request_message_pool_phy));

	/* Allocate MFI Frame pool - for MPI-MFI passthru commands */
	if (create_mfi_frame_pool(instance)) {
		dev_err(instance->dip, CE_WARN,
		    "Error, allocating memory for MFI frame-pool");
		goto mpi2_undo_descriptor_pool;
	}

	/* Allocate MPI2 Message pool */
	/*
	 * Make sure the buffer is aligned to 256 for the raid message packet;
	 * create an io request pool and assign one frame to each cmd.
	 */

	if (create_mpi2_frame_pool(instance)) {
		dev_err(instance->dip, CE_WARN,
		    "Error, allocating memory for MPI2 Message-pool");
		goto mpi2_undo_mfi_frame_pool;
	}

#ifdef DEBUG
	con_log(CL_ANN1, (CE_CONT, "[max_sge_in_main_msg]0x%x",
	    instance->max_sge_in_main_msg));
	con_log(CL_ANN1, (CE_CONT, "[max_sge_in_chain]0x%x",
	    instance->max_sge_in_chain));
	con_log(CL_ANN1, (CE_CONT,
	    "[max_sge]0x%x", instance->max_num_sge));
	con_log(CL_ANN1, (CE_CONT, "[chain_offset_mpt_msg]0x%x",
	    instance->chain_offset_mpt_msg));
	con_log(CL_ANN1, (CE_CONT, "[chain_offset_io_req]0x%x",
	    instance->chain_offset_io_req));
#endif

	/* Allocate additional dma buffer */
	if (mrsas_tbolt_alloc_additional_dma_buffer(instance)) {
		dev_err(instance->dip, CE_WARN,
		    "Error, allocating tbolt additional DMA buffer");
		goto mpi2_undo_message_pool;
	}

	return (DDI_SUCCESS);

mpi2_undo_message_pool:
	destroy_mpi2_frame_pool(instance);

mpi2_undo_mfi_frame_pool:
	destroy_mfi_frame_pool(instance);

mpi2_undo_descriptor_pool:
	free_req_rep_desc_pool(instance);

mpi2_undo_cmd_pool:
	mrsas_free_cmd_pool(instance);

	return (DDI_FAILURE);
}


/*
 * mrsas_init_adapter_tbolt - Initialize fusion interface adapter.
 */
int
mrsas_init_adapter_tbolt(struct mrsas_instance *instance)
{

	/*
	 * Reduce the max supported cmds by 1, so that the reply_q_sz
	 * (one more than the max cmds the driver may send) does not
	 * exceed the number of cmds the FW can support.
	 */

	if (instance->max_fw_cmds > 1008) {
		instance->max_fw_cmds = 1008;
		instance->max_fw_cmds = instance->max_fw_cmds - 1;
	}
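	/*
	 * Note the clamp only takes effect when the FW advertises more
	 * than 1008 commands; in that case the effective limit becomes
	 * 1007, keeping the reply queue depth (max_fw_cmds + 1) within
	 * the 1008-command ceiling.
	 */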

	con_log(CL_ANN, (CE_NOTE, "mrsas_init_adapter_tbolt: "
	    "instance->max_fw_cmds 0x%X.", instance->max_fw_cmds));

	/* create a pool of commands */
	if (alloc_space_for_mpi2(instance) != DDI_SUCCESS) {
		dev_err(instance->dip, CE_WARN,
		    "alloc_space_for_mpi2() failed.");

		return (DDI_FAILURE);
	}

	/* Send ioc init message */
	/* NOTE: the issue_init call does FMA checking already. */
	if (mrsas_issue_init_mpi2(instance) != DDI_SUCCESS) {
		dev_err(instance->dip, CE_WARN,
		    "mrsas_issue_init_mpi2() failed.");

		goto fail_init_fusion;
	}

	instance->unroll.alloc_space_mpi2 = 1;

	con_log(CL_ANN, (CE_NOTE,
	    "mrsas_init_adapter_tbolt: SUCCESSFUL"));

	return (DDI_SUCCESS);

fail_init_fusion:
	free_space_for_mpi2(instance);

	return (DDI_FAILURE);
}


/*
 * init_mpi2
 */
int
mrsas_issue_init_mpi2(struct mrsas_instance *instance)
{
	dma_obj_t init2_dma_obj;
	int ret_val = DDI_SUCCESS;

	/* allocate DMA buffer for IOC INIT message */
	init2_dma_obj.size = sizeof (Mpi2IOCInitRequest_t);
	init2_dma_obj.dma_attr = mrsas_generic_dma_attr;
	init2_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
	init2_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
	init2_dma_obj.dma_attr.dma_attr_sgllen = 1;
	init2_dma_obj.dma_attr.dma_attr_align = 256;

	if (mrsas_alloc_dma_obj(instance, &init2_dma_obj,
	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
		dev_err(instance->dip, CE_WARN, "mrsas_issue_init_mpi2: "
		    "could not allocate data transfer buffer.");
		return (DDI_FAILURE);
	}
	(void) memset(init2_dma_obj.buffer, 2, sizeof (Mpi2IOCInitRequest_t));

	con_log(CL_ANN1, (CE_NOTE,
	    "mrsas_issue_init_mpi2: phys addr %x",
	    init2_dma_obj.dma_cookie[0].dmac_address));

	/* Initialize and send ioc init message */
	ret_val = mrsas_tbolt_ioc_init(instance, &init2_dma_obj);
	if (ret_val == DDI_FAILURE) {
		con_log(CL_ANN1, (CE_WARN,
		    "mrsas_issue_init_mpi2: Failed"));
		goto fail_init_mpi2;
	}

	/* free IOC init DMA buffer */
	if (mrsas_free_dma_obj(instance, init2_dma_obj)
	    != DDI_SUCCESS) {
		con_log(CL_ANN1, (CE_WARN,
		    "mrsas_issue_init_mpi2: Free Failed"));
		return (DDI_FAILURE);
	}

	/* Get/Check and sync ld_map info */
	instance->map_id = 0;
	if (mrsas_tbolt_check_map_info(instance) == DDI_SUCCESS)
		(void) mrsas_tbolt_sync_map_info(instance);

	/*
	 * No mrsas_cmd to send, so send NULL.  The init2 DMA buffer was
	 * already freed above, so return directly here rather than taking
	 * the fail_init_mpi2 path, which would free it a second time.
	 */
	if (mrsas_common_check(instance, NULL) != DDI_SUCCESS)
		return (DDI_FAILURE);

	con_log(CL_ANN, (CE_NOTE,
	    "mrsas_issue_init_mpi2: SUCCESSFUL"));

	return (DDI_SUCCESS);

fail_init_mpi2:
	(void) mrsas_free_dma_obj(instance, init2_dma_obj);

	return (DDI_FAILURE);
}

static int
mrsas_tbolt_ioc_init(struct mrsas_instance *instance, dma_obj_t *mpi2_dma_obj)
{
	int numbytes;
	uint16_t flags;
	struct mrsas_init_frame2 *mfiFrameInit2;
	struct mrsas_header *frame_hdr;
	Mpi2IOCInitRequest_t *init;
	struct mrsas_cmd *cmd = NULL;
	struct mrsas_drv_ver drv_ver_info;
	MRSAS_REQUEST_DESCRIPTOR_UNION req_desc;
	uint32_t timeout;

	con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

#ifdef DEBUG
	con_log(CL_ANN1, (CE_CONT, " mfiFrameInit2 len = %x\n",
	    (int)sizeof (*mfiFrameInit2)));
	con_log(CL_ANN1, (CE_CONT, " MPI len = %x\n", (int)sizeof (*init)));
#endif

	init = (Mpi2IOCInitRequest_t *)mpi2_dma_obj->buffer;
	numbytes = sizeof (*init);
	bzero(init, numbytes);

	ddi_put8(mpi2_dma_obj->acc_handle, &init->Function,
	    MPI2_FUNCTION_IOC_INIT);

	ddi_put8(mpi2_dma_obj->acc_handle, &init->WhoInit,
	    MPI2_WHOINIT_HOST_DRIVER);

	/* set MsgVersion and HeaderVersion host driver was built with */
	ddi_put16(mpi2_dma_obj->acc_handle, &init->MsgVersion,
	    MPI2_VERSION);

	ddi_put16(mpi2_dma_obj->acc_handle, &init->HeaderVersion,
	    MPI2_HEADER_VERSION);

	ddi_put16(mpi2_dma_obj->acc_handle, &init->SystemRequestFrameSize,
	    instance->raid_io_msg_size / 4);

	ddi_put16(mpi2_dma_obj->acc_handle, &init->ReplyFreeQueueDepth,
	    0);

	ddi_put16(mpi2_dma_obj->acc_handle,
	    &init->ReplyDescriptorPostQueueDepth,
	    instance->reply_q_depth);
	/*
	 * These addresses are set using the DMA cookie addresses from when
	 * the memory was allocated.  Sense buffer hi address should be 0.
	 * ddi_put32(accessp, &init->SenseBufferAddressHigh, 0);
	 */

	ddi_put32(mpi2_dma_obj->acc_handle,
	    &init->SenseBufferAddressHigh, 0);

	ddi_put64(mpi2_dma_obj->acc_handle,
	    (uint64_t *)&init->SystemRequestFrameBaseAddress,
	    instance->io_request_frames_phy);

	ddi_put64(mpi2_dma_obj->acc_handle,
	    &init->ReplyDescriptorPostQueueAddress,
	    instance->reply_frame_pool_phy);

	ddi_put64(mpi2_dma_obj->acc_handle,
	    &init->ReplyFreeQueueAddress, 0);
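	/*
	 * The reply free queue is not used in this MegaRAID fusion flow
	 * (reply frames live alongside the descriptors), which is
	 * presumably why both its depth and address are set to 0 above.
	 */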

	cmd = instance->cmd_list[0];
	if (cmd == NULL) {
		return (DDI_FAILURE);
	}
	cmd->retry_count_for_ocr = 0;
	cmd->pkt = NULL;
	cmd->drv_pkt_time = 0;

	mfiFrameInit2 = (struct mrsas_init_frame2 *)cmd->scsi_io_request;
	con_log(CL_ANN1, (CE_CONT, "[mfi vaddr]%p", (void *)mfiFrameInit2));

	frame_hdr = &cmd->frame->hdr;

	ddi_put8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status,
	    MFI_CMD_STATUS_POLL_MODE);

	flags = ddi_get16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags);

	flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;

	ddi_put16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags, flags);

	con_log(CL_ANN, (CE_CONT,
	    "mrsas_tbolt_ioc_init: SMID:%x\n", cmd->SMID));

	/* Init the MFI Header */
	ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
	    &mfiFrameInit2->cmd, MFI_CMD_OP_INIT);

	con_log(CL_ANN1, (CE_CONT, "[CMD]%x", mfiFrameInit2->cmd));

	ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
	    &mfiFrameInit2->cmd_status,
	    MFI_STAT_INVALID_STATUS);

	con_log(CL_ANN1, (CE_CONT, "[Status]%x", mfiFrameInit2->cmd_status));

	ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
	    &mfiFrameInit2->queue_info_new_phys_addr_lo,
	    mpi2_dma_obj->dma_cookie[0].dmac_address);

	ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
	    &mfiFrameInit2->data_xfer_len,
	    sizeof (Mpi2IOCInitRequest_t));

	con_log(CL_ANN1, (CE_CONT, "[reply q desc addr]%x",
	    (int)init->ReplyDescriptorPostQueueAddress));

	/* fill driver version information */
	fill_up_drv_ver(&drv_ver_info);

	/* allocate the driver version data transfer buffer */
	instance->drv_ver_dma_obj.size = sizeof (drv_ver_info.drv_ver);
	instance->drv_ver_dma_obj.dma_attr = mrsas_generic_dma_attr;
	instance->drv_ver_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
	instance->drv_ver_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
	instance->drv_ver_dma_obj.dma_attr.dma_attr_sgllen = 1;
	instance->drv_ver_dma_obj.dma_attr.dma_attr_align = 1;

	if (mrsas_alloc_dma_obj(instance, &instance->drv_ver_dma_obj,
	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
		dev_err(instance->dip, CE_WARN,
		    "fusion init: Could not allocate driver version buffer.");
		return (DDI_FAILURE);
	}
	/* copy driver version to dma buffer */
	bzero(instance->drv_ver_dma_obj.buffer, sizeof (drv_ver_info.drv_ver));
	ddi_rep_put8(cmd->frame_dma_obj.acc_handle,
	    (uint8_t *)drv_ver_info.drv_ver,
	    (uint8_t *)instance->drv_ver_dma_obj.buffer,
	    sizeof (drv_ver_info.drv_ver), DDI_DEV_AUTOINCR);

	/* send driver version physical address to firmware */
	ddi_put64(cmd->frame_dma_obj.acc_handle, &mfiFrameInit2->driverversion,
	    instance->drv_ver_dma_obj.dma_cookie[0].dmac_address);

	con_log(CL_ANN1, (CE_CONT, "[MPIINIT2 frame Phys addr ]0x%x len = %x",
	    mfiFrameInit2->queue_info_new_phys_addr_lo,
	    (int)sizeof (Mpi2IOCInitRequest_t)));

	con_log(CL_ANN1, (CE_CONT, "[Length]%x", mfiFrameInit2->data_xfer_len));

	con_log(CL_ANN1, (CE_CONT, "[MFI frame Phys Address]%x len = %x",
	    cmd->scsi_io_request_phys_addr,
	    (int)sizeof (struct mrsas_init_frame2)));

	/* disable interrupts before sending INIT2 frame */
	instance->func_ptr->disable_intr(instance);

	req_desc.Words = cmd->scsi_io_request_phys_addr;
	req_desc.MFAIo.RequestFlags =
	    (MPI2_REQ_DESCRIPT_FLAGS_MFA << MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
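	/*
	 * The IOC INIT is submitted as an MFI passthrough (MFA) request:
	 * the descriptor's Words field carries the physical address of the
	 * MFI init frame itself rather than an SMID-based SCSI IO entry.
	 */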

	cmd->request_desc = &req_desc;

	/* issue the init frame */

	mutex_enter(&instance->reg_write_mtx);
	WR_IB_LOW_QPORT((uint32_t)(req_desc.Words), instance);
	WR_IB_HIGH_QPORT((uint32_t)(req_desc.Words >> 32), instance);
	mutex_exit(&instance->reg_write_mtx);

	con_log(CL_ANN1, (CE_CONT, "[cmd = %d] ", frame_hdr->cmd));
	con_log(CL_ANN1, (CE_CONT, "[cmd Status= %x] ",
	    frame_hdr->cmd_status));
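	/*
	 * Poll for completion: the FW overwrites cmd_status (pre-set to
	 * MFI_CMD_STATUS_POLL_MODE above) once it has processed the INIT
	 * frame; give it up to MFI_POLL_TIMEOUT_SECS, one tick per pass.
	 */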
	timeout = drv_usectohz(MFI_POLL_TIMEOUT_SECS * MICROSEC);
	do {
		if (ddi_get8(instance->mpi2_frame_pool_dma_obj.acc_handle,
		    &mfiFrameInit2->cmd_status) != MFI_CMD_STATUS_POLL_MODE)
			break;
		delay(1);
		timeout--;
	} while (timeout > 0);

	if (ddi_get8(instance->mpi2_frame_pool_dma_obj.acc_handle,
	    &mfiFrameInit2->cmd_status) == 0) {
		con_log(CL_ANN, (CE_NOTE, "INIT2 Success"));
	} else {
		con_log(CL_ANN, (CE_WARN, "INIT2 Fail"));
		mrsas_dump_reply_desc(instance);
		goto fail_ioc_init;
	}

	mrsas_dump_reply_desc(instance);

	instance->unroll.verBuff = 1;

	con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_ioc_init: SUCCESSFUL"));

	return (DDI_SUCCESS);

fail_ioc_init:

	(void) mrsas_free_dma_obj(instance, instance->drv_ver_dma_obj);

	return (DDI_FAILURE);
}

int
wait_for_outstanding_poll_io(struct mrsas_instance *instance)
{
	int i;
	uint32_t wait_time = dump_io_wait_time;

	for (i = 0; i < wait_time; i++) {
		/*
		 * Check for outstanding poll-mode commands, excepting
		 * the ldsync command and the aen command.
		 */
		if (instance->fw_outstanding <= 2) {
			break;
		}
		drv_usecwait(10 * MILLISEC);
		/* complete commands from reply queue */
		(void) mr_sas_tbolt_process_outstanding_cmd(instance);
	}
	if (instance->fw_outstanding > 2) {
		return (1);
	}
	return (0);
}

/*
 * scsi_pkt handling
 *
 * Visible to the external world via the transport structure.
 */

int
mrsas_tbolt_tran_start(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	struct mrsas_instance *instance = ADDR2MR(ap);
	struct scsa_cmd *acmd = PKT2CMD(pkt);
	struct mrsas_cmd *cmd = NULL;
	uchar_t cmd_done = 0;

	con_log(CL_DLEVEL1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
	if (instance->deadadapter == 1) {
		dev_err(instance->dip, CE_WARN,
		    "mrsas_tran_start: TBOLT return TRAN_FATAL_ERROR "
		    "for IO, as the HBA doesn't take any more IOs");
		if (pkt) {
			pkt->pkt_reason = CMD_DEV_GONE;
			pkt->pkt_statistics = STAT_DISCON;
		}
		return (TRAN_FATAL_ERROR);
	}
	if (instance->adapterresetinprogress) {
		con_log(CL_ANN, (CE_NOTE, "Reset flag set, "
		    "returning mfi_pkt and setting TRAN_BUSY\n"));
		return (TRAN_BUSY);
	}
	(void) mrsas_tbolt_prepare_pkt(acmd);

	cmd = mrsas_tbolt_build_cmd(instance, ap, pkt, &cmd_done);

	/*
	 * Check if the command was already completed inside
	 * mrsas_tbolt_build_cmd(); in that case cmd_done is set, cmd is
	 * NULL, and an appropriate reason is provided in pkt_reason.
	 */
	if (cmd_done) {
		pkt->pkt_reason = CMD_CMPLT;
		pkt->pkt_scbp[0] = STATUS_GOOD;
		pkt->pkt_state |= STATE_GOT_BUS | STATE_GOT_TARGET
		    | STATE_SENT_CMD;
		if (((pkt->pkt_flags & FLAG_NOINTR) == 0) && pkt->pkt_comp) {
			(*pkt->pkt_comp)(pkt);
		}

		return (TRAN_ACCEPT);
	}

	if (cmd == NULL) {
		return (TRAN_BUSY);
	}

	if ((pkt->pkt_flags & FLAG_NOINTR) == 0) {
		if (instance->fw_outstanding > instance->max_fw_cmds) {
			dev_err(instance->dip, CE_WARN,
			    "Command Queue Full... Returning BUSY");
			DTRACE_PROBE2(tbolt_start_tran_err,
			    uint16_t, instance->fw_outstanding,
			    uint16_t, instance->max_fw_cmds);
			return_raid_msg_pkt(instance, cmd);
			return (TRAN_BUSY);
		}

		/* Synchronize the Cmd frame for the controller */
		(void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle, 0, 0,
		    DDI_DMA_SYNC_FORDEV);

		con_log(CL_ANN, (CE_CONT, "tbolt_issue_cmd: SCSI CDB[0]=0x%x "
		    "cmd->index:0x%x SMID 0x%x\n", pkt->pkt_cdbp[0],
		    cmd->index, cmd->SMID));

		instance->func_ptr->issue_cmd(cmd, instance);
	} else {
		instance->func_ptr->issue_cmd(cmd, instance);
		(void) wait_for_outstanding_poll_io(instance);
		(void) mrsas_common_check(instance, cmd);
		DTRACE_PROBE2(tbolt_start_nointr_done,
		    uint8_t, cmd->frame->hdr.cmd,
		    uint8_t, cmd->frame->hdr.cmd_status);
	}

	return (TRAN_ACCEPT);
}

/*
 * prepare the pkt:
 * the pkt may have been resubmitted or just reused so
 * initialize some fields and do some checks.
 */
static int
mrsas_tbolt_prepare_pkt(struct scsa_cmd *acmd)
{
	struct scsi_pkt *pkt = CMD2PKT(acmd);

	/*
	 * Reinitialize some fields that need it; the packet may
	 * have been resubmitted
	 */
	pkt->pkt_reason = CMD_CMPLT;
	pkt->pkt_state = 0;
	pkt->pkt_statistics = 0;
	pkt->pkt_resid = 0;

	/*
	 * zero status byte.
	 */
	*(pkt->pkt_scbp) = 0;

	return (0);
}


int
mr_sas_tbolt_build_sgl(struct mrsas_instance *instance,
    struct scsa_cmd *acmd,
    struct mrsas_cmd *cmd,
    Mpi2RaidSCSIIORequest_t *scsi_raid_io,
    uint32_t *datalen)
{
	uint32_t MaxSGEs;
	int sg_to_process;
	uint32_t i, j;
	uint32_t numElements, endElement;
	Mpi25IeeeSgeChain64_t *ieeeChainElement = NULL;
	Mpi25IeeeSgeChain64_t *scsi_raid_io_sgl_ieee = NULL;
	ddi_acc_handle_t acc_handle =
	    instance->mpi2_frame_pool_dma_obj.acc_handle;

	con_log(CL_ANN1, (CE_NOTE,
	    "chkpnt: Building Chained SGL :%d", __LINE__));

	/* Calculate SGE size in number of Words (32-bit) */
	/* Clear the datalen before updating it. */
	*datalen = 0;

	MaxSGEs = instance->max_sge_in_main_msg;

	ddi_put16(acc_handle, &scsi_raid_io->SGLFlags,
	    MPI2_SGE_FLAGS_64_BIT_ADDRESSING);

	/* set data transfer flag. */
	if (acmd->cmd_flags & CFLAG_DMASEND) {
		ddi_put32(acc_handle, &scsi_raid_io->Control,
		    MPI2_SCSIIO_CONTROL_WRITE);
	} else {
		ddi_put32(acc_handle, &scsi_raid_io->Control,
		    MPI2_SCSIIO_CONTROL_READ);
	}

	numElements = acmd->cmd_cookiecnt;

	con_log(CL_DLEVEL1, (CE_NOTE, "[SGE Count]:%x", numElements));

	if (numElements > instance->max_num_sge) {
		con_log(CL_ANN, (CE_NOTE,
		    "[Max SGE Count Exceeded]:%x", numElements));
		return (numElements);
	}

	ddi_put8(acc_handle, &scsi_raid_io->RaidContext.numSGE,
	    (uint8_t)numElements);

	/* set end element in main message frame */
	endElement = (numElements <= MaxSGEs) ? numElements : (MaxSGEs - 1);
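	/*
	 * When the cookies do not all fit in the main frame, only
	 * (MaxSGEs - 1) of them go there: the final main-frame slot is
	 * taken by the chain element that points at cmd->sgl, where the
	 * remaining entries are built below.
	 */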

	/* prepare the scatter-gather list for the firmware */
	scsi_raid_io_sgl_ieee =
	    (Mpi25IeeeSgeChain64_t *)&scsi_raid_io->SGL.IeeeChain;

	if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
		Mpi25IeeeSgeChain64_t *sgl_ptr_end = scsi_raid_io_sgl_ieee;
		sgl_ptr_end += instance->max_sge_in_main_msg - 1;

		ddi_put8(acc_handle, &sgl_ptr_end->Flags, 0);
	}

	for (i = 0; i < endElement; i++, scsi_raid_io_sgl_ieee++) {
		ddi_put64(acc_handle, &scsi_raid_io_sgl_ieee->Address,
		    acmd->cmd_dmacookies[i].dmac_laddress);

		ddi_put32(acc_handle, &scsi_raid_io_sgl_ieee->Length,
		    acmd->cmd_dmacookies[i].dmac_size);

		ddi_put8(acc_handle, &scsi_raid_io_sgl_ieee->Flags, 0);

		if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
			if (i == (numElements - 1)) {
				ddi_put8(acc_handle,
				    &scsi_raid_io_sgl_ieee->Flags,
				    IEEE_SGE_FLAGS_END_OF_LIST);
			}
		}

		*datalen += acmd->cmd_dmacookies[i].dmac_size;

#ifdef DEBUG
		con_log(CL_DLEVEL1, (CE_NOTE, "[SGL Address]: %" PRIx64,
		    scsi_raid_io_sgl_ieee->Address));
		con_log(CL_DLEVEL1, (CE_NOTE, "[SGL Length]:%x",
		    scsi_raid_io_sgl_ieee->Length));
		con_log(CL_DLEVEL1, (CE_NOTE, "[SGL Flags]:%x",
		    scsi_raid_io_sgl_ieee->Flags));
#endif
	}

	ddi_put8(acc_handle, &scsi_raid_io->ChainOffset, 0);

	/* check if chained SGL required */
	if (i < numElements) {

		con_log(CL_ANN1, (CE_NOTE, "[Chain Element index]:%x", i));

		if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
			uint16_t ioFlags =
			    ddi_get16(acc_handle, &scsi_raid_io->IoFlags);

			if ((ioFlags &
			    MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) !=
			    MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) {
				ddi_put8(acc_handle, &scsi_raid_io->ChainOffset,
				    (U8)instance->chain_offset_io_req);
			} else {
				ddi_put8(acc_handle,
				    &scsi_raid_io->ChainOffset, 0);
			}
		} else {
			ddi_put8(acc_handle, &scsi_raid_io->ChainOffset,
			    (U8)instance->chain_offset_io_req);
		}

		/* prepare physical chain element */
		ieeeChainElement = scsi_raid_io_sgl_ieee;

		ddi_put8(acc_handle, &ieeeChainElement->NextChainOffset, 0);

		if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
			ddi_put8(acc_handle, &ieeeChainElement->Flags,
			    IEEE_SGE_FLAGS_CHAIN_ELEMENT);
		} else {
			ddi_put8(acc_handle, &ieeeChainElement->Flags,
			    (IEEE_SGE_FLAGS_CHAIN_ELEMENT |
			    MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR));
		}

		ddi_put32(acc_handle, &ieeeChainElement->Length,
		    (sizeof (MPI2_SGE_IO_UNION) * (numElements - i)));

		ddi_put64(acc_handle, &ieeeChainElement->Address,
		    (U64)cmd->sgl_phys_addr);

		sg_to_process = numElements - i;

		con_log(CL_ANN1, (CE_NOTE,
		    "[Additional SGE Count]:%x", sg_to_process));

		/* point to the chained SGL buffer */
		scsi_raid_io_sgl_ieee = (Mpi25IeeeSgeChain64_t *)cmd->sgl;

		/* build rest of the SGL in chained buffer */
		for (j = 0; j < sg_to_process; j++, scsi_raid_io_sgl_ieee++) {
			con_log(CL_DLEVEL3, (CE_NOTE, "[remaining SGL]:%x", i));

			ddi_put64(acc_handle, &scsi_raid_io_sgl_ieee->Address,
			    acmd->cmd_dmacookies[i].dmac_laddress);

			ddi_put32(acc_handle, &scsi_raid_io_sgl_ieee->Length,
			    acmd->cmd_dmacookies[i].dmac_size);

			ddi_put8(acc_handle, &scsi_raid_io_sgl_ieee->Flags, 0);

			if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
				if (i == (numElements - 1)) {
					ddi_put8(acc_handle,
					    &scsi_raid_io_sgl_ieee->Flags,
					    IEEE_SGE_FLAGS_END_OF_LIST);
				}
			}

			*datalen += acmd->cmd_dmacookies[i].dmac_size;

#ifdef DEBUG
			con_log(CL_DLEVEL1, (CE_NOTE,
			    "[SGL Address]: %" PRIx64,
			    scsi_raid_io_sgl_ieee->Address));
			con_log(CL_DLEVEL1, (CE_NOTE,
			    "[SGL Length]:%x", scsi_raid_io_sgl_ieee->Length));
			con_log(CL_DLEVEL1, (CE_NOTE,
			    "[SGL Flags]:%x", scsi_raid_io_sgl_ieee->Flags));
#endif

			i++;
		}
	}

	return (0);
} /* end of mr_sas_tbolt_build_sgl */


/*
 * build_cmd
 */
static struct mrsas_cmd *
mrsas_tbolt_build_cmd(struct mrsas_instance *instance, struct scsi_address *ap,
    struct scsi_pkt *pkt, uchar_t *cmd_done)
{
	uint8_t fp_possible = 0;
	uint32_t index;
	uint32_t lba_count = 0;
	uint32_t start_lba_hi = 0;
	uint32_t start_lba_lo = 0;
	ddi_acc_handle_t acc_handle =
	    instance->mpi2_frame_pool_dma_obj.acc_handle;
	struct mrsas_cmd *cmd = NULL;
	struct scsa_cmd *acmd = PKT2CMD(pkt);
	MRSAS_REQUEST_DESCRIPTOR_UNION *ReqDescUnion;
	Mpi2RaidSCSIIORequest_t *scsi_raid_io;
	uint32_t datalen;
	struct IO_REQUEST_INFO io_info;
	MR_FW_RAID_MAP_ALL *local_map_ptr;
	uint16_t pd_cmd_cdblen;

	con_log(CL_DLEVEL1, (CE_NOTE,
	    "chkpnt: Entered mrsas_tbolt_build_cmd:%d", __LINE__));

	/* find out if this is logical or physical drive command. */
	acmd->islogical = MRDRV_IS_LOGICAL(ap);
	acmd->device_id = MAP_DEVICE_ID(instance, ap);

	*cmd_done = 0;

	/* get the command packet */
	if (!(cmd = get_raid_msg_pkt(instance))) {
		DTRACE_PROBE2(tbolt_build_cmd_mfi_err, uint16_t,
		    instance->fw_outstanding, uint16_t, instance->max_fw_cmds);
		return (NULL);
	}

	index = cmd->index;
	ReqDescUnion = mr_sas_get_request_descriptor(instance, index);
	ReqDescUnion->Words = 0;
	ReqDescUnion->SCSIIO.SMID = cmd->SMID;
	ReqDescUnion->SCSIIO.RequestFlags =
	    (MPI2_REQ_DESCRIPT_FLAGS_LD_IO <<
	    MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);

	cmd->request_desc = ReqDescUnion;
	cmd->pkt = pkt;
	cmd->cmd = acmd;

	DTRACE_PROBE4(tbolt_build_cmd, uint8_t, pkt->pkt_cdbp[0],
	    ulong_t, acmd->cmd_dmacount, ulong_t, acmd->cmd_dma_len,
	    uint16_t, acmd->device_id);

	/* lets get the command directions */
	if (acmd->cmd_flags & CFLAG_DMASEND) {
		if (acmd->cmd_flags & CFLAG_CONSISTENT) {
			(void) ddi_dma_sync(acmd->cmd_dmahandle,
			    acmd->cmd_dma_offset, acmd->cmd_dma_len,
			    DDI_DMA_SYNC_FORDEV);
		}
	} else if (acmd->cmd_flags & ~CFLAG_DMASEND) {
		if (acmd->cmd_flags & CFLAG_CONSISTENT) {
			(void) ddi_dma_sync(acmd->cmd_dmahandle,
			    acmd->cmd_dma_offset, acmd->cmd_dma_len,
			    DDI_DMA_SYNC_FORCPU);
		}
	} else {
		con_log(CL_ANN, (CE_NOTE, "NO DMA"));
	}

	/* get SCSI_IO raid message frame pointer */
	scsi_raid_io = (Mpi2RaidSCSIIORequest_t *)cmd->scsi_io_request;

	/* zero out SCSI_IO raid message frame */
	bzero(scsi_raid_io, sizeof (Mpi2RaidSCSIIORequest_t));

	/*
	 * Set the ldTargetId in the RAID context; MR_BuildRaidContext()
	 * relies on it as well.
	 */
	ddi_put16(acc_handle, &scsi_raid_io->RaidContext.ldTargetId,
	    acmd->device_id);

	/* Copy CDB to scsi_io_request message frame */
	ddi_rep_put8(acc_handle,
	    (uint8_t *)pkt->pkt_cdbp, (uint8_t *)scsi_raid_io->CDB.CDB32,
	    acmd->cmd_cdblen, DDI_DEV_AUTOINCR);

	/*
	 * Just the CDB length, rest of the Flags are zero
	 * This will be modified later.
	 */
	ddi_put16(acc_handle, &scsi_raid_io->IoFlags, acmd->cmd_cdblen);

	pd_cmd_cdblen = acmd->cmd_cdblen;

	if (acmd->islogical) {

		switch (pkt->pkt_cdbp[0]) {
		case SCMD_READ:
		case SCMD_WRITE:
		case SCMD_READ_G1:
		case SCMD_WRITE_G1:
		case SCMD_READ_G4:
		case SCMD_WRITE_G4:
		case SCMD_READ_G5:
		case SCMD_WRITE_G5:

			/* Initialize sense Information */
			if (cmd->sense1 == NULL) {
				con_log(CL_ANN, (CE_NOTE, "tbolt_build_cmd: "
				    "Sense buffer ptr NULL "));
			}
			bzero(cmd->sense1, SENSE_LENGTH);
			con_log(CL_DLEVEL2, (CE_NOTE, "tbolt_build_cmd "
			    "CDB[0] = %x\n", pkt->pkt_cdbp[0]));

			if (acmd->cmd_cdblen == CDB_GROUP0) {
				/* 6-byte cdb */
				lba_count = (uint16_t)(pkt->pkt_cdbp[4]);
				start_lba_lo = ((uint32_t)(pkt->pkt_cdbp[3]) |
				    ((uint32_t)(pkt->pkt_cdbp[2]) << 8) |
				    ((uint32_t)((pkt->pkt_cdbp[1]) & 0x1F)
				    << 16));
			} else if (acmd->cmd_cdblen == CDB_GROUP1) {
				/* 10-byte cdb */
				lba_count =
				    (((uint16_t)(pkt->pkt_cdbp[8])) |
				    ((uint16_t)(pkt->pkt_cdbp[7]) << 8));

				start_lba_lo =
				    (((uint32_t)(pkt->pkt_cdbp[5])) |
				    ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
				    ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
				    ((uint32_t)(pkt->pkt_cdbp[2]) << 24));

			} else if (acmd->cmd_cdblen == CDB_GROUP5) {
				/* 12-byte cdb */
				lba_count = (
				    ((uint32_t)(pkt->pkt_cdbp[9])) |
				    ((uint32_t)(pkt->pkt_cdbp[8]) << 8) |
				    ((uint32_t)(pkt->pkt_cdbp[7]) << 16) |
				    ((uint32_t)(pkt->pkt_cdbp[6]) << 24));

				start_lba_lo =
				    (((uint32_t)(pkt->pkt_cdbp[5])) |
				    ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
				    ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
				    ((uint32_t)(pkt->pkt_cdbp[2]) << 24));

			} else if (acmd->cmd_cdblen == CDB_GROUP4) {
				/* 16-byte cdb */
				lba_count = (
				    ((uint32_t)(pkt->pkt_cdbp[13])) |
				    ((uint32_t)(pkt->pkt_cdbp[12]) << 8) |
				    ((uint32_t)(pkt->pkt_cdbp[11]) << 16) |
				    ((uint32_t)(pkt->pkt_cdbp[10]) << 24));

				start_lba_lo = (
				    ((uint32_t)(pkt->pkt_cdbp[9])) |
				    ((uint32_t)(pkt->pkt_cdbp[8]) << 8) |
				    ((uint32_t)(pkt->pkt_cdbp[7]) << 16) |
				    ((uint32_t)(pkt->pkt_cdbp[6]) << 24));

				start_lba_hi = (
				    ((uint32_t)(pkt->pkt_cdbp[5])) |
				    ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
				    ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
				    ((uint32_t)(pkt->pkt_cdbp[2]) << 24));
			}

			if (instance->tbolt &&
			    ((lba_count * 512) > mrsas_tbolt_max_cap_maxxfer)) {
				dev_err(instance->dip, CE_WARN,
				    "IO SECTOR COUNT exceeds "
				    "controller limit 0x%x sectors",
				    lba_count);
			}

			bzero(&io_info, sizeof (struct IO_REQUEST_INFO));
			io_info.ldStartBlock = ((uint64_t)start_lba_hi << 32) |
			    start_lba_lo;
			io_info.numBlocks = lba_count;
			io_info.ldTgtId = acmd->device_id;

			if (acmd->cmd_flags & CFLAG_DMASEND)
				io_info.isRead = 0;
			else
				io_info.isRead = 1;

			/* Acquire SYNC MAP UPDATE lock */
			mutex_enter(&instance->sync_map_mtx);

			local_map_ptr =
			    instance->ld_map[(instance->map_id & 1)];
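			/*
			 * The LD map is double buffered (see the two
			 * ld_map_obj allocations): map_id's low bit selects
			 * whichever copy the last map sync made current, so
			 * that I/O does not read a map mid-update.
			 */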

			if ((MR_TargetIdToLdGet(
			    acmd->device_id, local_map_ptr) >=
			    MAX_LOGICAL_DRIVES) || !instance->fast_path_io) {
				dev_err(instance->dip, CE_NOTE,
				    "Fast Path NOT Possible, "
				    "targetId >= MAX_LOGICAL_DRIVES || "
				    "!instance->fast_path_io");
				fp_possible = 0;
				/* Set region lock flags to BYPASS */
				/* io_request->RaidContext.regLockFlags = 0; */
				ddi_put8(acc_handle,
				    &scsi_raid_io->RaidContext.regLockFlags, 0);
			} else {
				if (MR_BuildRaidContext(instance, &io_info,
				    &scsi_raid_io->RaidContext, local_map_ptr))
					fp_possible = io_info.fpOkForIo;
			}

			if (!enable_fp)
				fp_possible = 0;

			con_log(CL_ANN1, (CE_NOTE, "enable_fp %d "
			    "instance->fast_path_io %d fp_possible %d",
			    enable_fp, instance->fast_path_io, fp_possible));

			if (fp_possible) {

				/* Check for DIF enabled LD */
				if (MR_CheckDIF(acmd->device_id,
				    local_map_ptr)) {
					/* Prepare 32 Byte CDB for DIF
					 * capable Disk */
					mrsas_tbolt_prepare_cdb(instance,
					    scsi_raid_io->CDB.CDB32,
					    &io_info, scsi_raid_io,
					    start_lba_lo);
				} else {
					mrsas_tbolt_set_pd_lba(
					    scsi_raid_io->CDB.CDB32,
					    (uint8_t *)&pd_cmd_cdblen,
					    io_info.pdBlock,
					    io_info.numBlocks);
					ddi_put16(acc_handle,
					    &scsi_raid_io->IoFlags,
					    pd_cmd_cdblen);
				}

				ddi_put8(acc_handle, &scsi_raid_io->Function,
				    MPI2_FUNCTION_SCSI_IO_REQUEST);

				ReqDescUnion->SCSIIO.RequestFlags =
				    (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
				    MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);

				if (instance->device_id ==
				    PCI_DEVICE_ID_LSI_INVADER) {
					uint8_t regLockFlags =
					    ddi_get8(acc_handle,
					    &scsi_raid_io->RaidContext.
					    regLockFlags);
					uint16_t IoFlags = ddi_get16(acc_handle,
					    &scsi_raid_io->IoFlags);

					if (regLockFlags == REGION_TYPE_UNUSED)
						ReqDescUnion->SCSIIO.
						    RequestFlags =
						    (MPI2_REQ_DESCRIPT_FLAGS_NO_LOCK <<
						    MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);

					IoFlags |=
					    MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH;
					regLockFlags |=
					    (MR_RL_FLAGS_GRANT_DESTINATION_CUDA |
					    MR_RL_FLAGS_SEQ_NUM_ENABLE);

					ddi_put8(acc_handle,
					    &scsi_raid_io->ChainOffset, 0);
					ddi_put8(acc_handle,
					    &scsi_raid_io->RaidContext.nsegType,
					    ((0x01 << MPI2_NSEG_FLAGS_SHIFT) |
					    MPI2_TYPE_CUDA));
					ddi_put8(acc_handle,
					    &scsi_raid_io->RaidContext.
					    regLockFlags, regLockFlags);
					ddi_put16(acc_handle,
					    &scsi_raid_io->IoFlags, IoFlags);
				}

				if ((instance->load_balance_info[
				    acmd->device_id].loadBalanceFlag) &&
				    (io_info.isRead)) {
					io_info.devHandle =
					    get_updated_dev_handle(&instance->
					    load_balance_info[acmd->device_id],
					    &io_info);
					cmd->load_balance_flag |=
					    MEGASAS_LOAD_BALANCE_FLAG;
				} else {
					cmd->load_balance_flag &=
					    ~MEGASAS_LOAD_BALANCE_FLAG;
				}

				ReqDescUnion->SCSIIO.DevHandle =
				    io_info.devHandle;
				ddi_put16(acc_handle, &scsi_raid_io->DevHandle,
				    io_info.devHandle);

			} else {	/* FP Not Possible */

				ddi_put8(acc_handle, &scsi_raid_io->Function,
				    MPI2_FUNCTION_LD_IO_REQUEST);

				ddi_put16(acc_handle,
				    &scsi_raid_io->DevHandle, acmd->device_id);

				ReqDescUnion->SCSIIO.RequestFlags =
				    (MPI2_REQ_DESCRIPT_FLAGS_LD_IO <<
				    MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);

				ddi_put16(acc_handle,
				    &scsi_raid_io->RaidContext.timeoutValue,
				    local_map_ptr->raidMap.fpPdIoTimeoutSec);

				if (instance->device_id ==
				    PCI_DEVICE_ID_LSI_INVADER) {
					uint8_t regLockFlags =
					    ddi_get8(acc_handle,
					    &scsi_raid_io->RaidContext.
					    regLockFlags);

					if (regLockFlags ==
					    REGION_TYPE_UNUSED) {
						ReqDescUnion->SCSIIO.
						    RequestFlags =
						    (MPI2_REQ_DESCRIPT_FLAGS_NO_LOCK <<
						    MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
					}

					regLockFlags |=
					    (MR_RL_FLAGS_GRANT_DESTINATION_CPU0 |
					    MR_RL_FLAGS_SEQ_NUM_ENABLE);

					ddi_put8(acc_handle,
					    &scsi_raid_io->RaidContext.nsegType,
					    ((0x01 << MPI2_NSEG_FLAGS_SHIFT) |
					    MPI2_TYPE_CUDA));
					ddi_put8(acc_handle,
					    &scsi_raid_io->RaidContext.
					    regLockFlags, regLockFlags);
				}
			}	/* Not FP */

			/* Release SYNC MAP UPDATE lock */
			mutex_exit(&instance->sync_map_mtx);

			break;

		case 0x35: { /* SCMD_SYNCHRONIZE_CACHE */
			return_raid_msg_pkt(instance, cmd);
			*cmd_done = 1;
			return (NULL);
		}

		case SCMD_MODE_SENSE:
		case SCMD_MODE_SENSE_G1: {
			union scsi_cdb *cdbp;
			uint16_t page_code;

			cdbp = (void *)pkt->pkt_cdbp;
			page_code = (uint16_t)cdbp->cdb_un.sg.scsi[0];
			switch (page_code) {
			case 0x3:
			case 0x4:
				(void) mrsas_mode_sense_build(pkt);
				return_raid_msg_pkt(instance, cmd);
				*cmd_done = 1;
				return (NULL);
			}
			return (cmd);
		}

		default:
			/* Pass-through command to logical drive */
			ddi_put8(acc_handle, &scsi_raid_io->Function,
			    MPI2_FUNCTION_LD_IO_REQUEST);
			ddi_put8(acc_handle, &scsi_raid_io->LUN[1], acmd->lun);
			ddi_put16(acc_handle, &scsi_raid_io->DevHandle,
			    acmd->device_id);
			ReqDescUnion->SCSIIO.RequestFlags =
			    (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
			    MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
			break;
		}
1802 } else { /* Physical */
1803 #ifdef PDSUPPORT
1804 /* Pass-through command to physical drive */
1805
1806 /* Acquire SYNC MAP UPDATE lock */
1807 mutex_enter(&instance->sync_map_mtx);
1808
1809 local_map_ptr = instance->ld_map[instance->map_id & 1];
1810
1811 ddi_put8(acc_handle, &scsi_raid_io->Function,
1812 MPI2_FUNCTION_SCSI_IO_REQUEST);
1813
1814 ReqDescUnion->SCSIIO.RequestFlags =
1815 (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
1816 MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1817
1818 ddi_put16(acc_handle, &scsi_raid_io->DevHandle,
1819 local_map_ptr->raidMap.
1820 devHndlInfo[acmd->device_id].curDevHdl);
1821
1822 /* Set regLockFlags to REGION_TYPE_BYPASS */
1823 ddi_put8(acc_handle,
1824 &scsi_raid_io->RaidContext.regLockFlags, 0);
1825 ddi_put64(acc_handle,
1826 &scsi_raid_io->RaidContext.regLockRowLBA, 0);
1827 ddi_put32(acc_handle,
1828 &scsi_raid_io->RaidContext.regLockLength, 0);
1829 ddi_put8(acc_handle,
1830 &scsi_raid_io->RaidContext.RAIDFlags,
1831 MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD <<
1832 MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT);
1833 ddi_put16(acc_handle,
1834 &scsi_raid_io->RaidContext.timeoutValue,
1835 local_map_ptr->raidMap.fpPdIoTimeoutSec);
1836 ddi_put16(acc_handle,
1837 &scsi_raid_io->RaidContext.ldTargetId,
1838 acmd->device_id);
1839 ddi_put8(acc_handle,
1840 &scsi_raid_io->LUN[1], acmd->lun);
1841
1842 if (instance->fast_path_io &&
1843 instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
1844 uint16_t IoFlags = ddi_get16(acc_handle,
1845 &scsi_raid_io->IoFlags);
1846 IoFlags |= MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH;
1847 ddi_put16(acc_handle, &scsi_raid_io->IoFlags, IoFlags);
1848 }
1849 ddi_put16(acc_handle, &ReqDescUnion->SCSIIO.DevHandle,
1850 local_map_ptr->raidMap.
1851 devHndlInfo[acmd->device_id].curDevHdl);
1852
1853 /* Release SYNC MAP UPDATE lock */
1854 mutex_exit(&instance->sync_map_mtx);
1855 #else
1856 /* If no PD support, return here. */
1857 return (cmd);
1858 #endif
1859 }
1860
1861 /* Set sense buffer physical address/length in scsi_io_request. */
1862 ddi_put32(acc_handle, &scsi_raid_io->SenseBufferLowAddress,
1863 cmd->sense_phys_addr1);
1864 ddi_put8(acc_handle, &scsi_raid_io->SenseBufferLength, SENSE_LENGTH);
1865
1866 /* Construct SGL */
1867 ddi_put8(acc_handle, &scsi_raid_io->SGLOffset0,
1868 offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4);
1869
1870 (void) mr_sas_tbolt_build_sgl(instance, acmd, cmd,
1871 scsi_raid_io, &datalen);
1872
1873 ddi_put32(acc_handle, &scsi_raid_io->DataLength, datalen);
1874
1875 con_log(CL_ANN, (CE_CONT,
1876 "tbolt_build_cmd CDB[0] =%x, TargetID =%x\n",
1877 pkt->pkt_cdbp[0], acmd->device_id));
1878 con_log(CL_DLEVEL1, (CE_CONT,
1879 "data length = %x\n",
1880 scsi_raid_io->DataLength));
1881 con_log(CL_DLEVEL1, (CE_CONT,
1882 "cdb length = %x\n",
1883 acmd->cmd_cdblen));
1884
1885 return (cmd);
1886 }
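/*
 * For reference, a minimal sketch (not driver code) of how a request
 * descriptor is composed before being handed to tbolt_issue_cmd() below.
 * The 64-bit descriptor packs the request type, the SMID and, for
 * fast-path I/O, the target device handle:
 *
 *	MRSAS_REQUEST_DESCRIPTOR_UNION desc;
 *
 *	desc.Words = 0;
 *	desc.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_LD_IO <<
 *	    MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT;
 *	desc.SCSIIO.SMID = cmd->SMID;
 *	desc.SCSIIO.DevHandle = io_info.devHandle;
 */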
1887
1888 uint32_t
1889 tbolt_read_fw_status_reg(struct mrsas_instance *instance)
1890 {
1891 return ((uint32_t)RD_OB_SCRATCH_PAD_0(instance));
1892 }
1893
1894 void
1895 tbolt_issue_cmd(struct mrsas_cmd *cmd, struct mrsas_instance *instance)
1896 {
1897 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc = cmd->request_desc;
1898 atomic_inc_16(&instance->fw_outstanding);
1899
1900 struct scsi_pkt *pkt;
1901
1902 con_log(CL_ANN1,
1903 (CE_NOTE, "tbolt_issue_cmd: cmd->[SMID]=0x%X", cmd->SMID));
1904
1905 con_log(CL_DLEVEL1, (CE_CONT,
1906 " [req desc Words] %" PRIx64 " \n", req_desc->Words));
1907 con_log(CL_DLEVEL1, (CE_CONT,
1908 " [req desc low part] %x \n",
1909 (uint_t)(req_desc->Words & 0xffffffff)));
1910 con_log(CL_DLEVEL1, (CE_CONT,
1911 " [req desc high part] %x \n", (uint_t)(req_desc->Words >> 32)));
1912 pkt = cmd->pkt;
1913
1914 if (pkt) {
1915 con_log(CL_ANN1, (CE_CONT, "%llx: TBOLT issue_cmd: "
1916 "ISSUED CMD TO FW: cmd: %p instance: %p "
1917 "pkt: %p pkt_time: %x\n",
1918 gethrtime(), (void *)cmd, (void *)instance,
1919 (void *)pkt, cmd->drv_pkt_time));
1920 if (instance->adapterresetinprogress) {
1921 cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
1922 con_log(CL_ANN, (CE_NOTE,
1923 "TBOLT Reset the scsi_pkt timer"));
1924 } else {
1925 push_pending_mfi_pkt(instance, cmd);
1926 }
1927
1928 } else {
1929 con_log(CL_ANN1, (CE_CONT, "%llx: TBOLT issue_cmd: "
1930 "ISSUED CMD TO FW: cmd: %p, instance: %p "
1931 "(NO PKT)\n", gethrtime(), (void *)cmd, (void *)instance));
1932 }
1933
1934 /* Issue the command to the FW */
1935 mutex_enter(&instance->reg_write_mtx);
1936 WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
1937 WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
1938 mutex_exit(&instance->reg_write_mtx);
1939 }
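/*
 * Note on tbolt_issue_cmd() above: the firmware expects the 64-bit
 * request descriptor to arrive as an indivisible unit, but the inbound
 * queue port is exposed as two 32-bit registers.  reg_write_mtx
 * serializes the low/high word pair so that descriptors issued from
 * concurrent threads cannot interleave on the queue port.
 */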
1940
1941 /*
1942 * issue_cmd_in_sync_mode
1943 */
1944 int
1945 tbolt_issue_cmd_in_sync_mode(struct mrsas_instance *instance,
1946 struct mrsas_cmd *cmd)
1947 {
1948 int i;
1949 uint32_t msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC;
1950 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc = cmd->request_desc;
1951
1952 struct mrsas_header *hdr;
1953 hdr = (struct mrsas_header *)&cmd->frame->hdr;
1954
1955 con_log(CL_ANN,
1956 (CE_NOTE, "tbolt_issue_cmd_in_sync_mode: cmd->[SMID]=0x%X",
1957 cmd->SMID));
1958
1959
1960 if (instance->adapterresetinprogress) {
1961 cmd->drv_pkt_time = ddi_get16(
1962 cmd->frame_dma_obj.acc_handle, &hdr->timeout);
1963 if (cmd->drv_pkt_time < debug_timeout_g)
1964 cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
1965 con_log(CL_ANN, (CE_NOTE, "tbolt_issue_cmd_in_sync_mode:"
1966 "RESET-IN-PROGRESS, issue cmd & return."));
1967
1968 mutex_enter(&instance->reg_write_mtx);
1969 WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
1970 WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
1971 mutex_exit(&instance->reg_write_mtx);
1972
1973 return (DDI_SUCCESS);
1974 } else {
1975 con_log(CL_ANN1, (CE_NOTE,
1976 "tbolt_issue_cmd_in_sync_mode: pushing the pkt"));
1977 push_pending_mfi_pkt(instance, cmd);
1978 }
1979
1980 con_log(CL_DLEVEL2, (CE_NOTE,
1981 "HighQport offset :%p",
1982 (void *)((uintptr_t)(instance)->regmap + IB_HIGH_QPORT)));
1983 con_log(CL_DLEVEL2, (CE_NOTE,
1984 "LowQport offset :%p",
1985 (void *)((uintptr_t)(instance)->regmap + IB_LOW_QPORT)));
1986
1987 cmd->sync_cmd = MRSAS_TRUE;
1988 cmd->cmd_status = ENODATA;
1989
1990
1991 mutex_enter(&instance->reg_write_mtx);
1992 WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
1993 WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
1994 mutex_exit(&instance->reg_write_mtx);
1995
1996 con_log(CL_ANN1, (CE_NOTE,
1997 " req desc high part %x", (uint_t)(req_desc->Words >> 32)));
1998 con_log(CL_ANN1, (CE_NOTE, " req desc low part %x",
1999 (uint_t)(req_desc->Words & 0xffffffff)));
2000
2001 mutex_enter(&instance->int_cmd_mtx);
2002 for (i = 0; i < msecs && (cmd->cmd_status == ENODATA); i++) {
2003 cv_wait(&instance->int_cmd_cv, &instance->int_cmd_mtx);
2004 }
2005 mutex_exit(&instance->int_cmd_mtx);
2006
2007
2008 if (i < (msecs - 1)) {
2009 return (DDI_SUCCESS);
2010 } else {
2011 return (DDI_FAILURE);
2012 }
2013 }
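/*
 * A caveat on the wait loop above: cv_wait() takes no timeout, so "i"
 * counts wakeups rather than milliseconds.  A strictly time-bounded
 * variant would look like this sketch (illustrative only; it uses the
 * same mutex/condvar pair and breaks out when the deadline passes):
 *
 *	clock_t deadline = ddi_get_lbolt() +
 *	    drv_usectohz(MFI_POLL_TIMEOUT_SECS * MICROSEC);
 *
 *	mutex_enter(&instance->int_cmd_mtx);
 *	while (cmd->cmd_status == ENODATA) {
 *		if (cv_timedwait(&instance->int_cmd_cv,
 *		    &instance->int_cmd_mtx, deadline) == -1)
 *			break;
 *	}
 *	mutex_exit(&instance->int_cmd_mtx);
 */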
2014
2015 /*
2016 * issue_cmd_in_poll_mode
2017 */
2018 int
2019 tbolt_issue_cmd_in_poll_mode(struct mrsas_instance *instance,
2020 struct mrsas_cmd *cmd)
2021 {
2022 int i;
2023 uint16_t flags;
2024 uint32_t msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC;
2025 struct mrsas_header *frame_hdr;
2026
2027 con_log(CL_ANN,
2028 (CE_NOTE, "tbolt_issue_cmd_in_poll_mode: cmd->[SMID]=0x%X",
2029 cmd->SMID));
2030
2031 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc = cmd->request_desc;
2032
2033 frame_hdr = (struct mrsas_header *)&cmd->frame->hdr;
2034 ddi_put8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status,
2035 MFI_CMD_STATUS_POLL_MODE);
2036 flags = ddi_get16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags);
2037 flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
2038 ddi_put16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags, flags);
2039
2040 con_log(CL_ANN1, (CE_NOTE, " req desc low part %x",
2041 (uint_t)(req_desc->Words & 0xffffffff)));
2042 con_log(CL_ANN1, (CE_NOTE,
2043 " req desc high part %x", (uint_t)(req_desc->Words >> 32)));
2044
2045 /* issue the frame using inbound queue port */
2046 mutex_enter(&instance->reg_write_mtx);
2047 WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
2048 WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
2049 mutex_exit(&instance->reg_write_mtx);
2050
2051 for (i = 0; i < msecs && (
2052 ddi_get8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status)
2053 == MFI_CMD_STATUS_POLL_MODE); i++) {
2054 /* wait for cmd_status to change from 0xFF */
2055 drv_usecwait(MILLISEC); /* wait for 1000 usecs */
2056 }
2057
2058 DTRACE_PROBE1(tbolt_complete_poll_cmd, uint8_t, i);
2059
2060 if (ddi_get8(cmd->frame_dma_obj.acc_handle,
2061 &frame_hdr->cmd_status) == MFI_CMD_STATUS_POLL_MODE) {
2062 con_log(CL_ANN1, (CE_NOTE,
2063 " cmd failed %" PRIx64, (req_desc->Words)));
2064 return (DDI_FAILURE);
2065 }
2066
2067 return (DDI_SUCCESS);
2068 }
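/*
 * Usage sketch for the poll-mode path above (illustrative): internal
 * DCMDs that must finish while interrupts are unavailable (attach, OCR)
 * are issued this way.  The frame's cmd_status is pre-set to
 * MFI_CMD_STATUS_POLL_MODE (0xFF) and the FW overwrites it on
 * completion, which the loop above detects:
 *
 *	mr_sas_tbolt_build_mfi_cmd(instance, cmd);
 *	if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd))
 *		dev_err(instance->dip, CE_WARN, "polled DCMD timed out");
 */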
2069
2070 void
2071 tbolt_enable_intr(struct mrsas_instance *instance)
2072 {
2073 /* TODO: For Thunderbolt/Invader also clear intr on enable */
2074 /* writel(~0, &regs->outbound_intr_status); */
2075 /* readl(&regs->outbound_intr_status); */
2076
2077 WR_OB_INTR_MASK(~(MFI_FUSION_ENABLE_INTERRUPT_MASK), instance);
2078
2079 /* dummy read to force PCI flush */
2080 (void) RD_OB_INTR_MASK(instance);
2081
2082 }
2083
2084 void
2085 tbolt_disable_intr(struct mrsas_instance *instance)
2086 {
2087 uint32_t mask = 0xFFFFFFFF;
2088
2089 WR_OB_INTR_MASK(mask, instance);
2090
2091 /* Dummy readl to force pci flush */
2092
2093 (void) RD_OB_INTR_MASK(instance);
2094 }
2095
2096
2097 int
2098 tbolt_intr_ack(struct mrsas_instance *instance)
2099 {
2100 uint32_t status;
2101
2102 /* check if it is our interrupt */
2103 status = RD_OB_INTR_STATUS(instance);
2104 con_log(CL_ANN1, (CE_NOTE,
2105 "chkpnt: Entered tbolt_intr_ack status = %d", status));
2106
2107 if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK)) {
2108 return (DDI_INTR_UNCLAIMED);
2109 }
2110
2111 if (mrsas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) {
2112 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
2113 return (DDI_INTR_UNCLAIMED);
2114 }
2115
2116 if ((status & 1) || (status & MFI_FUSION_ENABLE_INTERRUPT_MASK)) {
2117 /* clear the interrupt by writing back the same value */
2118 WR_OB_INTR_STATUS(status, instance);
2119 /* dummy READ */
2120 (void) RD_OB_INTR_STATUS(instance);
2121 }
2122 return (DDI_INTR_CLAIMED);
2123 }
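/*
 * The acknowledge above relies on the write-one-to-clear convention:
 * writing the value just read back to the outbound interrupt status
 * register clears exactly the bits that were asserted, and the dummy
 * read flushes the posted write across PCI before the ISR proceeds.
 */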
2124
2125 /*
2126 * get_raid_msg_pkt : Get a command from the free pool
2127 * After successful allocation, the caller of this routine
2128 * must clear the frame buffer (memset to zero) before
2129 * using the packet further.
2130 *
2131 * ***** Note *****
2132 * After clearing the frame buffer, the context id of the
2133 * frame buffer SHOULD be restored.
2134 */
2135
2136 struct mrsas_cmd *
2137 get_raid_msg_pkt(struct mrsas_instance *instance)
2138 {
2139 mlist_t *head = &instance->cmd_pool_list;
2140 struct mrsas_cmd *cmd = NULL;
2141
2142 mutex_enter(&instance->cmd_pool_mtx);
2143 ASSERT(mutex_owned(&instance->cmd_pool_mtx));
2144
2145
2146 if (!mlist_empty(head)) {
2147 cmd = mlist_entry(head->next, struct mrsas_cmd, list);
2148 mlist_del_init(head->next);
2149 }
2150 if (cmd != NULL) {
2151 cmd->pkt = NULL;
2152 cmd->retry_count_for_ocr = 0;
2153 cmd->drv_pkt_time = 0;
2154 }
2155 mutex_exit(&instance->cmd_pool_mtx);
2156
2157 if (cmd != NULL)
2158 bzero(cmd->scsi_io_request,
2159 sizeof (Mpi2RaidSCSIIORequest_t));
2160 return (cmd);
2161 }
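/*
 * Caller pattern implied by the note above (illustrative sketch): the
 * context id lives inside the frame itself, so after zeroing the frame
 * it must be stored again before the command is used:
 *
 *	struct mrsas_cmd *cmd = get_raid_msg_pkt(instance);
 *
 *	if (cmd != NULL) {
 *		bzero((char *)&cmd->frame[0], sizeof (union mrsas_frame));
 *		ddi_put32(cmd->frame_dma_obj.acc_handle,
 *		    &cmd->frame->hdr.context, cmd->index);
 *	}
 */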
2162
2163 struct mrsas_cmd *
2164 get_raid_msg_mfi_pkt(struct mrsas_instance *instance)
2165 {
2166 mlist_t *head = &instance->cmd_app_pool_list;
2167 struct mrsas_cmd *cmd = NULL;
2168
2169 mutex_enter(&instance->cmd_app_pool_mtx);
2170 ASSERT(mutex_owned(&instance->cmd_app_pool_mtx));
2171
2172 if (!mlist_empty(head)) {
2173 cmd = mlist_entry(head->next, struct mrsas_cmd, list);
2174 mlist_del_init(head->next);
2175 }
2176 if (cmd != NULL) {
2177 cmd->retry_count_for_ocr = 0;
2178 cmd->drv_pkt_time = 0;
2179 cmd->pkt = NULL;
2180 cmd->request_desc = NULL;
2181
2182 }
2183
2184 mutex_exit(&instance->cmd_app_pool_mtx);
2185
2186 if (cmd != NULL) {
2187 bzero(cmd->scsi_io_request,
2188 sizeof (Mpi2RaidSCSIIORequest_t));
2189 }
2190
2191 return (cmd);
2192 }
2193
2194 /*
2195 * return_raid_msg_pkt : Return a cmd to free command pool
2196 */
2197 void
2198 return_raid_msg_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
2199 {
2200 mutex_enter(&instance->cmd_pool_mtx);
2201 ASSERT(mutex_owned(&instance->cmd_pool_mtx));
2202
2203
2204 mlist_add_tail(&cmd->list, &instance->cmd_pool_list);
2205
2206 mutex_exit(&instance->cmd_pool_mtx);
2207 }
2208
2209 void
2210 return_raid_msg_mfi_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
2211 {
2212 mutex_enter(&instance->cmd_app_pool_mtx);
2213 ASSERT(mutex_owned(&instance->cmd_app_pool_mtx));
2214
2215 mlist_add_tail(&cmd->list, &instance->cmd_app_pool_list);
2216
2217 mutex_exit(&instance->cmd_app_pool_mtx);
2218 }
2219
2220
2221 void
2222 mr_sas_tbolt_build_mfi_cmd(struct mrsas_instance *instance,
2223 struct mrsas_cmd *cmd)
2224 {
2225 Mpi2RaidSCSIIORequest_t *scsi_raid_io;
2226 Mpi25IeeeSgeChain64_t *scsi_raid_io_sgl_ieee;
2227 MRSAS_REQUEST_DESCRIPTOR_UNION *ReqDescUnion;
2228 uint32_t index;
2229 ddi_acc_handle_t acc_handle =
2230 instance->mpi2_frame_pool_dma_obj.acc_handle;
2231
2232 if (!instance->tbolt) {
2233 con_log(CL_ANN, (CE_NOTE, "Not MFA enabled."));
2234 return;
2235 }
2236
2237 index = cmd->index;
2238
2239 ReqDescUnion = mr_sas_get_request_descriptor(instance, index);
2240
2241 if (!ReqDescUnion) {
2242 con_log(CL_ANN1, (CE_NOTE, "[NULL REQDESC]"));
2243 return;
2244 }
2245
2246 con_log(CL_ANN1, (CE_NOTE, "[SMID]%x", cmd->SMID));
2247
2248 ReqDescUnion->Words = 0;
2249
2250 ReqDescUnion->SCSIIO.RequestFlags =
2251 (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
2252 MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
2253
2254 ReqDescUnion->SCSIIO.SMID = cmd->SMID;
2255
2256 cmd->request_desc = ReqDescUnion;
2257
2258 /* get raid message frame pointer */
2259 scsi_raid_io = (Mpi2RaidSCSIIORequest_t *)cmd->scsi_io_request;
2260
2261 if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
2262 Mpi25IeeeSgeChain64_t *sgl_ptr_end = (Mpi25IeeeSgeChain64_t *)
2263 &scsi_raid_io->SGL.IeeeChain;
2264 sgl_ptr_end += instance->max_sge_in_main_msg - 1;
2265 ddi_put8(acc_handle, &sgl_ptr_end->Flags, 0);
2266 }
2267
2268 ddi_put8(acc_handle, &scsi_raid_io->Function,
2269 MPI2_FUNCTION_PASSTHRU_IO_REQUEST);
2270
2271 ddi_put8(acc_handle, &scsi_raid_io->SGLOffset0,
2272 offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4);
2273
2274 ddi_put8(acc_handle, &scsi_raid_io->ChainOffset,
2275 (U8)offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 16);
2276
2277 ddi_put32(acc_handle, &scsi_raid_io->SenseBufferLowAddress,
2278 cmd->sense_phys_addr1);
2279
2280
2281 scsi_raid_io_sgl_ieee =
2282 (Mpi25IeeeSgeChain64_t *)&scsi_raid_io->SGL.IeeeChain;
2283
2284 ddi_put64(acc_handle, &scsi_raid_io_sgl_ieee->Address,
2285 (U64)cmd->frame_phys_addr);
2286
2287 ddi_put8(acc_handle,
2288 &scsi_raid_io_sgl_ieee->Flags, (IEEE_SGE_FLAGS_CHAIN_ELEMENT |
2289 MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR));
2290 /* LSI hardcoded 1024 here instead of MEGASAS_MAX_SZ_CHAIN_FRAME. */
2291 ddi_put32(acc_handle, &scsi_raid_io_sgl_ieee->Length, 1024);
2292
2293 con_log(CL_ANN1, (CE_NOTE,
2294 "[MFI CMD PHY ADDRESS]:%" PRIx64,
2295 scsi_raid_io_sgl_ieee->Address));
2296 con_log(CL_ANN1, (CE_NOTE,
2297 "[SGL Length]:%x", scsi_raid_io_sgl_ieee->Length));
2298 con_log(CL_ANN1, (CE_NOTE, "[SGL Flags]:%x",
2299 scsi_raid_io_sgl_ieee->Flags));
2300 }
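/*
 * In effect, mr_sas_tbolt_build_mfi_cmd() wraps a legacy MFI frame in
 * an MPI2 pass-through request: the single IEEE chain SGE points at the
 * DMA address of the MFI frame, so the FW fetches the original frame
 * through the fusion queue.  Rough shape of the result (illustrative):
 *
 *	Function = MPI2_FUNCTION_PASSTHRU_IO_REQUEST
 *	SGL[0]   = { Address = cmd->frame_phys_addr,
 *	             Length  = 1024,
 *	             Flags   = IEEE_SGE_FLAGS_CHAIN_ELEMENT |
 *	                       MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR }
 */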
2301
2302
2303 void
2304 tbolt_complete_cmd(struct mrsas_instance *instance,
2305 struct mrsas_cmd *cmd)
2306 {
2307 uint8_t status;
2308 uint8_t extStatus;
2309 uint8_t function;
2310 uint8_t arm;
2311 struct scsa_cmd *acmd;
2312 struct scsi_pkt *pkt;
2313 struct scsi_arq_status *arqstat;
2314 Mpi2RaidSCSIIORequest_t *scsi_raid_io;
2315 LD_LOAD_BALANCE_INFO *lbinfo;
2316 ddi_acc_handle_t acc_handle =
2317 instance->mpi2_frame_pool_dma_obj.acc_handle;
2318
2319 scsi_raid_io = (Mpi2RaidSCSIIORequest_t *)cmd->scsi_io_request;
2320
2321 status = ddi_get8(acc_handle, &scsi_raid_io->RaidContext.status);
2322 extStatus = ddi_get8(acc_handle, &scsi_raid_io->RaidContext.extStatus);
2323
2324 con_log(CL_DLEVEL3, (CE_NOTE, "status %x", status));
2325 con_log(CL_DLEVEL3, (CE_NOTE, "extStatus %x", extStatus));
2326
2327 if (status != MFI_STAT_OK) {
2328 con_log(CL_ANN, (CE_WARN,
2329 "IO Cmd Failed SMID %x", cmd->SMID));
2330 } else {
2331 con_log(CL_ANN, (CE_NOTE,
2332 "IO Cmd Success SMID %x", cmd->SMID));
2333 }
2334
2335 /* regular commands */
2336
2337 function = ddi_get8(acc_handle, &scsi_raid_io->Function);
2338 DTRACE_PROBE3(tbolt_complete_cmd, uint8_t, function,
2339 uint8_t, status, uint8_t, extStatus);
2340
2341 switch (function) {
2342
2343 case MPI2_FUNCTION_SCSI_IO_REQUEST : /* Fast Path IO. */
2344 acmd = (struct scsa_cmd *)cmd->cmd;
2345 lbinfo = &instance->load_balance_info[acmd->device_id];
2346
2347 if (cmd->load_balance_flag & MEGASAS_LOAD_BALANCE_FLAG) {
2348 arm = lbinfo->raid1DevHandle[0] ==
2349 scsi_raid_io->DevHandle ? 0 : 1;
2350
2351 lbinfo->scsi_pending_cmds[arm]--;
2352 cmd->load_balance_flag &= ~MEGASAS_LOAD_BALANCE_FLAG;
2353 }
2354 con_log(CL_DLEVEL3, (CE_NOTE,
2355 "FastPath IO Completion Success "));
2356 /* FALLTHRU */
2357
2358 case MPI2_FUNCTION_LD_IO_REQUEST : { /* Regular Path IO. */
2359 acmd = (struct scsa_cmd *)cmd->cmd;
2360 pkt = (struct scsi_pkt *)CMD2PKT(acmd);
2361
2362 if (acmd->cmd_flags & CFLAG_DMAVALID) {
2363 if (acmd->cmd_flags & CFLAG_CONSISTENT) {
2364 (void) ddi_dma_sync(acmd->cmd_dmahandle,
2365 acmd->cmd_dma_offset, acmd->cmd_dma_len,
2366 DDI_DMA_SYNC_FORCPU);
2367 }
2368 }
2369
2370 pkt->pkt_reason = CMD_CMPLT;
2371 pkt->pkt_statistics = 0;
2372 pkt->pkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
2373 STATE_SENT_CMD | STATE_XFERRED_DATA | STATE_GOT_STATUS;
2374
2375 con_log(CL_ANN, (CE_CONT, " CDB[0] = %x completed for %s: "
2376 "size %lx SMID %x cmd_status %x", pkt->pkt_cdbp[0],
2377 ((acmd->islogical) ? "LD" : "PD"),
2378 acmd->cmd_dmacount, cmd->SMID, status));
2379
2380 if (pkt->pkt_cdbp[0] == SCMD_INQUIRY) {
2381 struct scsi_inquiry *inq;
2382
2383 if (acmd->cmd_dmacount != 0) {
2384 bp_mapin(acmd->cmd_buf);
2385 inq = (struct scsi_inquiry *)
2386 acmd->cmd_buf->b_un.b_addr;
2387
2388 /* don't expose physical drives to OS */
2389 if (acmd->islogical &&
2390 (status == MFI_STAT_OK)) {
2391 display_scsi_inquiry((caddr_t)inq);
2392 #ifdef PDSUPPORT
2393 } else if ((status == MFI_STAT_OK) &&
2394 inq->inq_dtype == DTYPE_DIRECT) {
2395 display_scsi_inquiry((caddr_t)inq);
2396 #endif
2397 } else {
2398 /* for physical disk */
2399 status = MFI_STAT_DEVICE_NOT_FOUND;
2400 }
2401 }
2402 }
2403
2404 switch (status) {
2405 case MFI_STAT_OK:
2406 pkt->pkt_scbp[0] = STATUS_GOOD;
2407 break;
2408 case MFI_STAT_LD_CC_IN_PROGRESS:
2409 case MFI_STAT_LD_RECON_IN_PROGRESS:
2410 pkt->pkt_scbp[0] = STATUS_GOOD;
2411 break;
2412 case MFI_STAT_LD_INIT_IN_PROGRESS:
2413 pkt->pkt_reason = CMD_TRAN_ERR;
2414 break;
2415 case MFI_STAT_SCSI_IO_FAILED:
2416 dev_err(instance->dip, CE_WARN,
2417 "tbolt_complete_cmd: scsi_io failed");
2418 pkt->pkt_reason = CMD_TRAN_ERR;
2419 break;
2420 case MFI_STAT_SCSI_DONE_WITH_ERROR:
2421 con_log(CL_ANN, (CE_WARN,
2422 "tbolt_complete_cmd: scsi_done with error"));
2423
2424 pkt->pkt_reason = CMD_CMPLT;
2425 ((struct scsi_status *)pkt->pkt_scbp)->sts_chk = 1;
2426
2427 if (pkt->pkt_cdbp[0] == SCMD_TEST_UNIT_READY) {
2428 con_log(CL_ANN,
2429 (CE_WARN, "TEST_UNIT_READY fail"));
2430 } else {
2431 pkt->pkt_state |= STATE_ARQ_DONE;
2432 arqstat = (void *)(pkt->pkt_scbp);
2433 arqstat->sts_rqpkt_reason = CMD_CMPLT;
2434 arqstat->sts_rqpkt_resid = 0;
2435 arqstat->sts_rqpkt_state |=
2436 STATE_GOT_BUS | STATE_GOT_TARGET
2437 | STATE_SENT_CMD
2438 | STATE_XFERRED_DATA;
2439 *(uint8_t *)&arqstat->sts_rqpkt_status =
2440 STATUS_GOOD;
2441 con_log(CL_ANN1,
2442 (CE_NOTE, "Copying Sense data %x",
2443 cmd->SMID));
2444
2445 ddi_rep_get8(acc_handle,
2446 (uint8_t *)&(arqstat->sts_sensedata),
2447 cmd->sense1,
2448 sizeof (struct scsi_extended_sense),
2449 DDI_DEV_AUTOINCR);
2450
2451 }
2452 break;
2453 case MFI_STAT_LD_OFFLINE:
2454 dev_err(instance->dip, CE_WARN,
2455 "tbolt_complete_cmd: ld offline "
2456 "CDB[0]=0x%x targetId=0x%x devhandle=0x%x",
2457 /* UNDO: */
2458 ddi_get8(acc_handle, &scsi_raid_io->CDB.CDB32[0]),
2459
2460 ddi_get16(acc_handle,
2461 &scsi_raid_io->RaidContext.ldTargetId),
2462
2463 ddi_get16(acc_handle, &scsi_raid_io->DevHandle));
2464
2465 pkt->pkt_reason = CMD_DEV_GONE;
2466 pkt->pkt_statistics = STAT_DISCON;
2467 break;
2468 case MFI_STAT_DEVICE_NOT_FOUND:
2469 con_log(CL_ANN, (CE_CONT,
2470 "tbolt_complete_cmd: device not found error"));
2471 pkt->pkt_reason = CMD_DEV_GONE;
2472 pkt->pkt_statistics = STAT_DISCON;
2473 break;
2474
2475 case MFI_STAT_LD_LBA_OUT_OF_RANGE:
2476 pkt->pkt_state |= STATE_ARQ_DONE;
2477 pkt->pkt_reason = CMD_CMPLT;
2478 ((struct scsi_status *)pkt->pkt_scbp)->sts_chk = 1;
2479
2480 arqstat = (void *)(pkt->pkt_scbp);
2481 arqstat->sts_rqpkt_reason = CMD_CMPLT;
2482 arqstat->sts_rqpkt_resid = 0;
2483 arqstat->sts_rqpkt_state |= STATE_GOT_BUS
2484 | STATE_GOT_TARGET | STATE_SENT_CMD
2485 | STATE_XFERRED_DATA;
2486 *(uint8_t *)&arqstat->sts_rqpkt_status = STATUS_GOOD;
2487
2488 arqstat->sts_sensedata.es_valid = 1;
2489 arqstat->sts_sensedata.es_key = KEY_ILLEGAL_REQUEST;
2490 arqstat->sts_sensedata.es_class = CLASS_EXTENDED_SENSE;
2491
2492 /*
2493 * LOGICAL BLOCK ADDRESS OUT OF RANGE:
2494 * ASC: 0x21h; ASCQ: 0x00h;
2495 */
2496 arqstat->sts_sensedata.es_add_code = 0x21;
2497 arqstat->sts_sensedata.es_qual_code = 0x00;
2498 break;
2499 case MFI_STAT_INVALID_CMD:
2500 case MFI_STAT_INVALID_DCMD:
2501 case MFI_STAT_INVALID_PARAMETER:
2502 case MFI_STAT_INVALID_SEQUENCE_NUMBER:
2503 default:
2504 dev_err(instance->dip, CE_WARN,
2505 "tbolt_complete_cmd: Unknown status!");
2506 pkt->pkt_reason = CMD_TRAN_ERR;
2507
2508 break;
2509 }
2510
2511 atomic_add_16(&instance->fw_outstanding, (-1));
2512
2513 (void) mrsas_common_check(instance, cmd);
2514 if (acmd->cmd_dmahandle) {
2515 if (mrsas_check_dma_handle(acmd->cmd_dmahandle) !=
2516 DDI_SUCCESS) {
2517 ddi_fm_service_impact(instance->dip,
2518 DDI_SERVICE_UNAFFECTED);
2519 pkt->pkt_reason = CMD_TRAN_ERR;
2520 pkt->pkt_statistics = 0;
2521 }
2522 }
2523
2524 /* Call the callback routine */
2525 if (((pkt->pkt_flags & FLAG_NOINTR) == 0) && pkt->pkt_comp)
2526 (*pkt->pkt_comp)(pkt);
2527
2528 con_log(CL_ANN1, (CE_NOTE, "Free smid %x", cmd->SMID));
2529
2530 ddi_put8(acc_handle, &scsi_raid_io->RaidContext.status, 0);
2531
2532 ddi_put8(acc_handle, &scsi_raid_io->RaidContext.extStatus, 0);
2533
2534 return_raid_msg_pkt(instance, cmd);
2535 break;
2536 }
2537 case MPI2_FUNCTION_PASSTHRU_IO_REQUEST: /* MFA command. */
2538
2539 if (cmd->frame->dcmd.opcode == MR_DCMD_LD_MAP_GET_INFO &&
2540 cmd->frame->dcmd.mbox.b[1] == 1) {
2541
2542 mutex_enter(&instance->sync_map_mtx);
2543
2544 con_log(CL_ANN, (CE_NOTE,
2545 "LDMAP sync command SMID RECEIVED 0x%X",
2546 cmd->SMID));
2547 if (cmd->frame->hdr.cmd_status != 0) {
2548 dev_err(instance->dip, CE_WARN,
2549 "map sync failed, status = 0x%x.",
2550 cmd->frame->hdr.cmd_status);
2551 } else {
2552 instance->map_id++;
2553 con_log(CL_ANN1, (CE_NOTE,
2554 "map sync received, switched map_id to %"
2555 PRIu64, instance->map_id));
2556 }
2557
2558 if (MR_ValidateMapInfo(
2559 instance->ld_map[instance->map_id & 1],
2560 instance->load_balance_info)) {
2561 instance->fast_path_io = 1;
2562 } else {
2563 instance->fast_path_io = 0;
2564 }
2565
2566 con_log(CL_ANN, (CE_NOTE,
2567 "instance->fast_path_io %d",
2568 instance->fast_path_io));
2569
2570 instance->unroll.syncCmd = 0;
2571
2572 if (instance->map_update_cmd == cmd) {
2573 return_raid_msg_pkt(instance, cmd);
2574 atomic_add_16(&instance->fw_outstanding, (-1));
2575 (void) mrsas_tbolt_sync_map_info(instance);
2576 }
2577
2578 con_log(CL_ANN1, (CE_NOTE,
2579 "LDMAP sync completed, ldcount=%d",
2580 instance->ld_map[instance->map_id & 1]
2581 ->raidMap.ldCount));
2582 mutex_exit(&instance->sync_map_mtx);
2583 break;
2584 }
2585
2586 if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_WAIT) {
2587 con_log(CL_ANN1, (CE_CONT,
2588 "AEN command SMID RECEIVED 0x%X",
2589 cmd->SMID));
2590 if ((instance->aen_cmd == cmd) &&
2591 (instance->aen_cmd->abort_aen)) {
2592 con_log(CL_ANN, (CE_WARN, "mrsas_softintr: "
2593 "aborted_aen returned"));
2594 } else {
2595 atomic_add_16(&instance->fw_outstanding, (-1));
2596 service_mfi_aen(instance, cmd);
2597 }
2598 }
2599
2600 if (cmd->sync_cmd == MRSAS_TRUE) {
2601 con_log(CL_ANN1, (CE_CONT,
2602 "Sync-mode Command Response SMID RECEIVED 0x%X",
2603 cmd->SMID));
2604
2605 tbolt_complete_cmd_in_sync_mode(instance, cmd);
2606 } else {
2607 con_log(CL_ANN, (CE_CONT,
2608 "tbolt_complete_cmd: Wrong SMID RECEIVED 0x%X",
2609 cmd->SMID));
2610 }
2611 break;
2612 default:
2613 mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
2614 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
2615
2616 /* Unknown function type; nothing to complete. */
2617 con_log(CL_ANN,
2618 (CE_NOTE, "tbolt_complete_cmd: Unknown Type!"));
2619 break;
2620 }
2621 }
2622
2623 uint_t
2624 mr_sas_tbolt_process_outstanding_cmd(struct mrsas_instance *instance)
2625 {
2626 uint8_t replyType;
2627 Mpi2SCSIIOSuccessReplyDescriptor_t *replyDesc;
2628 Mpi2ReplyDescriptorsUnion_t *desc;
2629 uint16_t smid;
2630 union desc_value d_val;
2631 struct mrsas_cmd *cmd;
2632
2633 struct mrsas_header *hdr;
2634 struct scsi_pkt *pkt;
2635
2636 (void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
2637 0, 0, DDI_DMA_SYNC_FORDEV);
2638
2639 (void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
2640 0, 0, DDI_DMA_SYNC_FORCPU);
2641
2642 desc = instance->reply_frame_pool;
2643 desc += instance->reply_read_index;
2644
2645 replyDesc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
2646 replyType = replyDesc->ReplyFlags &
2647 MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
2648
2649 if (replyType == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
2650 return (DDI_INTR_UNCLAIMED);
2651
2652 if (mrsas_check_dma_handle(instance->mfi_internal_dma_obj.dma_handle)
2653 != DDI_SUCCESS) {
2654 mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
2655 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
2656 con_log(CL_ANN1,
2657 (CE_WARN, "mr_sas_tbolt_process_outstanding_cmd(): "
2658 "FMA check failed, returning DDI_INTR_CLAIMED"));
2659 return (DDI_INTR_CLAIMED);
2660 }
2661
2662 con_log(CL_ANN1, (CE_NOTE, "Reply Desc = %p Words = %" PRIx64,
2663 (void *)desc, desc->Words));
2664
2665 d_val.word = desc->Words;
2666
2667
2668 /* Read Reply descriptor */
2669 while ((d_val.u1.low != 0xffffffff) &&
2670 (d_val.u1.high != 0xffffffff)) {
2671
2672 (void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
2673 0, 0, DDI_DMA_SYNC_FORCPU);
2674
2675 smid = replyDesc->SMID;
2676
2677 if (!smid || smid > instance->max_fw_cmds + 1) {
2678 con_log(CL_ANN1, (CE_NOTE,
2679 "Reply Desc at Break = %p Words = %" PRIx64,
2680 (void *)desc, desc->Words));
2681 break;
2682 }
2683
2684 cmd = instance->cmd_list[smid - 1];
2685 if (!cmd) {
2686 con_log(CL_ANN1, (CE_NOTE, "mr_sas_tbolt_process_"
2687 "outstanding_cmd: Invalid command "
2688 "or Poll command received in completion path"));
2689 } else {
2690 mutex_enter(&instance->cmd_pend_mtx);
2691 if (cmd->sync_cmd == MRSAS_TRUE) {
2692 hdr = (struct mrsas_header *)&cmd->frame->hdr;
2693 if (hdr) {
2694 con_log(CL_ANN1, (CE_NOTE, "mr_sas_"
2695 "tbolt_process_outstanding_cmd:"
2696 " mlist_del_init(&cmd->list)."));
2697 mlist_del_init(&cmd->list);
2698 }
2699 } else {
2700 pkt = cmd->pkt;
2701 if (pkt) {
2702 con_log(CL_ANN1, (CE_NOTE, "mr_sas_"
2703 "tbolt_process_outstanding_cmd:"
2704 "mlist_del_init(&cmd->list)."));
2705 mlist_del_init(&cmd->list);
2706 }
2707 }
2708
2709 mutex_exit(&instance->cmd_pend_mtx);
2710
2711 tbolt_complete_cmd(instance, cmd);
2712 }
2713 /* set it back to all 1s. */
2714 desc->Words = -1LL;
2715
2716 instance->reply_read_index++;
2717
2718 if (instance->reply_read_index >= (instance->reply_q_depth)) {
2719 con_log(CL_ANN1, (CE_NOTE, "wrap around"));
2720 instance->reply_read_index = 0;
2721 }
2722
2723 /* Get the next reply descriptor */
2724 if (!instance->reply_read_index)
2725 desc = instance->reply_frame_pool;
2726 else
2727 desc++;
2728
2729 replyDesc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
2730
2731 d_val.word = desc->Words;
2732
2733 con_log(CL_ANN1, (CE_NOTE,
2734 "Next Reply Desc = %p Words = %" PRIx64,
2735 (void *)desc, desc->Words));
2736
2737 replyType = replyDesc->ReplyFlags &
2738 MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
2739
2740 if (replyType == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
2741 break;
2742
2743 } /* End of while loop. */
2744
2745 /* update replyIndex to FW */
2746 WR_MPI2_REPLY_POST_INDEX(instance->reply_read_index, instance);
2747
2748
2749 (void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
2750 0, 0, DDI_DMA_SYNC_FORDEV);
2751
2752 (void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
2753 0, 0, DDI_DMA_SYNC_FORCPU);
2754 return (DDI_INTR_CLAIMED);
2755 }
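/*
 * The reply queue consumed above is a ring of 64-bit descriptors that
 * the driver drains until it reads the "unused" pattern (all ones).
 * Condensed sketch of the ring arithmetic (illustrative; process()
 * stands for the per-descriptor work done in the loop):
 *
 *	desc = instance->reply_frame_pool + instance->reply_read_index;
 *	while (desc->Words != ~0ULL) {
 *		process(desc);
 *		desc->Words = -1LL;
 *		if (++instance->reply_read_index >=
 *		    instance->reply_q_depth) {
 *			instance->reply_read_index = 0;
 *			desc = instance->reply_frame_pool;
 *		} else {
 *			desc++;
 *		}
 *	}
 *	WR_MPI2_REPLY_POST_INDEX(instance->reply_read_index, instance);
 */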
2756
2757
2758
2759
2760 /*
2761 * tbolt_complete_cmd_in_sync_mode - Completes an internal command
2762 * @instance: Adapter soft state
2763 * @cmd: Command to be completed
2764 *
2765 * The tbolt_issue_cmd_in_sync_mode() function waits for a command to
2766 * complete after it issues a command. This function wakes up that waiting
2767 * routine by broadcasting on the int_cmd_cv condition variable.
2768 */
2769 void
2770 tbolt_complete_cmd_in_sync_mode(struct mrsas_instance *instance,
2771 struct mrsas_cmd *cmd)
2772 {
2773
2774 cmd->cmd_status = ddi_get8(cmd->frame_dma_obj.acc_handle,
2775 &cmd->frame->io.cmd_status);
2776
2777 cmd->sync_cmd = MRSAS_FALSE;
2778
2779 mutex_enter(&instance->int_cmd_mtx);
2780 if (cmd->cmd_status == ENODATA) {
2781 cmd->cmd_status = 0;
2782 }
2783 cv_broadcast(&instance->int_cmd_cv);
2784 mutex_exit(&instance->int_cmd_mtx);
2785
2786 }
2787
2788 /*
2789 * mrsas_tbolt_get_ld_map_info - Fetches the FW's ld_map structure
2790 * instance: Adapter soft state
2791 *
2792 * Issues an internal command (DCMD) to get the FW's controller RAID
2793 * map structure. This information is mainly used to validate the map
2794 * and decide whether fast-path I/O can be enabled.
2795 */
2796 int
2797 mrsas_tbolt_get_ld_map_info(struct mrsas_instance *instance)
2798 {
2799 int ret = 0;
2800 struct mrsas_cmd *cmd = NULL;
2801 struct mrsas_dcmd_frame *dcmd;
2802 MR_FW_RAID_MAP_ALL *ci;
2803 uint32_t ci_h = 0;
2804 U32 size_map_info;
2805
2806 cmd = get_raid_msg_pkt(instance);
2807
2808 if (cmd == NULL) {
2809 dev_err(instance->dip, CE_WARN,
2810 "Failed to get a cmd from free-pool in get_ld_map_info()");
2811 return (DDI_FAILURE);
2812 }
2813
2814 dcmd = &cmd->frame->dcmd;
2815
2816 size_map_info = sizeof (MR_FW_RAID_MAP) +
2817 (sizeof (MR_LD_SPAN_MAP) *
2818 (MAX_LOGICAL_DRIVES - 1));
2819
2820 con_log(CL_ANN, (CE_NOTE,
2821 "size_map_info : 0x%x", size_map_info));
2822
2823 ci = instance->ld_map[instance->map_id & 1];
2824 ci_h = instance->ld_map_phy[instance->map_id & 1];
2825
2826 if (!ci) {
2827 dev_err(instance->dip, CE_WARN,
2828 "Failed to alloc mem for ld_map_info");
2829 return_raid_msg_pkt(instance, cmd);
2830 return (-1);
2831 }
2832
2833 bzero(ci, sizeof (*ci));
2834 bzero(dcmd->mbox.b, DCMD_MBOX_SZ);
2835
2836 dcmd->cmd = MFI_CMD_OP_DCMD;
2837 dcmd->cmd_status = 0xFF;
2838 dcmd->sge_count = 1;
2839 dcmd->flags = MFI_FRAME_DIR_READ;
2840 dcmd->timeout = 0;
2841 dcmd->pad_0 = 0;
2842 dcmd->data_xfer_len = size_map_info;
2843 dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
2844 dcmd->sgl.sge32[0].phys_addr = ci_h;
2845 dcmd->sgl.sge32[0].length = size_map_info;
2846
2847
2848 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
2849
2850 if (!instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
2851 ret = 0;
2852 con_log(CL_ANN1, (CE_NOTE, "Get LD Map Info success"));
2853 } else {
2854 dev_err(instance->dip, CE_WARN, "Get LD Map Info failed");
2855 ret = -1;
2856 }
2857
2858 return_raid_msg_pkt(instance, cmd);
2859
2860 return (ret);
2861 }
2862
2863 void
2864 mrsas_dump_reply_desc(struct mrsas_instance *instance)
2865 {
2866 uint32_t i;
2867 MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
2868 union desc_value d_val;
2869
2870 reply_desc = instance->reply_frame_pool;
2871
2872 for (i = 0; i < instance->reply_q_depth; i++, reply_desc++) {
2873 d_val.word = reply_desc->Words;
2874 con_log(CL_DLEVEL3, (CE_NOTE,
2875 "i=%d, %x:%x",
2876 i, d_val.u1.high, d_val.u1.low));
2877 }
2878 }
2879
2880 /*
2881 * mrsas_tbolt_prepare_cdb - Prepare a 32-byte DIF CDB for fast path.
2882 * @io_info: MegaRAID IO request packet pointer.
2883 * @ref_tag: Reference tag for RD/WRPROTECT
2884 *
2885 * Builds the variable-length CDB and EEDP fields for a DIF-capable disk.
2886 */
2887 void
2888 mrsas_tbolt_prepare_cdb(struct mrsas_instance *instance, U8 cdb[],
2889 struct IO_REQUEST_INFO *io_info, Mpi2RaidSCSIIORequest_t *scsi_io_request,
2890 U32 ref_tag)
2891 {
2892 uint16_t EEDPFlags;
2893 uint32_t Control;
2894 ddi_acc_handle_t acc_handle =
2895 instance->mpi2_frame_pool_dma_obj.acc_handle;
2896
2897 /* Prepare 32-byte CDB if DIF is supported on this device */
2898 con_log(CL_ANN, (CE_NOTE, "Prepare DIF CDB"));
2899
2900 bzero(cdb, 32);
2901
2902 cdb[0] = MRSAS_SCSI_VARIABLE_LENGTH_CMD;
2903
2904
2905 cdb[7] = MRSAS_SCSI_ADDL_CDB_LEN;
2906
2907 if (io_info->isRead)
2908 cdb[9] = MRSAS_SCSI_SERVICE_ACTION_READ32;
2909 else
2910 cdb[9] = MRSAS_SCSI_SERVICE_ACTION_WRITE32;
2911
2912 /* As in the Linux driver, set to MEGASAS_RD_WR_PROTECT_CHECK_ALL. */
2913 cdb[10] = MRSAS_RD_WR_PROTECT;
2914
2915 /* LOGICAL BLOCK ADDRESS */
2916 cdb[12] = (U8)(((io_info->pdBlock) >> 56) & 0xff);
2917 cdb[13] = (U8)(((io_info->pdBlock) >> 48) & 0xff);
2918 cdb[14] = (U8)(((io_info->pdBlock) >> 40) & 0xff);
2919 cdb[15] = (U8)(((io_info->pdBlock) >> 32) & 0xff);
2920 cdb[16] = (U8)(((io_info->pdBlock) >> 24) & 0xff);
2921 cdb[17] = (U8)(((io_info->pdBlock) >> 16) & 0xff);
2922 cdb[18] = (U8)(((io_info->pdBlock) >> 8) & 0xff);
2923 cdb[19] = (U8)((io_info->pdBlock) & 0xff);
2924
2925 /* Logical block reference tag */
2926 ddi_put32(acc_handle, &scsi_io_request->CDB.EEDP32.PrimaryReferenceTag,
2927 BE_32(ref_tag));
2928
2929 ddi_put16(acc_handle,
2930 &scsi_io_request->CDB.EEDP32.PrimaryApplicationTagMask, 0xffff);
2931
2932 ddi_put32(acc_handle, &scsi_io_request->DataLength,
2933 ((io_info->numBlocks)*512));
2934 /* Specify 32-byte cdb */
2935 ddi_put16(acc_handle, &scsi_io_request->IoFlags, 32);
2936
2937 /* Transfer length */
2938 cdb[28] = (U8)(((io_info->numBlocks) >> 24) & 0xff);
2939 cdb[29] = (U8)(((io_info->numBlocks) >> 16) & 0xff);
2940 cdb[30] = (U8)(((io_info->numBlocks) >> 8) & 0xff);
2941 cdb[31] = (U8)((io_info->numBlocks) & 0xff);
2942
2943 /* set SCSI IO EEDPFlags */
2944 EEDPFlags = ddi_get16(acc_handle, &scsi_io_request->EEDPFlags);
2945 Control = ddi_get32(acc_handle, &scsi_io_request->Control);
2946
2947 /* set SCSI IO EEDPFlags bits */
2948 if (io_info->isRead) {
2949 /*
2950 * For READ commands, the EEDPFlags shall be set to specify to
2951 * Increment the Primary Reference Tag, to Check the Reference
2952 * Tag, and to Check and Remove the Protection Information
2953 * fields.
2954 */
2955 EEDPFlags = MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
2956 MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
2957 MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP |
2958 MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG |
2959 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
2960 } else {
2961 /*
2962 * For WRITE commands, the EEDPFlags shall be set to specify to
2963 * Increment the Primary Reference Tag, and to Insert
2964 * Protection Information fields.
2965 */
2966 EEDPFlags = MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
2967 MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
2968 }
2969 Control |= (0x4 << 26);
2970
2971 ddi_put16(acc_handle, &scsi_io_request->EEDPFlags, EEDPFlags);
2972 ddi_put32(acc_handle, &scsi_io_request->Control, Control);
2973 ddi_put32(acc_handle,
2974 &scsi_io_request->EEDPBlockSize, MRSAS_EEDPBLOCKSIZE);
2975 }
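/*
 * Byte layout produced by mrsas_tbolt_prepare_cdb() above, shown for
 * an illustrative READ(32) with pdBlock = 0x1122334455667788 and
 * numBlocks = 8 (values are examples, not driver output):
 *
 *	cdb[0]      = 0x7F (variable-length command)
 *	cdb[7]      = 0x18 (24 additional CDB bytes)
 *	cdb[9]      = service action (READ32 or WRITE32)
 *	cdb[12..19] = 11 22 33 44 55 66 77 88	(LBA, big-endian)
 *	cdb[28..31] = 00 00 00 08		(transfer length)
 */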
2976
2977
2978 /*
2979 * mrsas_tbolt_set_pd_lba - Sets PD LBA
2980 * @cdb: CDB
2981 * @cdb_len: cdb length
2982 * @start_blk: Start block of IO
2983 *
2984 * Used to set the PD LBA in CDB for FP IOs
2985 */
2986 static void
2987 mrsas_tbolt_set_pd_lba(U8 cdb[], uint8_t *cdb_len_ptr, U64 start_blk,
2988 U32 num_blocks)
2989 {
2990 U8 cdb_len = *cdb_len_ptr;
2991 U8 flagvals = 0, opcode = 0, groupnum = 0, control = 0;
2992
2993 /* Some drives don't support 16/12 byte CDB's, convert to 10 */
2994 if (((cdb_len == 12) || (cdb_len == 16)) &&
2995 (start_blk <= 0xffffffff)) {
2996 if (cdb_len == 16) {
2997 con_log(CL_ANN,
2998 (CE_NOTE, "Converting READ/WRITE(16) to READ10"));
2999 opcode = cdb[0] == READ_16 ? READ_10 : WRITE_10;
3000 flagvals = cdb[1];
3001 groupnum = cdb[14];
3002 control = cdb[15];
3003 } else {
3004 con_log(CL_ANN,
3005 (CE_NOTE, "Converting READ/WRITE(12) to READ10"));
3006 opcode = cdb[0] == READ_12 ? READ_10 : WRITE_10;
3007 flagvals = cdb[1];
3008 groupnum = cdb[10];
3009 control = cdb[11];
3010 }
3011
3012 bzero(cdb, 32); /* cdb decays to a pointer; sizeof (cdb) would be wrong */
3013
3014 cdb[0] = opcode;
3015 cdb[1] = flagvals;
3016 cdb[6] = groupnum;
3017 cdb[9] = control;
3018 /* Set transfer length */
3019 cdb[8] = (U8)(num_blocks & 0xff);
3020 cdb[7] = (U8)((num_blocks >> 8) & 0xff);
3021 cdb_len = 10;
3022 } else if ((cdb_len < 16) && (start_blk > 0xffffffff)) {
3023 /* Convert to 16 byte CDB for large LBA's */
3024 con_log(CL_ANN,
3025 (CE_NOTE, "Converting 6/10/12 CDB to 16 byte CDB"));
3026 switch (cdb_len) {
3027 case 6:
3028 opcode = cdb[0] == READ_6 ? READ_16 : WRITE_16;
3029 control = cdb[5];
3030 break;
3031 case 10:
3032 opcode = cdb[0] == READ_10 ? READ_16 : WRITE_16;
3033 flagvals = cdb[1];
3034 groupnum = cdb[6];
3035 control = cdb[9];
3036 break;
3037 case 12:
3038 opcode = cdb[0] == READ_12 ? READ_16 : WRITE_16;
3039 flagvals = cdb[1];
3040 groupnum = cdb[10];
3041 control = cdb[11];
3042 break;
3043 }
3044
3045 bzero(cdb, 32);
3046
3047 cdb[0] = opcode;
3048 cdb[1] = flagvals;
3049 cdb[14] = groupnum;
3050 cdb[15] = control;
3051
3052 /* Transfer length */
3053 cdb[13] = (U8)(num_blocks & 0xff);
3054 cdb[12] = (U8)((num_blocks >> 8) & 0xff);
3055 cdb[11] = (U8)((num_blocks >> 16) & 0xff);
3056 cdb[10] = (U8)((num_blocks >> 24) & 0xff);
3057
3058 /* Specify 16-byte cdb */
3059 cdb_len = 16;
3060 } else if ((cdb_len == 6) && (start_blk > 0x1fffff)) {
3061 /* convert to 10 byte CDB */
3062 opcode = cdb[0] == READ_6 ? READ_10 : WRITE_10;
3063 control = cdb[5];
3064
3065 bzero(cdb, 32);
3066 cdb[0] = opcode;
3067 cdb[9] = control;
3068
3069 /* Set transfer length */
3070 cdb[8] = (U8)(num_blocks & 0xff);
3071 cdb[7] = (U8)((num_blocks >> 8) & 0xff);
3072
3073 /* Specify 10-byte cdb */
3074 cdb_len = 10;
3075 }
3076
3077
3078 /* Fall through to the normal case and load the LBA into the CDB */
3079 switch (cdb_len) {
3080 case 6:
3081 {
3082 U8 val = cdb[1] & 0xE0;
3083 cdb[3] = (U8)(start_blk & 0xff);
3084 cdb[2] = (U8)((start_blk >> 8) & 0xff);
3085 cdb[1] = val | ((U8)(start_blk >> 16) & 0x1f);
3086 break;
3087 }
3088 case 10:
3089 cdb[5] = (U8)(start_blk & 0xff);
3090 cdb[4] = (U8)((start_blk >> 8) & 0xff);
3091 cdb[3] = (U8)((start_blk >> 16) & 0xff);
3092 cdb[2] = (U8)((start_blk >> 24) & 0xff);
3093 break;
3094 case 12:
3095 cdb[5] = (U8)(start_blk & 0xff);
3096 cdb[4] = (U8)((start_blk >> 8) & 0xff);
3097 cdb[3] = (U8)((start_blk >> 16) & 0xff);
3098 cdb[2] = (U8)((start_blk >> 24) & 0xff);
3099 break;
3100
3101 case 16:
3102 cdb[9] = (U8)(start_blk & 0xff);
3103 cdb[8] = (U8)((start_blk >> 8) & 0xff);
3104 cdb[7] = (U8)((start_blk >> 16) & 0xff);
3105 cdb[6] = (U8)((start_blk >> 24) & 0xff);
3106 cdb[5] = (U8)((start_blk >> 32) & 0xff);
3107 cdb[4] = (U8)((start_blk >> 40) & 0xff);
3108 cdb[3] = (U8)((start_blk >> 48) & 0xff);
3109 cdb[2] = (U8)((start_blk >> 56) & 0xff);
3110 break;
3111 }
3112
3113 *cdb_len_ptr = cdb_len;
3114 }
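/*
 * Worked example for the conversion logic above (illustrative): a
 * READ(6) whose start block exceeds the 21-bit limit of the 6-byte
 * format is rewritten as a READ(10).  With start_blk = 0x200000 and
 * num_blocks = 0x10:
 *
 *	in:  cdb[0] = 0x08 (READ_6), cdb_len = 6
 *	out: cdb = { 0x28, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00,
 *	             0x00, 0x10, control }, cdb_len = 10
 *
 * where bytes 2..5 carry the LBA and bytes 7..8 the transfer length.
 */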
3115
3116
3117 static int
3118 mrsas_tbolt_check_map_info(struct mrsas_instance *instance)
3119 {
3120 MR_FW_RAID_MAP_ALL *ld_map;
3121
3122 if (!mrsas_tbolt_get_ld_map_info(instance)) {
3123
3124 ld_map = instance->ld_map[instance->map_id & 1];
3125
3126 con_log(CL_ANN1, (CE_NOTE, "ldCount=%d, map size=%d",
3127 ld_map->raidMap.ldCount, ld_map->raidMap.totalSize));
3128
3129 if (MR_ValidateMapInfo(
3130 instance->ld_map[instance->map_id & 1],
3131 instance->load_balance_info)) {
3132 con_log(CL_ANN,
3133 (CE_CONT, "MR_ValidateMapInfo success"));
3134
3135 instance->fast_path_io = 1;
3136 con_log(CL_ANN,
3137 (CE_NOTE, "instance->fast_path_io %d",
3138 instance->fast_path_io));
3139
3140 return (DDI_SUCCESS);
3141 }
3142
3143 }
3144
3145 instance->fast_path_io = 0;
3146 dev_err(instance->dip, CE_WARN, "MR_ValidateMapInfo failed");
3147 con_log(CL_ANN, (CE_NOTE,
3148 "instance->fast_path_io %d", instance->fast_path_io));
3149
3150 return (DDI_FAILURE);
3151 }
3152
3153 /*
3154 * Marks HBA as bad. This will be called either when an
3155 * IO packet times out even after 3 FW resets
3156 * or the FW is found to be in fault even after 3 continuous resets.
3157 */
3158
3159 void
3160 mrsas_tbolt_kill_adapter(struct mrsas_instance *instance)
3161 {
3162 dev_err(instance->dip, CE_NOTE, "TBOLT Kill adapter called");
3163
3164 if (instance->deadadapter == 1)
3165 return;
3166
3167 con_log(CL_ANN1, (CE_NOTE, "tbolt_kill_adapter: "
3168 "Writing to doorbell with MFI_STOP_ADP "));
3169 mutex_enter(&instance->ocr_flags_mtx);
3170 instance->deadadapter = 1;
3171 mutex_exit(&instance->ocr_flags_mtx);
3172 instance->func_ptr->disable_intr(instance);
3173 WR_RESERVED0_REGISTER(MFI_STOP_ADP, instance);
3174 /* Flush */
3175 (void) RD_RESERVED0_REGISTER(instance);
3176
3177 (void) mrsas_print_pending_cmds(instance);
3178 (void) mrsas_complete_pending_cmds(instance);
3179 }
3180
3181 void
3182 mrsas_reset_reply_desc(struct mrsas_instance *instance)
3183 {
3184 int i;
3185 MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
3186 instance->reply_read_index = 0;
3187
3188 /* initializing reply address to 0xFFFFFFFF */
3189 reply_desc = instance->reply_frame_pool;
3190
3191 for (i = 0; i < instance->reply_q_depth; i++) {
3192 reply_desc->Words = (uint64_t)~0;
3193 reply_desc++;
3194 }
3195 }
3196
3197 int
3198 mrsas_tbolt_reset_ppc(struct mrsas_instance *instance)
3199 {
3200 uint32_t status = 0x00;
3201 uint32_t retry = 0;
3202 uint32_t cur_abs_reg_val;
3203 uint32_t fw_state;
3204 uint32_t abs_state;
3205 uint32_t i;
3206
3207 con_log(CL_ANN, (CE_NOTE,
3208 "mrsas_tbolt_reset_ppc entered"));
3209
3210 if (instance->deadadapter == 1) {
3211 dev_err(instance->dip, CE_WARN, "mrsas_tbolt_reset_ppc: "
3212 "no more resets as HBA has been marked dead ");
3213 return (DDI_FAILURE);
3214 }
3215
3216 mutex_enter(&instance->ocr_flags_mtx);
3217 instance->adapterresetinprogress = 1;
3218 con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc:"
3219 "adpterresetinprogress flag set, time %llx", gethrtime()));
3220 mutex_exit(&instance->ocr_flags_mtx);
3221
3222 instance->func_ptr->disable_intr(instance);
3223
3224 /* Add delay in order to complete the ioctl & io cmds in-flight */
3225 for (i = 0; i < 3000; i++) {
3226 drv_usecwait(MILLISEC); /* wait for 1000 usecs */
3227 }
3228
3229 instance->reply_read_index = 0;
3230
3231 retry_reset:
3232 con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3233 ":Resetting TBOLT "));
3234
3235 WR_TBOLT_IB_WRITE_SEQ(0xF, instance);
3236 WR_TBOLT_IB_WRITE_SEQ(4, instance);
3237 WR_TBOLT_IB_WRITE_SEQ(0xb, instance);
3238 WR_TBOLT_IB_WRITE_SEQ(2, instance);
3239 WR_TBOLT_IB_WRITE_SEQ(7, instance);
3240 WR_TBOLT_IB_WRITE_SEQ(0xd, instance);
3241 con_log(CL_ANN1, (CE_NOTE,
3242 "mrsas_tbolt_reset_ppc: magic number written "
3243 "to write sequence register"));
3244 delay(100 * drv_usectohz(MILLISEC));
3245 status = RD_TBOLT_HOST_DIAG(instance);
3246 con_log(CL_ANN1, (CE_NOTE,
3247 "mrsas_tbolt_reset_ppc: host diag register "
3248 "read successfully"));
3249
3250 while (status & DIAG_TBOLT_RESET_ADAPTER) {
3251 delay(100 * drv_usectohz(MILLISEC));
3252 status = RD_TBOLT_HOST_DIAG(instance);
3253 if (retry++ == 100) {
3254 dev_err(instance->dip, CE_WARN,
3255 "mrsas_tbolt_reset_ppc: "
3256 "reset adapter bit is still set; "
3257 "retry count %d exceeded", retry);
3258 return (DDI_FAILURE);
3259 }
3260 }
3261
3262 WR_TBOLT_HOST_DIAG(status | DIAG_TBOLT_RESET_ADAPTER, instance);
3263 delay(100 * drv_usectohz(MILLISEC));
3264
3265 ddi_rep_get8((instance)->regmap_handle, (uint8_t *)&status,
3266 (uint8_t *)((uintptr_t)(instance)->regmap +
3267 RESET_TBOLT_STATUS_OFF), 4, DDI_DEV_AUTOINCR);
3268
3269 while ((status & DIAG_TBOLT_RESET_ADAPTER)) {
3270 delay(100 * drv_usectohz(MILLISEC));
3271 ddi_rep_get8((instance)->regmap_handle, (uint8_t *)&status,
3272 (uint8_t *)((uintptr_t)(instance)->regmap +
3273 RESET_TBOLT_STATUS_OFF), 4, DDI_DEV_AUTOINCR);
3274 if (retry++ == 100) {
3275 /* Don't call kill adapter here */
3276 /* The RESET ADAPTER bit is cleared by firmware */
3277 /* mrsas_tbolt_kill_adapter(instance); */
3278 dev_err(instance->dip, CE_WARN,
3279 "%s(): RESET FAILED; return failure!!!", __func__);
3280 return (DDI_FAILURE);
3281 }
3282 }
3283
3284 con_log(CL_ANN,
3285 (CE_NOTE, "mrsas_tbolt_reset_ppc: Adapter reset complete"));
3286 con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3287 "Calling mfi_state_transition_to_ready"));
3288
3289 abs_state = instance->func_ptr->read_fw_status_reg(instance);
3290 retry = 0;
3291 while ((abs_state <= MFI_STATE_FW_INIT) && (retry++ < 1000)) {
3292 delay(100 * drv_usectohz(MILLISEC));
3293 abs_state = instance->func_ptr->read_fw_status_reg(instance);
3294 }
3295 if (abs_state <= MFI_STATE_FW_INIT) {
3296 dev_err(instance->dip, CE_WARN,
3297 "mrsas_tbolt_reset_ppc: firmware state < MFI_STATE_FW_INIT"
3298 "state = 0x%x, RETRY RESET.", abs_state);
3299 goto retry_reset;
3300 }
3301
3302 /* Mark HBA as bad if the FW is in fault after 3 continuous resets */
3303 if (mfi_state_transition_to_ready(instance) ||
3304 debug_tbolt_fw_faults_after_ocr_g == 1) {
3305 cur_abs_reg_val =
3306 instance->func_ptr->read_fw_status_reg(instance);
3307 fw_state = cur_abs_reg_val & MFI_STATE_MASK;
3308
3309 con_log(CL_ANN1, (CE_NOTE,
3310 "mrsas_tbolt_reset_ppc :before fake: FW is not ready "
3311 "FW state = 0x%x", fw_state));
3312 if (debug_tbolt_fw_faults_after_ocr_g == 1)
3313 fw_state = MFI_STATE_FAULT;
3314
3315 con_log(CL_ANN,
3316 (CE_NOTE, "mrsas_tbolt_reset_ppc : FW is not ready "
3317 "FW state = 0x%x", fw_state));
3318
3319 if (fw_state == MFI_STATE_FAULT) {
3320 /* increment the count */
3321 instance->fw_fault_count_after_ocr++;
3322 if (instance->fw_fault_count_after_ocr
3323 < MAX_FW_RESET_COUNT) {
3324 dev_err(instance->dip, CE_WARN,
3325 "mrsas_tbolt_reset_ppc: "
3326 "FW is in fault after OCR, count %d; "
3327 "retrying reset",
3328 instance->fw_fault_count_after_ocr);
3329 goto retry_reset;
3330
3331 } else {
3332 dev_err(instance->dip, CE_WARN, "%s:"
3333 "Max Reset Count exceeded >%d"
3334 "Mark HBA as bad, KILL adapter",
3335 __func__, MAX_FW_RESET_COUNT);
3336
3337 mrsas_tbolt_kill_adapter(instance);
3338 return (DDI_FAILURE);
3339 }
3340 }
3341 }
3342
3343 /* reset the counter as FW is up after OCR */
3344 instance->fw_fault_count_after_ocr = 0;
3345
3346 mrsas_reset_reply_desc(instance);
3347
3348
3349 con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3350 "Calling mrsas_issue_init_mpi2"));
3351 abs_state = mrsas_issue_init_mpi2(instance);
3352 if (abs_state == (uint32_t)DDI_FAILURE) {
3353 dev_err(instance->dip, CE_WARN, "mrsas_tbolt_reset_ppc: "
3354 "INIT failed Retrying Reset");
3355 goto retry_reset;
3356 }
3357 con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3358 "mrsas_issue_init_mpi2 Done"));
3359
3360 con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3361 "Calling mrsas_print_pending_cmd"));
3362 (void) mrsas_print_pending_cmds(instance);
3363 con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3364 "mrsas_print_pending_cmd done"));
3365
3366 instance->func_ptr->enable_intr(instance);
3367 instance->fw_outstanding = 0;
3368
3369 con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3370 "Calling mrsas_issue_pending_cmds"));
3371 (void) mrsas_issue_pending_cmds(instance);
3372 con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3373 "issue_pending_cmds done."));
3374
3375 con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3376 "Calling aen registration"));
3377
3378 instance->aen_cmd->retry_count_for_ocr = 0;
3379 instance->aen_cmd->drv_pkt_time = 0;
3380
3381 instance->func_ptr->issue_cmd(instance->aen_cmd, instance);
3382
3383 con_log(CL_ANN1, (CE_NOTE, "Unsetting adpresetinprogress flag."));
3384 mutex_enter(&instance->ocr_flags_mtx);
3385 instance->adapterresetinprogress = 0;
3386 mutex_exit(&instance->ocr_flags_mtx);
3387 con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3388 "adpterresetinprogress flag unset"));
3389
3390 con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc done"));
3391 return (DDI_SUCCESS);
3392
3393 }
3394
3395
3396 /*
3397 * mrsas_tbolt_sync_map_info - Keeps the FW's ld_map structure in sync
3398 * @instance: Adapter soft state
3399 *
3400 * Issues a pended internal command (DCMD) so that the FW completes it
3401 * whenever the RAID map changes. The driver then switches to the
3402 * updated map and re-issues the DCMD from tbolt_complete_cmd().
3403 */
3404
3405 static int
3406 mrsas_tbolt_sync_map_info(struct mrsas_instance *instance)
3407 {
3408 int ret = 0, i;
3409 struct mrsas_cmd *cmd = NULL;
3410 struct mrsas_dcmd_frame *dcmd;
3411 uint32_t size_sync_info, num_lds;
3412 LD_TARGET_SYNC *ci = NULL;
3413 MR_FW_RAID_MAP_ALL *map;
3414 MR_LD_RAID *raid;
3415 LD_TARGET_SYNC *ld_sync;
3416 uint32_t ci_h = 0;
3417 uint32_t size_map_info;
3418
3419 cmd = get_raid_msg_pkt(instance);
3420
3421 if (cmd == NULL) {
3422 dev_err(instance->dip, CE_WARN,
3423 "Failed to get a cmd from free-pool in "
3424 "mrsas_tbolt_sync_map_info().");
3425 return (DDI_FAILURE);
3426 }
3427
3428 /* Clear the frame buffer and assign back the context id */
3429 bzero((char *)&cmd->frame[0], sizeof (union mrsas_frame));
3430 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3431 cmd->index);
3432 bzero(cmd->scsi_io_request, sizeof (Mpi2RaidSCSIIORequest_t));
3433
3434
3435 map = instance->ld_map[instance->map_id & 1];
3436
3437 num_lds = map->raidMap.ldCount;
3438
3439 dcmd = &cmd->frame->dcmd;
3440
3441 size_sync_info = sizeof (LD_TARGET_SYNC) * num_lds;
3442
3443 con_log(CL_ANN, (CE_NOTE, "size_sync_info =0x%x ; ld count = 0x%x",
3444 size_sync_info, num_lds));
3445
3446 ci = (LD_TARGET_SYNC *)instance->ld_map[(instance->map_id - 1) & 1];
3447
3448 bzero(ci, sizeof (MR_FW_RAID_MAP_ALL));
3449 ci_h = instance->ld_map_phy[(instance->map_id - 1) & 1];
3450
3451 bzero(dcmd->mbox.b, DCMD_MBOX_SZ);
3452
3453 ld_sync = (LD_TARGET_SYNC *)ci;
3454
3455 for (i = 0; i < num_lds; i++, ld_sync++) {
3456 raid = MR_LdRaidGet(i, map);
3457
3458 con_log(CL_ANN1,
3459 (CE_NOTE, "i : 0x%x, Seq Num : 0x%x, Sync Reqd : 0x%x",
3460 i, raid->seqNum, raid->flags.ldSyncRequired));
3461
3462 ld_sync->ldTargetId = MR_GetLDTgtId(i, map);
3463
3464 con_log(CL_ANN1, (CE_NOTE, "i : 0x%x, tgt : 0x%x",
3465 i, ld_sync->ldTargetId));
3466
3467 ld_sync->seqNum = raid->seqNum;
3468 }
3469
3470
3471 size_map_info = sizeof (MR_FW_RAID_MAP) +
3472 (sizeof (MR_LD_SPAN_MAP) * (MAX_LOGICAL_DRIVES - 1));
3473
3474 dcmd->cmd = MFI_CMD_OP_DCMD;
3475 dcmd->cmd_status = 0xFF;
3476 dcmd->sge_count = 1;
3477 dcmd->flags = MFI_FRAME_DIR_WRITE;
3478 dcmd->timeout = 0;
3479 dcmd->pad_0 = 0;
3480 dcmd->data_xfer_len = size_map_info;
3481 ASSERT(num_lds <= 255);
3482 dcmd->mbox.b[0] = (U8)num_lds;
3483 dcmd->mbox.b[1] = 1; /* Pend */
3484 dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
3485 dcmd->sgl.sge32[0].phys_addr = ci_h;
3486 dcmd->sgl.sge32[0].length = size_map_info;
3487
3488
3489 instance->map_update_cmd = cmd;
3490 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
3491
3492 instance->func_ptr->issue_cmd(cmd, instance);
3493
3494 instance->unroll.syncCmd = 1;
3495 con_log(CL_ANN1, (CE_NOTE, "sync cmd issued. [SMID]:%x", cmd->SMID));
3496
3497 return (ret);
3498 }
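/*
 * The RAID map handling above is double-buffered: ld_map[map_id & 1] is
 * the map currently in use, while ld_map[(map_id - 1) & 1] is reused as
 * scratch space for the LD_TARGET_SYNC list sent to the FW.  When the
 * pended DCMD completes in tbolt_complete_cmd(), map_id is incremented,
 * which flips the active buffer, and the DCMD is re-issued.  Sketch of
 * the toggle (illustrative):
 *
 *	active  = instance->ld_map[instance->map_id & 1];
 *	scratch = instance->ld_map[(instance->map_id - 1) & 1];
 *	...
 *	instance->map_id++;
 */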
3499
3500 /*
3501 * abort_syncmap_cmd
3502 */
3503 int
3504 abort_syncmap_cmd(struct mrsas_instance *instance,
3505 struct mrsas_cmd *cmd_to_abort)
3506 {
3507 int ret = 0;
3508
3509 struct mrsas_cmd *cmd;
3510 struct mrsas_abort_frame *abort_fr;
3511
3512 con_log(CL_ANN1, (CE_NOTE, "chkpnt: abort_ldsync:%d", __LINE__));
3513
3514 cmd = get_raid_msg_mfi_pkt(instance);
3515
3516 if (!cmd) {
3517 dev_err(instance->dip, CE_WARN,
3518 "Failed to get a cmd from free-pool abort_syncmap_cmd().");
3519 return (DDI_FAILURE);
3520 }
3521 /* Clear the frame buffer and assign back the context id */
3522 bzero((char *)&cmd->frame[0], sizeof (union mrsas_frame));
3523 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3524 cmd->index);
3525
3526 abort_fr = &cmd->frame->abort;
3527
3528 /* prepare and issue the abort frame */
3529 ddi_put8(cmd->frame_dma_obj.acc_handle,
3530 &abort_fr->cmd, MFI_CMD_OP_ABORT);
3531 ddi_put8(cmd->frame_dma_obj.acc_handle, &abort_fr->cmd_status,
3532 MFI_CMD_STATUS_SYNC_MODE);
3533 ddi_put16(cmd->frame_dma_obj.acc_handle, &abort_fr->flags, 0);
3534 ddi_put32(cmd->frame_dma_obj.acc_handle, &abort_fr->abort_context,
3535 cmd_to_abort->index);
3536 ddi_put32(cmd->frame_dma_obj.acc_handle,
3537 &abort_fr->abort_mfi_phys_addr_lo, cmd_to_abort->frame_phys_addr);
3538 ddi_put32(cmd->frame_dma_obj.acc_handle,
3539 &abort_fr->abort_mfi_phys_addr_hi, 0);
3540
3541 cmd->frame_count = 1;
3542
3543 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
3544
3545 if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
3546 con_log(CL_ANN1, (CE_WARN,
3547 "abort_ldsync_cmd: issue_cmd_in_poll_mode failed"));
3548 ret = -1;
3549 } else {
3550 ret = 0;
3551 }
3552
3553 return_raid_msg_mfi_pkt(instance, cmd);
3554
3555 atomic_add_16(&instance->fw_outstanding, (-1));
3556
3557 return (ret);
3558 }
3559
3560
3561 #ifdef PDSUPPORT
3562 /*
3563 * Even though these functions were originally intended for 2208 only, it
3564 * turns out they're useful for "Skinny" support as well. In a perfect world,
3565 * these two functions would be either in mr_sas.c, or in their own new source
3566 * file. Since this driver needs some cleanup anyway, keep this portion in
3567 * mind as well.
3568 */
3569
3570 int
3571 mrsas_tbolt_config_pd(struct mrsas_instance *instance, uint16_t tgt,
3572 uint8_t lun, dev_info_t **ldip)
3573 {
3574 struct scsi_device *sd;
3575 dev_info_t *child;
3576 int rval, dtype;
3577 struct mrsas_tbolt_pd_info *pds = NULL;
3578
3579 con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_config_pd: t = %d l = %d",
3580 tgt, lun));
3581
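	/* Reuse an existing child node; prune it if the target is stale. */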
	if ((child = mrsas_find_child(instance, tgt, lun)) != NULL) {
		if (ldip) {
			*ldip = child;
		}
		if (instance->mr_tbolt_pd_list[tgt].flag != MRDRV_TGT_VALID) {
			rval = mrsas_service_evt(instance, tgt, 1,
			    MRSAS_EVT_UNCONFIG_TGT, NULL);
			con_log(CL_ANN1, (CE_WARN,
			    "mr_sas:DELETING STALE ENTRY rval = %d "
			    "tgt id = %d", rval, tgt));
			return (NDI_FAILURE);
		}
		return (NDI_SUCCESS);
	}

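	/* Fetch the PD info from firmware to learn the SCSI device type. */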
	pds = (struct mrsas_tbolt_pd_info *)
	    kmem_zalloc(sizeof (struct mrsas_tbolt_pd_info), KM_SLEEP);
	mrsas_tbolt_get_pd_info(instance, pds, tgt);
	dtype = pds->scsiDevType;

	/* Check for disk: only PDs in the SYSTEM state are exposed. */
	if (dtype == DTYPE_DIRECT) {
		if (LE_16(pds->fwState) != PD_SYSTEM) {
			kmem_free(pds, sizeof (struct mrsas_tbolt_pd_info));
			return (NDI_FAILURE);
		}
		sd = kmem_zalloc(sizeof (struct scsi_device), KM_SLEEP);
		sd->sd_address.a_hba_tran = instance->tran;
		sd->sd_address.a_target = (uint16_t)tgt;
		sd->sd_address.a_lun = (uint8_t)lun;

		if (scsi_hba_probe(sd, NULL) == SCSIPROBE_EXISTS) {
			rval = mrsas_config_scsi_device(instance, sd, ldip);
			dev_err(instance->dip, CE_CONT,
			    "?Phys. device found: tgt %d dtype %d: %s\n",
			    tgt, dtype, sd->sd_inq->inq_vid);
		} else {
			rval = NDI_FAILURE;
			con_log(CL_DLEVEL1, (CE_NOTE, "Phys. device Not found "
			    "scsi_hba_probe Failed: tgt %d dtype %d: %s",
			    tgt, dtype, sd->sd_inq->inq_vid));
		}

		/* sd_unprobe is blank now. Free buffer manually */
		if (sd->sd_inq) {
			kmem_free(sd->sd_inq, SUN_INQSIZE);
			sd->sd_inq = (struct scsi_inquiry *)NULL;
		}
		kmem_free(sd, sizeof (struct scsi_device));
	} else {
		con_log(CL_ANN1, (CE_NOTE,
		    "?Device not supported: tgt %d lun %d dtype %d",
		    tgt, lun, dtype));
		rval = NDI_FAILURE;
	}

	kmem_free(pds, sizeof (struct mrsas_tbolt_pd_info));
	con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_config_pd: return rval = %d",
	    rval));
	return (rval);
}
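/*
 * mrsas_tbolt_get_pd_info - fetch the firmware's view of one physical
 * device (state, SCSI device type, ...) into *pds via the
 * MR_DCMD_PD_GET_INFO DCMD.
 */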
static void
mrsas_tbolt_get_pd_info(struct mrsas_instance *instance,
    struct mrsas_tbolt_pd_info *pds, int tgt)
{
	struct mrsas_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;
	dma_obj_t dcmd_dma_obj;

	ASSERT(instance->tbolt || instance->skinny);

	if (instance->tbolt)
		cmd = get_raid_msg_pkt(instance);
	else
		cmd = mrsas_get_mfi_pkt(instance);

	if (!cmd) {
		con_log(CL_ANN1,
		    (CE_WARN, "Failed to get a cmd for get pd info"));
		return;
	}

	/* Clear the frame buffer and assign back the context id */
	bzero((char *)&cmd->frame[0], sizeof (union mrsas_frame));
	ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
	    cmd->index);


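	/*
	 * Stage a single-cookie, byte-aligned DMA buffer to receive the
	 * pd_info payload from the firmware.
	 */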
	dcmd = &cmd->frame->dcmd;
	dcmd_dma_obj.size = sizeof (struct mrsas_tbolt_pd_info);
	dcmd_dma_obj.dma_attr = mrsas_generic_dma_attr;
	dcmd_dma_obj.dma_attr.dma_attr_addr_hi = 0xffffffff;
	dcmd_dma_obj.dma_attr.dma_attr_count_max = 0xffffffff;
	dcmd_dma_obj.dma_attr.dma_attr_sgllen = 1;
	dcmd_dma_obj.dma_attr.dma_attr_align = 1;

	/* Bail out, returning the command, if the DMA setup fails. */
	if (mrsas_alloc_dma_obj(instance, &dcmd_dma_obj,
	    DDI_STRUCTURE_LE_ACC) != 1) {
		if (instance->tbolt)
			return_raid_msg_pkt(instance, cmd);
		else
			mrsas_return_mfi_pkt(instance, cmd);
		return;
	}
	bzero(dcmd_dma_obj.buffer, sizeof (struct mrsas_tbolt_pd_info));
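	/*
	 * Build the MR_DCMD_PD_GET_INFO frame; the target device ID is
	 * handed to the firmware in mbox.w[0].
	 */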
	bzero(dcmd->mbox.b, 12);
	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD);
	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status, 0);
	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 1);
	ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags,
	    MFI_FRAME_DIR_READ);
	ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len,
	    sizeof (struct mrsas_tbolt_pd_info));
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode,
	    MR_DCMD_PD_GET_INFO);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->mbox.w[0], tgt);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].length,
	    sizeof (struct mrsas_tbolt_pd_info));
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].phys_addr,
	    dcmd_dma_obj.dma_cookie[0].dmac_address);

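	/* Issue the DCMD synchronously, then copy the response to *pds. */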
	cmd->sync_cmd = MRSAS_TRUE;
	cmd->frame_count = 1;

	if (instance->tbolt)
		mr_sas_tbolt_build_mfi_cmd(instance, cmd);

	instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd);

	ddi_rep_get8(cmd->frame_dma_obj.acc_handle, (uint8_t *)pds,
	    (uint8_t *)dcmd_dma_obj.buffer, sizeof (struct mrsas_tbolt_pd_info),
	    DDI_DEV_AUTOINCR);
	(void) mrsas_free_dma_obj(instance, dcmd_dma_obj);

	if (instance->tbolt)
		return_raid_msg_pkt(instance, cmd);
	else
		mrsas_return_mfi_pkt(instance, cmd);
}
#endif /* PDSUPPORT */
