1 /*-
2 * Copyright 2016-2023 Microchip Technology, Inc. and/or its subsidiaries.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23 * SUCH DAMAGE.
24 */
25
26
27 #include "smartpqi_includes.h"
28
29 /*
30 * Process internal RAID response in the case of success.
31 */
32 void
pqisrc_process_internal_raid_response_success(pqisrc_softstate_t * softs,rcb_t * rcb)33 pqisrc_process_internal_raid_response_success(pqisrc_softstate_t *softs,rcb_t *rcb)
34 {
35 DBG_FUNC("IN\n");
36
37 rcb->status = PQI_STATUS_SUCCESS;
38 rcb->req_pending = false;
39
40 DBG_FUNC("OUT\n");
41 }
42
43 /* Safely determines if cdb is available and if so, will return SCSI opcode or
44 BMIC cmd if BMIC op code is detected */
45 uint8_t
pqisrc_get_cmd_from_rcb(rcb_t * rcb)46 pqisrc_get_cmd_from_rcb(rcb_t *rcb)
47 {
48 uint8_t opcode = 0xFF;
49
50 if (rcb && rcb->cdbp)
51 {
52 opcode = rcb->cdbp[0];
53 if (IS_BMIC_OPCODE(opcode))
54 return rcb->cdbp[6];
55 }
56
57 return opcode;
58 }
59
60 /*
61 * Process internal RAID response in the case of failure.
62 */
63 void
pqisrc_process_internal_raid_response_error(pqisrc_softstate_t * softs,rcb_t * rcb,uint16_t err_idx)64 pqisrc_process_internal_raid_response_error(pqisrc_softstate_t *softs,
65 rcb_t *rcb, uint16_t err_idx)
66 {
67 raid_path_error_info_elem_t error_info;
68
69 DBG_FUNC("IN\n");
70
71 rcb->error_info = (char *) (softs->err_buf_dma_mem.virt_addr) +
72 (err_idx * PQI_ERROR_BUFFER_ELEMENT_LENGTH);
73
74 memcpy(&error_info, rcb->error_info, sizeof(error_info));
75
76 rcb->status = PQI_STATUS_TIMEOUT;
77
78 switch (error_info.data_out_result) {
79 case PQI_RAID_DATA_IN_OUT_GOOD:
80 if (error_info.status == PQI_RAID_DATA_IN_OUT_GOOD)
81 rcb->status = PQI_STATUS_SUCCESS;
82 break;
83 case PQI_RAID_DATA_IN_OUT_UNDERFLOW:
84 if (error_info.status == PQI_RAID_DATA_IN_OUT_GOOD ||
85 error_info.status == PQI_RAID_STATUS_CHECK_CONDITION)
86 rcb->status = PQI_STATUS_SUCCESS;
87 break;
88 default:
89 DBG_WARN("error_status 0x%x data_in_result 0x%x data_out_result 0x%x cmd rcb tag 0x%x\n",
90 error_info.status, error_info.data_in_result, error_info.data_out_result, rcb->tag);
91 }
92
93 if (rcb->status != PQI_STATUS_SUCCESS)
94 {
95 DBG_INFO("error_status=0x%x data_in=0x%x data_out=0x%x detail=0x%x\n",
96 error_info.status, error_info.data_in_result, error_info.data_out_result,
97 pqisrc_get_cmd_from_rcb(rcb));
98 }
99
100 rcb->req_pending = false;
101
102 DBG_FUNC("OUT\n");
103 }
104
105 /*
106 * Process the AIO/RAID IO in the case of success.
107 */
108 void
pqisrc_process_io_response_success(pqisrc_softstate_t * softs,rcb_t * rcb)109 pqisrc_process_io_response_success(pqisrc_softstate_t *softs, rcb_t *rcb)
110 {
111 DBG_FUNC("IN\n");
112
113 os_io_response_success(rcb);
114
115 DBG_FUNC("OUT\n");
116 }
117
118 static void
pqisrc_extract_sense_data(sense_data_u_t * sense_data,uint8_t * key,uint8_t * asc,uint8_t * ascq)119 pqisrc_extract_sense_data(sense_data_u_t *sense_data, uint8_t *key, uint8_t *asc, uint8_t *ascq)
120 {
121 if (sense_data->fixed_format.response_code == SCSI_SENSE_RESPONSE_70 ||
122 sense_data->fixed_format.response_code == SCSI_SENSE_RESPONSE_71)
123 {
124 sense_data_fixed_t *fixed = &sense_data->fixed_format;
125
126 *key = fixed->sense_key;
127 *asc = fixed->sense_code;
128 *ascq = fixed->sense_qual;
129 }
130 else if (sense_data->descriptor_format.response_code == SCSI_SENSE_RESPONSE_72 ||
131 sense_data->descriptor_format.response_code == SCSI_SENSE_RESPONSE_73)
132 {
133 sense_data_descriptor_t *desc = &sense_data->descriptor_format;
134
135 *key = desc->sense_key;
136 *asc = desc->sense_code;
137 *ascq = desc->sense_qual;
138 }
139 else
140 {
141 *key = 0xFF;
142 *asc = 0xFF;
143 *ascq = 0xFF;
144 }
145 }
146
147 /* Suppress common errors unless verbose debug flag is on */
148 boolean_t
suppress_innocuous_error_prints(pqisrc_softstate_t * softs,rcb_t * rcb)149 suppress_innocuous_error_prints(pqisrc_softstate_t *softs, rcb_t *rcb)
150 {
151 uint8_t opcode = rcb->cdbp ? rcb->cdbp[0] : 0xFF;
152
153 if ((opcode == SCSI_INQUIRY || /* 0x12 */
154 opcode == SCSI_MODE_SENSE || /* 0x1a */
155 opcode == SCSI_REPORT_LUNS || /* 0xa0 */
156 opcode == SCSI_LOG_SENSE || /* 0x4d */
157 opcode == SCSI_ATA_PASSTHRU16) /* 0x85 */
158 && (softs->err_resp_verbose == false))
159 return true;
160
161 return false;
162 }
163
164 static void
pqisrc_show_sense_data_simple(pqisrc_softstate_t * softs,rcb_t * rcb,sense_data_u_t * sense_data)165 pqisrc_show_sense_data_simple(pqisrc_softstate_t *softs, rcb_t *rcb, sense_data_u_t *sense_data)
166 {
167 uint8_t opcode = rcb->cdbp ? rcb->cdbp[0] : 0xFF;
168 char *path = io_path_to_ascii(rcb->path);
169 uint8_t key, asc, ascq;
170 pqisrc_extract_sense_data(sense_data, &key, &asc, &ascq);
171
172 DBG_NOTE("[ERR INFO] BTL: %d:%d:%d op=0x%x path=%s K:C:Q: %x:%x:%x\n",
173 rcb->dvp->bus, rcb->dvp->target, rcb->dvp->lun,
174 opcode, path, key, asc, ascq);
175 }
176
177 void
pqisrc_show_sense_data_full(pqisrc_softstate_t * softs,rcb_t * rcb,sense_data_u_t * sense_data)178 pqisrc_show_sense_data_full(pqisrc_softstate_t *softs, rcb_t *rcb, sense_data_u_t *sense_data)
179 {
180 if (suppress_innocuous_error_prints(softs, rcb))
181 return;
182
183 pqisrc_print_buffer(softs, "sense data", sense_data, 32, 0);
184
185 pqisrc_show_sense_data_simple(softs, rcb, sense_data);
186
187 /* add more detail here as needed */
188 }
189
190
191 /* dumps the aio error info and sense data then breaks down the output */
192 void
pqisrc_show_aio_error_info(pqisrc_softstate_t * softs,rcb_t * rcb,aio_path_error_info_elem_t * aio_err)193 pqisrc_show_aio_error_info(pqisrc_softstate_t *softs, rcb_t *rcb, aio_path_error_info_elem_t *aio_err)
194 {
195 DBG_NOTE("\n");
196 DBG_NOTE("aio err: status=0x%x serv_resp=0x%x data_pres=0x%x data_len=0x%x\n",
197 aio_err->status, aio_err->service_resp, aio_err->data_pres, aio_err->data_len);
198
199 pqisrc_print_buffer(softs, "aio err info", aio_err,
200 offsetof(aio_path_error_info_elem_t, data), PRINT_FLAG_HDR_COLUMN);
201
202 pqisrc_show_sense_data_full(softs, rcb, &aio_err->sense_data);
203 }
204
205
206 /* dumps the raid error info and sense data then breaks down the output */
207 void
pqisrc_show_raid_error_info(pqisrc_softstate_t * softs,rcb_t * rcb,raid_path_error_info_elem_t * raid_err)208 pqisrc_show_raid_error_info(pqisrc_softstate_t *softs, rcb_t *rcb, raid_path_error_info_elem_t *raid_err)
209 {
210 DBG_NOTE("\n");
211 DBG_NOTE("raid err: data_in=0x%x out=0x%x status=0x%x sense_len=0x%x resp_len=0x%x\n",
212 raid_err->data_in_result, raid_err->data_in_result,
213 raid_err->status, raid_err->sense_data_len, raid_err->resp_data_len);
214
215 pqisrc_print_buffer(softs, "raid err info", raid_err,
216 offsetof(raid_path_error_info_elem_t, data), PRINT_FLAG_HDR_COLUMN);
217
218 pqisrc_show_sense_data_full(softs, rcb, &raid_err->sense_data);
219 }
220
/*
 * Return true if this is an innocuous error: one of a small set of probing
 * commands whose minor failures (underrun, CHECK CONDITION during capability
 * discovery) are expected and not worth a full error dump.
 */
boolean_t
pqisrc_is_innocuous_error(pqisrc_softstate_t *softs, rcb_t *rcb, void *err_info)
{
	uint8_t opcode = rcb->cdbp ? rcb->cdbp[0] : 0xFF;

	/* These SCSI cmds frequently cause "underrun" and other minor "error"
	   conditions while determining log page length, support, etc. */
	if (opcode != SCSI_INQUIRY && /* 0x12 */
		opcode != SCSI_MODE_SENSE && /* 0x1a */
		opcode != SCSI_REPORT_LUNS && /* 0xa0 */
		opcode != SCSI_LOG_SENSE && /* 0x4d */
		opcode != SCSI_ATA_PASSTHRU16) /* 0x85 */
	{
		/* Any other opcode is never considered innocuous. */
		return false;
	}

	/* treat all cmds above as innocuous unless verbose flag is set. */
	if (softs->err_resp_verbose == false)
		return true;

	/* Verbose mode: apply finer-grained filters per I/O path; err_info
	 * is interpreted according to rcb->path. */
	if (rcb->path == AIO_PATH)
	{
		aio_path_error_info_elem_t *aio_err = err_info;
		uint8_t key, asc, ascq;

		/* Byte[0]=Status=0x51, Byte[1]=service_resp=0x01 */
		if (aio_err->status == PQI_AIO_STATUS_UNDERRUN &&
			aio_err->service_resp == PQI_AIO_SERV_RESPONSE_FAILURE)
		{
			return true;
		}

		/* get the key info so we can apply more filters... */
		pqisrc_extract_sense_data(&aio_err->sense_data, &key, &asc, &ascq);

		/* Seeing a lot of ILLEGAL REQUEST (key 5) / invalid field in
		   CDB (ASC 0x24) for REPORT LUNS on the AIO path.
		   Example CDB = a0 00 11 00 00 00 00 00 20 08 00 00
		   So filter out the full dump info for now, but leave a
		   one-line breadcrumb.  Also wonder if we should just send
		   REPORT LUNS to the raid path? */
		if (opcode == SCSI_REPORT_LUNS &&
			key == 5 && asc == 0x24)
		{
			pqisrc_show_sense_data_simple(softs, rcb, &aio_err->sense_data);
			return true;
		}

		/* may want to return true here eventually? */
	}
	else
	{
		raid_path_error_info_elem_t *raid_err = err_info;

		/* Byte[1]=data_out=0x01 */
		if (raid_err->data_out_result == PQI_RAID_DATA_IN_OUT_UNDERFLOW)
			return true;

		/* We get these a lot: leave a tiny breadcrumb about the error,
		   but don't do the full spew about it.
		   NOTE(review): this compares a RAID-path status against an
		   AIO-path constant (PQI_AIO_STATUS_CHECK_CONDITION) —
		   presumably both encode the SCSI CHECK CONDITION value;
		   confirm against the status definitions. */
		if (raid_err->status == PQI_AIO_STATUS_CHECK_CONDITION)
		{
			pqisrc_show_sense_data_simple(softs, rcb, &raid_err->sense_data);
			return true;
		}
	}

	return false;
}
289
290 /*
291 * Process the error info for AIO in the case of failure.
292 */
293 void
pqisrc_process_aio_response_error(pqisrc_softstate_t * softs,rcb_t * rcb,uint16_t err_idx)294 pqisrc_process_aio_response_error(pqisrc_softstate_t *softs,
295 rcb_t *rcb, uint16_t err_idx)
296 {
297 aio_path_error_info_elem_t *err_info = NULL;
298
299 DBG_FUNC("IN\n");
300
301 ASSERT(rcb->path == AIO_PATH);
302
303 err_info = (aio_path_error_info_elem_t*)
304 softs->err_buf_dma_mem.virt_addr +
305 err_idx;
306
307 if(err_info == NULL) {
308 DBG_ERR("err_info structure is NULL err_idx :%x\n", err_idx);
309 return;
310 }
311
312 /* filter out certain underrun/success "errors" from printing */
313 if (!pqisrc_is_innocuous_error(softs, rcb, err_info)) {
314
315 if (softs->err_resp_verbose == true)
316 pqisrc_show_rcb_details(softs, rcb,
317 "aio error", err_info);
318 }
319
320 os_aio_response_error(rcb, err_info);
321
322 DBG_FUNC("OUT\n");
323 }
324
325 /*
326 * Process the error info for RAID IO in the case of failure.
327 */
328 void
pqisrc_process_raid_response_error(pqisrc_softstate_t * softs,rcb_t * rcb,uint16_t err_idx)329 pqisrc_process_raid_response_error(pqisrc_softstate_t *softs,
330 rcb_t *rcb, uint16_t err_idx)
331 {
332 raid_path_error_info_elem_t *err_info = NULL;
333
334 DBG_FUNC("IN\n");
335
336 ASSERT(rcb->path == RAID_PATH);
337
338 err_info = (raid_path_error_info_elem_t*)
339 softs->err_buf_dma_mem.virt_addr +
340 err_idx;
341
342 if(err_info == NULL) {
343 DBG_ERR("err_info structure is NULL err_idx :%x\n", err_idx);
344 return;
345 }
346
347 /* filter out certain underrun/success "errors" from printing */
348 if (!pqisrc_is_innocuous_error(softs, rcb, err_info)) {
349
350 if( softs->err_resp_verbose == true )
351 pqisrc_show_rcb_details(softs, rcb,
352 "raid error", err_info);
353
354 }
355
356 os_raid_response_error(rcb, err_info);
357
358 DBG_FUNC("OUT\n");
359 }
360
361 /*
362 * Process the Task Management function response.
363 */
364 int
pqisrc_process_task_management_response(pqisrc_softstate_t * softs,pqi_tmf_resp_t * tmf_resp)365 pqisrc_process_task_management_response(pqisrc_softstate_t *softs,
366 pqi_tmf_resp_t *tmf_resp)
367 {
368 int ret = PQI_STATUS_SUCCESS;
369 uint32_t tag = (uint32_t)tmf_resp->req_id;
370 rcb_t *rcb = &softs->rcb[tag];
371
372 ASSERT(rcb->tag == tag);
373
374 DBG_FUNC("IN\n");
375
376 switch (tmf_resp->resp_code) {
377 case SOP_TASK_MANAGEMENT_FUNCTION_COMPLETE:
378 case SOP_TASK_MANAGEMENT_FUNCTION_SUCCEEDED:
379 ret = PQI_STATUS_SUCCESS;
380 break;
381 default:
382 DBG_ERR("Tag #0x%08x TMF Failed, Response code : 0x%x\n",
383 rcb->tag, tmf_resp->resp_code);
384 ret = PQI_STATUS_TIMEOUT;
385 break;
386 }
387
388 rcb->status = ret;
389 rcb->req_pending = false;
390
391 DBG_FUNC("OUT\n");
392 return ret;
393 }
394
395 static int
pqisrc_process_vendor_general_response(pqi_vendor_general_response_t * response)396 pqisrc_process_vendor_general_response(pqi_vendor_general_response_t *response)
397 {
398
399 int ret = PQI_STATUS_SUCCESS;
400
401 switch(response->status) {
402 case PQI_VENDOR_RESPONSE_IU_SUCCESS:
403 break;
404 case PQI_VENDOR_RESPONSE_IU_UNSUCCESS:
405 case PQI_VENDOR_RESPONSE_IU_INVALID_PARAM:
406 case PQI_VENDOR_RESPONSE_IU_INSUFF_RESRC:
407 ret = PQI_STATUS_TIMEOUT;
408 break;
409 }
410
411 return ret;
412 }
413
414 /*
415 * Function used to process the response from the adapter
416 * which is invoked by IRQ handler.
417 */
418 void
pqisrc_process_response_queue(pqisrc_softstate_t * softs,int oq_id)419 pqisrc_process_response_queue(pqisrc_softstate_t *softs, int oq_id)
420 {
421 ob_queue_t *ob_q;
422 struct pqi_io_response *response;
423 uint32_t oq_pi, oq_ci;
424 pqi_scsi_dev_t *dvp = NULL;
425
426
427 DBG_FUNC("IN\n");
428
429 ob_q = &softs->op_ob_q[oq_id - 1]; /* zero for event Q */
430 oq_ci = ob_q->ci_local;
431 oq_pi = *(ob_q->pi_virt_addr);
432
433 DBG_IO("ci : %u pi : %u qid : %u\n", oq_ci, oq_pi, ob_q->q_id);
434
435 while (1) {
436 boolean_t os_scsi_cmd = false;
437 rcb_t *rcb = NULL;
438 uint32_t tag = 0;
439 uint32_t offset;
440
441 if (oq_pi == oq_ci)
442 break;
443 /* Get the response */
444 offset = oq_ci * ob_q->elem_size;
445 response = (struct pqi_io_response *)(ob_q->array_virt_addr +
446 offset);
447 tag = response->request_id;
448 rcb = &softs->rcb[tag];
449 /* Make sure we are processing a valid response. */
450 if ((rcb->tag != tag) || (rcb->req_pending == false)) {
451 DBG_ERR("No such request pending with tag : %x rcb->tag : %x", tag, rcb->tag);
452 oq_ci = (oq_ci + 1) % ob_q->num_elem;
453 break;
454 }
455 /* Timedout request has been completed. This should not hit,
456 * if timeout is set as TIMEOUT_INFINITE while calling
457 * pqisrc_wait_on_condition(softs,rcb,timeout).
458 */
459 if (rcb->timedout) {
460 DBG_WARN("timed out request completing from firmware, driver already completed it with failure , free the tag 0x%x\n", tag);
461 oq_ci = (oq_ci + 1) % ob_q->num_elem;
462 os_reset_rcb(rcb);
463 pqisrc_put_tag(&softs->taglist, tag);
464 break;
465 }
466
467 if (rcb->host_wants_to_abort_this)
468 {
469 DBG_INFO("cmd that was aborted came back. tag=%u\n", rcb->tag);
470 }
471 if (rcb->is_abort_cmd_from_host)
472 {
473 DBG_INFO("abort cmd came back. tag=%u\n", rcb->tag);
474 }
475 if (IS_OS_SCSICMD(rcb)) {
476 dvp = rcb->dvp;
477 if (dvp)
478 os_scsi_cmd = true;
479 else
480 DBG_WARN("Received IO completion for the Null device!!!\n");
481 }
482
483 DBG_IO("response.header.iu_type : %x \n", response->header.iu_type);
484
485 switch (response->header.iu_type) {
486 case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS:
487 case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS:
488 rcb->success_cmp_callback(softs, rcb);
489 if (os_scsi_cmd)
490 pqisrc_decrement_device_active_io(softs, dvp);
491 break;
492 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
493 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
494 rcb->error_cmp_callback(softs, rcb, LE_16(response->error_index));
495 if (os_scsi_cmd)
496 pqisrc_decrement_device_active_io(softs, dvp);
497 break;
498 case PQI_RESPONSE_IU_GENERAL_MANAGEMENT:
499 rcb->req_pending = false;
500 break;
501 case PQI_RESPONSE_IU_VENDOR_GENERAL:
502 rcb->req_pending = false;
503 rcb->status = pqisrc_process_vendor_general_response(
504 (pqi_vendor_general_response_t *)response);
505 break;
506 case PQI_RESPONSE_IU_TASK_MANAGEMENT:
507 rcb->status = pqisrc_process_task_management_response(softs, (void *)response);
508 break;
509
510 default:
511 DBG_ERR("Invalid Response IU 0x%x\n",response->header.iu_type);
512 break;
513 }
514
515 oq_ci = (oq_ci + 1) % ob_q->num_elem;
516 }
517
518 ob_q->ci_local = oq_ci;
519 PCI_MEM_PUT32(softs, ob_q->ci_register_abs,
520 ob_q->ci_register_offset, ob_q->ci_local );
521 DBG_FUNC("OUT\n");
522 }
523