/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2021 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/bnxt/hsi.h>
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_coredump.h"

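/* Map backing-store context types to the segment IDs used in the coredump
 * output.  Types left out of the table default to 0 and are skipped when
 * the driver coredump is built.
 */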
static const u16 bnxt_bstore_to_seg_id[] = {
	[BNXT_CTX_QP] = BNXT_CTX_MEM_SEG_QP,
	[BNXT_CTX_SRQ] = BNXT_CTX_MEM_SEG_SRQ,
	[BNXT_CTX_CQ] = BNXT_CTX_MEM_SEG_CQ,
	[BNXT_CTX_VNIC] = BNXT_CTX_MEM_SEG_VNIC,
	[BNXT_CTX_STAT] = BNXT_CTX_MEM_SEG_STAT,
	[BNXT_CTX_STQM] = BNXT_CTX_MEM_SEG_STQM,
	[BNXT_CTX_FTQM] = BNXT_CTX_MEM_SEG_FTQM,
	[BNXT_CTX_MRAV] = BNXT_CTX_MEM_SEG_MRAV,
	[BNXT_CTX_TIM] = BNXT_CTX_MEM_SEG_TIM,
	[BNXT_CTX_SRT] = BNXT_CTX_MEM_SEG_SRT,
	[BNXT_CTX_SRT2] = BNXT_CTX_MEM_SEG_SRT2,
	[BNXT_CTX_CRT] = BNXT_CTX_MEM_SEG_CRT,
	[BNXT_CTX_CRT2] = BNXT_CTX_MEM_SEG_CRT2,
	[BNXT_CTX_RIGP0] = BNXT_CTX_MEM_SEG_RIGP0,
	[BNXT_CTX_L2HWRM] = BNXT_CTX_MEM_SEG_L2HWRM,
	[BNXT_CTX_REHWRM] = BNXT_CTX_MEM_SEG_REHWRM,
	[BNXT_CTX_CA0] = BNXT_CTX_MEM_SEG_CA0,
	[BNXT_CTX_CA1] = BNXT_CTX_MEM_SEG_CA1,
	[BNXT_CTX_CA2] = BNXT_CTX_MEM_SEG_CA2,
	[BNXT_CTX_RIGP1] = BNXT_CTX_MEM_SEG_RIGP1,
	[BNXT_CTX_KONG] = BNXT_CTX_MEM_SEG_KONG,
	[BNXT_CTX_QPC] = BNXT_CTX_MEM_SEG_QPC,
};

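/* Ask the firmware to flush the specified debug log buffer and return the
 * current write offset within that buffer via @offset.
 */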
static int bnxt_dbg_hwrm_log_buffer_flush(struct bnxt *bp, u16 type, u32 flags,
					  u32 *offset)
{
	struct hwrm_dbg_log_buffer_flush_output *resp;
	struct hwrm_dbg_log_buffer_flush_input *req;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_DBG_LOG_BUFFER_FLUSH);
	if (rc)
		return rc;

	req->flags = cpu_to_le32(flags);
	req->type = cpu_to_le16(type);
	resp = hwrm_req_hold(bp, req);
	rc = hwrm_req_send(bp, req);
	if (!rc)
		*offset = le32_to_cpu(resp->current_buffer_offset);
	hwrm_req_drop(bp, req);
	return rc;
}

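/* Common helper for the HWRM_DBG_COREDUMP_LIST and HWRM_DBG_COREDUMP_RETRIEVE
 * commands.  The firmware DMAs each chunk of data into a host buffer sliced
 * from the request; the helper copies the chunk into info->dest_buf and
 * repeats with an incremented sequence number until the firmware clears the
 * MORE flag or the destination buffer runs out of space.
 */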
static int bnxt_hwrm_dbg_dma_data(struct bnxt *bp, void *msg,
				  struct bnxt_hwrm_dbg_dma_info *info)
{
	struct hwrm_dbg_cmn_input *cmn_req = msg;
	__le16 *seq_ptr = msg + info->seq_off;
	struct hwrm_dbg_cmn_output *cmn_resp;
	u16 seq = 0, len, segs_off;
	dma_addr_t dma_handle;
	void *dma_buf, *resp;
	int rc, off = 0;

	dma_buf = hwrm_req_dma_slice(bp, msg, info->dma_len, &dma_handle);
	if (!dma_buf) {
		hwrm_req_drop(bp, msg);
		return -ENOMEM;
	}

	hwrm_req_timeout(bp, msg, bp->hwrm_cmd_max_timeout);
	cmn_resp = hwrm_req_hold(bp, msg);
	resp = cmn_resp;

	segs_off = offsetof(struct hwrm_dbg_coredump_list_output,
			    total_segments);
	cmn_req->host_dest_addr = cpu_to_le64(dma_handle);
	cmn_req->host_buf_len = cpu_to_le32(info->dma_len);
	while (1) {
		*seq_ptr = cpu_to_le16(seq);
		rc = hwrm_req_send(bp, msg);
		if (rc)
			break;

		len = le16_to_cpu(*((__le16 *)(resp + info->data_len_off)));
		if (!seq &&
		    cmn_req->req_type == cpu_to_le16(HWRM_DBG_COREDUMP_LIST)) {
			info->segs = le16_to_cpu(*((__le16 *)(resp +
							      segs_off)));
			if (!info->segs) {
				rc = -EIO;
				break;
			}

			info->dest_buf_size = info->segs *
					sizeof(struct coredump_segment_record);
			info->dest_buf = kmalloc(info->dest_buf_size,
						 GFP_KERNEL);
			if (!info->dest_buf) {
				rc = -ENOMEM;
				break;
			}
		}

		if (cmn_req->req_type ==
		    cpu_to_le16(HWRM_DBG_COREDUMP_RETRIEVE))
			info->dest_buf_size += len;

		if (info->dest_buf) {
			if ((info->seg_start + off + len) <=
			    BNXT_COREDUMP_BUF_LEN(info->buf_len)) {
				u16 copylen = min_t(u16, len,
						    info->dest_buf_size - off);

				memcpy(info->dest_buf + off, dma_buf, copylen);
				if (copylen < len)
					break;
			} else {
				rc = -ENOBUFS;
				if (cmn_req->req_type ==
				    cpu_to_le16(HWRM_DBG_COREDUMP_LIST)) {
					kfree(info->dest_buf);
					info->dest_buf = NULL;
				}
				break;
			}
		}

		if (!(cmn_resp->flags & HWRM_DBG_CMN_FLAGS_MORE))
			break;

		seq++;
		off += len;
	}
	hwrm_req_drop(bp, msg);
	return rc;
}

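/* Retrieve the list of coredump segments from the firmware.  On success
 * coredump->data holds an array of coredump_segment_record entries and
 * coredump->total_segs the number of segments reported.
 */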
static int bnxt_hwrm_dbg_coredump_list(struct bnxt *bp,
				       struct bnxt_coredump *coredump)
{
	struct bnxt_hwrm_dbg_dma_info info = {NULL};
	struct hwrm_dbg_coredump_list_input *req;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_DBG_COREDUMP_LIST);
	if (rc)
		return rc;

	info.dma_len = COREDUMP_LIST_BUF_LEN;
	info.seq_off = offsetof(struct hwrm_dbg_coredump_list_input, seq_no);
	info.data_len_off = offsetof(struct hwrm_dbg_coredump_list_output,
				     data_len);

	rc = bnxt_hwrm_dbg_dma_data(bp, req, &info);
	if (!rc) {
		coredump->data = info.dest_buf;
		coredump->data_size = info.dest_buf_size;
		coredump->total_segs = info.segs;
	}
	return rc;
}

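/* Ask the firmware to start collecting the given coredump segment.  When
 * dump_type is BNXT_DUMP_LIVE_WITH_CTX_L1_CACHE, the COLLECT_CTX_L1_CACHE
 * segment flag is set as well.
 */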
static int bnxt_hwrm_dbg_coredump_initiate(struct bnxt *bp, u16 dump_type,
					   u16 component_id, u16 segment_id)
{
	struct hwrm_dbg_coredump_initiate_input *req;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_DBG_COREDUMP_INITIATE);
	if (rc)
		return rc;

	hwrm_req_timeout(bp, req, bp->hwrm_cmd_max_timeout);
	req->component_id = cpu_to_le16(component_id);
	req->segment_id = cpu_to_le16(segment_id);
	if (dump_type == BNXT_DUMP_LIVE_WITH_CTX_L1_CACHE)
		req->seg_flags = DBG_COREDUMP_INITIATE_REQ_SEG_FLAGS_COLLECT_CTX_L1_CACHE;

	return hwrm_req_send(bp, req);
}

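/* Retrieve the data of a previously initiated coredump segment.  When a
 * destination buffer is supplied, the segment is copied to buf + offset
 * (bounded by buf_len); in either case *seg_len returns the number of
 * bytes reported for the segment.
 */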
static int bnxt_hwrm_dbg_coredump_retrieve(struct bnxt *bp, u16 component_id,
					   u16 segment_id, u32 *seg_len,
					   void *buf, u32 buf_len, u32 offset)
{
	struct hwrm_dbg_coredump_retrieve_input *req;
	struct bnxt_hwrm_dbg_dma_info info = {NULL};
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_DBG_COREDUMP_RETRIEVE);
	if (rc)
		return rc;

	req->component_id = cpu_to_le16(component_id);
	req->segment_id = cpu_to_le16(segment_id);

	info.dma_len = COREDUMP_RETRIEVE_BUF_LEN;
	info.seq_off = offsetof(struct hwrm_dbg_coredump_retrieve_input,
				seq_no);
	info.data_len_off = offsetof(struct hwrm_dbg_coredump_retrieve_output,
				     data_len);
	if (buf) {
		info.dest_buf = buf + offset;
		info.buf_len = buf_len;
		info.seg_start = offset;
	}

	rc = bnxt_hwrm_dbg_dma_data(bp, req, &info);
	if (!rc)
		*seg_len = info.dest_buf_size;

	return rc;
}

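/* Fill in the "sEgM" header that precedes each segment in the coredump.
 * Identification fields come from the firmware segment record when one is
 * provided, otherwise from the caller-supplied comp_id/seg_id.
 */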
void
bnxt_fill_coredump_seg_hdr(struct bnxt *bp,
			   struct bnxt_coredump_segment_hdr *seg_hdr,
			   struct coredump_segment_record *seg_rec, u32 seg_len,
			   int status, u32 duration, u32 instance, u32 comp_id,
			   u32 seg_id)
{
	memset(seg_hdr, 0, sizeof(*seg_hdr));
	memcpy(seg_hdr->signature, "sEgM", 4);
	if (seg_rec) {
		seg_hdr->component_id = (__force __le32)seg_rec->component_id;
		seg_hdr->segment_id = (__force __le32)seg_rec->segment_id;
		seg_hdr->low_version = seg_rec->version_low;
		seg_hdr->high_version = seg_rec->version_hi;
		seg_hdr->flags = cpu_to_le32(seg_rec->compress_flags);
	} else {
		seg_hdr->component_id = cpu_to_le32(comp_id);
		seg_hdr->segment_id = cpu_to_le32(seg_id);
	}
	seg_hdr->function_id = cpu_to_le16(bp->pdev->devfn);
	seg_hdr->length = cpu_to_le32(seg_len);
	seg_hdr->status = cpu_to_le32(status);
	seg_hdr->duration = cpu_to_le32(duration);
	seg_hdr->data_offset = cpu_to_le32(sizeof(*seg_hdr));
	seg_hdr->instance = cpu_to_le32(instance);
}

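/* Record the command line of the task that triggered the dump.  The
 * argument block is copied from user memory with the NUL separators
 * between arguments replaced by spaces; if that is not possible, fall
 * back to the task's comm name.
 */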
static void bnxt_fill_cmdline(struct bnxt_coredump_record *record)
{
	struct mm_struct *mm = current->mm;
	int i, len, last = 0;

	if (mm) {
		len = min_t(int, mm->arg_end - mm->arg_start,
			    sizeof(record->commandline) - 1);
		if (len && !copy_from_user(record->commandline,
					   (char __user *)mm->arg_start, len)) {
			for (i = 0; i < len; i++) {
				if (record->commandline[i])
					last = i;
				else
					record->commandline[i] = ' ';
			}
			record->commandline[last + 1] = 0;
			return;
		}
	}

	strscpy(record->commandline, current->comm, TASK_COMM_LEN);
}

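/* Fill in the "cOrE" record appended at the end of the coredump: dump
 * start/end timestamps, host and OS identification, command line, ASIC
 * identification, segment count and overall status.
 */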
static void
bnxt_fill_coredump_record(struct bnxt *bp, struct bnxt_coredump_record *record,
			  time64_t start, s16 start_utc, u16 total_segs,
			  int status)
{
	time64_t end = ktime_get_real_seconds();
	u32 os_ver_major = 0, os_ver_minor = 0;
	struct tm tm;

	time64_to_tm(start, 0, &tm);
	memset(record, 0, sizeof(*record));
	memcpy(record->signature, "cOrE", 4);
	record->flags = 0;
	record->low_version = 0;
	record->high_version = 1;
	record->asic_state = 0;
	strscpy(record->system_name, utsname()->nodename,
		sizeof(record->system_name));
	record->year = cpu_to_le16(tm.tm_year + 1900);
	record->month = cpu_to_le16(tm.tm_mon + 1);
	record->day = cpu_to_le16(tm.tm_mday);
	record->hour = cpu_to_le16(tm.tm_hour);
	record->minute = cpu_to_le16(tm.tm_min);
	record->second = cpu_to_le16(tm.tm_sec);
	record->utc_bias = cpu_to_le16(start_utc);
	bnxt_fill_cmdline(record);
	record->total_segments = cpu_to_le32(total_segs);

	if (sscanf(utsname()->release, "%u.%u", &os_ver_major, &os_ver_minor) != 2)
		netdev_warn(bp->dev, "Unknown OS release in coredump\n");
	record->os_ver_major = cpu_to_le32(os_ver_major);
	record->os_ver_minor = cpu_to_le32(os_ver_minor);

	strscpy(record->os_name, utsname()->sysname, sizeof(record->os_name));
	time64_to_tm(end, 0, &tm);
	record->end_year = cpu_to_le16(tm.tm_year + 1900);
	record->end_month = cpu_to_le16(tm.tm_mon + 1);
	record->end_day = cpu_to_le16(tm.tm_mday);
	record->end_hour = cpu_to_le16(tm.tm_hour);
	record->end_minute = cpu_to_le16(tm.tm_min);
	record->end_second = cpu_to_le16(tm.tm_sec);
	record->end_utc_bias = cpu_to_le16(sys_tz.tz_minuteswest * 60);
	record->asic_id1 = cpu_to_le32(bp->chip_num << 16 |
				       bp->ver_resp.chip_rev << 8 |
				       bp->ver_resp.chip_metal);
	record->asic_id2 = 0;
	record->coredump_status = cpu_to_le32(status);
	record->ioctl_low_version = 0;
	record->ioctl_high_version = 0;
}

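/* Fill in the driver segment record that prefixes a backing-store trace
 * segment: ring geometry from the context memory type, plus the current
 * firmware write offset and wrap state obtained by flushing the
 * corresponding log buffer.
 */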
static void bnxt_fill_drv_seg_record(struct bnxt *bp,
				     struct bnxt_driver_segment_record *record,
				     struct bnxt_ctx_mem_type *ctxm, u16 type)
{
	struct bnxt_bs_trace_info *bs_trace = &bp->bs_trace[type];
	u32 offset = 0;
	int rc = 0;

	record->max_entries = cpu_to_le32(ctxm->max_entries);
	record->entry_size = cpu_to_le32(ctxm->entry_size);

	rc = bnxt_dbg_hwrm_log_buffer_flush(bp, type, 0, &offset);
	if (rc)
		return;

	bnxt_bs_trace_check_wrap(bs_trace, offset);
	record->offset = cpu_to_le32(bs_trace->last_offset);
	record->wrapped = bs_trace->wrapped;
}

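/* Build the driver (context memory) portion of the coredump.  Each valid
 * backing-store context type with a segment ID mapping gets a segment
 * header, an optional driver trace record, and a copy of its context
 * memory.  Called with buf == NULL to compute the required length only.
 * Returns the number of bytes produced and the segment count via @segs.
 */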
static u32 bnxt_get_ctx_coredump(struct bnxt *bp, void *buf, u32 offset,
				 u32 *segs)
{
	struct bnxt_driver_segment_record record = {};
	struct bnxt_coredump_segment_hdr seg_hdr;
	struct bnxt_ctx_mem_info *ctx = bp->ctx;
	u32 comp_id = BNXT_DRV_COMP_ID;
	void *data = NULL;
	size_t len = 0;
	u16 type;

	*segs = 0;
	if (!ctx)
		return 0;

	if (buf)
		buf += offset;
	for (type = 0; type < BNXT_CTX_V2_MAX; type++) {
		struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
		bool trace = bnxt_bs_trace_avail(bp, type);
		u32 seg_id = bnxt_bstore_to_seg_id[type];
		size_t seg_len, extra_hlen = 0;

		if (!ctxm->mem_valid || !seg_id)
			continue;

		if (trace) {
			extra_hlen = BNXT_SEG_RCD_LEN;
			if (buf) {
				u16 trace_type = bnxt_bstore_to_trace[type];

				bnxt_fill_drv_seg_record(bp, &record, ctxm,
							 trace_type);
			}
		}

		if (buf)
			data = buf + BNXT_SEG_HDR_LEN + extra_hlen;

		seg_len = bnxt_copy_ctx_mem(bp, ctxm, data, 0) + extra_hlen;
		if (buf) {
			bnxt_fill_coredump_seg_hdr(bp, &seg_hdr, NULL, seg_len,
						   0, 0, 0, comp_id, seg_id);
			memcpy(buf, &seg_hdr, BNXT_SEG_HDR_LEN);
			buf += BNXT_SEG_HDR_LEN;
			if (trace)
				memcpy(buf, &record, BNXT_SEG_RCD_LEN);
			buf += seg_len;
		}
		len += BNXT_SEG_HDR_LEN + seg_len;
		*segs += 1;
	}
	return len;
}

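/* Core coredump builder.  The dump always starts with a segment header plus
 * the cached HWRM_VER_GET response.  A driver dump then appends the context
 * memory segments; other dump types walk the firmware segment list,
 * initiating and retrieving each segment in turn.  A bnxt_coredump_record
 * is appended at the end.  With buf == NULL, only *dump_len is computed.
 */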
static int __bnxt_get_coredump(struct bnxt *bp, u16 dump_type, void *buf,
			       u32 *dump_len)
{
	u32 ver_get_resp_len = sizeof(struct hwrm_ver_get_output);
	u32 offset = 0, seg_hdr_len, seg_record_len, buf_len = 0;
	struct coredump_segment_record *seg_record = NULL;
	struct bnxt_coredump_segment_hdr seg_hdr;
	struct bnxt_coredump coredump = {NULL};
	time64_t start_time;
	u16 start_utc;
	int rc = 0, i;

	if (buf)
		buf_len = *dump_len;

	start_time = ktime_get_real_seconds();
	start_utc = sys_tz.tz_minuteswest * 60;
	seg_hdr_len = sizeof(seg_hdr);

	/* First segment should be hwrm_ver_get response.
	 * For hwrm_ver_get response Component id = 2 and Segment id = 0.
	 */
	*dump_len = seg_hdr_len + ver_get_resp_len;
	if (buf) {
		bnxt_fill_coredump_seg_hdr(bp, &seg_hdr, NULL, ver_get_resp_len,
					   0, 0, 0, BNXT_VER_GET_COMP_ID, 0);
		memcpy(buf + offset, &seg_hdr, seg_hdr_len);
		offset += seg_hdr_len;
		memcpy(buf + offset, &bp->ver_resp, ver_get_resp_len);
		offset += ver_get_resp_len;
	}

	if (dump_type == BNXT_DUMP_DRIVER) {
		u32 drv_len, segs = 0;

		drv_len = bnxt_get_ctx_coredump(bp, buf, offset, &segs);
		*dump_len += drv_len;
		offset += drv_len;
		if (buf)
			coredump.total_segs += segs;
		goto err;
	}

	seg_record_len = sizeof(*seg_record);
	rc = bnxt_hwrm_dbg_coredump_list(bp, &coredump);
	if (rc) {
		netdev_err(bp->dev, "Failed to get coredump segment list\n");
		goto err;
	}

	*dump_len += seg_hdr_len * coredump.total_segs;

	seg_record = (struct coredump_segment_record *)coredump.data;
	seg_record_len = sizeof(*seg_record);

	for (i = 0; i < coredump.total_segs; i++) {
		u16 comp_id = le16_to_cpu(seg_record->component_id);
		u16 seg_id = le16_to_cpu(seg_record->segment_id);
		u32 duration = 0, seg_len = 0;
		unsigned long start, end;

		if (buf && ((offset + seg_hdr_len) >
			    BNXT_COREDUMP_BUF_LEN(buf_len))) {
			rc = -ENOBUFS;
			goto err;
		}

		start = jiffies;

		rc = bnxt_hwrm_dbg_coredump_initiate(bp, dump_type, comp_id,
						     seg_id);
		if (rc) {
			netdev_err(bp->dev,
				   "Failed to initiate coredump for seg = %d\n",
				   seg_record->segment_id);
			goto next_seg;
		}

		/* Write segment data into the buffer */
		rc = bnxt_hwrm_dbg_coredump_retrieve(bp, comp_id, seg_id,
						     &seg_len, buf, buf_len,
						     offset + seg_hdr_len);
		if (rc && rc == -ENOBUFS)
			goto err;
		else if (rc)
			netdev_err(bp->dev,
				   "Failed to retrieve coredump for seg = %d\n",
				   seg_record->segment_id);

next_seg:
		end = jiffies;
		duration = jiffies_to_msecs(end - start);
		bnxt_fill_coredump_seg_hdr(bp, &seg_hdr, seg_record, seg_len,
					   rc, duration, 0, 0, 0);

		if (buf) {
			/* Write segment header into the buffer */
			memcpy(buf + offset, &seg_hdr, seg_hdr_len);
			offset += seg_hdr_len + seg_len;
		}

		*dump_len += seg_len;
		seg_record =
			(struct coredump_segment_record *)((u8 *)seg_record +
							   seg_record_len);
	}

err:
	if (buf)
		bnxt_fill_coredump_record(bp, buf + offset, start_time,
					  start_utc, coredump.total_segs + 1,
					  rc);
	kfree(coredump.data);
	if (!rc) {
		*dump_len += sizeof(struct bnxt_coredump_record);
		/* The actual coredump length can be smaller than the FW
		 * reported length earlier. Use the ethtool provided length.
		 */
		if (buf_len)
			*dump_len = buf_len;
	} else if (rc == -ENOBUFS) {
		netdev_err(bp->dev, "Firmware returned large coredump buffer\n");
	}
	return rc;
}

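/* Copy up to dump_len bytes out of a crash-memory ring, page by page, and
 * return the number of bytes copied.
 */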
static u32 bnxt_copy_crash_data(struct bnxt_ring_mem_info *rmem, void *buf,
				u32 dump_len)
{
	u32 data_copied = 0;
	u32 data_len;
	int i;

	for (i = 0; i < rmem->nr_pages; i++) {
		data_len = rmem->page_size;
		if (data_copied + data_len > dump_len)
			data_len = dump_len - data_copied;
		memcpy(buf + data_copied, rmem->pg_arr[i], data_len);
		data_copied += data_len;
		if (data_copied >= dump_len)
			break;
	}
	return data_copied;
}

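/* Copy the crash dump captured in host memory into buf.  A multi-level
 * ring is walked one page-table entry at a time; a single-level ring is
 * copied directly.
 */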
static int bnxt_copy_crash_dump(struct bnxt *bp, void *buf, u32 dump_len)
{
	struct bnxt_ring_mem_info *rmem;
	u32 offset = 0;

	if (!bp->fw_crash_mem)
		return -ENOENT;

	rmem = &bp->fw_crash_mem->ring_mem;

	if (rmem->depth > 1) {
		int i;

		for (i = 0; i < rmem->nr_pages; i++) {
			struct bnxt_ctx_pg_info *pg_tbl;

			pg_tbl = bp->fw_crash_mem->ctx_pg_tbl[i];
			offset += bnxt_copy_crash_data(&pg_tbl->ring_mem,
						       buf + offset,
						       dump_len - offset);
			if (offset >= dump_len)
				break;
		}
	} else {
		bnxt_copy_crash_data(rmem, buf, dump_len);
	}

	return 0;
}

static bool bnxt_crash_dump_avail(struct bnxt *bp)
{
	u32 sig = 0;

	/* The first 4 bytes (signature) of a crash dump are always non-zero */
	bnxt_copy_crash_dump(bp, &sig, sizeof(sig));
	return !!sig;
}

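/* Top-level coredump dispatch.  Crash dumps are copied from host memory
 * when the firmware dumps to host DDR, or fetched through the TEE
 * interface when it dumps to SoC DDR; every other dump type is built by
 * __bnxt_get_coredump().
 */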
int bnxt_get_coredump(struct bnxt *bp, u16 dump_type, void *buf, u32 *dump_len)
{
	if (dump_type == BNXT_DUMP_CRASH) {
		if (bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR)
			return bnxt_copy_crash_dump(bp, buf, *dump_len);
#ifdef CONFIG_TEE_BNXT_FW
		else if (bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_SOC_DDR)
			return tee_bnxt_copy_coredump(buf, 0, *dump_len);
#endif
		else
			return -EOPNOTSUPP;
	} else {
		return __bnxt_get_coredump(bp, dump_type, buf, dump_len);
	}
}

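/* Query the firmware (HWRM_DBG_QCFG) for the expected dump size.  Crash
 * dump sizes depend on whether the dump lives in SoC or host DDR; for
 * other dump types the driver-added header, HWRM_VER_GET segment and
 * trailing coredump record are included in the reported length.
 */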
int bnxt_hwrm_get_dump_len(struct bnxt *bp, u16 dump_type, u32 *dump_len)
{
	struct hwrm_dbg_qcfg_output *resp;
	struct hwrm_dbg_qcfg_input *req;
	int rc, hdr_len = 0;

	if (!(bp->fw_cap & BNXT_FW_CAP_DBG_QCAPS))
		return -EOPNOTSUPP;

	if (dump_type == BNXT_DUMP_CRASH &&
	    !(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_SOC_DDR ||
	     (bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR)))
		return -EOPNOTSUPP;

	rc = hwrm_req_init(bp, req, HWRM_DBG_QCFG);
	if (rc)
		return rc;

	req->fid = cpu_to_le16(0xffff);
	if (dump_type == BNXT_DUMP_CRASH) {
		if (bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_SOC_DDR)
			req->flags = cpu_to_le16(BNXT_DBG_FL_CR_DUMP_SIZE_SOC);
		else
			req->flags = cpu_to_le16(BNXT_DBG_FL_CR_DUMP_SIZE_HOST);
	}

	resp = hwrm_req_hold(bp, req);
	rc = hwrm_req_send(bp, req);
	if (rc)
		goto get_dump_len_exit;

	if (dump_type == BNXT_DUMP_CRASH) {
		if (bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_SOC_DDR)
			*dump_len = BNXT_CRASH_DUMP_LEN;
		else
			*dump_len = le32_to_cpu(resp->crashdump_size);
	} else {
		/* The driver adds the coredump header, the "HWRM_VER_GET
		 * response" segment and the trailing coredump record on top
		 * of the firmware coredump.
		 */
		hdr_len = sizeof(struct bnxt_coredump_segment_hdr) +
			  sizeof(struct hwrm_ver_get_output) +
			  sizeof(struct bnxt_coredump_record);
		*dump_len = le32_to_cpu(resp->coredump_size) + hdr_len;
	}
	if (*dump_len <= hdr_len)
		rc = -EINVAL;

get_dump_len_exit:
	hwrm_req_drop(bp, req);
	return rc;
}

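/* Return the buffer length needed for the requested dump type: the
 * captured crash-dump length when one is present in host memory, the
 * firmware-reported length for other firmware dumps, or a length computed
 * by a buffer-less __bnxt_get_coredump() pass (always used for the driver
 * dump, and as a fallback when the firmware query fails).
 */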
u32 bnxt_get_coredump_length(struct bnxt *bp, u16 dump_type)
{
	u32 len = 0;

	if (dump_type == BNXT_DUMP_CRASH &&
	    bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR &&
	    bp->fw_crash_mem) {
		if (!bnxt_crash_dump_avail(bp))
			return 0;

		return bp->fw_crash_len;
	}

	if (dump_type != BNXT_DUMP_DRIVER) {
		if (!bnxt_hwrm_get_dump_len(bp, dump_type, &len))
			return len;
	}
	if (dump_type != BNXT_DUMP_CRASH)
		__bnxt_get_coredump(bp, dump_type, NULL, &len);

	return len;
}