/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2021 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_coredump.h"

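/* Map backing store context types (BNXT_CTX_*) to the coredump segment IDs
 * used when the driver emits its own context memory segments.
 */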
static const u16 bnxt_bstore_to_seg_id[] = {
	[BNXT_CTX_QP]			= BNXT_CTX_MEM_SEG_QP,
	[BNXT_CTX_SRQ]			= BNXT_CTX_MEM_SEG_SRQ,
	[BNXT_CTX_CQ]			= BNXT_CTX_MEM_SEG_CQ,
	[BNXT_CTX_VNIC]			= BNXT_CTX_MEM_SEG_VNIC,
	[BNXT_CTX_STAT]			= BNXT_CTX_MEM_SEG_STAT,
	[BNXT_CTX_STQM]			= BNXT_CTX_MEM_SEG_STQM,
	[BNXT_CTX_FTQM]			= BNXT_CTX_MEM_SEG_FTQM,
	[BNXT_CTX_MRAV]			= BNXT_CTX_MEM_SEG_MRAV,
	[BNXT_CTX_TIM]			= BNXT_CTX_MEM_SEG_TIM,
	[BNXT_CTX_SRT]			= BNXT_CTX_MEM_SEG_SRT,
	[BNXT_CTX_SRT2]			= BNXT_CTX_MEM_SEG_SRT2,
	[BNXT_CTX_CRT]			= BNXT_CTX_MEM_SEG_CRT,
	[BNXT_CTX_CRT2]			= BNXT_CTX_MEM_SEG_CRT2,
	[BNXT_CTX_RIGP0]		= BNXT_CTX_MEM_SEG_RIGP0,
	[BNXT_CTX_L2HWRM]		= BNXT_CTX_MEM_SEG_L2HWRM,
	[BNXT_CTX_REHWRM]		= BNXT_CTX_MEM_SEG_REHWRM,
	[BNXT_CTX_CA0]			= BNXT_CTX_MEM_SEG_CA0,
	[BNXT_CTX_CA1]			= BNXT_CTX_MEM_SEG_CA1,
	[BNXT_CTX_CA2]			= BNXT_CTX_MEM_SEG_CA2,
	[BNXT_CTX_RIGP1]		= BNXT_CTX_MEM_SEG_RIGP1,
};

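/* Ask firmware to flush the trace log buffer of the given type and return
 * the current write offset within that buffer.
 */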
static int bnxt_dbg_hwrm_log_buffer_flush(struct bnxt *bp, u16 type, u32 flags,
					  u32 *offset)
{
	struct hwrm_dbg_log_buffer_flush_output *resp;
	struct hwrm_dbg_log_buffer_flush_input *req;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_DBG_LOG_BUFFER_FLUSH);
	if (rc)
		return rc;

	req->flags = cpu_to_le32(flags);
	req->type = cpu_to_le16(type);
	resp = hwrm_req_hold(bp, req);
	rc = hwrm_req_send(bp, req);
	if (!rc)
		*offset = le32_to_cpu(resp->current_buffer_offset);
	hwrm_req_drop(bp, req);
	return rc;
}

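/* Common helper for HWRM_DBG_COREDUMP_LIST/RETRIEVE.  Firmware DMAs each
 * chunk into a host buffer slice; the loop advances the sequence number and
 * copies every chunk into info->dest_buf until the MORE flag clears.
 */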
static int bnxt_hwrm_dbg_dma_data(struct bnxt *bp, void *msg,
				  struct bnxt_hwrm_dbg_dma_info *info)
{
	struct hwrm_dbg_cmn_input *cmn_req = msg;
	__le16 *seq_ptr = msg + info->seq_off;
	struct hwrm_dbg_cmn_output *cmn_resp;
	u16 seq = 0, len, segs_off;
	dma_addr_t dma_handle;
	void *dma_buf, *resp;
	int rc, off = 0;

	dma_buf = hwrm_req_dma_slice(bp, msg, info->dma_len, &dma_handle);
	if (!dma_buf) {
		hwrm_req_drop(bp, msg);
		return -ENOMEM;
	}

	hwrm_req_timeout(bp, msg, bp->hwrm_cmd_max_timeout);
	cmn_resp = hwrm_req_hold(bp, msg);
	resp = cmn_resp;

	segs_off = offsetof(struct hwrm_dbg_coredump_list_output,
			    total_segments);
	cmn_req->host_dest_addr = cpu_to_le64(dma_handle);
	cmn_req->host_buf_len = cpu_to_le32(info->dma_len);
	while (1) {
		*seq_ptr = cpu_to_le16(seq);
		rc = hwrm_req_send(bp, msg);
		if (rc)
			break;

		len = le16_to_cpu(*((__le16 *)(resp + info->data_len_off)));
		if (!seq &&
		    cmn_req->req_type == cpu_to_le16(HWRM_DBG_COREDUMP_LIST)) {
			info->segs = le16_to_cpu(*((__le16 *)(resp +
							      segs_off)));
			if (!info->segs) {
				rc = -EIO;
				break;
			}

			info->dest_buf_size = info->segs *
					sizeof(struct coredump_segment_record);
			info->dest_buf = kmalloc(info->dest_buf_size,
						 GFP_KERNEL);
			if (!info->dest_buf) {
				rc = -ENOMEM;
				break;
			}
		}

		if (info->dest_buf) {
			if ((info->seg_start + off + len) <=
			    BNXT_COREDUMP_BUF_LEN(info->buf_len)) {
				memcpy(info->dest_buf + off, dma_buf, len);
			} else {
				rc = -ENOBUFS;
				break;
			}
		}

		if (cmn_req->req_type ==
				cpu_to_le16(HWRM_DBG_COREDUMP_RETRIEVE))
			info->dest_buf_size += len;

		if (!(cmn_resp->flags & HWRM_DBG_CMN_FLAGS_MORE))
			break;

		seq++;
		off += len;
	}
	hwrm_req_drop(bp, msg);
	return rc;
}

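/* Fetch the list of coredump segment records from firmware and hand the
 * allocated list back through @coredump.
 */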
static int bnxt_hwrm_dbg_coredump_list(struct bnxt *bp,
				       struct bnxt_coredump *coredump)
{
	struct bnxt_hwrm_dbg_dma_info info = {NULL};
	struct hwrm_dbg_coredump_list_input *req;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_DBG_COREDUMP_LIST);
	if (rc)
		return rc;

	info.dma_len = COREDUMP_LIST_BUF_LEN;
	info.seq_off = offsetof(struct hwrm_dbg_coredump_list_input, seq_no);
	info.data_len_off = offsetof(struct hwrm_dbg_coredump_list_output,
				     data_len);

	rc = bnxt_hwrm_dbg_dma_data(bp, req, &info);
	if (!rc) {
		coredump->data = info.dest_buf;
		coredump->data_size = info.dest_buf_size;
		coredump->total_segs = info.segs;
	}
	return rc;
}

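/* Tell firmware to prepare the given component/segment for retrieval. */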
static int bnxt_hwrm_dbg_coredump_initiate(struct bnxt *bp, u16 component_id,
					   u16 segment_id)
{
	struct hwrm_dbg_coredump_initiate_input *req;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_DBG_COREDUMP_INITIATE);
	if (rc)
		return rc;

	hwrm_req_timeout(bp, req, bp->hwrm_cmd_max_timeout);
	req->component_id = cpu_to_le16(component_id);
	req->segment_id = cpu_to_le16(segment_id);

	return hwrm_req_send(bp, req);
}

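/* Retrieve one coredump segment from firmware.  When @buf is NULL only the
 * segment length is computed and returned through @seg_len.
 */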
static int bnxt_hwrm_dbg_coredump_retrieve(struct bnxt *bp, u16 component_id,
					   u16 segment_id, u32 *seg_len,
					   void *buf, u32 buf_len, u32 offset)
{
	struct hwrm_dbg_coredump_retrieve_input *req;
	struct bnxt_hwrm_dbg_dma_info info = {NULL};
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_DBG_COREDUMP_RETRIEVE);
	if (rc)
		return rc;

	req->component_id = cpu_to_le16(component_id);
	req->segment_id = cpu_to_le16(segment_id);

	info.dma_len = COREDUMP_RETRIEVE_BUF_LEN;
	info.seq_off = offsetof(struct hwrm_dbg_coredump_retrieve_input,
				seq_no);
	info.data_len_off = offsetof(struct hwrm_dbg_coredump_retrieve_output,
				     data_len);
	if (buf) {
		info.dest_buf = buf + offset;
		info.buf_len = buf_len;
		info.seg_start = offset;
	}

	rc = bnxt_hwrm_dbg_dma_data(bp, req, &info);
	if (!rc)
		*seg_len = info.dest_buf_size;

	return rc;
}

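/* Fill a "sEgM" segment header.  Version and flags come from the firmware
 * segment record when one is supplied; otherwise the caller's component and
 * segment IDs are used directly.
 */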
void
bnxt_fill_coredump_seg_hdr(struct bnxt *bp,
			   struct bnxt_coredump_segment_hdr *seg_hdr,
			   struct coredump_segment_record *seg_rec, u32 seg_len,
			   int status, u32 duration, u32 instance, u32 comp_id,
			   u32 seg_id)
{
	memset(seg_hdr, 0, sizeof(*seg_hdr));
	memcpy(seg_hdr->signature, "sEgM", 4);
	if (seg_rec) {
		seg_hdr->component_id = (__force __le32)seg_rec->component_id;
		seg_hdr->segment_id = (__force __le32)seg_rec->segment_id;
		seg_hdr->low_version = seg_rec->version_low;
		seg_hdr->high_version = seg_rec->version_hi;
		seg_hdr->flags = cpu_to_le32(seg_rec->compress_flags);
	} else {
		seg_hdr->component_id = cpu_to_le32(comp_id);
		seg_hdr->segment_id = cpu_to_le32(seg_id);
	}
	seg_hdr->function_id = cpu_to_le16(bp->pdev->devfn);
	seg_hdr->length = cpu_to_le32(seg_len);
	seg_hdr->status = cpu_to_le32(status);
	seg_hdr->duration = cpu_to_le32(duration);
	seg_hdr->data_offset = cpu_to_le32(sizeof(*seg_hdr));
	seg_hdr->instance = cpu_to_le32(instance);
}

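/* Record the command line of the process that requested the dump, falling
 * back to the task name if the arguments cannot be copied from user space.
 */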
static void bnxt_fill_cmdline(struct bnxt_coredump_record *record)
{
	struct mm_struct *mm = current->mm;
	int i, len, last = 0;

	if (mm) {
		len = min_t(int, mm->arg_end - mm->arg_start,
			    sizeof(record->commandline) - 1);
		if (len && !copy_from_user(record->commandline,
					   (char __user *)mm->arg_start, len)) {
			for (i = 0; i < len; i++) {
				if (record->commandline[i])
					last = i;
				else
					record->commandline[i] = ' ';
			}
			record->commandline[last + 1] = 0;
			return;
		}
	}

	strscpy(record->commandline, current->comm, TASK_COMM_LEN);
}

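/* Fill the trailing "cOrE" coredump record with start/end timestamps, host
 * and OS identification, the ASIC ID and the overall dump status.
 */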
static void
bnxt_fill_coredump_record(struct bnxt *bp, struct bnxt_coredump_record *record,
			  time64_t start, s16 start_utc, u16 total_segs,
			  int status)
{
	time64_t end = ktime_get_real_seconds();
	u32 os_ver_major = 0, os_ver_minor = 0;
	struct tm tm;

	time64_to_tm(start, 0, &tm);
	memset(record, 0, sizeof(*record));
	memcpy(record->signature, "cOrE", 4);
	record->flags = 0;
	record->low_version = 0;
	record->high_version = 1;
	record->asic_state = 0;
	strscpy(record->system_name, utsname()->nodename,
		sizeof(record->system_name));
	record->year = cpu_to_le16(tm.tm_year + 1900);
	record->month = cpu_to_le16(tm.tm_mon + 1);
	record->day = cpu_to_le16(tm.tm_mday);
	record->hour = cpu_to_le16(tm.tm_hour);
	record->minute = cpu_to_le16(tm.tm_min);
	record->second = cpu_to_le16(tm.tm_sec);
	record->utc_bias = cpu_to_le16(start_utc);
	bnxt_fill_cmdline(record);
	record->total_segments = cpu_to_le32(total_segs);

	if (sscanf(utsname()->release, "%u.%u", &os_ver_major, &os_ver_minor) != 2)
		netdev_warn(bp->dev, "Unknown OS release in coredump\n");
	record->os_ver_major = cpu_to_le32(os_ver_major);
	record->os_ver_minor = cpu_to_le32(os_ver_minor);

	strscpy(record->os_name, utsname()->sysname, sizeof(record->os_name));
	time64_to_tm(end, 0, &tm);
	record->end_year = cpu_to_le16(tm.tm_year + 1900);
	record->end_month = cpu_to_le16(tm.tm_mon + 1);
	record->end_day = cpu_to_le16(tm.tm_mday);
	record->end_hour = cpu_to_le16(tm.tm_hour);
	record->end_minute = cpu_to_le16(tm.tm_min);
	record->end_second = cpu_to_le16(tm.tm_sec);
	record->end_utc_bias = cpu_to_le16(sys_tz.tz_minuteswest * 60);
	record->asic_id1 = cpu_to_le32(bp->chip_num << 16 |
				       bp->ver_resp.chip_rev << 8 |
				       bp->ver_resp.chip_metal);
	record->asic_id2 = 0;
	record->coredump_status = cpu_to_le32(status);
	record->ioctl_low_version = 0;
	record->ioctl_high_version = 0;
}

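/* Fill the driver trace segment record: flush the firmware log buffer for
 * this trace type, then report ring geometry, last offset and wrap state.
 */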
static void bnxt_fill_drv_seg_record(struct bnxt *bp,
				     struct bnxt_driver_segment_record *record,
				     struct bnxt_ctx_mem_type *ctxm, u16 type)
{
	struct bnxt_bs_trace_info *bs_trace = &bp->bs_trace[type];
	u32 offset = 0;
	int rc = 0;

	rc = bnxt_dbg_hwrm_log_buffer_flush(bp, type, 0, &offset);
	if (rc)
		return;

	bnxt_bs_trace_check_wrap(bs_trace, offset);
	record->max_entries = cpu_to_le32(ctxm->max_entries);
	record->entry_size = cpu_to_le32(ctxm->entry_size);
	record->offset = cpu_to_le32(bs_trace->last_offset);
	record->wrapped = bs_trace->wrapped;
}

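/* Copy all valid backing store context regions into @buf as driver coredump
 * segments, prefixing trace-capable types with a driver segment record.
 * Returns the total length added; with a NULL @buf only the length and the
 * segment count are computed.
 */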
static u32 bnxt_get_ctx_coredump(struct bnxt *bp, void *buf, u32 offset,
				 u32 *segs)
{
	struct bnxt_driver_segment_record record = {};
	struct bnxt_coredump_segment_hdr seg_hdr;
	struct bnxt_ctx_mem_info *ctx = bp->ctx;
	u32 comp_id = BNXT_DRV_COMP_ID;
	void *data = NULL;
	size_t len = 0;
	u16 type;

	*segs = 0;
	if (!ctx)
		return 0;

	if (buf)
		buf += offset;
	for (type = 0 ; type <= BNXT_CTX_RIGP1; type++) {
		struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
		bool trace = bnxt_bs_trace_avail(bp, type);
		u32 seg_id = bnxt_bstore_to_seg_id[type];
		size_t seg_len, extra_hlen = 0;

		if (!ctxm->mem_valid || !seg_id)
			continue;

		if (trace)
			extra_hlen = BNXT_SEG_RCD_LEN;
		if (buf)
			data = buf + BNXT_SEG_HDR_LEN + extra_hlen;
		seg_len = bnxt_copy_ctx_mem(bp, ctxm, data, 0) + extra_hlen;
		if (buf) {
			bnxt_fill_coredump_seg_hdr(bp, &seg_hdr, NULL, seg_len,
						   0, 0, 0, comp_id, seg_id);
			memcpy(buf, &seg_hdr, BNXT_SEG_HDR_LEN);
			buf += BNXT_SEG_HDR_LEN;
			if (trace) {
				u16 trace_type = bnxt_bstore_to_trace[type];

				bnxt_fill_drv_seg_record(bp, &record, ctxm,
							 trace_type);
				memcpy(buf, &record, BNXT_SEG_RCD_LEN);
			}
			buf += seg_len;
		}
		len += BNXT_SEG_HDR_LEN + seg_len;
		*segs += 1;
	}
	return len;
}

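/* Build the coredump: a HWRM_VER_GET segment first, then either the driver
 * context segments (BNXT_DUMP_DRIVER) or the firmware segments obtained via
 * the LIST/INITIATE/RETRIEVE commands, and finally the coredump record.
 * With a NULL @buf only the required length is accumulated in @dump_len.
 */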
static int __bnxt_get_coredump(struct bnxt *bp, u16 dump_type, void *buf,
			       u32 *dump_len)
{
	u32 ver_get_resp_len = sizeof(struct hwrm_ver_get_output);
	u32 offset = 0, seg_hdr_len, seg_record_len, buf_len = 0;
	struct coredump_segment_record *seg_record = NULL;
	struct bnxt_coredump_segment_hdr seg_hdr;
	struct bnxt_coredump coredump = {NULL};
	time64_t start_time;
	u16 start_utc;
	int rc = 0, i;

	if (buf)
		buf_len = *dump_len;

	start_time = ktime_get_real_seconds();
	start_utc = sys_tz.tz_minuteswest * 60;
	seg_hdr_len = sizeof(seg_hdr);

	/* First segment should be hwrm_ver_get response.
	 * For hwrm_ver_get response Component id = 2 and Segment id = 0.
	 */
	*dump_len = seg_hdr_len + ver_get_resp_len;
	if (buf) {
		bnxt_fill_coredump_seg_hdr(bp, &seg_hdr, NULL, ver_get_resp_len,
					   0, 0, 0, BNXT_VER_GET_COMP_ID, 0);
		memcpy(buf + offset, &seg_hdr, seg_hdr_len);
		offset += seg_hdr_len;
		memcpy(buf + offset, &bp->ver_resp, ver_get_resp_len);
		offset += ver_get_resp_len;
	}

	if (dump_type == BNXT_DUMP_DRIVER) {
		u32 drv_len, segs = 0;

		drv_len = bnxt_get_ctx_coredump(bp, buf, offset, &segs);
		*dump_len += drv_len;
		offset += drv_len;
		if (buf)
			coredump.total_segs += segs;
		goto err;
	}

	seg_record_len = sizeof(*seg_record);
	rc = bnxt_hwrm_dbg_coredump_list(bp, &coredump);
	if (rc) {
		netdev_err(bp->dev, "Failed to get coredump segment list\n");
		goto err;
	}

	*dump_len += seg_hdr_len * coredump.total_segs;

	seg_record = (struct coredump_segment_record *)coredump.data;
	seg_record_len = sizeof(*seg_record);

	for (i = 0; i < coredump.total_segs; i++) {
		u16 comp_id = le16_to_cpu(seg_record->component_id);
		u16 seg_id = le16_to_cpu(seg_record->segment_id);
		u32 duration = 0, seg_len = 0;
		unsigned long start, end;

		if (buf && ((offset + seg_hdr_len) >
			    BNXT_COREDUMP_BUF_LEN(buf_len))) {
			rc = -ENOBUFS;
			goto err;
		}

		start = jiffies;

		rc = bnxt_hwrm_dbg_coredump_initiate(bp, comp_id, seg_id);
		if (rc) {
			netdev_err(bp->dev,
				   "Failed to initiate coredump for seg = %d\n",
				   seg_record->segment_id);
			goto next_seg;
		}

		/* Write segment data into the buffer */
		rc = bnxt_hwrm_dbg_coredump_retrieve(bp, comp_id, seg_id,
						     &seg_len, buf, buf_len,
						     offset + seg_hdr_len);
		if (rc && rc == -ENOBUFS)
			goto err;
		else if (rc)
			netdev_err(bp->dev,
				   "Failed to retrieve coredump for seg = %d\n",
				   seg_record->segment_id);

next_seg:
		end = jiffies;
		duration = jiffies_to_msecs(end - start);
		bnxt_fill_coredump_seg_hdr(bp, &seg_hdr, seg_record, seg_len,
					   rc, duration, 0, 0, 0);

		if (buf) {
			/* Write segment header into the buffer */
			memcpy(buf + offset, &seg_hdr, seg_hdr_len);
			offset += seg_hdr_len + seg_len;
		}

		*dump_len += seg_len;
		seg_record =
			(struct coredump_segment_record *)((u8 *)seg_record +
							   seg_record_len);
	}

err:
	if (buf)
		bnxt_fill_coredump_record(bp, buf + offset, start_time,
					  start_utc, coredump.total_segs + 1,
					  rc);
	kfree(coredump.data);
	*dump_len += sizeof(struct bnxt_coredump_record);
	if (rc == -ENOBUFS)
		netdev_err(bp->dev, "Firmware returned large coredump buffer\n");
	return rc;
}

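/* Copy up to @dump_len bytes of crash data out of one ring's pages and
 * return the number of bytes copied.
 */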
static u32 bnxt_copy_crash_data(struct bnxt_ring_mem_info *rmem, void *buf,
				u32 dump_len)
{
	u32 data_copied = 0;
	u32 data_len;
	int i;

	for (i = 0; i < rmem->nr_pages; i++) {
		data_len = rmem->page_size;
		if (data_copied + data_len > dump_len)
			data_len = dump_len - data_copied;
		memcpy(buf + data_copied, rmem->pg_arr[i], data_len);
		data_copied += data_len;
		if (data_copied >= dump_len)
			break;
	}
	return data_copied;
}

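/* Copy the firmware crash dump kept in host memory into @buf, walking the
 * indirect page tables when the ring memory uses more than one level.
 */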
static int bnxt_copy_crash_dump(struct bnxt *bp, void *buf, u32 dump_len)
{
	struct bnxt_ring_mem_info *rmem;
	u32 offset = 0;

	if (!bp->fw_crash_mem)
		return -ENOENT;

	rmem = &bp->fw_crash_mem->ring_mem;

	if (rmem->depth > 1) {
		int i;

		for (i = 0; i < rmem->nr_pages; i++) {
			struct bnxt_ctx_pg_info *pg_tbl;

			pg_tbl = bp->fw_crash_mem->ctx_pg_tbl[i];
			offset += bnxt_copy_crash_data(&pg_tbl->ring_mem,
						       buf + offset,
						       dump_len - offset);
			if (offset >= dump_len)
				break;
		}
	} else {
		bnxt_copy_crash_data(rmem, buf, dump_len);
	}

	return 0;
}

static bool bnxt_crash_dump_avail(struct bnxt *bp)
{
	u32 sig = 0;

	/* First 4 bytes(signature) of crash dump is always non-zero */
	bnxt_copy_crash_dump(bp, &sig, sizeof(sig));
	return !!sig;
}

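/* Top-level dump entry point: crash dumps are copied from host DDR or, via
 * the TEE interface, from SoC DDR; all other dump types are generated by
 * __bnxt_get_coredump().
 */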
int bnxt_get_coredump(struct bnxt *bp, u16 dump_type, void *buf, u32 *dump_len)
{
	if (dump_type == BNXT_DUMP_CRASH) {
		if (bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR)
			return bnxt_copy_crash_dump(bp, buf, *dump_len);
#ifdef CONFIG_TEE_BNXT_FW
		else if (bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_SOC_DDR)
			return tee_bnxt_copy_coredump(buf, 0, *dump_len);
#endif
		else
			return -EOPNOTSUPP;
	} else {
		return __bnxt_get_coredump(bp, dump_type, buf, dump_len);
	}
}

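/* Query firmware (HWRM_DBG_QCFG) for the crash dump or coredump size and,
 * for coredumps, add the headers the driver prepends and appends itself.
 */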
int bnxt_hwrm_get_dump_len(struct bnxt *bp, u16 dump_type, u32 *dump_len)
{
	struct hwrm_dbg_qcfg_output *resp;
	struct hwrm_dbg_qcfg_input *req;
	int rc, hdr_len = 0;

	if (!(bp->fw_cap & BNXT_FW_CAP_DBG_QCAPS))
		return -EOPNOTSUPP;

	if (dump_type == BNXT_DUMP_CRASH &&
	    !(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_SOC_DDR ||
	     (bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR)))
		return -EOPNOTSUPP;

	rc = hwrm_req_init(bp, req, HWRM_DBG_QCFG);
	if (rc)
		return rc;

	req->fid = cpu_to_le16(0xffff);
	if (dump_type == BNXT_DUMP_CRASH) {
		if (bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_SOC_DDR)
			req->flags = cpu_to_le16(BNXT_DBG_FL_CR_DUMP_SIZE_SOC);
		else
			req->flags = cpu_to_le16(BNXT_DBG_FL_CR_DUMP_SIZE_HOST);
	}

	resp = hwrm_req_hold(bp, req);
	rc = hwrm_req_send(bp, req);
	if (rc)
		goto get_dump_len_exit;

	if (dump_type == BNXT_DUMP_CRASH) {
		if (bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_SOC_DDR)
			*dump_len = BNXT_CRASH_DUMP_LEN;
		else
			*dump_len = le32_to_cpu(resp->crashdump_size);
	} else {
		/* Driver adds coredump header and "HWRM_VER_GET response"
		 * segment additionally to coredump.
		 */
		hdr_len = sizeof(struct bnxt_coredump_segment_hdr) +
			  sizeof(struct hwrm_ver_get_output) +
			  sizeof(struct bnxt_coredump_record);
		*dump_len = le32_to_cpu(resp->coredump_size) + hdr_len;
	}
	if (*dump_len <= hdr_len)
		rc = -EINVAL;

get_dump_len_exit:
	hwrm_req_drop(bp, req);
	return rc;
}

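/* Return the expected length of the requested dump: the stored crash dump
 * length for host-DDR crash dumps, the firmware-reported size otherwise,
 * falling back to a driver-computed length for coredump types.
 */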
u32 bnxt_get_coredump_length(struct bnxt *bp, u16 dump_type)
{
	u32 len = 0;

	if (dump_type == BNXT_DUMP_CRASH &&
	    bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR &&
	    bp->fw_crash_mem) {
		if (!bnxt_crash_dump_avail(bp))
			return 0;

		return bp->fw_crash_len;
	}

	if (dump_type != BNXT_DUMP_DRIVER) {
		if (!bnxt_hwrm_get_dump_len(bp, dump_type, &len))
			return len;
	}
	if (dump_type != BNXT_DUMP_CRASH)
		__bnxt_get_coredump(bp, dump_type, NULL, &len);

	return len;
}
651