/*
 * Copyright (c) 2017-2018 Cavium, Inc.
 * All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * File : ecore_dbg_fw_funcs.c
 */
#include <sys/cdefs.h>
#include "bcm_osal.h"
#include "ecore.h"
#include "ecore_hw.h"
#include "ecore_mcp.h"
#include "spad_layout.h"
#include "nvm_map.h"
#include "reg_addr.h"
#include "ecore_hsi_common.h"
#include "ecore_hsi_debug_tools.h"
#include "mcp_public.h"
#ifndef USE_DBG_BIN_FILE
#include "ecore_dbg_values.h"
#endif
#include "ecore_dbg_fw_funcs.h"

/* Memory groups enum */
enum mem_groups {
	MEM_GROUP_PXP_MEM,
	MEM_GROUP_DMAE_MEM,
	MEM_GROUP_CM_MEM,
	MEM_GROUP_QM_MEM,
	MEM_GROUP_DORQ_MEM,
	MEM_GROUP_BRB_RAM,
	MEM_GROUP_BRB_MEM,
	MEM_GROUP_PRS_MEM,
	MEM_GROUP_IOR,
	MEM_GROUP_BTB_RAM,
	MEM_GROUP_CONN_CFC_MEM,
	MEM_GROUP_TASK_CFC_MEM,
	MEM_GROUP_CAU_PI,
	MEM_GROUP_CAU_MEM,
	MEM_GROUP_PXP_ILT,
	MEM_GROUP_TM_MEM,
	MEM_GROUP_SDM_MEM,
	MEM_GROUP_PBUF,
	MEM_GROUP_RAM,
	MEM_GROUP_MULD_MEM,
	MEM_GROUP_BTB_MEM,
	MEM_GROUP_RDIF_CTX,
	MEM_GROUP_TDIF_CTX,
	MEM_GROUP_CFC_MEM,
	MEM_GROUP_IGU_MEM,
	MEM_GROUP_IGU_MSIX,
	MEM_GROUP_CAU_SB,
	MEM_GROUP_BMB_RAM,
	MEM_GROUP_BMB_MEM,
	MEM_GROUPS_NUM
};

/* Memory groups names */
static const char* s_mem_group_names[] = {
	"PXP_MEM",
	"DMAE_MEM",
	"CM_MEM",
	"QM_MEM",
	"DORQ_MEM",
	"BRB_RAM",
	"BRB_MEM",
	"PRS_MEM",
	"IOR",
	"BTB_RAM",
	"CONN_CFC_MEM",
	"TASK_CFC_MEM",
	"CAU_PI",
	"CAU_MEM",
	"PXP_ILT",
	"TM_MEM",
	"SDM_MEM",
	"PBUF",
	"RAM",
	"MULD_MEM",
	"BTB_MEM",
	"RDIF_CTX",
	"TDIF_CTX",
	"CFC_MEM",
	"IGU_MEM",
	"IGU_MSIX",
	"CAU_SB",
	"BMB_RAM",
	"BMB_MEM",
};
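
/* Note: the name strings above are indexed by the mem_groups enum, so the
 * two lists must be kept in the same order.
 */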

/* Idle check conditions */

#ifndef __PREVENT_COND_ARR__

static u32 cond5(const u32 *r, const u32 *imm) {
	return (((r[0] & imm[0]) != imm[1]) && ((r[1] & imm[2]) != imm[3]));
}

static u32 cond7(const u32 *r, const u32 *imm) {
	return (((r[0] >> imm[0]) & imm[1]) != imm[2]);
}

static u32 cond6(const u32 *r, const u32 *imm) {
	return ((r[0] & imm[0]) != imm[1]);
}

static u32 cond9(const u32 *r, const u32 *imm) {
	return ((r[0] & imm[0]) >> imm[1]) != (((r[0] & imm[2]) >> imm[3]) | ((r[1] & imm[4]) << imm[5]));
}

static u32 cond10(const u32 *r, const u32 *imm) {
	return ((r[0] & imm[0]) >> imm[1]) != (r[0] & imm[2]);
}

static u32 cond4(const u32 *r, const u32 *imm) {
	return ((r[0] & ~imm[0]) != imm[1]);
}

static u32 cond0(const u32 *r, const u32 *imm) {
	return ((r[0] & ~r[1]) != imm[0]);
}

static u32 cond1(const u32 *r, const u32 *imm) {
	return (r[0] != imm[0]);
}

static u32 cond11(const u32 *r, const u32 *imm) {
	return (r[0] != r[1] && r[2] == imm[0]);
}

static u32 cond12(const u32 *r, const u32 *imm) {
	return (r[0] != r[1] && r[2] > imm[0]);
}

static u32 cond3(const u32 *r, const u32 OSAL_UNUSED *imm) {
	return (r[0] != r[1]);
}

static u32 cond13(const u32 *r, const u32 *imm) {
	return (r[0] & imm[0]);
}

static u32 cond8(const u32 *r, const u32 *imm) {
	return (r[0] < (r[1] - imm[0]));
}

static u32 cond2(const u32 *r, const u32 *imm) {
	return (r[0] > imm[0]);
}
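
/* Each condition callback receives the values read for a rule's registers
 * (r) and the rule's immediate values (imm), and returns non-zero when the
 * idle-check rule fails. The callbacks are dispatched through cond_arr
 * below, indexed by the condition ID carried in each idle-check rule.
 */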

/* Array of Idle Check conditions */
static u32 (*cond_arr[])(const u32 *r, const u32 *imm) = {
	cond0,
	cond1,
	cond2,
	cond3,
	cond4,
	cond5,
	cond6,
	cond7,
	cond8,
	cond9,
	cond10,
	cond11,
	cond12,
	cond13,
};

#endif /* __PREVENT_COND_ARR__ */

/******************************* Data Types **********************************/

enum platform_ids {
	PLATFORM_ASIC,
	PLATFORM_EMUL_FULL,
	PLATFORM_EMUL_REDUCED,
	PLATFORM_FPGA,
	MAX_PLATFORM_IDS
};

struct chip_platform_defs {
	u8 num_ports;
	u8 num_pfs;
	u8 num_vfs;
};

/* Chip constant definitions */
struct chip_defs {
	const char *name;
	struct chip_platform_defs per_platform[MAX_PLATFORM_IDS];
};

/* Platform constant definitions */
struct platform_defs {
	const char *name;
	u32 delay_factor;
	u32 dmae_thresh;
	u32 log_thresh;
};

/* Storm constant definitions.
 * Addresses are in bytes, sizes are in quad-regs.
 */
struct storm_defs {
	char letter;
	enum block_id block_id;
	enum dbg_bus_clients dbg_client_id[MAX_CHIP_IDS];
	bool has_vfc;
	u32 sem_fast_mem_addr;
	u32 sem_frame_mode_addr;
	u32 sem_slow_enable_addr;
	u32 sem_slow_mode_addr;
	u32 sem_slow_mode1_conf_addr;
	u32 sem_sync_dbg_empty_addr;
	u32 sem_slow_dbg_empty_addr;
	u32 cm_ctx_wr_addr;
	u32 cm_conn_ag_ctx_lid_size;
	u32 cm_conn_ag_ctx_rd_addr;
	u32 cm_conn_st_ctx_lid_size;
	u32 cm_conn_st_ctx_rd_addr;
	u32 cm_task_ag_ctx_lid_size;
	u32 cm_task_ag_ctx_rd_addr;
	u32 cm_task_st_ctx_lid_size;
	u32 cm_task_st_ctx_rd_addr;
};

/* Block constant definitions */
struct block_defs {
	const char *name;
	bool exists[MAX_CHIP_IDS];
	bool associated_to_storm;

	/* Valid only if associated_to_storm is true */
	u32 storm_id;
	enum dbg_bus_clients dbg_client_id[MAX_CHIP_IDS];
	u32 dbg_select_addr;
	u32 dbg_enable_addr;
	u32 dbg_shift_addr;
	u32 dbg_force_valid_addr;
	u32 dbg_force_frame_addr;
	bool has_reset_bit;

	/* If true, block is taken out of reset before dump */
	bool unreset;
	enum dbg_reset_regs reset_reg;

	/* Bit offset in reset register */
	u8 reset_bit_offset;
};

/* Reset register definitions */
struct reset_reg_defs {
	u32 addr;
	bool exists[MAX_CHIP_IDS];
	u32 unreset_val[MAX_CHIP_IDS];
};

/* Debug Bus Constraint operation constant definitions */
struct dbg_bus_constraint_op_defs {
	u8 hw_op_val;
	bool is_cyclic;
};

/* Storm Mode definitions */
struct storm_mode_defs {
	const char *name;
	bool is_fast_dbg;
	u8 id_in_hw;
};

struct grc_param_defs {
	u32 default_val[MAX_CHIP_IDS];
	u32 min;
	u32 max;
	bool is_preset;
	u32 exclude_all_preset_val;
	u32 crash_preset_val;
};

/* Address is in 128b units. Width is in bits. */
struct rss_mem_defs {
	const char *mem_name;
	const char *type_name;
	u32 addr;
	u32 entry_width;
	u32 num_entries[MAX_CHIP_IDS];
};

struct vfc_ram_defs {
	const char *mem_name;
	const char *type_name;
	u32 base_row;
	u32 num_rows;
};

struct big_ram_defs {
	const char *instance_name;
	enum mem_groups mem_group_id;
	enum mem_groups ram_mem_group_id;
	enum dbg_grc_params grc_param;
	u32 addr_reg_addr;
	u32 data_reg_addr;
	u32 is_256b_reg_addr;
	u32 is_256b_bit_offset[MAX_CHIP_IDS];
	u32 ram_size[MAX_CHIP_IDS]; /* In dwords */
};

struct phy_defs {
	const char *phy_name;

	/* PHY base GRC address */
	u32 base_addr;

	/* Relative address of indirect TBUS address register (bits 0..7) */
	u32 tbus_addr_lo_addr;

	/* Relative address of indirect TBUS address register (bits 8..10) */
	u32 tbus_addr_hi_addr;

	/* Relative address of indirect TBUS data register (bits 0..7) */
	u32 tbus_data_lo_addr;

	/* Relative address of indirect TBUS data register (bits 8..11) */
	u32 tbus_data_hi_addr;
};

/******************************** Constants **********************************/

#define MAX_LCIDS			320
#define MAX_LTIDS			320

#define NUM_IOR_SETS			2
#define IORS_PER_SET			176
#define IOR_SET_OFFSET(set_id)		((set_id) * 256)

#define BYTES_IN_DWORD			sizeof(u32)

/* Cyclic right shift (rotate) of the lowest val_width bits of val */
#define SHR(val, val_width, amount)	(((val) | ((val) << (val_width))) \
					>> (amount)) & ((1 << (val_width)) - 1)
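
/* Example: SHR(0xB, 4, 1) rotates the 4-bit value 1011b right by one bit,
 * yielding 1101b (0xD).
 */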

/* In the macros below, size and offset are specified in bits */
#define CEIL_DWORDS(size)		DIV_ROUND_UP(size, 32)
#define FIELD_BIT_OFFSET(type, field)	type##_##field##_##OFFSET
#define FIELD_BIT_SIZE(type, field)	type##_##field##_##SIZE
#define FIELD_DWORD_OFFSET(type, field) \
	(int)(FIELD_BIT_OFFSET(type, field) / 32)
#define FIELD_DWORD_SHIFT(type, field)	(FIELD_BIT_OFFSET(type, field) % 32)
#define FIELD_BIT_MASK(type, field) \
	(((1 << FIELD_BIT_SIZE(type, field)) - 1) << \
	 FIELD_DWORD_SHIFT(type, field))

#define SET_VAR_FIELD(var, type, field, val) \
	var[FIELD_DWORD_OFFSET(type, field)] &= \
		(~FIELD_BIT_MASK(type, field)); \
	var[FIELD_DWORD_OFFSET(type, field)] |= \
		(val) << FIELD_DWORD_SHIFT(type, field)
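
/* A minimal usage sketch (cam_cmd and row are illustrative names, following
 * the VFC_CAM_CMD_* bit-field constants defined below):
 *	SET_VAR_FIELD(cam_cmd, VFC_CAM_CMD, ROW, row);
 * This expands to a masked clear of the ROW field in the dword array,
 * followed by an OR of the shifted new value.
 */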

#define ARR_REG_WR(dev, ptt, addr, arr, arr_size) \
	for (i = 0; i < (arr_size); i++) \
		ecore_wr(dev, ptt, addr, (arr)[i])

#define ARR_REG_RD(dev, ptt, addr, arr, arr_size) \
	for (i = 0; i < (arr_size); i++) \
		(arr)[i] = ecore_rd(dev, ptt, addr)
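
/* Note: both ARR_REG_* macros use a loop variable named i without declaring
 * it, so the caller must have a u32 i in scope.
 */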

#define CHECK_ARR_SIZE(arr, size) \
	OSAL_BUILD_BUG_ON(!(OSAL_ARRAY_SIZE(arr) == size))

#ifndef DWORDS_TO_BYTES
#define DWORDS_TO_BYTES(dwords)		((dwords) * BYTES_IN_DWORD)
#endif
#ifndef BYTES_TO_DWORDS
#define BYTES_TO_DWORDS(bytes)		((bytes) / BYTES_IN_DWORD)
#endif

/* extra lines include a signature line + optional latency events line */
#ifndef NUM_DBG_LINES
#define NUM_EXTRA_DBG_LINES(block_desc) \
	(1 + (block_desc->has_latency_events ? 1 : 0))
#define NUM_DBG_LINES(block_desc) \
	(block_desc->num_of_lines + NUM_EXTRA_DBG_LINES(block_desc))
#endif

#define USE_DMAE			true
#define PROTECT_WIDE_BUS		true

#define RAM_LINES_TO_DWORDS(lines)	((lines) * 2)
#define RAM_LINES_TO_BYTES(lines) \
	DWORDS_TO_BYTES(RAM_LINES_TO_DWORDS(lines))

#define REG_DUMP_LEN_SHIFT		24
#define MEM_DUMP_ENTRY_SIZE_DWORDS \
	BYTES_TO_DWORDS(sizeof(struct dbg_dump_mem))

#define IDLE_CHK_RULE_SIZE_DWORDS \
	BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_rule))

#define IDLE_CHK_RESULT_HDR_DWORDS \
	BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_hdr))

#define IDLE_CHK_RESULT_REG_HDR_DWORDS \
	BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_reg_hdr))

#define IDLE_CHK_MAX_ENTRIES_SIZE	32

/* The sizes and offsets below are specified in bits */
#define VFC_CAM_CMD_STRUCT_SIZE		64
#define VFC_CAM_CMD_ROW_OFFSET		48
#define VFC_CAM_CMD_ROW_SIZE		9
#define VFC_CAM_ADDR_STRUCT_SIZE	16
#define VFC_CAM_ADDR_OP_OFFSET		0
#define VFC_CAM_ADDR_OP_SIZE		4
#define VFC_CAM_RESP_STRUCT_SIZE	256
#define VFC_RAM_ADDR_STRUCT_SIZE	16
#define VFC_RAM_ADDR_OP_OFFSET		0
#define VFC_RAM_ADDR_OP_SIZE		2
#define VFC_RAM_ADDR_ROW_OFFSET		2
#define VFC_RAM_ADDR_ROW_SIZE		10
#define VFC_RAM_RESP_STRUCT_SIZE	256

#define VFC_CAM_CMD_DWORDS		CEIL_DWORDS(VFC_CAM_CMD_STRUCT_SIZE)
#define VFC_CAM_ADDR_DWORDS		CEIL_DWORDS(VFC_CAM_ADDR_STRUCT_SIZE)
#define VFC_CAM_RESP_DWORDS		CEIL_DWORDS(VFC_CAM_RESP_STRUCT_SIZE)
#define VFC_RAM_CMD_DWORDS		VFC_CAM_CMD_DWORDS
#define VFC_RAM_ADDR_DWORDS		CEIL_DWORDS(VFC_RAM_ADDR_STRUCT_SIZE)
#define VFC_RAM_RESP_DWORDS		CEIL_DWORDS(VFC_RAM_RESP_STRUCT_SIZE)

#define NUM_VFC_RAM_TYPES		4

#define VFC_CAM_NUM_ROWS		512

#define VFC_OPCODE_CAM_RD		14
#define VFC_OPCODE_RAM_RD		0

#define NUM_RSS_MEM_TYPES		5

#define NUM_BIG_RAM_TYPES		3

#define NUM_PHY_TBUS_ADDRESSES		2048
#define PHY_DUMP_SIZE_DWORDS		(NUM_PHY_TBUS_ADDRESSES / 2)
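
/* Each dumped dword appears to pack the TBUS data (lo + hi bytes) of two
 * consecutive TBUS addresses, hence the divide by 2 above.
 */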

#define SEM_FAST_MODE23_SRC_ENABLE_VAL	0x0
#define SEM_FAST_MODE23_SRC_DISABLE_VAL	0x7
#define SEM_FAST_MODE4_SRC_ENABLE_VAL	0x0
#define SEM_FAST_MODE4_SRC_DISABLE_VAL	0x3
#define SEM_FAST_MODE6_SRC_ENABLE_VAL	0x10
#define SEM_FAST_MODE6_SRC_DISABLE_VAL	0x3f

#define SEM_SLOW_MODE1_DATA_ENABLE	0x1

#define VALUES_PER_CYCLE		4
#define MAX_CYCLE_VALUES_MASK		((1 << VALUES_PER_CYCLE) - 1)

#define MAX_DWORDS_PER_CYCLE		8

#define HW_ID_BITS			3

#define NUM_CALENDAR_SLOTS		16

#define MAX_TRIGGER_STATES		3
#define TRIGGER_SETS_PER_STATE		2
#define MAX_CONSTRAINTS			4

#define SEM_FILTER_CID_EN_MASK		0x00b
#define SEM_FILTER_EID_MASK_EN_MASK	0x013
#define SEM_FILTER_EID_RANGE_EN_MASK	0x113

#define CHUNK_SIZE_IN_DWORDS		64
#define CHUNK_SIZE_IN_BYTES		DWORDS_TO_BYTES(CHUNK_SIZE_IN_DWORDS)

#define INT_BUF_NUM_OF_LINES		192
#define INT_BUF_LINE_SIZE_IN_DWORDS	16
#define INT_BUF_SIZE_IN_DWORDS \
	(INT_BUF_NUM_OF_LINES * INT_BUF_LINE_SIZE_IN_DWORDS)
#define INT_BUF_SIZE_IN_CHUNKS \
	(INT_BUF_SIZE_IN_DWORDS / CHUNK_SIZE_IN_DWORDS)

#define PCI_BUF_LINE_SIZE_IN_DWORDS	8
#define PCI_BUF_LINE_SIZE_IN_BYTES \
	DWORDS_TO_BYTES(PCI_BUF_LINE_SIZE_IN_DWORDS)

#define TARGET_EN_MASK_PCI		0x3
#define TARGET_EN_MASK_NIG		0x4

#define PCI_REQ_CREDIT			1
#define PCI_PHYS_ADDR_TYPE		0

#define OPAQUE_FID(pci_func)		(((pci_func) << 4) | 0xff00)

#define RESET_REG_UNRESET_OFFSET	4

#define PCI_PKT_SIZE_IN_CHUNKS		1
#define PCI_PKT_SIZE_IN_BYTES \
	(PCI_PKT_SIZE_IN_CHUNKS * CHUNK_SIZE_IN_BYTES)

#define NIG_PKT_SIZE_IN_CHUNKS		4

#define FLUSH_DELAY_MS			500
#define STALL_DELAY_MS			500

#define SRC_MAC_ADDR_LO16		0x0a0b
#define SRC_MAC_ADDR_HI32		0x0c0d0e0f
#define ETH_TYPE			0x1000

#define STATIC_DEBUG_LINE_DWORDS	9

#define NUM_COMMON_GLOBAL_PARAMS	8

#define FW_IMG_KUKU			0
#define FW_IMG_MAIN			1
#define FW_IMG_L2B			2

#ifndef REG_FIFO_ELEMENT_DWORDS
#define REG_FIFO_ELEMENT_DWORDS		2
#endif
#define REG_FIFO_DEPTH_ELEMENTS		32
#define REG_FIFO_DEPTH_DWORDS \
	(REG_FIFO_ELEMENT_DWORDS * REG_FIFO_DEPTH_ELEMENTS)

#ifndef IGU_FIFO_ELEMENT_DWORDS
#define IGU_FIFO_ELEMENT_DWORDS		4
#endif
#define IGU_FIFO_DEPTH_ELEMENTS		64
#define IGU_FIFO_DEPTH_DWORDS \
	(IGU_FIFO_ELEMENT_DWORDS * IGU_FIFO_DEPTH_ELEMENTS)

#define SEMI_SYNC_FIFO_POLLING_DELAY_MS	5
#define SEMI_SYNC_FIFO_POLLING_COUNT	20

#ifndef PROTECTION_OVERRIDE_ELEMENT_DWORDS
#define PROTECTION_OVERRIDE_ELEMENT_DWORDS 2
#endif
#define PROTECTION_OVERRIDE_DEPTH_ELEMENTS 20
#define PROTECTION_OVERRIDE_DEPTH_DWORDS \
	(PROTECTION_OVERRIDE_DEPTH_ELEMENTS * \
	 PROTECTION_OVERRIDE_ELEMENT_DWORDS)

#define MCP_SPAD_TRACE_OFFSIZE_ADDR \
	(MCP_REG_SCRATCH + \
	 OFFSETOF(struct static_init, sections[SPAD_SECTION_TRACE]))

#define EMPTY_FW_VERSION_STR		"???_???_???_???"
#define EMPTY_FW_IMAGE_STR		"???????????????"

/***************************** Constant Arrays *******************************/

struct dbg_array {
	const u32 *ptr;
	u32 size_in_dwords;
};

/* Debug arrays */
#ifdef USE_DBG_BIN_FILE
static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { { OSAL_NULL } };
#else
static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = {
	/* BIN_BUF_DBG_MODE_TREE */
	{ (const u32 *)dbg_modes_tree_buf, OSAL_ARRAY_SIZE(dbg_modes_tree_buf) },

	/* BIN_BUF_DBG_DUMP_REG */
	{ dump_reg, OSAL_ARRAY_SIZE(dump_reg) },

	/* BIN_BUF_DBG_DUMP_MEM */
	{ dump_mem, OSAL_ARRAY_SIZE(dump_mem) },

	/* BIN_BUF_DBG_IDLE_CHK_REGS */
	{ idle_chk_regs, OSAL_ARRAY_SIZE(idle_chk_regs) },

	/* BIN_BUF_DBG_IDLE_CHK_IMMS */
	{ idle_chk_imms, OSAL_ARRAY_SIZE(idle_chk_imms) },

	/* BIN_BUF_DBG_IDLE_CHK_RULES */
	{ idle_chk_rules, OSAL_ARRAY_SIZE(idle_chk_rules) },

	/* BIN_BUF_DBG_IDLE_CHK_PARSING_DATA */
	{ OSAL_NULL, 0 },

	/* BIN_BUF_DBG_ATTN_BLOCKS */
	{ attn_block, OSAL_ARRAY_SIZE(attn_block) },

	/* BIN_BUF_DBG_ATTN_REGS */
	{ attn_reg, OSAL_ARRAY_SIZE(attn_reg) },

	/* BIN_BUF_DBG_ATTN_INDEXES */
	{ OSAL_NULL, 0 },

	/* BIN_BUF_DBG_ATTN_NAME_OFFSETS */
	{ OSAL_NULL, 0 },

	/* BIN_BUF_DBG_BUS_BLOCKS */
	{ dbg_bus_blocks, OSAL_ARRAY_SIZE(dbg_bus_blocks) },

	/* BIN_BUF_DBG_BUS_LINES */
	{ dbg_bus_lines, OSAL_ARRAY_SIZE(dbg_bus_lines) },

	/* BIN_BUF_DBG_BUS_BLOCKS_USER_DATA */
	{ OSAL_NULL, 0 },

	/* BIN_BUF_DBG_BUS_LINE_NAME_OFFSETS */
	{ OSAL_NULL, 0 },

	/* BIN_BUF_DBG_PARSING_STRINGS */
	{ OSAL_NULL, 0 }
};
#endif
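
/* The initializers above must follow the order of the binary debug buffer
 * type enum, since the dump code indexes s_dbg_arrays by buffer-type value.
 */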

/* Chip constant definitions array */
static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = {
	{ "bb",

		/* ASIC */
		{ { MAX_NUM_PORTS_BB, MAX_NUM_PFS_BB, MAX_NUM_VFS_BB },

		/* EMUL_FULL */
		{ MAX_NUM_PORTS_BB, MAX_NUM_PFS_BB, MAX_NUM_VFS_BB },

		/* EMUL_REDUCED */
		{ MAX_NUM_PORTS_BB, MAX_NUM_PFS_BB, MAX_NUM_VFS_BB },

		/* FPGA */
		{ MAX_NUM_PORTS_BB, MAX_NUM_PFS_BB, MAX_NUM_VFS_BB } } },

	{ "ah",

		/* ASIC */
		{ { MAX_NUM_PORTS_K2, MAX_NUM_PFS_K2, MAX_NUM_VFS_K2 },

		/* EMUL_FULL */
		{ MAX_NUM_PORTS_K2, MAX_NUM_PFS_K2, MAX_NUM_VFS_K2 },

		/* EMUL_REDUCED */
		{ MAX_NUM_PORTS_K2, MAX_NUM_PFS_K2, MAX_NUM_VFS_K2 },

		/* FPGA */
		{ MAX_NUM_PORTS_K2, 8, MAX_NUM_VFS_K2 } } },

	{ "e5",

		/* ASIC */
		{ { MAX_NUM_PORTS_E5, MAX_NUM_PFS_E5, MAX_NUM_VFS_E5 },

		/* EMUL_FULL */
		{ MAX_NUM_PORTS_E5, MAX_NUM_PFS_E5, MAX_NUM_VFS_E5 },

		/* EMUL_REDUCED */
		{ MAX_NUM_PORTS_E5, MAX_NUM_PFS_E5, MAX_NUM_VFS_E5 },

		/* FPGA */
		{ MAX_NUM_PORTS_E5, 8, MAX_NUM_VFS_E5 } } }
};

/* Storm constant definitions array */
static struct storm_defs s_storm_defs[] = {
	/* Tstorm */
	{	'T', BLOCK_TSEM,
		{ DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT }, true,
		TSEM_REG_FAST_MEMORY,
		TSEM_REG_DBG_FRAME_MODE_BB_K2, TSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
		TSEM_REG_SLOW_DBG_MODE_BB_K2, TSEM_REG_DBG_MODE1_CFG_BB_K2,
		TSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_SLOW_DBG_EMPTY_BB_K2,
		TCM_REG_CTX_RBC_ACCS,
		4, TCM_REG_AGG_CON_CTX,
		16, TCM_REG_SM_CON_CTX,
		2, TCM_REG_AGG_TASK_CTX,
		4, TCM_REG_SM_TASK_CTX },

	/* Mstorm */
	{	'M', BLOCK_MSEM,
		{ DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM }, false,
		MSEM_REG_FAST_MEMORY,
		MSEM_REG_DBG_FRAME_MODE_BB_K2, MSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
		MSEM_REG_SLOW_DBG_MODE_BB_K2, MSEM_REG_DBG_MODE1_CFG_BB_K2,
		MSEM_REG_SYNC_DBG_EMPTY, MSEM_REG_SLOW_DBG_EMPTY_BB_K2,
		MCM_REG_CTX_RBC_ACCS,
		1, MCM_REG_AGG_CON_CTX,
		10, MCM_REG_SM_CON_CTX,
		2, MCM_REG_AGG_TASK_CTX,
		7, MCM_REG_SM_TASK_CTX },

	/* Ustorm */
	{	'U', BLOCK_USEM,
		{ DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU }, false,
		USEM_REG_FAST_MEMORY,
		USEM_REG_DBG_FRAME_MODE_BB_K2, USEM_REG_SLOW_DBG_ACTIVE_BB_K2,
		USEM_REG_SLOW_DBG_MODE_BB_K2, USEM_REG_DBG_MODE1_CFG_BB_K2,
		USEM_REG_SYNC_DBG_EMPTY, USEM_REG_SLOW_DBG_EMPTY_BB_K2,
		UCM_REG_CTX_RBC_ACCS,
		2, UCM_REG_AGG_CON_CTX,
		13, UCM_REG_SM_CON_CTX,
		3, UCM_REG_AGG_TASK_CTX,
		3, UCM_REG_SM_TASK_CTX },

	/* Xstorm */
	{	'X', BLOCK_XSEM,
		{ DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX }, false,
		XSEM_REG_FAST_MEMORY,
		XSEM_REG_DBG_FRAME_MODE_BB_K2, XSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
		XSEM_REG_SLOW_DBG_MODE_BB_K2, XSEM_REG_DBG_MODE1_CFG_BB_K2,
		XSEM_REG_SYNC_DBG_EMPTY, XSEM_REG_SLOW_DBG_EMPTY_BB_K2,
		XCM_REG_CTX_RBC_ACCS,
		9, XCM_REG_AGG_CON_CTX,
		15, XCM_REG_SM_CON_CTX,
		0, 0,
		0, 0 },

	/* Ystorm */
	{	'Y', BLOCK_YSEM,
		{ DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY }, false,
		YSEM_REG_FAST_MEMORY,
		YSEM_REG_DBG_FRAME_MODE_BB_K2, YSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
		YSEM_REG_SLOW_DBG_MODE_BB_K2, YSEM_REG_DBG_MODE1_CFG_BB_K2,
		YSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_SLOW_DBG_EMPTY_BB_K2,
		YCM_REG_CTX_RBC_ACCS,
		2, YCM_REG_AGG_CON_CTX,
		3, YCM_REG_SM_CON_CTX,
		2, YCM_REG_AGG_TASK_CTX,
		12, YCM_REG_SM_TASK_CTX },

	/* Pstorm */
	{	'P', BLOCK_PSEM,
		{ DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS }, true,
		PSEM_REG_FAST_MEMORY,
		PSEM_REG_DBG_FRAME_MODE_BB_K2, PSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
		PSEM_REG_SLOW_DBG_MODE_BB_K2, PSEM_REG_DBG_MODE1_CFG_BB_K2,
		PSEM_REG_SYNC_DBG_EMPTY, PSEM_REG_SLOW_DBG_EMPTY_BB_K2,
		PCM_REG_CTX_RBC_ACCS,
		0, 0,
		10, PCM_REG_SM_CON_CTX,
		0, 0,
		0, 0 }
};
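
/* s_storm_defs is indexed by storm ID, in the order Tstorm, Mstorm, Ustorm,
 * Xstorm, Ystorm, Pstorm (matching the DBG_*STORM_ID values used below).
 */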

/* Block definitions array */

static struct block_defs block_grc_defs = {
	"grc", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN },
	GRC_REG_DBG_SELECT, GRC_REG_DBG_DWORD_ENABLE,
	GRC_REG_DBG_SHIFT, GRC_REG_DBG_FORCE_VALID,
	GRC_REG_DBG_FORCE_FRAME,
	true, false, DBG_RESET_REG_MISC_PL_UA, 1 };

static struct block_defs block_miscs_defs = {
	"miscs", { true, true, true }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
	0, 0, 0, 0, 0,
	false, false, MAX_DBG_RESET_REGS, 0 };

static struct block_defs block_misc_defs = {
	"misc", { true, true, true }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
	0, 0, 0, 0, 0,
	false, false, MAX_DBG_RESET_REGS, 0 };

static struct block_defs block_dbu_defs = {
	"dbu", { true, true, true }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
	0, 0, 0, 0, 0,
	false, false, MAX_DBG_RESET_REGS, 0 };

static struct block_defs block_pglue_b_defs = {
	"pglue_b", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH },
	PGLUE_B_REG_DBG_SELECT, PGLUE_B_REG_DBG_DWORD_ENABLE,
	PGLUE_B_REG_DBG_SHIFT, PGLUE_B_REG_DBG_FORCE_VALID,
	PGLUE_B_REG_DBG_FORCE_FRAME,
	true, false, DBG_RESET_REG_MISCS_PL_HV, 1 };

static struct block_defs block_cnig_defs = {
	"cnig", { true, true, true }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW, DBG_BUS_CLIENT_RBCW },
	CNIG_REG_DBG_SELECT_K2_E5, CNIG_REG_DBG_DWORD_ENABLE_K2_E5,
	CNIG_REG_DBG_SHIFT_K2_E5, CNIG_REG_DBG_FORCE_VALID_K2_E5,
	CNIG_REG_DBG_FORCE_FRAME_K2_E5,
	true, false, DBG_RESET_REG_MISCS_PL_HV, 0 };

static struct block_defs block_cpmu_defs = {
	"cpmu", { true, true, true }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
	0, 0, 0, 0, 0,
	true, false, DBG_RESET_REG_MISCS_PL_HV, 8 };

static struct block_defs block_ncsi_defs = {
	"ncsi", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ },
	NCSI_REG_DBG_SELECT, NCSI_REG_DBG_DWORD_ENABLE,
	NCSI_REG_DBG_SHIFT, NCSI_REG_DBG_FORCE_VALID,
	NCSI_REG_DBG_FORCE_FRAME,
	true, false, DBG_RESET_REG_MISCS_PL_HV, 5 };

static struct block_defs block_opte_defs = {
	"opte", { true, true, false }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
	0, 0, 0, 0, 0,
	true, false, DBG_RESET_REG_MISCS_PL_HV, 4 };

static struct block_defs block_bmb_defs = {
	"bmb", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCB, DBG_BUS_CLIENT_RBCB },
	BMB_REG_DBG_SELECT, BMB_REG_DBG_DWORD_ENABLE,
	BMB_REG_DBG_SHIFT, BMB_REG_DBG_FORCE_VALID,
	BMB_REG_DBG_FORCE_FRAME,
	true, false, DBG_RESET_REG_MISCS_PL_UA, 7 };

static struct block_defs block_pcie_defs = {
	"pcie", { true, true, true }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH },
	PCIE_REG_DBG_COMMON_SELECT_K2_E5, PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2_E5,
	PCIE_REG_DBG_COMMON_SHIFT_K2_E5, PCIE_REG_DBG_COMMON_FORCE_VALID_K2_E5,
	PCIE_REG_DBG_COMMON_FORCE_FRAME_K2_E5,
	false, false, MAX_DBG_RESET_REGS, 0 };

static struct block_defs block_mcp_defs = {
	"mcp", { true, true, true }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
	0, 0, 0, 0, 0,
	false, false, MAX_DBG_RESET_REGS, 0 };

static struct block_defs block_mcp2_defs = {
	"mcp2", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ },
	MCP2_REG_DBG_SELECT, MCP2_REG_DBG_DWORD_ENABLE,
	MCP2_REG_DBG_SHIFT, MCP2_REG_DBG_FORCE_VALID,
	MCP2_REG_DBG_FORCE_FRAME,
	false, false, MAX_DBG_RESET_REGS, 0 };

static struct block_defs block_pswhst_defs = {
	"pswhst", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP },
	PSWHST_REG_DBG_SELECT, PSWHST_REG_DBG_DWORD_ENABLE,
	PSWHST_REG_DBG_SHIFT, PSWHST_REG_DBG_FORCE_VALID,
	PSWHST_REG_DBG_FORCE_FRAME,
	true, false, DBG_RESET_REG_MISC_PL_HV, 0 };

static struct block_defs block_pswhst2_defs = {
	"pswhst2", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP },
	PSWHST2_REG_DBG_SELECT, PSWHST2_REG_DBG_DWORD_ENABLE,
	PSWHST2_REG_DBG_SHIFT, PSWHST2_REG_DBG_FORCE_VALID,
	PSWHST2_REG_DBG_FORCE_FRAME,
	true, false, DBG_RESET_REG_MISC_PL_HV, 0 };

static struct block_defs block_pswrd_defs = {
	"pswrd", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP },
	PSWRD_REG_DBG_SELECT, PSWRD_REG_DBG_DWORD_ENABLE,
	PSWRD_REG_DBG_SHIFT, PSWRD_REG_DBG_FORCE_VALID,
	PSWRD_REG_DBG_FORCE_FRAME,
	true, false, DBG_RESET_REG_MISC_PL_HV, 2 };

static struct block_defs block_pswrd2_defs = {
	"pswrd2", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP },
	PSWRD2_REG_DBG_SELECT, PSWRD2_REG_DBG_DWORD_ENABLE,
	PSWRD2_REG_DBG_SHIFT, PSWRD2_REG_DBG_FORCE_VALID,
	PSWRD2_REG_DBG_FORCE_FRAME,
	true, false, DBG_RESET_REG_MISC_PL_HV, 2 };

static struct block_defs block_pswwr_defs = {
	"pswwr", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP },
	PSWWR_REG_DBG_SELECT, PSWWR_REG_DBG_DWORD_ENABLE,
	PSWWR_REG_DBG_SHIFT, PSWWR_REG_DBG_FORCE_VALID,
	PSWWR_REG_DBG_FORCE_FRAME,
	true, false, DBG_RESET_REG_MISC_PL_HV, 3 };

static struct block_defs block_pswwr2_defs = {
	"pswwr2", { true, true, true }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
	0, 0, 0, 0, 0,
	true, false, DBG_RESET_REG_MISC_PL_HV, 3 };

static struct block_defs block_pswrq_defs = {
	"pswrq", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP },
	PSWRQ_REG_DBG_SELECT, PSWRQ_REG_DBG_DWORD_ENABLE,
	PSWRQ_REG_DBG_SHIFT, PSWRQ_REG_DBG_FORCE_VALID,
	PSWRQ_REG_DBG_FORCE_FRAME,
	true, false, DBG_RESET_REG_MISC_PL_HV, 1 };

static struct block_defs block_pswrq2_defs = {
	"pswrq2", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP },
	PSWRQ2_REG_DBG_SELECT, PSWRQ2_REG_DBG_DWORD_ENABLE,
	PSWRQ2_REG_DBG_SHIFT, PSWRQ2_REG_DBG_FORCE_VALID,
	PSWRQ2_REG_DBG_FORCE_FRAME,
	true, false, DBG_RESET_REG_MISC_PL_HV, 1 };

static struct block_defs block_pglcs_defs = {
	"pglcs", { true, true, true }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH },
	PGLCS_REG_DBG_SELECT_K2_E5, PGLCS_REG_DBG_DWORD_ENABLE_K2_E5,
	PGLCS_REG_DBG_SHIFT_K2_E5, PGLCS_REG_DBG_FORCE_VALID_K2_E5,
	PGLCS_REG_DBG_FORCE_FRAME_K2_E5,
	true, false, DBG_RESET_REG_MISCS_PL_HV, 2 };

static struct block_defs block_ptu_defs = {
	"ptu", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP },
	PTU_REG_DBG_SELECT, PTU_REG_DBG_DWORD_ENABLE,
	PTU_REG_DBG_SHIFT, PTU_REG_DBG_FORCE_VALID,
	PTU_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 20 };

static struct block_defs block_dmae_defs = {
	"dmae", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP },
	DMAE_REG_DBG_SELECT, DMAE_REG_DBG_DWORD_ENABLE,
	DMAE_REG_DBG_SHIFT, DMAE_REG_DBG_FORCE_VALID,
	DMAE_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 28 };

static struct block_defs block_tcm_defs = {
	"tcm", { true, true, true }, true, DBG_TSTORM_ID,
	{ DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT },
	TCM_REG_DBG_SELECT, TCM_REG_DBG_DWORD_ENABLE,
	TCM_REG_DBG_SHIFT, TCM_REG_DBG_FORCE_VALID,
	TCM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 5 };

static struct block_defs block_mcm_defs = {
	"mcm", { true, true, true }, true, DBG_MSTORM_ID,
	{ DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM },
	MCM_REG_DBG_SELECT, MCM_REG_DBG_DWORD_ENABLE,
	MCM_REG_DBG_SHIFT, MCM_REG_DBG_FORCE_VALID,
	MCM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 3 };

static struct block_defs block_ucm_defs = {
	"ucm", { true, true, true }, true, DBG_USTORM_ID,
	{ DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU },
	UCM_REG_DBG_SELECT, UCM_REG_DBG_DWORD_ENABLE,
	UCM_REG_DBG_SHIFT, UCM_REG_DBG_FORCE_VALID,
	UCM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 8 };

static struct block_defs block_xcm_defs = {
	"xcm", { true, true, true }, true, DBG_XSTORM_ID,
	{ DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX },
	XCM_REG_DBG_SELECT, XCM_REG_DBG_DWORD_ENABLE,
	XCM_REG_DBG_SHIFT, XCM_REG_DBG_FORCE_VALID,
	XCM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 19 };

static struct block_defs block_ycm_defs = {
	"ycm", { true, true, true }, true, DBG_YSTORM_ID,
	{ DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY },
	YCM_REG_DBG_SELECT, YCM_REG_DBG_DWORD_ENABLE,
	YCM_REG_DBG_SHIFT, YCM_REG_DBG_FORCE_VALID,
	YCM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 5 };

static struct block_defs block_pcm_defs = {
	"pcm", { true, true, true }, true, DBG_PSTORM_ID,
	{ DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS },
	PCM_REG_DBG_SELECT, PCM_REG_DBG_DWORD_ENABLE,
	PCM_REG_DBG_SHIFT, PCM_REG_DBG_FORCE_VALID,
	PCM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 4 };

static struct block_defs block_qm_defs = {
	"qm", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCQ, DBG_BUS_CLIENT_RBCQ },
	QM_REG_DBG_SELECT, QM_REG_DBG_DWORD_ENABLE,
	QM_REG_DBG_SHIFT, QM_REG_DBG_FORCE_VALID,
	QM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 16 };

static struct block_defs block_tm_defs = {
	"tm", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS },
	TM_REG_DBG_SELECT, TM_REG_DBG_DWORD_ENABLE,
	TM_REG_DBG_SHIFT, TM_REG_DBG_FORCE_VALID,
	TM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 17 };

static struct block_defs block_dorq_defs = {
	"dorq", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY },
	DORQ_REG_DBG_SELECT, DORQ_REG_DBG_DWORD_ENABLE,
	DORQ_REG_DBG_SHIFT, DORQ_REG_DBG_FORCE_VALID,
	DORQ_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 18 };

static struct block_defs block_brb_defs = {
	"brb", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR },
	BRB_REG_DBG_SELECT, BRB_REG_DBG_DWORD_ENABLE,
	BRB_REG_DBG_SHIFT, BRB_REG_DBG_FORCE_VALID,
	BRB_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 0 };

static struct block_defs block_src_defs = {
	"src", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF },
	SRC_REG_DBG_SELECT, SRC_REG_DBG_DWORD_ENABLE,
	SRC_REG_DBG_SHIFT, SRC_REG_DBG_FORCE_VALID,
	SRC_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 2 };

static struct block_defs block_prs_defs = {
	"prs", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR },
	PRS_REG_DBG_SELECT, PRS_REG_DBG_DWORD_ENABLE,
	PRS_REG_DBG_SHIFT, PRS_REG_DBG_FORCE_VALID,
	PRS_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 1 };

static struct block_defs block_tsdm_defs = {
	"tsdm", { true, true, true }, true, DBG_TSTORM_ID,
	{ DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT },
	TSDM_REG_DBG_SELECT, TSDM_REG_DBG_DWORD_ENABLE,
	TSDM_REG_DBG_SHIFT, TSDM_REG_DBG_FORCE_VALID,
	TSDM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 3 };

static struct block_defs block_msdm_defs = {
	"msdm", { true, true, true }, true, DBG_MSTORM_ID,
	{ DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM },
	MSDM_REG_DBG_SELECT, MSDM_REG_DBG_DWORD_ENABLE,
	MSDM_REG_DBG_SHIFT, MSDM_REG_DBG_FORCE_VALID,
	MSDM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 6 };

static struct block_defs block_usdm_defs = {
	"usdm", { true, true, true }, true, DBG_USTORM_ID,
	{ DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU },
	USDM_REG_DBG_SELECT, USDM_REG_DBG_DWORD_ENABLE,
	USDM_REG_DBG_SHIFT, USDM_REG_DBG_FORCE_VALID,
	USDM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 7 };

static struct block_defs block_xsdm_defs = {
	"xsdm", { true, true, true }, true, DBG_XSTORM_ID,
	{ DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX },
	XSDM_REG_DBG_SELECT, XSDM_REG_DBG_DWORD_ENABLE,
	XSDM_REG_DBG_SHIFT, XSDM_REG_DBG_FORCE_VALID,
	XSDM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 20 };

static struct block_defs block_ysdm_defs = {
	"ysdm", { true, true, true }, true, DBG_YSTORM_ID,
	{ DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY },
	YSDM_REG_DBG_SELECT, YSDM_REG_DBG_DWORD_ENABLE,
	YSDM_REG_DBG_SHIFT, YSDM_REG_DBG_FORCE_VALID,
	YSDM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 8 };

static struct block_defs block_psdm_defs = {
	"psdm", { true, true, true }, true, DBG_PSTORM_ID,
	{ DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS },
	PSDM_REG_DBG_SELECT, PSDM_REG_DBG_DWORD_ENABLE,
	PSDM_REG_DBG_SHIFT, PSDM_REG_DBG_FORCE_VALID,
	PSDM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 7 };

static struct block_defs block_tsem_defs = {
	"tsem", { true, true, true }, true, DBG_TSTORM_ID,
	{ DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT },
	TSEM_REG_DBG_SELECT, TSEM_REG_DBG_DWORD_ENABLE,
	TSEM_REG_DBG_SHIFT, TSEM_REG_DBG_FORCE_VALID,
	TSEM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 4 };

static struct block_defs block_msem_defs = {
	"msem", { true, true, true }, true, DBG_MSTORM_ID,
	{ DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM },
	MSEM_REG_DBG_SELECT, MSEM_REG_DBG_DWORD_ENABLE,
	MSEM_REG_DBG_SHIFT, MSEM_REG_DBG_FORCE_VALID,
	MSEM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 9 };

static struct block_defs block_usem_defs = {
	"usem", { true, true, true }, true, DBG_USTORM_ID,
	{ DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU },
	USEM_REG_DBG_SELECT, USEM_REG_DBG_DWORD_ENABLE,
	USEM_REG_DBG_SHIFT, USEM_REG_DBG_FORCE_VALID,
	USEM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 9 };

static struct block_defs block_xsem_defs = {
	"xsem", { true, true, true }, true, DBG_XSTORM_ID,
	{ DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX },
	XSEM_REG_DBG_SELECT, XSEM_REG_DBG_DWORD_ENABLE,
	XSEM_REG_DBG_SHIFT, XSEM_REG_DBG_FORCE_VALID,
	XSEM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 21 };

static struct block_defs block_ysem_defs = {
	"ysem", { true, true, true }, true, DBG_YSTORM_ID,
	{ DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY },
	YSEM_REG_DBG_SELECT, YSEM_REG_DBG_DWORD_ENABLE,
	YSEM_REG_DBG_SHIFT, YSEM_REG_DBG_FORCE_VALID,
	YSEM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 11 };

static struct block_defs block_psem_defs = {
	"psem", { true, true, true }, true, DBG_PSTORM_ID,
	{ DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS },
	PSEM_REG_DBG_SELECT, PSEM_REG_DBG_DWORD_ENABLE,
	PSEM_REG_DBG_SHIFT, PSEM_REG_DBG_FORCE_VALID,
	PSEM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 10 };

static struct block_defs block_rss_defs = {
	"rss", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT },
	RSS_REG_DBG_SELECT, RSS_REG_DBG_DWORD_ENABLE,
	RSS_REG_DBG_SHIFT, RSS_REG_DBG_FORCE_VALID,
	RSS_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 18 };

static struct block_defs block_tmld_defs = {
	"tmld", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM },
	TMLD_REG_DBG_SELECT, TMLD_REG_DBG_DWORD_ENABLE,
	TMLD_REG_DBG_SHIFT, TMLD_REG_DBG_FORCE_VALID,
	TMLD_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 13 };

static struct block_defs block_muld_defs = {
	"muld", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU },
	MULD_REG_DBG_SELECT, MULD_REG_DBG_DWORD_ENABLE,
	MULD_REG_DBG_SHIFT, MULD_REG_DBG_FORCE_VALID,
	MULD_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 14 };

static struct block_defs block_yuld_defs = {
	"yuld", { true, true, false }, false, 0,
	{ DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, MAX_DBG_BUS_CLIENTS },
	YULD_REG_DBG_SELECT_BB_K2, YULD_REG_DBG_DWORD_ENABLE_BB_K2,
	YULD_REG_DBG_SHIFT_BB_K2, YULD_REG_DBG_FORCE_VALID_BB_K2,
	YULD_REG_DBG_FORCE_FRAME_BB_K2,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 15 };

static struct block_defs block_xyld_defs = {
	"xyld", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX },
	XYLD_REG_DBG_SELECT, XYLD_REG_DBG_DWORD_ENABLE,
	XYLD_REG_DBG_SHIFT, XYLD_REG_DBG_FORCE_VALID,
	XYLD_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 12 };

static struct block_defs block_ptld_defs = {
	"ptld", { false, false, true }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCT },
	PTLD_REG_DBG_SELECT_E5, PTLD_REG_DBG_DWORD_ENABLE_E5,
	PTLD_REG_DBG_SHIFT_E5, PTLD_REG_DBG_FORCE_VALID_E5,
	PTLD_REG_DBG_FORCE_FRAME_E5,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 28 };

static struct block_defs block_ypld_defs = {
	"ypld", { false, false, true }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCS },
	YPLD_REG_DBG_SELECT_E5, YPLD_REG_DBG_DWORD_ENABLE_E5,
	YPLD_REG_DBG_SHIFT_E5, YPLD_REG_DBG_FORCE_VALID_E5,
	YPLD_REG_DBG_FORCE_FRAME_E5,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 27 };

static struct block_defs block_prm_defs = {
	"prm", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM },
	PRM_REG_DBG_SELECT, PRM_REG_DBG_DWORD_ENABLE,
	PRM_REG_DBG_SHIFT, PRM_REG_DBG_FORCE_VALID,
	PRM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 21 };

static struct block_defs block_pbf_pb1_defs = {
	"pbf_pb1", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV },
	PBF_PB1_REG_DBG_SELECT, PBF_PB1_REG_DBG_DWORD_ENABLE,
	PBF_PB1_REG_DBG_SHIFT, PBF_PB1_REG_DBG_FORCE_VALID,
	PBF_PB1_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 11 };

static struct block_defs block_pbf_pb2_defs = {
	"pbf_pb2", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV },
	PBF_PB2_REG_DBG_SELECT, PBF_PB2_REG_DBG_DWORD_ENABLE,
	PBF_PB2_REG_DBG_SHIFT, PBF_PB2_REG_DBG_FORCE_VALID,
	PBF_PB2_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 12 };

static struct block_defs block_rpb_defs = {
	"rpb", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM },
	RPB_REG_DBG_SELECT, RPB_REG_DBG_DWORD_ENABLE,
	RPB_REG_DBG_SHIFT, RPB_REG_DBG_FORCE_VALID,
	RPB_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 13 };

static struct block_defs block_btb_defs = {
	"btb", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV },
	BTB_REG_DBG_SELECT, BTB_REG_DBG_DWORD_ENABLE,
	BTB_REG_DBG_SHIFT, BTB_REG_DBG_FORCE_VALID,
	BTB_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 10 };

static struct block_defs block_pbf_defs = {
	"pbf", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV },
	PBF_REG_DBG_SELECT, PBF_REG_DBG_DWORD_ENABLE,
	PBF_REG_DBG_SHIFT, PBF_REG_DBG_FORCE_VALID,
	PBF_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 15 };

static struct block_defs block_rdif_defs = {
	"rdif", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM },
	RDIF_REG_DBG_SELECT, RDIF_REG_DBG_DWORD_ENABLE,
	RDIF_REG_DBG_SHIFT, RDIF_REG_DBG_FORCE_VALID,
	RDIF_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 16 };

static struct block_defs block_tdif_defs = {
	"tdif", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS },
	TDIF_REG_DBG_SELECT, TDIF_REG_DBG_DWORD_ENABLE,
	TDIF_REG_DBG_SHIFT, TDIF_REG_DBG_FORCE_VALID,
	TDIF_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 17 };

static struct block_defs block_cdu_defs = {
	"cdu", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF },
	CDU_REG_DBG_SELECT, CDU_REG_DBG_DWORD_ENABLE,
	CDU_REG_DBG_SHIFT, CDU_REG_DBG_FORCE_VALID,
	CDU_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 23 };

static struct block_defs block_ccfc_defs = {
	"ccfc", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF },
	CCFC_REG_DBG_SELECT, CCFC_REG_DBG_DWORD_ENABLE,
	CCFC_REG_DBG_SHIFT, CCFC_REG_DBG_FORCE_VALID,
	CCFC_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 24 };

static struct block_defs block_tcfc_defs = {
	"tcfc", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF },
	TCFC_REG_DBG_SELECT, TCFC_REG_DBG_DWORD_ENABLE,
	TCFC_REG_DBG_SHIFT, TCFC_REG_DBG_FORCE_VALID,
	TCFC_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 25 };

static struct block_defs block_igu_defs = {
	"igu", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP },
	IGU_REG_DBG_SELECT, IGU_REG_DBG_DWORD_ENABLE,
	IGU_REG_DBG_SHIFT, IGU_REG_DBG_FORCE_VALID,
	IGU_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 27 };

static struct block_defs block_cau_defs = {
	"cau", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP },
	CAU_REG_DBG_SELECT, CAU_REG_DBG_DWORD_ENABLE,
	CAU_REG_DBG_SHIFT, CAU_REG_DBG_FORCE_VALID,
	CAU_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 19 };

/* TODO: add debug bus parameters when E5 RGFS RF is added */
static struct block_defs block_rgfs_defs = {
	"rgfs", { false, false, true }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
	0, 0, 0, 0, 0,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 29 };

static struct block_defs block_rgsrc_defs = {
	"rgsrc", { false, false, true }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH },
	RGSRC_REG_DBG_SELECT_E5, RGSRC_REG_DBG_DWORD_ENABLE_E5,
	RGSRC_REG_DBG_SHIFT_E5, RGSRC_REG_DBG_FORCE_VALID_E5,
	RGSRC_REG_DBG_FORCE_FRAME_E5,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 30 };

/* TODO: add debug bus parameters when E5 TGFS RF is added */
static struct block_defs block_tgfs_defs = {
	"tgfs", { false, false, true }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
	0, 0, 0, 0, 0,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 30 };

static struct block_defs block_tgsrc_defs = {
	"tgsrc", { false, false, true }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCV },
	TGSRC_REG_DBG_SELECT_E5, TGSRC_REG_DBG_DWORD_ENABLE_E5,
	TGSRC_REG_DBG_SHIFT_E5, TGSRC_REG_DBG_FORCE_VALID_E5,
	TGSRC_REG_DBG_FORCE_FRAME_E5,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 31 };

static struct block_defs block_umac_defs = {
	"umac", { true, true, true }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ },
	UMAC_REG_DBG_SELECT_K2_E5, UMAC_REG_DBG_DWORD_ENABLE_K2_E5,
	UMAC_REG_DBG_SHIFT_K2_E5, UMAC_REG_DBG_FORCE_VALID_K2_E5,
	UMAC_REG_DBG_FORCE_FRAME_K2_E5,
	true, false, DBG_RESET_REG_MISCS_PL_HV, 6 };

static struct block_defs block_xmac_defs = {
	"xmac", { true, false, false }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
	0, 0, 0, 0, 0,
	false, false, MAX_DBG_RESET_REGS, 0 };

static struct block_defs block_dbg_defs = {
	"dbg", { true, true, true }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
	0, 0, 0, 0, 0,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 3 };

static struct block_defs block_nig_defs = {
	"nig", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN },
	NIG_REG_DBG_SELECT, NIG_REG_DBG_DWORD_ENABLE,
	NIG_REG_DBG_SHIFT, NIG_REG_DBG_FORCE_VALID,
	NIG_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 0 };

static struct block_defs block_wol_defs = {
	"wol", { false, true, true }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ },
	WOL_REG_DBG_SELECT_K2_E5, WOL_REG_DBG_DWORD_ENABLE_K2_E5,
	WOL_REG_DBG_SHIFT_K2_E5, WOL_REG_DBG_FORCE_VALID_K2_E5,
	WOL_REG_DBG_FORCE_FRAME_K2_E5,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 7 };

static struct block_defs block_bmbn_defs = {
	"bmbn", { false, true, true }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCB, DBG_BUS_CLIENT_RBCB },
	BMBN_REG_DBG_SELECT_K2_E5, BMBN_REG_DBG_DWORD_ENABLE_K2_E5,
	BMBN_REG_DBG_SHIFT_K2_E5, BMBN_REG_DBG_FORCE_VALID_K2_E5,
	BMBN_REG_DBG_FORCE_FRAME_K2_E5,
	false, false, MAX_DBG_RESET_REGS, 0 };

static struct block_defs block_ipc_defs = {
	"ipc", { true, true, true }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
	0, 0, 0, 0, 0,
	true, false, DBG_RESET_REG_MISCS_PL_UA, 8 };

static struct block_defs block_nwm_defs = {
	"nwm", { false, true, true }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW, DBG_BUS_CLIENT_RBCW },
	NWM_REG_DBG_SELECT_K2_E5, NWM_REG_DBG_DWORD_ENABLE_K2_E5,
	NWM_REG_DBG_SHIFT_K2_E5, NWM_REG_DBG_FORCE_VALID_K2_E5,
	NWM_REG_DBG_FORCE_FRAME_K2_E5,
	true, false, DBG_RESET_REG_MISCS_PL_HV_2, 0 };

static struct block_defs block_nws_defs = {
	"nws", { false, true, true }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW, DBG_BUS_CLIENT_RBCW },
	NWS_REG_DBG_SELECT_K2_E5, NWS_REG_DBG_DWORD_ENABLE_K2_E5,
	NWS_REG_DBG_SHIFT_K2_E5, NWS_REG_DBG_FORCE_VALID_K2_E5,
	NWS_REG_DBG_FORCE_FRAME_K2_E5,
	true, false, DBG_RESET_REG_MISCS_PL_HV, 12 };

static struct block_defs block_ms_defs = {
	"ms", { false, true, true }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ },
	MS_REG_DBG_SELECT_K2_E5, MS_REG_DBG_DWORD_ENABLE_K2_E5,
	MS_REG_DBG_SHIFT_K2_E5, MS_REG_DBG_FORCE_VALID_K2_E5,
	MS_REG_DBG_FORCE_FRAME_K2_E5,
	true, false, DBG_RESET_REG_MISCS_PL_HV, 13 };

static struct block_defs block_phy_pcie_defs = {
	"phy_pcie", { false, true, true }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH },
	PCIE_REG_DBG_COMMON_SELECT_K2_E5, PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2_E5,
	PCIE_REG_DBG_COMMON_SHIFT_K2_E5, PCIE_REG_DBG_COMMON_FORCE_VALID_K2_E5,
	PCIE_REG_DBG_COMMON_FORCE_FRAME_K2_E5,
	false, false, MAX_DBG_RESET_REGS, 0 };

static struct block_defs block_led_defs = {
	"led", { false, true, true }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
	0, 0, 0, 0, 0,
	true, false, DBG_RESET_REG_MISCS_PL_HV, 14 };

static struct block_defs block_avs_wrap_defs = {
	"avs_wrap", { false, true, false }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
	0, 0, 0, 0, 0,
	true, false, DBG_RESET_REG_MISCS_PL_UA, 11 };

static struct block_defs block_pxpreqbus_defs = {
	"pxpreqbus", { false, false, false }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
	0, 0, 0, 0, 0,
	false, false, MAX_DBG_RESET_REGS, 0 };

static struct block_defs block_misc_aeu_defs = {
	"misc_aeu", { true, true, true }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
	0, 0, 0, 0, 0,
	false, false, MAX_DBG_RESET_REGS, 0 };

static struct block_defs block_bar0_map_defs = {
	"bar0_map", { true, true, true }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
	0, 0, 0, 0, 0,
	false, false, MAX_DBG_RESET_REGS, 0 };

static struct block_defs* s_block_defs[MAX_BLOCK_ID] = {
	&block_grc_defs,
	&block_miscs_defs,
	&block_misc_defs,
	&block_dbu_defs,
	&block_pglue_b_defs,
	&block_cnig_defs,
	&block_cpmu_defs,
	&block_ncsi_defs,
	&block_opte_defs,
	&block_bmb_defs,
	&block_pcie_defs,
	&block_mcp_defs,
	&block_mcp2_defs,
	&block_pswhst_defs,
	&block_pswhst2_defs,
	&block_pswrd_defs,
	&block_pswrd2_defs,
	&block_pswwr_defs,
	&block_pswwr2_defs,
	&block_pswrq_defs,
	&block_pswrq2_defs,
	&block_pglcs_defs,
	&block_dmae_defs,
	&block_ptu_defs,
	&block_tcm_defs,
	&block_mcm_defs,
	&block_ucm_defs,
	&block_xcm_defs,
	&block_ycm_defs,
	&block_pcm_defs,
	&block_qm_defs,
	&block_tm_defs,
	&block_dorq_defs,
	&block_brb_defs,
	&block_src_defs,
	&block_prs_defs,
	&block_tsdm_defs,
	&block_msdm_defs,
	&block_usdm_defs,
	&block_xsdm_defs,
	&block_ysdm_defs,
	&block_psdm_defs,
	&block_tsem_defs,
	&block_msem_defs,
	&block_usem_defs,
	&block_xsem_defs,
	&block_ysem_defs,
	&block_psem_defs,
	&block_rss_defs,
	&block_tmld_defs,
	&block_muld_defs,
	&block_yuld_defs,
	&block_xyld_defs,
	&block_ptld_defs,
	&block_ypld_defs,
	&block_prm_defs,
	&block_pbf_pb1_defs,
	&block_pbf_pb2_defs,
	&block_rpb_defs,
	&block_btb_defs,
	&block_pbf_defs,
	&block_rdif_defs,
	&block_tdif_defs,
	&block_cdu_defs,
	&block_ccfc_defs,
	&block_tcfc_defs,
	&block_igu_defs,
	&block_cau_defs,
	&block_rgfs_defs,
	&block_rgsrc_defs,
	&block_tgfs_defs,
	&block_tgsrc_defs,
	&block_umac_defs,
	&block_xmac_defs,
	&block_dbg_defs,
	&block_nig_defs,
	&block_wol_defs,
	&block_bmbn_defs,
	&block_ipc_defs,
	&block_nwm_defs,
	&block_nws_defs,
	&block_ms_defs,
	&block_phy_pcie_defs,
	&block_led_defs,
	&block_avs_wrap_defs,
	&block_pxpreqbus_defs,
	&block_misc_aeu_defs,
	&block_bar0_map_defs,
};
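
/* s_block_defs is indexed by enum block_id; the pointer order above must
 * match that enum.
 */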
1493 
1494 /* Constraint operation types */
1495 static struct dbg_bus_constraint_op_defs s_constraint_op_defs[] = {
1496 	/* DBG_BUS_CONSTRAINT_OP_EQ */
1497 	{ 0, false },
1498 
1499 	/* DBG_BUS_CONSTRAINT_OP_NE */
1500 	{ 5, false },
1501 
1502 	/* DBG_BUS_CONSTRAINT_OP_LT */
1503 	{ 1, false },
1504 
1505 	/* DBG_BUS_CONSTRAINT_OP_LTC */
1506 	{ 1, true },
1507 
1508 	/* DBG_BUS_CONSTRAINT_OP_LE */
1509 	{ 2, false },
1510 
1511 	/* DBG_BUS_CONSTRAINT_OP_LEC */
1512 	{ 2, true },
1513 
1514 	/* DBG_BUS_CONSTRAINT_OP_GT */
1515 	{ 4, false },
1516 
1517 	/* DBG_BUS_CONSTRAINT_OP_GTC */
1518 	{ 4, true },
1519 
1520 	/* DBG_BUS_CONSTRAINT_OP_GE */
1521 	{ 3, false },
1522 
1523 	/* DBG_BUS_CONSTRAINT_OP_GEC */
1524 	{ 3, true }
1525 };
1526 
1527 static const char* s_dbg_target_names[] = {
1528 	/* DBG_BUS_TARGET_ID_INT_BUF */
1529 	"int-buf",
1530 
1531 	/* DBG_BUS_TARGET_ID_NIG */
1532 	"nw",
1533 
1534 	/* DBG_BUS_TARGET_ID_PCI */
1535 	"pci-buf"
1536 };
1537 
1538 static struct storm_mode_defs s_storm_mode_defs[] = {
1539 	/* DBG_BUS_STORM_MODE_PRINTF */
1540 	{ "printf", true, 0 },
1541 
1542 	/* DBG_BUS_STORM_MODE_PRAM_ADDR */
1543 	{ "pram_addr", true, 1 },
1544 
1545 	/* DBG_BUS_STORM_MODE_DRA_RW */
1546 	{ "dra_rw", true, 2 },
1547 
1548 	/* DBG_BUS_STORM_MODE_DRA_W */
1549 	{ "dra_w", true, 3 },
1550 
1551 	/* DBG_BUS_STORM_MODE_LD_ST_ADDR */
1552 	{ "ld_st_addr", true, 4 },
1553 
1554 	/* DBG_BUS_STORM_MODE_DRA_FSM */
1555 	{ "dra_fsm", true, 5 },
1556 
1557 	/* DBG_BUS_STORM_MODE_RH */
1558 	{ "rh", true, 6 },
1559 
1560 	/* DBG_BUS_STORM_MODE_FOC */
1561 	{ "foc", false, 1 },
1562 
1563 	/* DBG_BUS_STORM_MODE_EXT_STORE */
1564 	{ "ext_store", false, 3 }
1565 };
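
/* Each entry above holds the storm mode name, whether it is a fast-debug
 * mode, and the mode ID programmed into the HW; see ecore_bus_enable_storm(),
 * which selects between the fast and slow debug registers based on the
 * is_fast_dbg flag.
 */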
1566 
1567 static struct platform_defs s_platform_defs[] = {
1568 	/* PLATFORM_ASIC */
1569 	{ "asic", 1, 256, 32768 },
1570 
1571 	/* PLATFORM_EMUL_FULL */
1572 	{ "emul_full", 2000, 8, 4096 },
1573 
1574 	/* PLATFORM_EMUL_REDUCED */
1575 	{ "emul_reduced", 2000, 8, 4096 },
1576 
1577 	/* PLATFORM_FPGA */
1578 	{ "fpga", 200, 32, 8192 }
1579 };
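
/* Field order above (an assumption, per struct platform_defs): platform name,
 * delay factor used to scale polling delays (see the SEMI sync FIFO polling
 * in ecore_bus_disable_inputs()), and two platform-specific thresholds.
 */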
1580 
1581 static struct grc_param_defs s_grc_param_defs[] = {
1582 	/* DBG_GRC_PARAM_DUMP_TSTORM */
1583 	{ { 1, 1, 1 }, 0, 1, false, 1, 1 },
1584 
1585 	/* DBG_GRC_PARAM_DUMP_MSTORM */
1586 	{ { 1, 1, 1 }, 0, 1, false, 1, 1 },
1587 
1588 	/* DBG_GRC_PARAM_DUMP_USTORM */
1589 	{ { 1, 1, 1 }, 0, 1, false, 1, 1 },
1590 
1591 	/* DBG_GRC_PARAM_DUMP_XSTORM */
1592 	{ { 1, 1, 1 }, 0, 1, false, 1, 1 },
1593 
1594 	/* DBG_GRC_PARAM_DUMP_YSTORM */
1595 	{ { 1, 1, 1 }, 0, 1, false, 1, 1 },
1596 
1597 	/* DBG_GRC_PARAM_DUMP_PSTORM */
1598 	{ { 1, 1, 1 }, 0, 1, false, 1, 1 },
1599 
1600 	/* DBG_GRC_PARAM_DUMP_REGS */
1601 	{ { 1, 1, 1 }, 0, 1, false, 0, 1 },
1602 
1603 	/* DBG_GRC_PARAM_DUMP_RAM */
1604 	{ { 1, 1, 1 }, 0, 1, false, 0, 1 },
1605 
1606 	/* DBG_GRC_PARAM_DUMP_PBUF */
1607 	{ { 1, 1, 1 }, 0, 1, false, 0, 1 },
1608 
1609 	/* DBG_GRC_PARAM_DUMP_IOR */
1610 	{ { 0, 0, 0 }, 0, 1, false, 0, 1 },
1611 
1612 	/* DBG_GRC_PARAM_DUMP_VFC */
1613 	{ { 0, 0, 0 }, 0, 1, false, 0, 1 },
1614 
1615 	/* DBG_GRC_PARAM_DUMP_CM_CTX */
1616 	{ { 1, 1, 1 }, 0, 1, false, 0, 1 },
1617 
1618 	/* DBG_GRC_PARAM_DUMP_ILT */
1619 	{ { 1, 1, 1 }, 0, 1, false, 0, 1 },
1620 
1621 	/* DBG_GRC_PARAM_DUMP_RSS */
1622 	{ { 1, 1, 1 }, 0, 1, false, 0, 1 },
1623 
1624 	/* DBG_GRC_PARAM_DUMP_CAU */
1625 	{ { 1, 1, 1 }, 0, 1, false, 0, 1 },
1626 
1627 	/* DBG_GRC_PARAM_DUMP_QM */
1628 	{ { 1, 1, 1 }, 0, 1, false, 0, 1 },
1629 
1630 	/* DBG_GRC_PARAM_DUMP_MCP */
1631 	{ { 1, 1, 1 }, 0, 1, false, 0, 1 },
1632 
1633 	/* DBG_GRC_PARAM_RESERVED */
1634 	{ { 1, 1, 1 }, 0, 1, false, 0, 1 },
1635 
1636 	/* DBG_GRC_PARAM_DUMP_CFC */
1637 	{ { 1, 1, 1 }, 0, 1, false, 0, 1 },
1638 
1639 	/* DBG_GRC_PARAM_DUMP_IGU */
1640 	{ { 1, 1, 1 }, 0, 1, false, 0, 1 },
1641 
1642 	/* DBG_GRC_PARAM_DUMP_BRB */
1643 	{ { 0, 0, 0 }, 0, 1, false, 0, 1 },
1644 
1645 	/* DBG_GRC_PARAM_DUMP_BTB */
1646 	{ { 0, 0, 0 }, 0, 1, false, 0, 1 },
1647 
1648 	/* DBG_GRC_PARAM_DUMP_BMB */
1649 	{ { 0, 0, 0 }, 0, 1, false, 0, 1 },
1650 
1651 	/* DBG_GRC_PARAM_DUMP_NIG */
1652 	{ { 1, 1, 1 }, 0, 1, false, 0, 1 },
1653 
1654 	/* DBG_GRC_PARAM_DUMP_MULD */
1655 	{ { 1, 1, 1 }, 0, 1, false, 0, 1 },
1656 
1657 	/* DBG_GRC_PARAM_DUMP_PRS */
1658 	{ { 1, 1, 1 }, 0, 1, false, 0, 1 },
1659 
1660 	/* DBG_GRC_PARAM_DUMP_DMAE */
1661 	{ { 1, 1, 1 }, 0, 1, false, 0, 1 },
1662 
1663 	/* DBG_GRC_PARAM_DUMP_TM */
1664 	{ { 1, 1, 1 }, 0, 1, false, 0, 1 },
1665 
1666 	/* DBG_GRC_PARAM_DUMP_SDM */
1667 	{ { 1, 1, 1 }, 0, 1, false, 0, 1 },
1668 
1669 	/* DBG_GRC_PARAM_DUMP_DIF */
1670 	{ { 1, 1, 1 }, 0, 1, false, 0, 1 },
1671 
1672 	/* DBG_GRC_PARAM_DUMP_STATIC */
1673 	{ { 1, 1, 1 }, 0, 1, false, 0, 1 },
1674 
1675 	/* DBG_GRC_PARAM_UNSTALL */
1676 	{ { 0, 0, 0 }, 0, 1, false, 0, 0 },
1677 
1678 	/* DBG_GRC_PARAM_NUM_LCIDS */
1679 	{ { MAX_LCIDS, MAX_LCIDS, MAX_LCIDS }, 1, MAX_LCIDS, false, MAX_LCIDS, MAX_LCIDS },
1680 
1681 	/* DBG_GRC_PARAM_NUM_LTIDS */
1682 	{ { MAX_LTIDS, MAX_LTIDS, MAX_LTIDS }, 1, MAX_LTIDS, false, MAX_LTIDS, MAX_LTIDS },
1683 
1684 	/* DBG_GRC_PARAM_EXCLUDE_ALL */
1685 	{ { 0, 0, 0 }, 0, 1, true, 0, 0 },
1686 
1687 	/* DBG_GRC_PARAM_CRASH */
1688 	{ { 0, 0, 0 }, 0, 1, true, 0, 0 },
1689 
1690 	/* DBG_GRC_PARAM_PARITY_SAFE */
1691 	{ { 0, 0, 0 }, 0, 1, false, 1, 0 },
1692 
1693 	/* DBG_GRC_PARAM_DUMP_CM */
1694 	{ { 1, 1, 1 }, 0, 1, false, 0, 1 },
1695 
1696 	/* DBG_GRC_PARAM_DUMP_PHY */
1697 	{ { 1, 1, 1 }, 0, 1, false, 0, 1 },
1698 
1699 	/* DBG_GRC_PARAM_NO_MCP */
1700 	{ { 0, 0, 0 }, 0, 1, false, 0, 0 },
1701 
1702 	/* DBG_GRC_PARAM_NO_FW_VER */
1703 	{ { 0, 0, 0 }, 0, 1, false, 0, 0 }
1704 };
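
/* Field order above (an assumption, per struct grc_param_defs): per-chip
 * default value, minimum and maximum allowed values, whether the parameter is
 * itself a preset, and the values forced by the EXCLUDE_ALL and CRASH presets.
 */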
1705 
1706 static struct rss_mem_defs s_rss_mem_defs[] = {
1707 	{ "rss_mem_cid", "rss_cid", 0, 32,
1708 	{ 256, 320, 512 } },
1709 
1710 	{ "rss_mem_key_msb", "rss_key", 1024, 256,
1711 	{ 128, 208, 257 } },
1712 
1713 	{ "rss_mem_key_lsb", "rss_key", 2048, 64,
1714 	{ 128, 208, 257 } },
1715 
1716 	{ "rss_mem_info", "rss_info", 3072, 16,
1717 	{ 128, 208, 256 } },
1718 
1719 	{ "rss_mem_ind", "rss_ind", 4096, 16,
1720 	{ 16384, 26624, 32768 } }
1721 };
1722 
1723 static struct vfc_ram_defs s_vfc_ram_defs[] = {
1724 	{ "vfc_ram_tt1", "vfc_ram", 0, 512 },
1725 	{ "vfc_ram_mtt2", "vfc_ram", 512, 128 },
1726 	{ "vfc_ram_stt2", "vfc_ram", 640, 32 },
1727 	{ "vfc_ram_ro_vect", "vfc_ram", 672, 32 }
1728 };
1729 
1730 static struct big_ram_defs s_big_ram_defs[] = {
1731 	{ "BRB", MEM_GROUP_BRB_MEM, MEM_GROUP_BRB_RAM, DBG_GRC_PARAM_DUMP_BRB,
1732 	  BRB_REG_BIG_RAM_ADDRESS, BRB_REG_BIG_RAM_DATA, MISC_REG_BLOCK_256B_EN, { 0, 0, 0 },
1733 	  { 153600, 180224, 282624 } },
1734 
1735 	{ "BTB", MEM_GROUP_BTB_MEM, MEM_GROUP_BTB_RAM, DBG_GRC_PARAM_DUMP_BTB,
1736 	  BTB_REG_BIG_RAM_ADDRESS, BTB_REG_BIG_RAM_DATA, MISC_REG_BLOCK_256B_EN, { 0, 1, 1 },
1737 	  { 92160, 117760, 168960 } },
1738 
1739 	{ "BMB", MEM_GROUP_BMB_MEM, MEM_GROUP_BMB_RAM, DBG_GRC_PARAM_DUMP_BMB,
1740 	  BMB_REG_BIG_RAM_ADDRESS, BMB_REG_BIG_RAM_DATA, MISCS_REG_BLOCK_256B_EN, { 0, 0, 0 },
1741 	  { 36864, 36864, 36864 } }
1742 };
1743 
1744 static struct reset_reg_defs s_reset_regs_defs[] = {
1745 	/* DBG_RESET_REG_MISCS_PL_UA */
1746 	{ MISCS_REG_RESET_PL_UA, { true, true, true }, { 0x0, 0x0, 0x0 } },
1747 
1748 	/* DBG_RESET_REG_MISCS_PL_HV */
1749 	{ MISCS_REG_RESET_PL_HV, { true, true, true }, { 0x0, 0x400, 0x600 } },
1750 
1751 	/* DBG_RESET_REG_MISCS_PL_HV_2 */
1752 	{ MISCS_REG_RESET_PL_HV_2_K2_E5, { false, true, true }, { 0x0, 0x0, 0x0 } },
1753 
1754 	/* DBG_RESET_REG_MISC_PL_UA */
1755 	{ MISC_REG_RESET_PL_UA, { true, true, true }, { 0x0, 0x0, 0x0 } },
1756 
1757 	/* DBG_RESET_REG_MISC_PL_HV */
1758 	{ MISC_REG_RESET_PL_HV, { true, true, true }, { 0x0, 0x0, 0x0 } },
1759 
1760 	/* DBG_RESET_REG_MISC_PL_PDA_VMAIN_1 */
1761 	{ MISC_REG_RESET_PL_PDA_VMAIN_1, { true, true, true }, { 0x4404040, 0x4404040, 0x404040 } },
1762 
1763 	/* DBG_RESET_REG_MISC_PL_PDA_VMAIN_2 */
1764 	{ MISC_REG_RESET_PL_PDA_VMAIN_2, { true, true, true }, { 0x7, 0x7c00007, 0x5c08007 } },
1765 
1766 	/* DBG_RESET_REG_MISC_PL_PDA_VAUX */
1767 	{ MISC_REG_RESET_PL_PDA_VAUX, { true, true, true }, { 0x2, 0x2, 0x2 } },
1768 };
1769 
1770 static struct phy_defs s_phy_defs[] = {
1771 	{ "nw_phy", NWS_REG_NWS_CMU_K2, PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2_E5, PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2_E5, PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2_E5, PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2_E5 },
1772 	{ "sgmii_phy", MS_REG_MS_CMU_K2_E5, PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2_E5, PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2_E5, PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2_E5, PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2_E5 },
1773 	{ "pcie_phy0", PHY_PCIE_REG_PHY0_K2_E5, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2_E5, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2_E5, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2_E5, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5 },
1774 	{ "pcie_phy1", PHY_PCIE_REG_PHY1_K2_E5, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2_E5, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2_E5, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2_E5, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5 },
1775 };
1776 
1777 /* The order of indexes that should be applied to a PCI buffer line */
1778 static const u8 s_pci_buf_line_ind[PCI_BUF_LINE_SIZE_IN_DWORDS] = { 1, 0, 3, 2, 5, 4, 7, 6 };
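
/* Illustration: raw dword i of a PCI buffer line is stored at logical slot
 * s_pci_buf_line_ind[i] (see ecore_bus_dump_pci_buf_range()), so a line read
 * from the PCI buffer as { D0, D1, D2, D3, D4, D5, D6, D7 } lands in the dump
 * as { D1, D0, D3, D2, D5, D4, D7, D6 }, i.e. each pair of dwords is swapped.
 */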
1779 
1780 /******************************** Variables **********************************/
1781 
1782 /* The version of the calling app */
1783 static u32 s_app_ver;
1784 
1785 /**************************** Private Functions ******************************/
1786 
1787 static void ecore_static_asserts(void)
1788 {
1789 	CHECK_ARR_SIZE(s_dbg_arrays, MAX_BIN_DBG_BUFFER_TYPE);
1790 	CHECK_ARR_SIZE(s_big_ram_defs, NUM_BIG_RAM_TYPES);
1791 	CHECK_ARR_SIZE(s_vfc_ram_defs, NUM_VFC_RAM_TYPES);
1792 	CHECK_ARR_SIZE(s_rss_mem_defs, NUM_RSS_MEM_TYPES);
1793 	CHECK_ARR_SIZE(s_chip_defs, MAX_CHIP_IDS);
1794 	CHECK_ARR_SIZE(s_platform_defs, MAX_PLATFORM_IDS);
1795 	CHECK_ARR_SIZE(s_storm_defs, MAX_DBG_STORMS);
1796 	CHECK_ARR_SIZE(s_constraint_op_defs, MAX_DBG_BUS_CONSTRAINT_OPS);
1797 	CHECK_ARR_SIZE(s_dbg_target_names, MAX_DBG_BUS_TARGETS);
1798 	CHECK_ARR_SIZE(s_storm_mode_defs, MAX_DBG_BUS_STORM_MODES);
1799 	CHECK_ARR_SIZE(s_grc_param_defs, MAX_DBG_GRC_PARAMS);
1800 	CHECK_ARR_SIZE(s_reset_regs_defs, MAX_DBG_RESET_REGS);
1801 }
1802 
1803 /* Reads and returns a single dword from the specified unaligned buffer. */
1804 static u32 ecore_read_unaligned_dword(u8 *buf)
1805 {
1806 	u32 dword;
1807 
1808 	OSAL_MEMCPY((u8 *)&dword, buf, sizeof(dword));
1809 	return dword;
1810 }
1811 
1812 /* Returns the difference in bytes between the specified physical addresses.
1813  * Assumes that the first address is bigger than the second, and that the
1814  * difference is a 32-bit value.
1815  */
1816 static u32 ecore_phys_addr_diff(struct dbg_bus_mem_addr *a,
1817 								struct dbg_bus_mem_addr *b)
1818 {
1819 	return a->hi == b->hi ? a->lo - b->lo : b->lo - a->lo;
1820 }
1821 
1822 /* Sets the value of the specified GRC param */
1823 static void ecore_grc_set_param(struct ecore_hwfn *p_hwfn,
1824 				 enum dbg_grc_params grc_param,
1825 				 u32 val)
1826 {
1827 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1828 
1829 	dev_data->grc.param_val[grc_param] = val;
1830 }
1831 
1832 /* Returns the value of the specified GRC param */
1833 static u32 ecore_grc_get_param(struct ecore_hwfn *p_hwfn,
1834 							   enum dbg_grc_params grc_param)
1835 {
1836 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1837 
1838 	return dev_data->grc.param_val[grc_param];
1839 }
1840 
1841 /* Initializes the GRC parameters */
1842 static void ecore_dbg_grc_init_params(struct ecore_hwfn *p_hwfn)
1843 {
1844 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1845 
1846 	if (!dev_data->grc.params_initialized) {
1847 		ecore_dbg_grc_set_params_default(p_hwfn);
1848 		dev_data->grc.params_initialized = 1;
1849 	}
1850 }
1851 
1852 /* Initializes debug data for the specified device */
1853 static enum dbg_status ecore_dbg_dev_init(struct ecore_hwfn *p_hwfn,
1854 										  struct ecore_ptt *p_ptt)
1855 {
1856 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1857 
1858 	if (dev_data->initialized)
1859 		return DBG_STATUS_OK;
1860 
1861 	if (!s_app_ver)
1862 		return DBG_STATUS_APP_VERSION_NOT_SET;
1863 
1864 	if (ECORE_IS_E5(p_hwfn->p_dev)) {
1865 		dev_data->chip_id = CHIP_E5;
1866 		dev_data->mode_enable[MODE_E5] = 1;
1867 	}
1868 	else if (ECORE_IS_K2(p_hwfn->p_dev)) {
1869 		dev_data->chip_id = CHIP_K2;
1870 		dev_data->mode_enable[MODE_K2] = 1;
1871 	}
1872 	else if (ECORE_IS_BB_B0(p_hwfn->p_dev)) {
1873 		dev_data->chip_id = CHIP_BB;
1874 		dev_data->mode_enable[MODE_BB] = 1;
1875 	}
1876 	else {
1877 		return DBG_STATUS_UNKNOWN_CHIP;
1878 	}
1879 
1880 #ifdef ASIC_ONLY
1881 	dev_data->platform_id = PLATFORM_ASIC;
1882 	dev_data->mode_enable[MODE_ASIC] = 1;
1883 #else
1884 	if (CHIP_REV_IS_ASIC(p_hwfn->p_dev)) {
1885 		dev_data->platform_id = PLATFORM_ASIC;
1886 		dev_data->mode_enable[MODE_ASIC] = 1;
1887 	}
1888 	else if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
1889 		if (ecore_rd(p_hwfn, p_ptt, MISCS_REG_ECO_RESERVED) & 0x20000000) {
1890 			dev_data->platform_id = PLATFORM_EMUL_FULL;
1891 			dev_data->mode_enable[MODE_EMUL_FULL] = 1;
1892 		}
1893 		else {
1894 			dev_data->platform_id = PLATFORM_EMUL_REDUCED;
1895 			dev_data->mode_enable[MODE_EMUL_REDUCED] = 1;
1896 		}
1897 	}
1898 	else if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
1899 		dev_data->platform_id = PLATFORM_FPGA;
1900 		dev_data->mode_enable[MODE_FPGA] = 1;
1901 	}
1902 	else {
1903 		return DBG_STATUS_UNKNOWN_CHIP;
1904 	}
1905 #endif
1906 
1907 	/* Initializes the GRC parameters */
1908 	ecore_dbg_grc_init_params(p_hwfn);
1909 
1910 	dev_data->use_dmae = USE_DMAE;
1911 	dev_data->num_regs_read = 0;
1912 	dev_data->initialized = 1;
1913 
1914 	return DBG_STATUS_OK;
1915 }
1916 
1917 static const struct dbg_bus_block *get_dbg_bus_block_desc(struct ecore_hwfn *p_hwfn,
1918 														  enum block_id block_id)
1919 {
1920 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1921 
1922 	return (const struct dbg_bus_block *)&dbg_bus_blocks[block_id * MAX_CHIP_IDS + dev_data->chip_id];
1923 }
1924 
1925 /* Returns OSAL_NULL for signature line, latency line and non-existing lines */
1926 static const struct dbg_bus_line *get_dbg_bus_line_desc(struct ecore_hwfn *p_hwfn,
1927 														enum block_id block_id)
1928 {
1929 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1930 	struct dbg_bus_block_data *block_bus;
1931 	const struct dbg_bus_block *block_desc;
1932 	u32 index;
1933 
1934 	block_bus = &dev_data->bus.blocks[block_id];
1935 	block_desc = get_dbg_bus_block_desc(p_hwfn, block_id);
1936 
1937 	if (!block_bus->line_num ||
1938 		(block_bus->line_num == 1 && block_desc->has_latency_events) ||
1939 		block_bus->line_num >= NUM_DBG_LINES(block_desc))
1940 		return OSAL_NULL;
1941 
1942 	index = block_desc->lines_offset + block_bus->line_num - NUM_EXTRA_DBG_LINES(block_desc);
1943 
1944 	return (const struct dbg_bus_line *)&dbg_bus_lines[index];
1945 }
1946 
1947 /* Reads the FW info structure for the specified Storm from the chip,
1948  * and writes it to the specified fw_info pointer.
1949  */
1950 static void ecore_read_fw_info(struct ecore_hwfn *p_hwfn,
1951 							   struct ecore_ptt *p_ptt,
1952 							   u8 storm_id,
1953 							   struct fw_info *fw_info)
1954 {
1955 	struct storm_defs *storm = &s_storm_defs[storm_id];
1956 	struct fw_info_location fw_info_location;
1957 	u32 addr, i, *dest;
1958 
1959 	OSAL_MEMSET(&fw_info_location, 0, sizeof(fw_info_location));
1960 	OSAL_MEMSET(fw_info, 0, sizeof(*fw_info));
1961 
1962 	/* First read the address that points to the fw_info location.
1963 	 * The address is located in the last line of the Storm RAM.
1964 	 */
1965 	addr = storm->sem_fast_mem_addr + SEM_FAST_REG_INT_RAM +
1966 		(ECORE_IS_E5(p_hwfn->p_dev) ?
1967 			DWORDS_TO_BYTES(SEM_FAST_REG_INT_RAM_SIZE_E5) :
1968 			DWORDS_TO_BYTES(SEM_FAST_REG_INT_RAM_SIZE_BB_K2))
1969 		- sizeof(fw_info_location);
1970 
1971 	dest = (u32 *)&fw_info_location;
1972 
1973 	for (i = 0; i < BYTES_TO_DWORDS(sizeof(fw_info_location)); i++, addr += BYTES_IN_DWORD)
1974 		dest[i] = ecore_rd(p_hwfn, p_ptt, addr);
1975 
1976 	/* Read FW version info from Storm RAM */
1977 	if (fw_info_location.size > 0 && fw_info_location.size <= sizeof(*fw_info)) {
1978 		addr = fw_info_location.grc_addr;
1979 		dest = (u32 *)fw_info;
1980 		for (i = 0; i < BYTES_TO_DWORDS(fw_info_location.size); i++, addr += BYTES_IN_DWORD)
1981 			dest[i] = ecore_rd(p_hwfn, p_ptt, addr);
1982 	}
1983 }
1984 
1985 /* Dumps the specified string to the specified buffer.
1986  * Returns the dumped size in bytes.
1987  */
1988 static u32 ecore_dump_str(char *dump_buf,
1989 						  bool dump,
1990 						  const char *str)
1991 {
1992 	if (dump)
1993 		OSAL_STRCPY(dump_buf, str);
1994 
1995 	return (u32)OSAL_STRLEN(str) + 1;
1996 }
1997 
1998 /* Dumps zeros to align the specified buffer to dwords.
1999  * Returns the dumped size in bytes.
2000  */
2001 static u32 ecore_dump_align(char *dump_buf,
2002 							bool dump,
2003 							u32 byte_offset)
2004 {
2005 	u8 offset_in_dword, align_size;
2006 
2007 	offset_in_dword = (u8)(byte_offset & 0x3);
2008 	align_size = offset_in_dword ? BYTES_IN_DWORD - offset_in_dword : 0;
2009 
2010 	if (dump && align_size)
2011 		OSAL_MEMSET(dump_buf, 0, align_size);
2012 
2013 	return align_size;
2014 }
2015 
2016 /* Writes the specified string param to the specified buffer.
2017  * Returns the dumped size in dwords.
2018  */
2019 static u32 ecore_dump_str_param(u32 *dump_buf,
2020 								bool dump,
2021 								const char *param_name,
2022 								const char *param_val)
2023 {
2024 	char *char_buf = (char *)dump_buf;
2025 	u32 offset = 0;
2026 
2027 	/* Dump param name */
2028 	offset += ecore_dump_str(char_buf + offset, dump, param_name);
2029 
2030 	/* Indicate a string param value */
2031 	if (dump)
2032 		*(char_buf + offset) = 1;
2033 	offset++;
2034 
2035 	/* Dump param value */
2036 	offset += ecore_dump_str(char_buf + offset, dump, param_val);
2037 
2038 	/* Align buffer to next dword */
2039 	offset += ecore_dump_align(char_buf + offset, dump, offset);
2040 
2041 	return BYTES_TO_DWORDS(offset);
2042 }
2043 
2044 /* Writes the specified numeric param to the specified buffer.
2045  * Returns the dumped size in dwords.
2046  */
2047 static u32 ecore_dump_num_param(u32 *dump_buf,
2048 								bool dump,
2049 								const char *param_name,
2050 								u32 param_val)
2051 {
2052 	char *char_buf = (char *)dump_buf;
2053 	u32 offset = 0;
2054 
2055 	/* Dump param name */
2056 	offset += ecore_dump_str(char_buf + offset, dump, param_name);
2057 
2058 	/* Indicate a numeric param value */
2059 	if (dump)
2060 		*(char_buf + offset) = 0;
2061 	offset++;
2062 
2063 	/* Align buffer to next dword */
2064 	offset += ecore_dump_align(char_buf + offset, dump, offset);
2065 
2066 	/* Dump param value (and change offset from bytes to dwords) */
2067 	offset = BYTES_TO_DWORDS(offset);
2068 	if (dump)
2069 		*(dump_buf + offset) = param_val;
2070 	offset++;
2071 
2072 	return offset;
2073 }
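
/* Worked example of the encoding produced by the two helpers above:
 * ecore_dump_str_param(buf, true, "platform", "asic") emits "platform\0",
 * a type byte of 1 (string), "asic\0" and one alignment byte, for 16 bytes
 * (4 dwords). ecore_dump_num_param(buf, true, "pci-func", 0) emits
 * "pci-func\0", a type byte of 0 (numeric) and two alignment bytes, followed
 * by the 32-bit value in its own dword, for 4 dwords as well. The type byte
 * tells the parser how to decode the value that follows the param name.
 */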
2074 
2075 /* Reads the FW version and writes it as a param to the specified buffer.
2076  * Returns the dumped size in dwords.
2077  */
2078 static u32 ecore_dump_fw_ver_param(struct ecore_hwfn *p_hwfn,
2079 								   struct ecore_ptt *p_ptt,
2080 								   u32 *dump_buf,
2081 								   bool dump)
2082 {
2083 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2084 	char fw_ver_str[16] = EMPTY_FW_VERSION_STR;
2085 	char fw_img_str[16] = EMPTY_FW_IMAGE_STR;
2086 	struct fw_info fw_info = { { 0 }, { 0 } };
2087 	u32 offset = 0;
2088 
2089 	if (dump && !ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
2090 		/* Read FW image/version from PRAM in a non-reset SEMI */
2091 		bool found = false;
2092 		u8 storm_id;
2093 
2094 		for (storm_id = 0; storm_id < MAX_DBG_STORMS && !found; storm_id++) {
2095 			struct storm_defs *storm = &s_storm_defs[storm_id];
2096 
2097 			/* Read FW version/image */
2098 			if (dev_data->block_in_reset[storm->block_id])
2099 				continue;
2100 
2101 			/* Read FW info for the current Storm */
2102 			ecore_read_fw_info(p_hwfn, p_ptt, storm_id, &fw_info);
2103 
2104 			/* Create FW version/image strings */
2105 			if (OSAL_SNPRINTF(fw_ver_str, sizeof(fw_ver_str), "%d_%d_%d_%d", fw_info.ver.num.major, fw_info.ver.num.minor, fw_info.ver.num.rev, fw_info.ver.num.eng) < 0)
2106 				DP_NOTICE(p_hwfn, true, "Unexpected debug error: invalid FW version string\n");
2107 			switch (fw_info.ver.image_id) {
2108 			case FW_IMG_KUKU: OSAL_STRCPY(fw_img_str, "kuku"); break;
2109 			case FW_IMG_MAIN: OSAL_STRCPY(fw_img_str, "main"); break;
2110 			case FW_IMG_L2B: OSAL_STRCPY(fw_img_str, "l2b"); break;
2111 			default: OSAL_STRCPY(fw_img_str, "unknown"); break;
2112 			}
2113 
2114 			found = true;
2115 		}
2116 	}
2117 
2118 	/* Dump FW version, image and timestamp */
2119 	offset += ecore_dump_str_param(dump_buf + offset, dump, "fw-version", fw_ver_str);
2120 	offset += ecore_dump_str_param(dump_buf + offset, dump, "fw-image", fw_img_str);
2121 	offset += ecore_dump_num_param(dump_buf + offset, dump, "fw-timestamp", fw_info.ver.timestamp);
2122 
2123 	return offset;
2124 }
2125 
2126 /* Reads the MFW version and writes it as a param to the specified buffer.
2127  * Returns the dumped size in dwords.
2128  */
2129 static u32 ecore_dump_mfw_ver_param(struct ecore_hwfn *p_hwfn,
2130 									struct ecore_ptt *p_ptt,
2131 									u32 *dump_buf,
2132 									bool dump)
2133 {
2134 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2135 	char mfw_ver_str[16] = EMPTY_FW_VERSION_STR;
2136 
2137 	if (dump && dev_data->platform_id == PLATFORM_ASIC && !ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
2138 		u32 public_data_addr, global_section_offsize_addr, global_section_offsize, global_section_addr, mfw_ver;
2139 
2140 		/* Find MCP public data GRC address. Needs to be ORed with
2141 		 * MCP_REG_SCRATCH due to a HW bug.
2142 		 */
2143 		public_data_addr = ecore_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR) | MCP_REG_SCRATCH;
2144 
2145 		/* Find MCP public global section offset */
2146 		global_section_offsize_addr = public_data_addr + OFFSETOF(struct mcp_public_data, sections) + sizeof(offsize_t) * PUBLIC_GLOBAL;
2147 		global_section_offsize = ecore_rd(p_hwfn, p_ptt, global_section_offsize_addr);
2148 		global_section_addr = MCP_REG_SCRATCH + (global_section_offsize & OFFSIZE_OFFSET_MASK) * 4;
2149 
2150 		/* Read MFW version from MCP public global section */
2151 		mfw_ver = ecore_rd(p_hwfn, p_ptt, global_section_addr + OFFSETOF(struct public_global, mfw_ver));
2152 
2153 		/* Dump MFW version param */
2154 		if (OSAL_SNPRINTF(mfw_ver_str, sizeof(mfw_ver_str), "%d_%d_%d_%d", (u8)(mfw_ver >> 24), (u8)(mfw_ver >> 16), (u8)(mfw_ver >> 8), (u8)mfw_ver) < 0)
2155 			DP_NOTICE(p_hwfn, true, "Unexpected debug error: invalid MFW version string\n");
2156 	}
2157 
2158 	return ecore_dump_str_param(dump_buf, dump, "mfw-version", mfw_ver_str);
2159 }
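
/* For example (illustrative value): an mfw_ver dword of 0x08210203 is
 * rendered as "8_33_2_3", taking the four version fields from the most
 * significant byte down to the least significant one.
 */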
2160 
2161 /* Writes a section header to the specified buffer.
2162  * Returns the dumped size in dwords.
2163  */
2164 static u32 ecore_dump_section_hdr(u32 *dump_buf,
2165 								  bool dump,
2166 								  const char *name,
2167 								  u32 num_params)
2168 {
2169 	return ecore_dump_num_param(dump_buf, dump, name, num_params);
2170 }
2171 
2172 /* Writes the common global params to the specified buffer.
2173  * Returns the dumped size in dwords.
2174  */
2175 static u32 ecore_dump_common_global_params(struct ecore_hwfn *p_hwfn,
2176 										   struct ecore_ptt *p_ptt,
2177 										   u32 *dump_buf,
2178 										   bool dump,
2179 										   u8 num_specific_global_params)
2180 {
2181 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2182 	u32 offset = 0;
2183 	u8 num_params;
2184 
2185 	/* Dump global params section header */
2186 	num_params = NUM_COMMON_GLOBAL_PARAMS + num_specific_global_params;
2187 	offset += ecore_dump_section_hdr(dump_buf + offset, dump, "global_params", num_params);
2188 
2189 	/* Store params */
2190 	offset += ecore_dump_fw_ver_param(p_hwfn, p_ptt, dump_buf + offset, dump);
2191 	offset += ecore_dump_mfw_ver_param(p_hwfn, p_ptt, dump_buf + offset, dump);
2192 	offset += ecore_dump_num_param(dump_buf + offset, dump, "tools-version", TOOLS_VERSION);
2193 	offset += ecore_dump_str_param(dump_buf + offset, dump, "chip", s_chip_defs[dev_data->chip_id].name);
2194 	offset += ecore_dump_str_param(dump_buf + offset, dump, "platform", s_platform_defs[dev_data->platform_id].name);
2195 	offset += ecore_dump_num_param(dump_buf + offset, dump, "pci-func", p_hwfn->abs_pf_id);
2196 
2197 	return offset;
2198 }
2199 
2200 /* Writes the "last" section (including CRC) to the specified buffer at the
2201  * given offset. Returns the dumped size in dwords.
2202  */
2203 static u32 ecore_dump_last_section(u32 *dump_buf,
2204 								   u32 offset,
2205 								   bool dump)
2206 {
2207 	u32 start_offset = offset;
2208 
2209 	/* Dump CRC section header */
2210 	offset += ecore_dump_section_hdr(dump_buf + offset, dump, "last", 0);
2211 
2212 	/* Calculate CRC32 and add it to the dword after the "last" section */
2213 	if (dump)
2214 		*(dump_buf + offset) = ~OSAL_CRC32(0xffffffff, (u8 *)dump_buf, DWORDS_TO_BYTES(offset));
2215 
2216 	offset++;
2217 
2218 	return offset - start_offset;
2219 }
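
/* A minimal consumer-side sketch of verifying the CRC emitted above. This is
 * illustrative only and not part of the driver; example_crc32() is a
 * hypothetical stand-in for a CRC32 implementation matching OSAL_CRC32.
 */
#if 0
static bool example_verify_dump_crc(const u32 *dump_buf, u32 size_in_dwords)
{
	/* The CRC is stored in the last dword and covers everything before it */
	u32 stored_crc = dump_buf[size_in_dwords - 1];
	u32 calc_crc = ~example_crc32(0xffffffff, (const u8 *)dump_buf,
				      DWORDS_TO_BYTES(size_in_dwords - 1));

	return stored_crc == calc_crc;
}
#endif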
2220 
2221 /* Updates the blocks' reset state */
2222 static void ecore_update_blocks_reset_state(struct ecore_hwfn *p_hwfn,
2223 											struct ecore_ptt *p_ptt)
2224 {
2225 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2226 	u32 reg_val[MAX_DBG_RESET_REGS] = { 0 };
2227 	u32 i;
2228 
2229 	/* Read reset registers */
2230 	for (i = 0; i < MAX_DBG_RESET_REGS; i++)
2231 		if (s_reset_regs_defs[i].exists[dev_data->chip_id])
2232 			reg_val[i] = ecore_rd(p_hwfn, p_ptt, s_reset_regs_defs[i].addr);
2233 
2234 	/* Check if blocks are in reset */
2235 	for (i = 0; i < MAX_BLOCK_ID; i++) {
2236 		struct block_defs *block = s_block_defs[i];
2237 
2238 		dev_data->block_in_reset[i] = block->has_reset_bit && !(reg_val[block->reset_reg] & (1 << block->reset_bit_offset));
2239 	}
2240 }
2241 
2242 /* Enable / disable the Debug block */
2243 static void ecore_bus_enable_dbg_block(struct ecore_hwfn *p_hwfn,
2244 									   struct ecore_ptt *p_ptt,
2245 									   bool enable)
2246 {
2247 	ecore_wr(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON, enable ? 1 : 0);
2248 }
2249 
2250 /* Resets the Debug block */
2251 static void ecore_bus_reset_dbg_block(struct ecore_hwfn *p_hwfn,
2252 									  struct ecore_ptt *p_ptt)
2253 {
2254 	u32 dbg_reset_reg_addr, old_reset_reg_val, new_reset_reg_val;
2255 	struct block_defs *dbg_block = s_block_defs[BLOCK_DBG];
2256 
2257 	dbg_reset_reg_addr = s_reset_regs_defs[dbg_block->reset_reg].addr;
2258 	old_reset_reg_val = ecore_rd(p_hwfn, p_ptt, dbg_reset_reg_addr);
2259 	new_reset_reg_val = old_reset_reg_val & ~(1 << dbg_block->reset_bit_offset);
2260 
2261 	ecore_wr(p_hwfn, p_ptt, dbg_reset_reg_addr, new_reset_reg_val);
2262 	ecore_wr(p_hwfn, p_ptt, dbg_reset_reg_addr, old_reset_reg_val);
2263 }
2264 
2265 static void ecore_bus_set_framing_mode(struct ecore_hwfn *p_hwfn,
2266 									   struct ecore_ptt *p_ptt,
2267 									   enum dbg_bus_frame_modes mode)
2268 {
2269 	ecore_wr(p_hwfn, p_ptt, DBG_REG_FRAMING_MODE, (u8)mode);
2270 }
2271 
2272 /* Enable / disable Debug Bus clients according to the specified mask
2273  * (1 = enable, 0 = disable).
2274  */
2275 static void ecore_bus_enable_clients(struct ecore_hwfn *p_hwfn,
2276 									 struct ecore_ptt *p_ptt,
2277 									 u32 client_mask)
2278 {
2279 	ecore_wr(p_hwfn, p_ptt, DBG_REG_CLIENT_ENABLE, client_mask);
2280 }
2281 
2282 /* Enables the specified Storm for Debug Bus. Assumes a valid Storm ID. */
2283 static void ecore_bus_enable_storm(struct ecore_hwfn *p_hwfn,
2284 								   struct ecore_ptt *p_ptt,
2285 								   enum dbg_storms storm_id)
2286 {
2287 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2288 	u32 base_addr, sem_filter_params = 0;
2289 	struct dbg_bus_storm_data *storm_bus;
2290 	struct storm_mode_defs *storm_mode;
2291 	struct storm_defs *storm;
2292 
2293 	storm = &s_storm_defs[storm_id];
2294 	storm_bus = &dev_data->bus.storms[storm_id];
2295 	storm_mode = &s_storm_mode_defs[storm_bus->mode];
2296 	base_addr = storm->sem_fast_mem_addr;
2297 
2298 	/* Config SEM */
2299 	if (storm_mode->is_fast_dbg) {
2300 		/* Enable fast debug */
2301 		ecore_wr(p_hwfn, p_ptt, storm->sem_frame_mode_addr, DBG_BUS_SEMI_FRAME_MODE_0SLOW_4FAST);
2302 		ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_DEBUG_MODE, storm_mode->id_in_hw);
2303 		ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_DEBUG_ACTIVE, 1);
2304 
2305 		/* Enable messages. Must be done after enabling
2306 		 * SEM_FAST_REG_DEBUG_ACTIVE, otherwise messages will
2307 		 * be dropped after the SEMI sync fifo is filled.
2308 		 */
2309 		ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_DBG_MODE23_SRC_DISABLE, SEM_FAST_MODE23_SRC_ENABLE_VAL);
2310 		ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_DBG_MODE4_SRC_DISABLE, SEM_FAST_MODE4_SRC_ENABLE_VAL);
2311 		ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_DBG_MODE6_SRC_DISABLE, SEM_FAST_MODE6_SRC_ENABLE_VAL);
2312 	}
2313 	else {
2314 		/* Enable slow debug */
2315 		ecore_wr(p_hwfn, p_ptt, storm->sem_frame_mode_addr, DBG_BUS_SEMI_FRAME_MODE_4SLOW_0FAST);
2316 		ecore_wr(p_hwfn, p_ptt, storm->sem_slow_enable_addr, 1);
2317 		ecore_wr(p_hwfn, p_ptt, storm->sem_slow_mode_addr, storm_mode->id_in_hw);
2318 		ecore_wr(p_hwfn, p_ptt, storm->sem_slow_mode1_conf_addr, SEM_SLOW_MODE1_DATA_ENABLE);
2319 	}
2320 
2321 	/* Config SEM cid filter */
2322 	if (storm_bus->cid_filter_en) {
2323 		ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_FILTER_CID, storm_bus->cid);
2324 		sem_filter_params |= SEM_FILTER_CID_EN_MASK;
2325 	}
2326 
2327 	/* Config SEM eid filter */
2328 	if (storm_bus->eid_filter_en) {
2329 		const union dbg_bus_storm_eid_params *eid_filter = &storm_bus->eid_filter_params;
2330 
2331 		if (storm_bus->eid_range_not_mask) {
2332 			ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_EVENT_ID_RANGE_STRT, eid_filter->range.min);
2333 			ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_EVENT_ID_RANGE_END, eid_filter->range.max);
2334 			sem_filter_params |= SEM_FILTER_EID_RANGE_EN_MASK;
2335 		}
2336 		else {
2337 			ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_FILTER_EVENT_ID, eid_filter->mask.val);
2338 			ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_EVENT_ID_MASK, ~eid_filter->mask.mask);
2339 			sem_filter_params |= SEM_FILTER_EID_MASK_EN_MASK;
2340 		}
2341 	}
2342 
2343 	/* Config accumulated SEM filter parameters (if any) */
2344 	if (sem_filter_params)
2345 		ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_RECORD_FILTER_ENABLE, sem_filter_params);
2346 }
2347 
2348 /* Disables Debug Bus block inputs */
2349 static enum dbg_status ecore_bus_disable_inputs(struct ecore_hwfn *p_hwfn,
2350 												struct ecore_ptt *p_ptt,
2351 												bool empty_semi_fifos)
2352 {
2353 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2354 	u8 storm_id, num_fifos_to_empty = MAX_DBG_STORMS;
2355 	bool is_fifo_empty[MAX_DBG_STORMS] = { false };
2356 	u32 block_id, polling_count = 0;
2357 
2358 	/* Disable message output in all Storms */
2359 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
2360 		struct storm_defs *storm = &s_storm_defs[storm_id];
2361 
2362 		if (dev_data->block_in_reset[storm->block_id])
2363 			continue;
2364 
2365 		ecore_wr(p_hwfn, p_ptt, storm->sem_fast_mem_addr + SEM_FAST_REG_DBG_MODE23_SRC_DISABLE, SEM_FAST_MODE23_SRC_DISABLE_VAL);
2366 		ecore_wr(p_hwfn, p_ptt, storm->sem_fast_mem_addr + SEM_FAST_REG_DBG_MODE4_SRC_DISABLE, SEM_FAST_MODE4_SRC_DISABLE_VAL);
2367 		ecore_wr(p_hwfn, p_ptt, storm->sem_fast_mem_addr + SEM_FAST_REG_DBG_MODE6_SRC_DISABLE, SEM_FAST_MODE6_SRC_DISABLE_VAL);
2368 	}
2369 
2370 	/* Try to empty the SEMI sync fifo. Must be done after message output
2371 	 * was disabled in all Storms.
2372 	 */
2373 	while (num_fifos_to_empty) {
2374 		for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
2375 			struct storm_defs *storm = &s_storm_defs[storm_id];
2376 
2377 			if (is_fifo_empty[storm_id])
2378 				continue;
2379 
2380 			/* Check if the sync fifo became empty */
2381 			if (dev_data->block_in_reset[storm->block_id] || ecore_rd(p_hwfn, p_ptt, storm->sem_sync_dbg_empty_addr)) {
2382 				is_fifo_empty[storm_id] = true;
2383 				num_fifos_to_empty--;
2384 			}
2385 		}
2386 
2387 		/* Check if we need to continue polling. The polling counter must
2388 		 * persist across loop iterations, so it is declared at function
2389 		 * scope above; otherwise the retry limit would never be reached.
2390 		 */
2391 		if (num_fifos_to_empty) {
2392 			u32 polling_ms = SEMI_SYNC_FIFO_POLLING_DELAY_MS * s_platform_defs[dev_data->platform_id].delay_factor;
2391 
2392 			if (empty_semi_fifos && polling_count < SEMI_SYNC_FIFO_POLLING_COUNT) {
2393 				OSAL_MSLEEP(polling_ms);
2394 				polling_count++;
2395 			}
2396 			else {
2397 				DP_NOTICE(p_hwfn, false, "Warning: failed to empty the SEMI sync FIFO. It means that the last few messages from the SEMI could not be sent to the DBG block. This can happen when the DBG block is blocked (e.g. due to a PCI problem).\n");
2398 				break;
2399 			}
2400 		}
2401 	}
2402 
2403 	/* Disable debug in all Storms */
2404 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
2405 		struct storm_defs *storm = &s_storm_defs[storm_id];
2406 		u32 base_addr = storm->sem_fast_mem_addr;
2407 
2408 		if (dev_data->block_in_reset[storm->block_id])
2409 			continue;
2410 
2411 		ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_DEBUG_ACTIVE, 0);
2412 		ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_RECORD_FILTER_ENABLE, DBG_BUS_FILTER_TYPE_OFF);
2413 		ecore_wr(p_hwfn, p_ptt, storm->sem_frame_mode_addr, DBG_BUS_FRAME_MODE_4HW_0ST);
2414 		ecore_wr(p_hwfn, p_ptt, storm->sem_slow_enable_addr, 0);
2415 	}
2416 
2417 	/* Disable all clients */
2418 	ecore_bus_enable_clients(p_hwfn, p_ptt, 0);
2419 
2420 	/* Disable all blocks */
2421 	for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
2422 		struct block_defs *block = s_block_defs[block_id];
2423 
2424 		if (block->dbg_client_id[dev_data->chip_id] != MAX_DBG_BUS_CLIENTS && !dev_data->block_in_reset[block_id])
2425 			ecore_wr(p_hwfn, p_ptt, block->dbg_enable_addr, 0);
2426 	}
2427 
2428 	/* Disable timestamp */
2429 	ecore_wr(p_hwfn, p_ptt, DBG_REG_TIMESTAMP_VALID_EN, 0);
2430 
2431 	/* Disable filters and triggers */
2432 	ecore_wr(p_hwfn, p_ptt, DBG_REG_FILTER_ENABLE, DBG_BUS_FILTER_TYPE_OFF);
2433 	ecore_wr(p_hwfn, p_ptt, DBG_REG_TRIGGER_ENABLE, 0);
2434 
2435 	return DBG_STATUS_OK;
2436 }
2437 
2438 /* Sets a Debug Bus trigger/filter constraint */
2439 static void ecore_bus_set_constraint(struct ecore_hwfn *p_hwfn,
2440 									 struct ecore_ptt *p_ptt,
2441 									 bool is_filter,
2442 									 u8 constraint_id,
2443 									 u8 hw_op_val,
2444 									 u32 data_val,
2445 									 u32 data_mask,
2446 									 u8 frame_bit,
2447 									 u8 frame_mask,
2448 									 u16 dword_offset,
2449 									 u16 range,
2450 									 u8 cyclic_bit,
2451 									 u8 must_bit)
2452 {
2453 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2454 	u32 reg_offset = constraint_id * BYTES_IN_DWORD;
2455 	u8 curr_trigger_state;
2456 
2457 	/* For trigger only - set register offset according to state */
2458 	if (!is_filter) {
2459 		curr_trigger_state = dev_data->bus.next_trigger_state - 1;
2460 		reg_offset += curr_trigger_state * TRIGGER_SETS_PER_STATE * MAX_CONSTRAINTS * BYTES_IN_DWORD;
2461 	}
2462 
2463 	ecore_wr(p_hwfn, p_ptt, (is_filter ? DBG_REG_FILTER_CNSTR_OPRTN_0 : DBG_REG_TRIGGER_STATE_SET_CNSTR_OPRTN_0) + reg_offset, hw_op_val);
2464 	ecore_wr(p_hwfn, p_ptt, (is_filter ? DBG_REG_FILTER_CNSTR_DATA_0 : DBG_REG_TRIGGER_STATE_SET_CNSTR_DATA_0) + reg_offset, data_val);
2465 	ecore_wr(p_hwfn, p_ptt, (is_filter ? DBG_REG_FILTER_CNSTR_DATA_MASK_0 : DBG_REG_TRIGGER_STATE_SET_CNSTR_DATA_MASK_0) + reg_offset, data_mask);
2466 	ecore_wr(p_hwfn, p_ptt, (is_filter ? DBG_REG_FILTER_CNSTR_FRAME_0 : DBG_REG_TRIGGER_STATE_SET_CNSTR_FRAME_0) + reg_offset, frame_bit);
2467 	ecore_wr(p_hwfn, p_ptt, (is_filter ? DBG_REG_FILTER_CNSTR_FRAME_MASK_0 : DBG_REG_TRIGGER_STATE_SET_CNSTR_FRAME_MASK_0) + reg_offset, frame_mask);
2468 	ecore_wr(p_hwfn, p_ptt, (is_filter ? DBG_REG_FILTER_CNSTR_OFFSET_0 : DBG_REG_TRIGGER_STATE_SET_CNSTR_OFFSET_0) + reg_offset, dword_offset);
2469 	ecore_wr(p_hwfn, p_ptt, (is_filter ? DBG_REG_FILTER_CNSTR_RANGE_0 : DBG_REG_TRIGGER_STATE_SET_CNSTR_RANGE_0) + reg_offset, range);
2470 	ecore_wr(p_hwfn, p_ptt, (is_filter ? DBG_REG_FILTER_CNSTR_CYCLIC_0 : DBG_REG_TRIGGER_STATE_SET_CNSTR_CYCLIC_0) + reg_offset, cyclic_bit);
2471 	ecore_wr(p_hwfn, p_ptt, (is_filter ? DBG_REG_FILTER_CNSTR_MUST_0 : DBG_REG_TRIGGER_STATE_SET_CNSTR_MUST_0) + reg_offset, must_bit);
2472 }
2473 
2474 /* Reads the specified DBG Bus internal buffer range and copies it to the
2475  * specified buffer. Returns the dumped size in dwords.
2476  */
2477 static u32 ecore_bus_dump_int_buf_range(struct ecore_hwfn *p_hwfn,
2478 										struct ecore_ptt *p_ptt,
2479 										u32 *dump_buf,
2480 										bool dump,
2481 										u32 start_line,
2482 										u32 end_line)
2483 {
2484 	u32 line, reg_addr, i, offset = 0;
2485 
2486 	if (!dump)
2487 		return (end_line - start_line + 1) * INT_BUF_LINE_SIZE_IN_DWORDS;
2488 
2489 	for (line = start_line, reg_addr = DBG_REG_INTR_BUFFER + DWORDS_TO_BYTES(start_line * INT_BUF_LINE_SIZE_IN_DWORDS);
2490 		line <= end_line;
2491 		line++, offset += INT_BUF_LINE_SIZE_IN_DWORDS)
2492 		for (i = 0; i < INT_BUF_LINE_SIZE_IN_DWORDS; i++, reg_addr += BYTES_IN_DWORD)
2493 			dump_buf[offset + INT_BUF_LINE_SIZE_IN_DWORDS - 1 - i] = ecore_rd(p_hwfn, p_ptt, reg_addr);
2494 
2495 	return offset;
2496 }
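
/* Note that dwords are stored back-to-front within each line: raw dword i of
 * a line is written to slot (INT_BUF_LINE_SIZE_IN_DWORDS - 1 - i), so a line
 * read from the internal buffer as { D0, ..., Dn } lands in the dump as
 * { Dn, ..., D0 }.
 */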
2497 
2498 /* Reads the DBG Bus internal buffer and copies its contents to a buffer.
2499  * Returns the dumped size in dwords.
2500  */
2501 static u32 ecore_bus_dump_int_buf(struct ecore_hwfn *p_hwfn,
2502 								  struct ecore_ptt *p_ptt,
2503 								  u32 *dump_buf,
2504 								  bool dump)
2505 {
2506 	u32 last_written_line, offset = 0;
2507 
2508 	last_written_line = ecore_rd(p_hwfn, p_ptt, DBG_REG_INTR_BUFFER_WR_PTR);
2509 
2510 	if (ecore_rd(p_hwfn, p_ptt, DBG_REG_WRAP_ON_INT_BUFFER)) {
2511 		/* Internal buffer was wrapped: first dump from write pointer
2512 		 * to buffer end, then dump from buffer start to write pointer.
2513 		 */
2514 		if (last_written_line < INT_BUF_NUM_OF_LINES - 1)
2515 			offset += ecore_bus_dump_int_buf_range(p_hwfn, p_ptt, dump_buf + offset, dump, last_written_line + 1, INT_BUF_NUM_OF_LINES - 1);
2516 		offset += ecore_bus_dump_int_buf_range(p_hwfn, p_ptt, dump_buf + offset, dump, 0, last_written_line);
2517 	}
2518 	else if (last_written_line) {
2519 		/* Internal buffer wasn't wrapped: dump from buffer start until
2520 		 * write pointer.
2521 		 */
2522 		if (!ecore_rd(p_hwfn, p_ptt, DBG_REG_INTR_BUFFER_RD_PTR))
2523 			offset += ecore_bus_dump_int_buf_range(p_hwfn, p_ptt, dump_buf + offset, dump, 0, last_written_line);
2524 		else
2525 			DP_NOTICE(p_hwfn, true, "Unexpected Debug Bus error: internal buffer read pointer is not zero\n");
2526 	}
2527 
2528 	return offset;
2529 }
2530 
2531 /* Reads the specified DBG Bus PCI buffer range and copies it to the specified
2532  * buffer. Returns the dumped size in dwords.
2533  */
2534 static u32 ecore_bus_dump_pci_buf_range(struct ecore_hwfn *p_hwfn,
2535 										u32 *dump_buf,
2536 										bool dump,
2537 										u32 start_line,
2538 										u32 end_line)
2539 {
2540 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2541 	u32 offset = 0;
2542 
2543 	/* Extract PCI buffer pointer from virtual address */
2544 	void *virt_addr_lo = &dev_data->bus.pci_buf.virt_addr.lo;
2545 	u32 *pci_buf_start = (u32 *)(osal_uintptr_t)*((u64 *)virt_addr_lo);
2546 	u32 *pci_buf, line, i;
2547 
2548 	if (!dump)
2549 		return (end_line - start_line + 1) * PCI_BUF_LINE_SIZE_IN_DWORDS;
2550 
2551 	for (line = start_line, pci_buf = pci_buf_start + start_line * PCI_BUF_LINE_SIZE_IN_DWORDS;
2552 		line <= end_line;
2553 		line++, offset += PCI_BUF_LINE_SIZE_IN_DWORDS)
2554 		for (i = 0; i < PCI_BUF_LINE_SIZE_IN_DWORDS; i++, pci_buf++)
2555 			dump_buf[offset + s_pci_buf_line_ind[i]] = *pci_buf;
2556 
2557 	return offset;
2558 }
2559 
2560 /* Copies the DBG Bus PCI buffer to the specified buffer.
2561  * Returns the dumped size in dwords.
2562  */
2563 static u32 ecore_bus_dump_pci_buf(struct ecore_hwfn *p_hwfn,
2564 								  struct ecore_ptt *p_ptt,
2565 								  u32 *dump_buf,
2566 								  bool dump)
2567 {
2568 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2569 	u32 next_wr_byte_offset, next_wr_line_offset;
2570 	struct dbg_bus_mem_addr next_wr_phys_addr;
2571 	u32 pci_buf_size_in_lines, offset = 0;
2572 
2573 	pci_buf_size_in_lines = dev_data->bus.pci_buf.size / PCI_BUF_LINE_SIZE_IN_BYTES;
2574 
2575 	/* Extract write pointer (physical address) */
2576 	next_wr_phys_addr.lo = ecore_rd(p_hwfn, p_ptt, DBG_REG_EXT_BUFFER_WR_PTR);
2577 	next_wr_phys_addr.hi = ecore_rd(p_hwfn, p_ptt, DBG_REG_EXT_BUFFER_WR_PTR + BYTES_IN_DWORD);
2578 
2579 	/* Convert write pointer to offset */
2580 	next_wr_byte_offset = ecore_phys_addr_diff(&next_wr_phys_addr, &dev_data->bus.pci_buf.phys_addr);
2581 	if ((next_wr_byte_offset % PCI_BUF_LINE_SIZE_IN_BYTES) || next_wr_byte_offset > dev_data->bus.pci_buf.size)
2582 		return 0;
2583 	next_wr_line_offset = next_wr_byte_offset / PCI_BUF_LINE_SIZE_IN_BYTES;
2584 
2585 	/* PCI buffer wrapped: first dump from write pointer to buffer end. */
2586 	if (ecore_rd(p_hwfn, p_ptt, DBG_REG_WRAP_ON_EXT_BUFFER))
2587 		offset += ecore_bus_dump_pci_buf_range(p_hwfn, dump_buf + offset, dump, next_wr_line_offset, pci_buf_size_in_lines - 1);
2588 
2589 	/* Dump from buffer start until write pointer */
2590 	if (next_wr_line_offset)
2591 		offset += ecore_bus_dump_pci_buf_range(p_hwfn, dump_buf + offset, dump, 0, next_wr_line_offset - 1);
2592 
2593 	return offset;
2594 }
2595 
2596 /* Copies the DBG Bus recorded data to the specified buffer.
2597  * Returns the dumped size in dwords.
2598  */
2599 static u32 ecore_bus_dump_data(struct ecore_hwfn *p_hwfn,
2600 							   struct ecore_ptt *p_ptt,
2601 							   u32 *dump_buf,
2602 							   bool dump)
2603 {
2604 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2605 
2606 	switch (dev_data->bus.target) {
2607 	case DBG_BUS_TARGET_ID_INT_BUF:
2608 		return ecore_bus_dump_int_buf(p_hwfn, p_ptt, dump_buf, dump);
2609 	case DBG_BUS_TARGET_ID_PCI:
2610 		return ecore_bus_dump_pci_buf(p_hwfn, p_ptt, dump_buf, dump);
2611 	default:
2612 		break;
2613 	}
2614 
2615 	return 0;
2616 }
2617 
2618 /* Frees the Debug Bus PCI buffer */
2619 static void ecore_bus_free_pci_buf(struct ecore_hwfn *p_hwfn)
2620 {
2621 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2622 	dma_addr_t pci_buf_phys_addr;
2623 	void *virt_addr_lo;
2624 	u32 *pci_buf;
2625 
2626 	/* Extract PCI buffer pointer from virtual address */
2627 	virt_addr_lo = &dev_data->bus.pci_buf.virt_addr.lo;
2628 	pci_buf = (u32 *)(osal_uintptr_t)*((u64 *)virt_addr_lo);
2629 
2630 	if (!dev_data->bus.pci_buf.size)
2631 		return;
2632 
2633 	OSAL_MEMCPY(&pci_buf_phys_addr, &dev_data->bus.pci_buf.phys_addr, sizeof(pci_buf_phys_addr));
2634 
2635 	OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, pci_buf, pci_buf_phys_addr, dev_data->bus.pci_buf.size);
2636 
2637 	dev_data->bus.pci_buf.size = 0;
2638 }
2639 
2640 /* Dumps the list of DBG Bus inputs (blocks/Storms) to the specified buffer.
2641  * Returns the dumped size in dwords.
2642  */
2643 static u32 ecore_bus_dump_inputs(struct ecore_hwfn *p_hwfn,
2644 								 u32 *dump_buf,
2645 								 bool dump)
2646 {
2647 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2648 	char storm_name[8] = "?storm";
2649 	u32 block_id, offset = 0;
2650 	u8 storm_id;
2651 
2652 	/* Store storms */
2653 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
2654 		struct dbg_bus_storm_data *storm_bus = &dev_data->bus.storms[storm_id];
2655 		struct storm_defs *storm = &s_storm_defs[storm_id];
2656 
2657 		if (!dev_data->bus.storms[storm_id].enabled)
2658 			continue;
2659 
2660 		/* Dump section header */
2661 		storm_name[0] = storm->letter;
2662 		offset += ecore_dump_section_hdr(dump_buf + offset, dump, "bus_input", 3);
2663 		offset += ecore_dump_str_param(dump_buf + offset, dump, "name", storm_name);
2664 		offset += ecore_dump_num_param(dump_buf + offset, dump, "id", storm_bus->hw_id);
2665 		offset += ecore_dump_str_param(dump_buf + offset, dump, "mode", s_storm_mode_defs[storm_bus->mode].name);
2666 	}
2667 
2668 	/* Store blocks */
2669 	for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
2670 		struct dbg_bus_block_data *block_bus = &dev_data->bus.blocks[block_id];
2671 		struct block_defs *block = s_block_defs[block_id];
2672 
2673 		if (!GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK))
2674 			continue;
2675 
2676 		/* Dump section header */
2677 		offset += ecore_dump_section_hdr(dump_buf + offset, dump, "bus_input", 4);
2678 		offset += ecore_dump_str_param(dump_buf + offset, dump, "name", block->name);
2679 		offset += ecore_dump_num_param(dump_buf + offset, dump, "line", block_bus->line_num);
2680 		offset += ecore_dump_num_param(dump_buf + offset, dump, "en", GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK));
2681 		offset += ecore_dump_num_param(dump_buf + offset, dump, "shr", GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_RIGHT_SHIFT));
2682 	}
2683 
2684 	return offset;
2685 }
2686 
2687 /* Dumps the Debug Bus header (params, inputs, data header) to the specified
2688  * buffer. Returns the dumped size in dwords.
2689  */
2690 static u32 ecore_bus_dump_hdr(struct ecore_hwfn *p_hwfn,
2691 							  struct ecore_ptt *p_ptt,
2692 							  u32 *dump_buf,
2693 							  bool dump)
2694 {
2695 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2696 	char hw_id_mask_str[16];
2697 	u32 offset = 0;
2698 
2699 	if (OSAL_SNPRINTF(hw_id_mask_str, sizeof(hw_id_mask_str), "0x%x", dev_data->bus.hw_id_mask) < 0)
2700 		DP_NOTICE(p_hwfn, true, "Unexpected debug error: invalid HW ID mask\n");
2701 
2702 	/* Dump global params */
2703 	offset += ecore_dump_common_global_params(p_hwfn, p_ptt, dump_buf + offset, dump, 5);
2704 	offset += ecore_dump_str_param(dump_buf + offset, dump, "dump-type", "debug-bus");
2705 	offset += ecore_dump_str_param(dump_buf + offset, dump, "wrap-mode", dev_data->bus.one_shot_en ? "one-shot" : "wrap-around");
2706 	offset += ecore_dump_num_param(dump_buf + offset, dump, "hw-dwords", dev_data->bus.hw_dwords);
2707 	offset += ecore_dump_str_param(dump_buf + offset, dump, "hw-id-mask", hw_id_mask_str);
2708 	offset += ecore_dump_str_param(dump_buf + offset, dump, "target", s_dbg_target_names[dev_data->bus.target]);
2709 
2710 	offset += ecore_bus_dump_inputs(p_hwfn, dump_buf + offset, dump);
2711 
2712 	if (dev_data->bus.target != DBG_BUS_TARGET_ID_NIG) {
2713 		u32 recorded_dwords = 0;
2714 
2715 		if (dump)
2716 			recorded_dwords = ecore_bus_dump_data(p_hwfn, p_ptt, OSAL_NULL, false);
2717 
2718 		offset += ecore_dump_section_hdr(dump_buf + offset, dump, "bus_data", 1);
2719 		offset += ecore_dump_num_param(dump_buf + offset, dump, "size", recorded_dwords);
2720 	}
2721 
2722 	return offset;
2723 }
2724 
2725 static bool ecore_is_mode_match(struct ecore_hwfn *p_hwfn,
2726 								u16 *modes_buf_offset)
2727 {
2728 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2729 	bool arg1, arg2;
2730 	u8 tree_val;
2731 
2732 	/* Get next element from modes tree buffer */
2733 	tree_val = ((const u8 *)s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr)[(*modes_buf_offset)++];
2734 
2735 	switch (tree_val) {
2736 	case INIT_MODE_OP_NOT:
2737 		return !ecore_is_mode_match(p_hwfn, modes_buf_offset);
2738 	case INIT_MODE_OP_OR:
2739 	case INIT_MODE_OP_AND:
2740 		arg1 = ecore_is_mode_match(p_hwfn, modes_buf_offset);
2741 		arg2 = ecore_is_mode_match(p_hwfn, modes_buf_offset);
2742 		return (tree_val == INIT_MODE_OP_OR) ? (arg1 || arg2) : (arg1 && arg2);
2743 	default: return dev_data->mode_enable[tree_val - MAX_INIT_MODE_OPS] > 0;
2744 	}
2745 }
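
/* The modes tree is evaluated in prefix notation: each byte is either an
 * operator (INIT_MODE_OP_NOT/OR/AND) or a mode index offset by
 * MAX_INIT_MODE_OPS. For example (illustrative encoding), the sub-tree
 * { INIT_MODE_OP_AND, MAX_INIT_MODE_OPS + MODE_K2,
 *   INIT_MODE_OP_NOT, MAX_INIT_MODE_OPS + MODE_ASIC }
 * matches only when MODE_K2 is enabled and MODE_ASIC is not.
 */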
2746 
2747 /* Returns true if the specified entity (indicated by GRC param) should be
2748  * included in the dump, false otherwise.
2749  */
2750 static bool ecore_grc_is_included(struct ecore_hwfn *p_hwfn,
2751 								  enum dbg_grc_params grc_param)
2752 {
2753 	return ecore_grc_get_param(p_hwfn, grc_param) > 0;
2754 }
2755 
2756 /* Returns true if the specified Storm should be included in the dump, false
2757  * otherwise.
2758  */
2759 static bool ecore_grc_is_storm_included(struct ecore_hwfn *p_hwfn,
2760 										enum dbg_storms storm)
2761 {
2762 	return ecore_grc_get_param(p_hwfn, (enum dbg_grc_params)storm) > 0;
2763 }
2764 
2765 /* Returns true if the specified memory should be included in the dump, false
2766  * otherwise.
2767  */
2768 static bool ecore_grc_is_mem_included(struct ecore_hwfn *p_hwfn,
2769 									  enum block_id block_id,
2770 									  u8 mem_group_id)
2771 {
2772 	struct block_defs *block = s_block_defs[block_id];
2773 	u8 i;
2774 
2775 	/* Check Storm match */
2776 	if (block->associated_to_storm &&
2777 		!ecore_grc_is_storm_included(p_hwfn, (enum dbg_storms)block->storm_id))
2778 		return false;
2779 
2780 	for (i = 0; i < NUM_BIG_RAM_TYPES; i++) {
2781 		struct big_ram_defs *big_ram = &s_big_ram_defs[i];
2782 
2783 		if (mem_group_id == big_ram->mem_group_id || mem_group_id == big_ram->ram_mem_group_id)
2784 			return ecore_grc_is_included(p_hwfn, big_ram->grc_param);
2785 	}
2786 
2787 	switch (mem_group_id) {
2788 	case MEM_GROUP_PXP_ILT:
2789 	case MEM_GROUP_PXP_MEM:
2790 		return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PXP);
2791 	case MEM_GROUP_RAM:
2792 		return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_RAM);
2793 	case MEM_GROUP_PBUF:
2794 		return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PBUF);
2795 	case MEM_GROUP_CAU_MEM:
2796 	case MEM_GROUP_CAU_SB:
2797 	case MEM_GROUP_CAU_PI:
2798 		return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CAU);
2799 	case MEM_GROUP_QM_MEM:
2800 		return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_QM);
2801 	case MEM_GROUP_CFC_MEM:
2802 	case MEM_GROUP_CONN_CFC_MEM:
2803 	case MEM_GROUP_TASK_CFC_MEM:
2804 		return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CFC) || ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM_CTX);
2805 	case MEM_GROUP_IGU_MEM:
2806 	case MEM_GROUP_IGU_MSIX:
2807 		return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IGU);
2808 	case MEM_GROUP_MULD_MEM:
2809 		return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MULD);
2810 	case MEM_GROUP_PRS_MEM:
2811 		return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PRS);
2812 	case MEM_GROUP_DMAE_MEM:
2813 		return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DMAE);
2814 	case MEM_GROUP_TM_MEM:
2815 		return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_TM);
2816 	case MEM_GROUP_SDM_MEM:
2817 		return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_SDM);
2818 	case MEM_GROUP_TDIF_CTX:
2819 	case MEM_GROUP_RDIF_CTX:
2820 		return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DIF);
2821 	case MEM_GROUP_CM_MEM:
2822 		return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM);
2823 	case MEM_GROUP_IOR:
2824 		return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IOR);
2825 	default:
2826 		return true;
2827 	}
2828 }
2829 
2830 /* Stalls all Storms */
2831 static void ecore_grc_stall_storms(struct ecore_hwfn *p_hwfn,
2832 								   struct ecore_ptt *p_ptt,
2833 								   bool stall)
2834 {
2835 	u32 reg_addr;
2836 	u8 storm_id;
2837 
2838 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
2839 		if (!ecore_grc_is_storm_included(p_hwfn, (enum dbg_storms)storm_id))
2840 			continue;
2841 
2842 		reg_addr = s_storm_defs[storm_id].sem_fast_mem_addr + SEM_FAST_REG_STALL_0_BB_K2;
2843 		ecore_wr(p_hwfn, p_ptt, reg_addr, stall ? 1 : 0);
2844 	}
2845 
2846 	OSAL_MSLEEP(STALL_DELAY_MS);
2847 }
2848 
2849 /* Takes all blocks out of reset */
2850 static void ecore_grc_unreset_blocks(struct ecore_hwfn *p_hwfn,
2851 									 struct ecore_ptt *p_ptt)
2852 {
2853 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2854 	u32 reg_val[MAX_DBG_RESET_REGS] = { 0 };
2855 	u32 block_id, i;
2856 
2857 	/* Fill reset regs values */
2858 	for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
2859 		struct block_defs *block = s_block_defs[block_id];
2860 
2861 		if (block->exists[dev_data->chip_id] && block->has_reset_bit && block->unreset)
2862 			reg_val[block->reset_reg] |= (1 << block->reset_bit_offset);
2863 	}
2864 
2865 	/* Write reset registers */
2866 	for (i = 0; i < MAX_DBG_RESET_REGS; i++) {
2867 		if (!s_reset_regs_defs[i].exists[dev_data->chip_id])
2868 			continue;
2869 
2870 		reg_val[i] |= s_reset_regs_defs[i].unreset_val[dev_data->chip_id];
2871 
2872 		if (reg_val[i])
2873 			ecore_wr(p_hwfn, p_ptt, s_reset_regs_defs[i].addr + RESET_REG_UNRESET_OFFSET, reg_val[i]);
2874 	}
2875 }
2876 
2877 /* Returns the attention block data of the specified block */
2878 static const struct dbg_attn_block_type_data* ecore_get_block_attn_data(enum block_id block_id,
2879 																		enum dbg_attn_type attn_type)
2880 {
2881 	const struct dbg_attn_block *base_attn_block_arr = (const struct dbg_attn_block *)s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr;
2882 
2883 	return &base_attn_block_arr[block_id].per_type_data[attn_type];
2884 }
2885 
2886 /* Returns the attention registers of the specified block */
2887 static const struct dbg_attn_reg* ecore_get_block_attn_regs(enum block_id block_id,
2888 															enum dbg_attn_type attn_type,
2889 															u8 *num_attn_regs)
2890 {
2891 	const struct dbg_attn_block_type_data *block_type_data = ecore_get_block_attn_data(block_id, attn_type);
2892 
2893 	*num_attn_regs = block_type_data->num_regs;
2894 
2895 	return &((const struct dbg_attn_reg *)s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)[block_type_data->regs_offset];
2896 }
2897 
2898 /* For each block, clear the status of all parities */
2899 static void ecore_grc_clear_all_prty(struct ecore_hwfn *p_hwfn,
2900 									 struct ecore_ptt *p_ptt)
2901 {
2902 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2903 	const struct dbg_attn_reg *attn_reg_arr;
2904 	u8 reg_idx, num_attn_regs;
2905 	u32 block_id;
2906 
2907 	for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
2908 		if (dev_data->block_in_reset[block_id])
2909 			continue;
2910 
2911 		attn_reg_arr = ecore_get_block_attn_regs((enum block_id)block_id, ATTN_TYPE_PARITY, &num_attn_regs);
2912 
2913 		for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
2914 			const struct dbg_attn_reg *reg_data = &attn_reg_arr[reg_idx];
2915 			u16 modes_buf_offset;
2916 			bool eval_mode;
2917 
2918 			/* Check mode */
2919 			eval_mode = GET_FIELD(reg_data->mode.data, DBG_MODE_HDR_EVAL_MODE) > 0;
2920 			modes_buf_offset = GET_FIELD(reg_data->mode.data, DBG_MODE_HDR_MODES_BUF_OFFSET);
2921 
2922 			/* If Mode match: clear parity status */
2923 			if (!eval_mode || ecore_is_mode_match(p_hwfn, &modes_buf_offset))
2924 				ecore_rd(p_hwfn, p_ptt, DWORDS_TO_BYTES(reg_data->sts_clr_address));
2925 		}
2926 	}
2927 }
2928 
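/* Note on ecore_grc_clear_all_prty(): sts_clr_address points at each
 * block's parity status-clear register, which is assumed here to be
 * read-to-clear. The value returned by ecore_rd() is intentionally
 * discarded; the read itself is what clears the latched parity status.
 */
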
2929 /* Dumps GRC registers section header. Returns the dumped size in dwords.
2930  * The following parameters are dumped:
2931  * - count:	 no. of dumped entries
2932  * - split:	 split type
2933  * - id:	 split ID (dumped only if split_id >= 0)
2934  * - param_name: user parameter value (dumped only if param_name != OSAL_NULL
2935  *		 and param_val != OSAL_NULL).
2936  */
2937 static u32 ecore_grc_dump_regs_hdr(u32 *dump_buf,
2938 								   bool dump,
2939 								   u32 num_reg_entries,
2940 								   const char *split_type,
2941 								   int split_id,
2942 								   const char *param_name,
2943 								   const char *param_val)
2944 {
2945 	u8 num_params = 2 + (split_id >= 0 ? 1 : 0) + (param_name ? 1 : 0);
2946 	u32 offset = 0;
2947 
2948 	offset += ecore_dump_section_hdr(dump_buf + offset, dump, "grc_regs", num_params);
2949 	offset += ecore_dump_num_param(dump_buf + offset, dump, "count", num_reg_entries);
2950 	offset += ecore_dump_str_param(dump_buf + offset, dump, "split", split_type);
2951 	if (split_id >= 0)
2952 		offset += ecore_dump_num_param(dump_buf + offset, dump, "id", split_id);
2953 	if (param_name && param_val)
2954 		offset += ecore_dump_str_param(dump_buf + offset, dump, param_name, param_val);
2955 
2956 	return offset;
2957 }
2958 
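/* Illustrative use of ecore_grc_dump_regs_hdr() (the values below are
 * hypothetical, not taken from this file): a per-port split with no user
 * parameter emits a "grc_regs" section with 3 parameters:
 *
 *     ecore_grc_dump_regs_hdr(buf, true, 10, "port", 1, OSAL_NULL, OSAL_NULL);
 *     --> section "grc_regs": count = 10, split = "port", id = 1
 *
 * A negative split_id (e.g. -1 for engine-wide sections) drops the "id"
 * parameter, and an OSAL_NULL param_name/param_val drops the user parameter.
 */
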
2959 /* Reads the specified registers into the specified buffer.
2960  * The addr and len arguments are specified in dwords.
2961  */
2962 void ecore_read_regs(struct ecore_hwfn *p_hwfn,
2963 					 struct ecore_ptt *p_ptt,
2964 					 u32 *buf,
2965 					 u32 addr,
2966 					 u32 len)
2967 {
2968 	u32 i;
2969 
2970 	for (i = 0; i < len; i++)
2971 		buf[i] = ecore_rd(p_hwfn, p_ptt, DWORDS_TO_BYTES(addr + i));
2972 }
2973 
2974 /* Dumps the GRC registers in the specified address range.
2975  * Returns the dumped size in dwords.
2976  * The addr and len arguments are specified in dwords.
2977  */
2978 static u32 ecore_grc_dump_addr_range(struct ecore_hwfn *p_hwfn,
2979 									 struct ecore_ptt *p_ptt,
2980 									 u32 *dump_buf,
2981 									 bool dump,
2982 									 u32 addr,
2983 									 u32 len,
2984 									 bool wide_bus)
2985 {
2986 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2987 
2988 	if (!dump)
2989 		return len;
2990 
2991 	/* Print log if needed */
2992 	dev_data->num_regs_read += len;
2993 	if (dev_data->num_regs_read >= s_platform_defs[dev_data->platform_id].log_thresh) {
2994 		DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "Dumping %d registers...\n", dev_data->num_regs_read);
2995 		dev_data->num_regs_read = 0;
2996 	}
2997 
2998 	/* Try reading using DMAE */
2999 	if (dev_data->use_dmae && (len >= s_platform_defs[dev_data->platform_id].dmae_thresh || (PROTECT_WIDE_BUS && wide_bus))) {
3000 		if (!ecore_dmae_grc2host(p_hwfn, p_ptt, DWORDS_TO_BYTES(addr), (u64)(osal_uintptr_t)(dump_buf), len, OSAL_NULL))
3001 			return len;
3002 		dev_data->use_dmae = 0;
3003 		DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "Failed reading from chip using DMAE, using GRC instead\n");
3004 	}
3005 
3006 	/* Read registers */
3007 	ecore_read_regs(p_hwfn, p_ptt, dump_buf, addr, len);
3008 
3009 	return len;
3010 }
3011 
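/* Note on ecore_grc_dump_addr_range(): reads are attempted via DMAE first
 * whenever the range is long enough (dmae_thresh) or the registers sit on
 * a wide bus that must not be read dword-by-dword. A single DMAE failure
 * permanently latches use_dmae off for this dump, and all remaining reads
 * fall back to plain GRC accesses through ecore_read_regs().
 */
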
3012 /* Dumps GRC registers sequence header. Returns the dumped size in dwords.
3013  * The addr and len arguments are specified in dwords.
3014  */
3015 static u32 ecore_grc_dump_reg_entry_hdr(u32 *dump_buf,
3016 										bool dump,
3017 										u32 addr,
3018 										u32 len)
3019 {
3020 	if (dump)
3021 		*dump_buf = addr | (len << REG_DUMP_LEN_SHIFT);
3022 
3023 	return 1;
3024 }
3025 
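/* Example of the header dword written by ecore_grc_dump_reg_entry_hdr(),
 * assuming REG_DUMP_LEN_SHIFT == 24 (the real value is defined elsewhere
 * in this driver): a 16-dword entry at dword address 0x1234 is encoded as
 *
 *     0x1234 | (16 << 24) == 0x10001234
 *
 * i.e. the GRC dword address occupies the low bits and the entry length
 * the bits at and above REG_DUMP_LEN_SHIFT.
 */
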
3026 /* Dumps GRC registers sequence. Returns the dumped size in dwords.
3027  * The addr and len arguments are specified in dwords.
3028  */
3029 static u32 ecore_grc_dump_reg_entry(struct ecore_hwfn *p_hwfn,
3030 									struct ecore_ptt *p_ptt,
3031 									u32 *dump_buf,
3032 									bool dump,
3033 									u32 addr,
3034 									u32 len,
3035 									bool wide_bus)
3036 {
3037 	u32 offset = 0;
3038 
3039 	offset += ecore_grc_dump_reg_entry_hdr(dump_buf, dump, addr, len);
3040 	offset += ecore_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, dump, addr, len, wide_bus);
3041 
3042 	return offset;
3043 }
3044 
3045 /* Dumps GRC registers sequence with skip cycle.
3046  * Returns the dumped size in dwords.
3047  * - addr:	start GRC address in dwords
3048  * - total_len:	total no. of dwords to dump
3049  * - read_len:	no. consecutive dwords to read
3050  * - skip_len:	no. of dwords to skip (and fill with zeros)
3051  */
3052 static u32 ecore_grc_dump_reg_entry_skip(struct ecore_hwfn *p_hwfn,
3053 										 struct ecore_ptt *p_ptt,
3054 										 u32 *dump_buf,
3055 										 bool dump,
3056 										 u32 addr,
3057 										 u32 total_len,
3058 										 u32 read_len,
3059 										 u32 skip_len)
3060 {
3061 	u32 offset = 0, reg_offset = 0;
3062 
3063 	offset += ecore_grc_dump_reg_entry_hdr(dump_buf, dump, addr, total_len);
3064 
3065 	if (!dump)
3066 		return offset + total_len;
3067 
3068 	while (reg_offset < total_len) {
3069 		u32 curr_len = OSAL_MIN_T(u32, read_len, total_len - reg_offset);
3070 
3071 		offset += ecore_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, dump, addr, curr_len, false);
3072 		reg_offset += curr_len;
3073 		addr += curr_len;
3074 
3075 		if (reg_offset < total_len) {
3076 			curr_len = OSAL_MIN_T(u32, skip_len, total_len - reg_offset);
3077 			OSAL_MEMSET(dump_buf + offset, 0, DWORDS_TO_BYTES(curr_len));
3078 			offset += curr_len;
3079 			reg_offset += curr_len;
3080 			addr += curr_len;
3081 		}
3082 	}
3083 
3084 	return offset;
3085 }
3086 
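/* Worked example for ecore_grc_dump_reg_entry_skip(), matching the way it
 * is used for the R/TDIF debug-error-info dump below (read 7, skip 1):
 * with total_len = 16, read_len = 7 and skip_len = 1, the dumped data is
 *
 *     [7 dwords read] [1 dword zeroed] [7 dwords read] [1 dword zeroed]
 *
 * so every 8th register is replaced by a zero placeholder while the entry
 * header still advertises the full 16-dword length.
 */
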
3087 /* Dumps GRC registers entries. Returns the dumped size in dwords. */
3088 static u32 ecore_grc_dump_regs_entries(struct ecore_hwfn *p_hwfn,
3089 									   struct ecore_ptt *p_ptt,
3090 									   struct dbg_array input_regs_arr,
3091 									   u32 *dump_buf,
3092 									   bool dump,
3093 									   bool block_enable[MAX_BLOCK_ID],
3094 									   u32 *num_dumped_reg_entries)
3095 {
3096 	u32 i, offset = 0, input_offset = 0;
3097 	bool mode_match = true;
3098 
3099 	*num_dumped_reg_entries = 0;
3100 
3101 	while (input_offset < input_regs_arr.size_in_dwords) {
3102 		const struct dbg_dump_cond_hdr *cond_hdr = (const struct dbg_dump_cond_hdr *)&input_regs_arr.ptr[input_offset++];
3103 		u16 modes_buf_offset;
3104 		bool eval_mode;
3105 
3106 		/* Check mode/block */
3107 		eval_mode = GET_FIELD(cond_hdr->mode.data, DBG_MODE_HDR_EVAL_MODE) > 0;
3108 		if (eval_mode) {
3109 			modes_buf_offset = GET_FIELD(cond_hdr->mode.data, DBG_MODE_HDR_MODES_BUF_OFFSET);
3110 			mode_match = ecore_is_mode_match(p_hwfn, &modes_buf_offset);
3111 		}
3112 
3113 		if (!mode_match || !block_enable[cond_hdr->block_id]) {
3114 			input_offset += cond_hdr->data_size;
3115 			continue;
3116 		}
3117 
3118 		for (i = 0; i < cond_hdr->data_size; i++, input_offset++) {
3119 			const struct dbg_dump_reg *reg = (const struct dbg_dump_reg *)&input_regs_arr.ptr[input_offset];
3120 
3121 			offset += ecore_grc_dump_reg_entry(p_hwfn, p_ptt, dump_buf + offset, dump,
3122 				GET_FIELD(reg->data, DBG_DUMP_REG_ADDRESS),
3123 				GET_FIELD(reg->data, DBG_DUMP_REG_LENGTH),
3124 				GET_FIELD(reg->data, DBG_DUMP_REG_WIDE_BUS));
3125 			(*num_dumped_reg_entries)++;
3126 		}
3127 	}
3128 
3129 	return offset;
3130 }
3131 
3132 /* Dumps a GRC registers split (section header + register entries).
3132  * Returns the dumped size in dwords.
3132  */
3133 static u32 ecore_grc_dump_split_data(struct ecore_hwfn *p_hwfn,
3134 									 struct ecore_ptt *p_ptt,
3135 									 struct dbg_array input_regs_arr,
3136 									 u32 *dump_buf,
3137 									 bool dump,
3138 									 bool block_enable[MAX_BLOCK_ID],
3139 									 const char *split_type_name,
3140 									 u32 split_id,
3141 									 const char *param_name,
3142 									 const char *param_val)
3143 {
3144 	u32 num_dumped_reg_entries, offset;
3145 
3146 	/* Calculate register dump header size (and skip it for now) */
3147 	offset = ecore_grc_dump_regs_hdr(dump_buf, false, 0, split_type_name, split_id, param_name, param_val);
3148 
3149 	/* Dump registers */
3150 	offset += ecore_grc_dump_regs_entries(p_hwfn, p_ptt, input_regs_arr, dump_buf + offset, dump, block_enable, &num_dumped_reg_entries);
3151 
3152 	/* Write register dump header */
3153 	if (dump && num_dumped_reg_entries > 0)
3154 		ecore_grc_dump_regs_hdr(dump_buf, dump, num_dumped_reg_entries, split_type_name, split_id, param_name, param_val);
3155 
3156 	return num_dumped_reg_entries > 0 ? offset : 0;
3157 }
3158 
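/* Note on ecore_grc_dump_split_data(): the header is produced twice. The
 * first call with dump == false only measures the header size so that the
 * register entries can be written directly after it; once the real entry
 * count is known, the header is rewritten in place. If no entry matched,
 * 0 is returned and the (unwritten) section is dropped entirely.
 */
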
3159 /* Dumps registers according to the input registers array. Returns the dumped
3160  * size in dwords.
3161  */
3162 static u32 ecore_grc_dump_registers(struct ecore_hwfn *p_hwfn,
3163 									struct ecore_ptt *p_ptt,
3164 									u32 *dump_buf,
3165 									bool dump,
3166 									bool block_enable[MAX_BLOCK_ID],
3167 									const char *param_name,
3168 									const char *param_val)
3169 {
3170 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3171 	struct chip_platform_defs *chip_platform;
3172 	u32 offset = 0, input_offset = 0;
3173 	u8 port_id, pf_id, vf_id;
3174 
3175 	chip_platform = &s_chip_defs[dev_data->chip_id].per_platform[dev_data->platform_id];
3176 
3177 	while (input_offset < s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].size_in_dwords) {
3178 		const struct dbg_dump_split_hdr *split_hdr;
3179 		struct dbg_array curr_input_regs_arr;
3180 		u32 split_data_size;
3181 		u8 split_type_id;
3182 
3183 		split_hdr = (const struct dbg_dump_split_hdr *)&s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr[input_offset++];
3184 		split_type_id = GET_FIELD(split_hdr->hdr, DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
3185 		split_data_size = GET_FIELD(split_hdr->hdr, DBG_DUMP_SPLIT_HDR_DATA_SIZE);
3186 		curr_input_regs_arr.ptr = &s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr[input_offset];
3187 		curr_input_regs_arr.size_in_dwords = split_data_size;
3188 
3189 		switch (split_type_id) {
3190 		case SPLIT_TYPE_NONE:
3191 			offset += ecore_grc_dump_split_data(p_hwfn, p_ptt, curr_input_regs_arr, dump_buf + offset, dump, block_enable, "eng", (u32)(-1), param_name, param_val);
3192 			break;
3193 
3194 		case SPLIT_TYPE_PORT:
3195 			for (port_id = 0; port_id < chip_platform->num_ports; port_id++) {
3196 				if (dump)
3197 					ecore_port_pretend(p_hwfn, p_ptt, port_id);
3198 				offset += ecore_grc_dump_split_data(p_hwfn, p_ptt, curr_input_regs_arr, dump_buf + offset, dump, block_enable, "port", port_id, param_name, param_val);
3199 			}
3200 			break;
3201 
3202 		case SPLIT_TYPE_PF:
3203 		case SPLIT_TYPE_PORT_PF:
3204 			for (pf_id = 0; pf_id < chip_platform->num_pfs; pf_id++) {
3205 				if (dump)
3206 					ecore_fid_pretend(p_hwfn, p_ptt, (pf_id << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT));
3207 				offset += ecore_grc_dump_split_data(p_hwfn, p_ptt, curr_input_regs_arr, dump_buf + offset, dump, block_enable, "pf", pf_id, param_name, param_val);
3208 			}
3209 			break;
3210 
3211 		case SPLIT_TYPE_VF:
3212 			for (vf_id = 0; vf_id < chip_platform->num_vfs; vf_id++) {
3213 				if (dump)
3214 					ecore_fid_pretend(p_hwfn, p_ptt, (1 << PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT) | (vf_id << PXP_PRETEND_CONCRETE_FID_VFID_SHIFT));
3215 				offset += ecore_grc_dump_split_data(p_hwfn, p_ptt, curr_input_regs_arr, dump_buf + offset, dump, block_enable, "vf", vf_id, param_name, param_val);
3216 			}
3217 			break;
3218 
3219 		default:
3220 			break;
3221 		}
3222 
3223 		input_offset += split_data_size;
3224 	}
3225 
3226 	/* Pretend to original PF */
3227 	if (dump)
3228 		ecore_fid_pretend(p_hwfn, p_ptt, (p_hwfn->rel_pf_id << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT));
3229 
3230 	return offset;
3231 }
3232 
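/* Note on the pretend scheme in ecore_grc_dump_registers(): per-port
 * splits use ecore_port_pretend(), while PF/VF splits build a concrete
 * FID for ecore_fid_pretend(). For example (shift names as used above;
 * the actual bit positions are defined elsewhere), VF 3 would pretend with
 *
 *     (1 << PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT) |
 *     (3 << PXP_PRETEND_CONCRETE_FID_VFID_SHIFT)
 *
 * When dump is set, the original PF pretend is restored before returning.
 */
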
3233 /* Dump reset registers. Returns the dumped size in dwords. */
3234 static u32 ecore_grc_dump_reset_regs(struct ecore_hwfn *p_hwfn,
3235 	struct ecore_ptt *p_ptt,
3236 	u32 *dump_buf,
3237 	bool dump)
3238 {
3239 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3240 	u32 i, offset = 0, num_regs = 0;
3241 
3242 	/* Calculate header size */
3243 	offset += ecore_grc_dump_regs_hdr(dump_buf, false, 0, "eng", -1, OSAL_NULL, OSAL_NULL);
3244 
3245 	/* Write reset registers */
3246 	for (i = 0; i < MAX_DBG_RESET_REGS; i++) {
3247 		if (!s_reset_regs_defs[i].exists[dev_data->chip_id])
3248 			continue;
3249 
3250 		offset += ecore_grc_dump_reg_entry(p_hwfn, p_ptt, dump_buf + offset, dump, BYTES_TO_DWORDS(s_reset_regs_defs[i].addr), 1, false);
3251 		num_regs++;
3252 	}
3253 
3254 	/* Write header */
3255 	if (dump)
3256 		ecore_grc_dump_regs_hdr(dump_buf, true, num_regs, "eng", -1, OSAL_NULL, OSAL_NULL);
3257 
3258 	return offset;
3259 }
3260 
3261 /* Dump registers that are modified during GRC Dump and therefore must be
3262  * dumped first. Returns the dumped size in dwords.
3263  */
3264 static u32 ecore_grc_dump_modified_regs(struct ecore_hwfn *p_hwfn,
3265 										struct ecore_ptt *p_ptt,
3266 										u32 *dump_buf,
3267 										bool dump)
3268 {
3269 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3270 	u32 block_id, offset = 0, num_reg_entries = 0;
3271 	const struct dbg_attn_reg *attn_reg_arr;
3272 	u8 storm_id, reg_idx, num_attn_regs;
3273 
3274 	/* Calculate header size */
3275 	offset += ecore_grc_dump_regs_hdr(dump_buf, false, 0, "eng", -1, OSAL_NULL, OSAL_NULL);
3276 
3277 	/* Write parity registers */
3278 	for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
3279 		if (dev_data->block_in_reset[block_id] && dump)
3280 			continue;
3281 
3282 		attn_reg_arr = ecore_get_block_attn_regs((enum block_id)block_id, ATTN_TYPE_PARITY, &num_attn_regs);
3283 
3284 		for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
3285 			const struct dbg_attn_reg *reg_data = &attn_reg_arr[reg_idx];
3286 			u16 modes_buf_offset;
3287 			bool eval_mode;
3288 
3289 			/* Check mode */
3290 			eval_mode = GET_FIELD(reg_data->mode.data, DBG_MODE_HDR_EVAL_MODE) > 0;
3291 			modes_buf_offset = GET_FIELD(reg_data->mode.data, DBG_MODE_HDR_MODES_BUF_OFFSET);
3292 			if (eval_mode && !ecore_is_mode_match(p_hwfn, &modes_buf_offset))
3293 				continue;
3294 
3295 			/* Mode match: read & dump registers */
3296 			offset += ecore_grc_dump_reg_entry(p_hwfn, p_ptt, dump_buf + offset, dump, reg_data->mask_address, 1, false);
3297 			offset += ecore_grc_dump_reg_entry(p_hwfn, p_ptt, dump_buf + offset, dump, GET_FIELD(reg_data->data, DBG_ATTN_REG_STS_ADDRESS), 1, false);
3298 			num_reg_entries += 2;
3299 		}
3300 	}
3301 
3302 	/* Write Storm stall status registers */
3303 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
3304 		struct storm_defs *storm = &s_storm_defs[storm_id];
3305 
3306 		if (dev_data->block_in_reset[storm->block_id] && dump)
3307 			continue;
3308 
3309 		offset += ecore_grc_dump_reg_entry(p_hwfn, p_ptt, dump_buf + offset, dump,
3310 			BYTES_TO_DWORDS(storm->sem_fast_mem_addr + SEM_FAST_REG_STALLED), 1, false);
3311 		num_reg_entries++;
3312 	}
3313 
3314 	/* Write header */
3315 	if (dump)
3316 		ecore_grc_dump_regs_hdr(dump_buf, true, num_reg_entries, "eng", -1, OSAL_NULL, OSAL_NULL);
3317 
3318 	return offset;
3319 }
3320 
3321 /* Dumps registers that can't be represented in the debug arrays */
3322 static u32 ecore_grc_dump_special_regs(struct ecore_hwfn *p_hwfn,
3323 									   struct ecore_ptt *p_ptt,
3324 									   u32 *dump_buf,
3325 									   bool dump)
3326 {
3327 	u32 offset = 0;
3328 
3329 	offset += ecore_grc_dump_regs_hdr(dump_buf, dump, 2, "eng", -1, OSAL_NULL, OSAL_NULL);
3330 
3331 	/* Dump R/TDIF_REG_DEBUG_ERROR_INFO (every 8th register is
3332 	 * skipped).
3333 	 */
3334 	offset += ecore_grc_dump_reg_entry_skip(p_hwfn, p_ptt, dump_buf + offset, dump, BYTES_TO_DWORDS(RDIF_REG_DEBUG_ERROR_INFO), RDIF_REG_DEBUG_ERROR_INFO_SIZE, 7, 1);
3335 	offset += ecore_grc_dump_reg_entry_skip(p_hwfn, p_ptt, dump_buf + offset, dump, BYTES_TO_DWORDS(TDIF_REG_DEBUG_ERROR_INFO), TDIF_REG_DEBUG_ERROR_INFO_SIZE, 7, 1);
3336 
3337 	return offset;
3338 }
3339 
3340 /* Dumps a GRC memory header (section and params). Returns the dumped size in
3341  * dwords. The following parameters are dumped:
3342  * - name:	   dumped only if it's not OSAL_NULL.
3343  * - addr:	   in dwords, dumped only if name is OSAL_NULL.
3344  * - len:	   in dwords, always dumped.
3345  * - width:	   dumped if it's not zero.
3346  * - packed:	   dumped only if it's not false.
3347  * - mem_group:	   always dumped.
3348  * - is_storm:	   true only if the memory is related to a Storm.
3349  * - storm_letter: valid only if is_storm is true.
3350  *
3351  */
3352 static u32 ecore_grc_dump_mem_hdr(struct ecore_hwfn *p_hwfn,
3353 								  u32 *dump_buf,
3354 								  bool dump,
3355 								  const char *name,
3356 								  u32 addr,
3357 								  u32 len,
3358 								  u32 bit_width,
3359 								  bool packed,
3360 								  const char *mem_group,
3361 								  bool is_storm,
3362 								  char storm_letter)
3363 {
3364 	u8 num_params = 3;
3365 	u32 offset = 0;
3366 	char buf[64];
3367 
3368 	if (!len)
3369 		DP_NOTICE(p_hwfn, true, "Unexpected GRC Dump error: dumped memory size must be non-zero\n");
3370 
3371 	if (bit_width)
3372 		num_params++;
3373 	if (packed)
3374 		num_params++;
3375 
3376 	/* Dump section header */
3377 	offset += ecore_dump_section_hdr(dump_buf + offset, dump, "grc_mem", num_params);
3378 
3379 	if (name) {
3380 		/* Dump name */
3381 		if (is_storm) {
3382 			OSAL_STRCPY(buf, "?STORM_");
3383 			buf[0] = storm_letter;
3384 			OSAL_STRCPY(buf + OSAL_STRLEN(buf), name);
3385 		}
3386 		else {
3387 			OSAL_STRCPY(buf, name);
3388 		}
3389 
3390 		offset += ecore_dump_str_param(dump_buf + offset, dump, "name", buf);
3391 	}
3392 	else {
3393 		/* Dump address */
3394 		u32 addr_in_bytes = DWORDS_TO_BYTES(addr);
3395 
3396 		offset += ecore_dump_num_param(dump_buf + offset, dump, "addr", addr_in_bytes);
3397 	}
3398 
3399 	/* Dump len */
3400 	offset += ecore_dump_num_param(dump_buf + offset, dump, "len", len);
3401 
3402 	/* Dump bit width */
3403 	if (bit_width)
3404 		offset += ecore_dump_num_param(dump_buf + offset, dump, "width", bit_width);
3405 
3406 	/* Dump packed */
3407 	if (packed)
3408 		offset += ecore_dump_num_param(dump_buf + offset, dump, "packed", 1);
3409 
3410 	/* Dump reg type */
3411 	if (is_storm) {
3412 		OSAL_STRCPY(buf, "?STORM_");
3413 		buf[0] = storm_letter;
3414 		OSAL_STRCPY(buf + OSAL_STRLEN(buf), mem_group);
3415 	}
3416 	else {
3417 		OSAL_STRCPY(buf, mem_group);
3418 	}
3419 
3420 	offset += ecore_dump_str_param(dump_buf + offset, dump, "type", buf);
3421 
3422 	return offset;
3423 }
3424 
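/* Note on the "?STORM_" pattern in ecore_grc_dump_mem_hdr(): the string
 * is copied with a '?' placeholder and buf[0] is then overwritten with
 * the Storm letter, so e.g. storm_letter == 'T' with name == "SEM_RAM"
 * (a hypothetical name) produces "TSTORM_SEM_RAM". The same substitution
 * is applied to mem_group for the "type" parameter.
 */
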
3425 /* Dumps a single GRC memory. If name is OSAL_NULL, the memory is identified by address.
3426  * Returns the dumped size in dwords.
3427  * The addr and len arguments are specified in dwords.
3428  */
3429 static u32 ecore_grc_dump_mem(struct ecore_hwfn *p_hwfn,
3430 							  struct ecore_ptt *p_ptt,
3431 							  u32 *dump_buf,
3432 							  bool dump,
3433 							  const char *name,
3434 							  u32 addr,
3435 							  u32 len,
3436 							  bool wide_bus,
3437 							  u32 bit_width,
3438 							  bool packed,
3439 							  const char *mem_group,
3440 							  bool is_storm,
3441 							  char storm_letter)
3442 {
3443 	u32 offset = 0;
3444 
3445 	offset += ecore_grc_dump_mem_hdr(p_hwfn, dump_buf + offset, dump, name, addr, len, bit_width, packed, mem_group, is_storm, storm_letter);
3446 	offset += ecore_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, dump, addr, len, wide_bus);
3447 
3448 	return offset;
3449 }
3450 
3451 /* Dumps GRC memories entries. Returns the dumped size in dwords. */
3452 static u32 ecore_grc_dump_mem_entries(struct ecore_hwfn *p_hwfn,
3453 									  struct ecore_ptt *p_ptt,
3454 									  struct dbg_array input_mems_arr,
3455 									  u32 *dump_buf,
3456 									  bool dump)
3457 {
3458 	u32 i, offset = 0, input_offset = 0;
3459 	bool mode_match = true;
3460 
3461 	while (input_offset < input_mems_arr.size_in_dwords) {
3462 		const struct dbg_dump_cond_hdr *cond_hdr;
3463 		u16 modes_buf_offset;
3464 		u32 num_entries;
3465 		bool eval_mode;
3466 
3467 		cond_hdr = (const struct dbg_dump_cond_hdr *)&input_mems_arr.ptr[input_offset++];
3468 		num_entries = cond_hdr->data_size / MEM_DUMP_ENTRY_SIZE_DWORDS;
3469 
3470 		/* Check required mode */
3471 		eval_mode = GET_FIELD(cond_hdr->mode.data, DBG_MODE_HDR_EVAL_MODE) > 0;
3472 		if (eval_mode) {
3473 			modes_buf_offset = GET_FIELD(cond_hdr->mode.data, DBG_MODE_HDR_MODES_BUF_OFFSET);
3474 			mode_match = ecore_is_mode_match(p_hwfn, &modes_buf_offset);
3475 		}
3476 
3477 		if (!mode_match) {
3478 			input_offset += cond_hdr->data_size;
3479 			continue;
3480 		}
3481 
3482 		for (i = 0; i < num_entries; i++, input_offset += MEM_DUMP_ENTRY_SIZE_DWORDS) {
3483 			const struct dbg_dump_mem *mem = (const struct dbg_dump_mem *)&input_mems_arr.ptr[input_offset];
3484 			u8 mem_group_id = GET_FIELD(mem->dword0, DBG_DUMP_MEM_MEM_GROUP_ID);
3485 			bool is_storm = false, mem_wide_bus;
3486 			char storm_letter = 'a';
3487 			u32 mem_addr, mem_len;
3488 
3489 			if (mem_group_id >= MEM_GROUPS_NUM) {
3490 				DP_NOTICE(p_hwfn, true, "Invalid mem_group_id\n");
3491 				return 0;
3492 			}
3493 
3494 			if (!ecore_grc_is_mem_included(p_hwfn, (enum block_id)cond_hdr->block_id, mem_group_id))
3495 				continue;
3496 
3497 			mem_addr = GET_FIELD(mem->dword0, DBG_DUMP_MEM_ADDRESS);
3498 			mem_len = GET_FIELD(mem->dword1, DBG_DUMP_MEM_LENGTH);
3499 			mem_wide_bus = GET_FIELD(mem->dword1, DBG_DUMP_MEM_WIDE_BUS);
3500 
3501 			/* Update memory length for CCFC/TCFC memories
3502 			 * according to number of LCIDs/LTIDs.
3503 			 */
3504 			if (mem_group_id == MEM_GROUP_CONN_CFC_MEM) {
3505 				if (mem_len % MAX_LCIDS) {
3506 					DP_NOTICE(p_hwfn, true, "Invalid CCFC connection memory size\n");
3507 					return 0;
3508 				}
3509 
3510 				mem_len = ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NUM_LCIDS) * (mem_len / MAX_LCIDS);
3511 			}
3512 			else if (mem_group_id == MEM_GROUP_TASK_CFC_MEM) {
3513 				if (mem_len % MAX_LTIDS) {
3514 					DP_NOTICE(p_hwfn, true, "Invalid TCFC task memory size\n");
3515 					return 0;
3516 				}
3517 
3518 				mem_len = ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NUM_LTIDS) * (mem_len / MAX_LTIDS);
3519 			}
3520 
3521 			/* If memory is associated with Storm, update Storm
3522 			 * details.
3523 			 */
3524 			if (s_block_defs[cond_hdr->block_id]->associated_to_storm) {
3525 				is_storm = true;
3526 				storm_letter = s_storm_defs[s_block_defs[cond_hdr->block_id]->storm_id].letter;
3527 			}
3528 
3529 			/* Dump memory */
3530 			offset += ecore_grc_dump_mem(p_hwfn, p_ptt, dump_buf + offset, dump, OSAL_NULL, mem_addr, mem_len, mem_wide_bus,
3531 				0, false, s_mem_group_names[mem_group_id], is_storm, storm_letter);
3532 		}
3533 	}
3534 
3535 	return offset;
3536 }
3537 
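/* Note on the CCFC/TCFC adjustment in ecore_grc_dump_mem_entries(): the
 * binary arrays describe those memories sized for MAX_LCIDS/MAX_LTIDS, so
 * the length is rescaled to the configured counts. For example (numbers
 * hypothetical), a CCFC memory of mem_len = 2 * MAX_LCIDS dwords dumped
 * with DBG_GRC_PARAM_NUM_LCIDS == 64 is rescaled to 64 * 2 == 128 dwords.
 */
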
3538 /* Dumps GRC memories according to the input array dump_mem.
3539  * Returns the dumped size in dwords.
3540  */
3541 static u32 ecore_grc_dump_memories(struct ecore_hwfn *p_hwfn,
3542 								   struct ecore_ptt *p_ptt,
3543 								   u32 *dump_buf,
3544 								   bool dump)
3545 {
3546 	u32 offset = 0, input_offset = 0;
3547 
3548 	while (input_offset < s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].size_in_dwords) {
3549 		const struct dbg_dump_split_hdr *split_hdr;
3550 		struct dbg_array curr_input_mems_arr;
3551 		u32 split_data_size;
3552 		u8 split_type_id;
3553 
3554 		split_hdr = (const struct dbg_dump_split_hdr *)&s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr[input_offset++];
3555 		split_type_id = GET_FIELD(split_hdr->hdr, DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
3556 		split_data_size = GET_FIELD(split_hdr->hdr, DBG_DUMP_SPLIT_HDR_DATA_SIZE);
3557 		curr_input_mems_arr.ptr = &s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr[input_offset];
3558 		curr_input_mems_arr.size_in_dwords = split_data_size;
3559 
3560 		switch (split_type_id) {
3561 		case SPLIT_TYPE_NONE:
3562 			offset += ecore_grc_dump_mem_entries(p_hwfn, p_ptt, curr_input_mems_arr, dump_buf + offset, dump);
3563 			break;
3564 
3565 		default:
3566 			DP_NOTICE(p_hwfn, true, "Dumping split memories is currently not supported\n");
3567 			break;
3568 		}
3569 
3570 		input_offset += split_data_size;
3571 	}
3572 
3573 	return offset;
3574 }
3575 
3576 /* Dumps GRC context data for the specified Storm.
3577  * Returns the dumped size in dwords.
3578  * The lid_size argument is specified in quad-regs.
3579  */
3580 static u32 ecore_grc_dump_ctx_data(struct ecore_hwfn *p_hwfn,
3581 								   struct ecore_ptt *p_ptt,
3582 								   u32 *dump_buf,
3583 								   bool dump,
3584 								   const char *name,
3585 								   u32 num_lids,
3586 								   u32 lid_size,
3587 								   u32 rd_reg_addr,
3588 								   u8 storm_id)
3589 {
3590 	struct storm_defs *storm = &s_storm_defs[storm_id];
3591 	u32 i, lid, total_size, offset = 0;
3592 
3593 	if (!lid_size)
3594 		return 0;
3595 
3596 	lid_size *= BYTES_IN_DWORD;
3597 	total_size = num_lids * lid_size;
3598 
3599 	offset += ecore_grc_dump_mem_hdr(p_hwfn, dump_buf + offset, dump, name, 0, total_size, lid_size * 32, false, name, true, storm->letter);
3600 
3601 	if (!dump)
3602 		return offset + total_size;
3603 
3604 	/* Dump context data */
3605 	for (lid = 0; lid < num_lids; lid++) {
3606 		for (i = 0; i < lid_size; i++, offset++) {
3607 			ecore_wr(p_hwfn, p_ptt, storm->cm_ctx_wr_addr, (i << 9) | lid);
3608 			*(dump_buf + offset) = ecore_rd(p_hwfn, p_ptt, rd_reg_addr);
3609 		}
3610 	}
3611 
3612 	return offset;
3613 }
3614 
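/* Note on ecore_grc_dump_ctx_data(): lid_size arrives in quad-regs and is
 * converted to dwords (* BYTES_IN_DWORD), so the header's bit width is
 * lid_size * 32. Each read is a two-step indirect access: the write to
 * cm_ctx_wr_addr selects the context word, apparently packing the LID in
 * the low 9 bits and the dword index above it ((i << 9) | lid), and the
 * following read of rd_reg_addr returns that dword.
 */
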
3615 /* Dumps GRC contexts. Returns the dumped size in dwords. */
3616 static u32 ecore_grc_dump_ctx(struct ecore_hwfn *p_hwfn,
3617 							  struct ecore_ptt *p_ptt,
3618 							  u32 *dump_buf,
3619 							  bool dump)
3620 {
3621 	u32 offset = 0;
3622 	u8 storm_id;
3623 
3624 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
3625 		struct storm_defs *storm = &s_storm_defs[storm_id];
3626 
3627 		if (!ecore_grc_is_storm_included(p_hwfn, (enum dbg_storms)storm_id))
3628 			continue;
3629 
3630 		/* Dump Conn AG context size */
3631 		offset += ecore_grc_dump_ctx_data(p_hwfn, p_ptt, dump_buf + offset, dump, "CONN_AG_CTX", ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NUM_LCIDS),
3632 			storm->cm_conn_ag_ctx_lid_size, storm->cm_conn_ag_ctx_rd_addr, storm_id);
3633 
3634 		/* Dump Conn ST context size */
3635 		offset += ecore_grc_dump_ctx_data(p_hwfn, p_ptt, dump_buf + offset, dump, "CONN_ST_CTX", ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NUM_LCIDS),
3636 			storm->cm_conn_st_ctx_lid_size, storm->cm_conn_st_ctx_rd_addr, storm_id);
3637 
3638 		/* Dump Task AG context size */
3639 		offset += ecore_grc_dump_ctx_data(p_hwfn, p_ptt, dump_buf + offset, dump, "TASK_AG_CTX", ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NUM_LTIDS),
3640 			storm->cm_task_ag_ctx_lid_size, storm->cm_task_ag_ctx_rd_addr, storm_id);
3641 
3642 		/* Dump Task ST context size */
3643 		offset += ecore_grc_dump_ctx_data(p_hwfn, p_ptt, dump_buf + offset, dump, "TASK_ST_CTX", ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NUM_LTIDS),
3644 			storm->cm_task_st_ctx_lid_size, storm->cm_task_st_ctx_rd_addr, storm_id);
3645 	}
3646 
3647 	return offset;
3648 }
3649 
3650 /* Dumps GRC IORs data. Returns the dumped size in dwords. */
3651 static u32 ecore_grc_dump_iors(struct ecore_hwfn *p_hwfn,
3652 							   struct ecore_ptt *p_ptt,
3653 							   u32 *dump_buf,
3654 							   bool dump)
3655 {
3656 	char buf[10] = "IOR_SET_?";
3657 	u32 addr, offset = 0;
3658 	u8 storm_id, set_id;
3659 
3660 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
3661 		struct storm_defs *storm = &s_storm_defs[storm_id];
3662 
3663 		if (!ecore_grc_is_storm_included(p_hwfn, (enum dbg_storms)storm_id))
3664 			continue;
3665 
3666 		for (set_id = 0; set_id < NUM_IOR_SETS; set_id++) {
3667 			addr = BYTES_TO_DWORDS(storm->sem_fast_mem_addr + SEM_FAST_REG_STORM_REG_FILE) + IOR_SET_OFFSET(set_id);
3668 			buf[OSAL_STRLEN(buf) - 1] = '0' + set_id;
3669 			offset += ecore_grc_dump_mem(p_hwfn, p_ptt, dump_buf + offset, dump, buf, addr, IORS_PER_SET, false, 32, false, "ior", true, storm->letter);
3670 		}
3671 	}
3672 
3673 	return offset;
3674 }
3675 
3676 /* Dump VFC CAM. Returns the dumped size in dwords. */
3677 static u32 ecore_grc_dump_vfc_cam(struct ecore_hwfn *p_hwfn,
3678 								  struct ecore_ptt *p_ptt,
3679 								  u32 *dump_buf,
3680 								  bool dump,
3681 								  u8 storm_id)
3682 {
3683 	u32 total_size = VFC_CAM_NUM_ROWS * VFC_CAM_RESP_DWORDS;
3684 	struct storm_defs *storm = &s_storm_defs[storm_id];
3685 	u32 cam_addr[VFC_CAM_ADDR_DWORDS] = { 0 };
3686 	u32 cam_cmd[VFC_CAM_CMD_DWORDS] = { 0 };
3687 	u32 row, i, offset = 0;
3688 
3689 	offset += ecore_grc_dump_mem_hdr(p_hwfn, dump_buf + offset, dump, "vfc_cam", 0, total_size, 256, false, "vfc_cam", true, storm->letter);
3690 
3691 	if (!dump)
3692 		return offset + total_size;
3693 
3694 	/* Prepare CAM address */
3695 	SET_VAR_FIELD(cam_addr, VFC_CAM_ADDR, OP, VFC_OPCODE_CAM_RD);
3696 
3697 	for (row = 0; row < VFC_CAM_NUM_ROWS; row++, offset += VFC_CAM_RESP_DWORDS) {
3698 		/* Write VFC CAM command */
3699 		SET_VAR_FIELD(cam_cmd, VFC_CAM_CMD, ROW, row);
3700 		ARR_REG_WR(p_hwfn, p_ptt, storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_WR, cam_cmd, VFC_CAM_CMD_DWORDS);
3701 
3702 		/* Write VFC CAM address */
3703 		ARR_REG_WR(p_hwfn, p_ptt, storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_ADDR, cam_addr, VFC_CAM_ADDR_DWORDS);
3704 
3705 		/* Read VFC CAM read response */
3706 		ARR_REG_RD(p_hwfn, p_ptt, storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_RD, dump_buf + offset, VFC_CAM_RESP_DWORDS);
3707 	}
3708 
3709 	return offset;
3710 }
3711 
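/* Note on the VFC access sequence used by both the CAM and RAM dumps:
 * each row is read by (1) writing the command dwords to
 * SEM_FAST_REG_VFC_DATA_WR, (2) writing the address dwords (opcode + row)
 * to SEM_FAST_REG_VFC_ADDR, and (3) reading the fixed-size response from
 * SEM_FAST_REG_VFC_DATA_RD straight into the dump buffer.
 */
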
3712 /* Dump VFC RAM. Returns the dumped size in dwords. */
3713 static u32 ecore_grc_dump_vfc_ram(struct ecore_hwfn *p_hwfn,
3714 								  struct ecore_ptt *p_ptt,
3715 								  u32 *dump_buf,
3716 								  bool dump,
3717 								  u8 storm_id,
3718 								  struct vfc_ram_defs *ram_defs)
3719 {
3720 	u32 total_size = ram_defs->num_rows * VFC_RAM_RESP_DWORDS;
3721 	struct storm_defs *storm = &s_storm_defs[storm_id];
3722 	u32 ram_addr[VFC_RAM_ADDR_DWORDS] = { 0 };
3723 	u32 ram_cmd[VFC_RAM_CMD_DWORDS] = { 0 };
3724 	u32 row, i, offset = 0;
3725 
3726 	offset += ecore_grc_dump_mem_hdr(p_hwfn, dump_buf + offset, dump, ram_defs->mem_name, 0, total_size, 256, false, ram_defs->type_name, true, storm->letter);
3727 
3728 	/* Prepare RAM address */
3729 	SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, OP, VFC_OPCODE_RAM_RD);
3730 
3731 	if (!dump)
3732 		return offset + total_size;
3733 
3734 	for (row = ram_defs->base_row; row < ram_defs->base_row + ram_defs->num_rows; row++, offset += VFC_RAM_RESP_DWORDS) {
3735 		/* Write VFC RAM command */
3736 		ARR_REG_WR(p_hwfn, p_ptt, storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_WR, ram_cmd, VFC_RAM_CMD_DWORDS);
3737 
3738 		/* Write VFC RAM address */
3739 		SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, ROW, row);
3740 		ARR_REG_WR(p_hwfn, p_ptt, storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_ADDR, ram_addr, VFC_RAM_ADDR_DWORDS);
3741 
3742 		/* Read VFC RAM read response */
3743 		ARR_REG_RD(p_hwfn, p_ptt, storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_RD, dump_buf + offset, VFC_RAM_RESP_DWORDS);
3744 	}
3745 
3746 	return offset;
3747 }
3748 
3749 /* Dumps GRC VFC data. Returns the dumped size in dwords. */
3750 static u32 ecore_grc_dump_vfc(struct ecore_hwfn *p_hwfn,
3751 							  struct ecore_ptt *p_ptt,
3752 							  u32 *dump_buf,
3753 							  bool dump)
3754 {
3755 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3756 	u8 storm_id, i;
3757 	u32 offset = 0;
3758 
3759 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
3760 		if (!ecore_grc_is_storm_included(p_hwfn, (enum dbg_storms)storm_id) ||
3761 			!s_storm_defs[storm_id].has_vfc ||
3762 			(storm_id == DBG_PSTORM_ID && dev_data->platform_id != PLATFORM_ASIC))
3763 			continue;
3764 
3765 		/* Read CAM */
3766 		offset += ecore_grc_dump_vfc_cam(p_hwfn, p_ptt, dump_buf + offset, dump, storm_id);
3767 
3768 		/* Read RAM */
3769 		for (i = 0; i < NUM_VFC_RAM_TYPES; i++)
3770 			offset += ecore_grc_dump_vfc_ram(p_hwfn, p_ptt, dump_buf + offset, dump, storm_id, &s_vfc_ram_defs[i]);
3771 	}
3772 
3773 	return offset;
3774 }
3775 
3776 /* Dumps GRC RSS data. Returns the dumped size in dwords. */
3777 static u32 ecore_grc_dump_rss(struct ecore_hwfn *p_hwfn,
3778 							  struct ecore_ptt *p_ptt,
3779 							  u32 *dump_buf,
3780 							  bool dump)
3781 {
3782 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3783 	u32 offset = 0;
3784 	u8 rss_mem_id;
3785 
3786 	for (rss_mem_id = 0; rss_mem_id < NUM_RSS_MEM_TYPES; rss_mem_id++) {
3787 		u32 rss_addr, num_entries, total_dwords;
3788 		struct rss_mem_defs *rss_defs;
3789 		bool packed;
3790 
3791 		rss_defs = &s_rss_mem_defs[rss_mem_id];
3792 		rss_addr = rss_defs->addr;
3793 		num_entries = rss_defs->num_entries[dev_data->chip_id];
3794 		total_dwords = (num_entries * rss_defs->entry_width) / 32;
3795 		packed = (rss_defs->entry_width == 16);
3796 
3797 		offset += ecore_grc_dump_mem_hdr(p_hwfn, dump_buf + offset, dump, rss_defs->mem_name, 0, total_dwords,
3798 			rss_defs->entry_width, packed, rss_defs->type_name, false, 0);
3799 
3800 		/* Dump RSS data */
3801 		if (!dump) {
3802 			offset += total_dwords;
3803 			continue;
3804 		}
3805 
3806 		while (total_dwords) {
3807 			u32 num_dwords_to_read = OSAL_MIN_T(u32, RSS_REG_RSS_RAM_DATA_SIZE, total_dwords);
3808 			ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_ADDR, rss_addr);
3809 			offset += ecore_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, dump, BYTES_TO_DWORDS(RSS_REG_RSS_RAM_DATA), num_dwords_to_read, false);
3810 			total_dwords -= num_dwords_to_read;
3811 			rss_addr++;
3812 		}
3813 	}
3814 
3815 	return offset;
3816 }
3817 
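/* Sizing example for ecore_grc_dump_rss() (entry counts are per-chip; the
 * numbers here are hypothetical): 1024 entries of 16 bits give
 *
 *     total_dwords = (1024 * 16) / 32 == 512
 *
 * and 16-bit entries are additionally flagged as "packed". The RAM is read
 * indirectly: each RSS_REG_RSS_RAM_ADDR write exposes up to
 * RSS_REG_RSS_RAM_DATA_SIZE dwords at RSS_REG_RSS_RAM_DATA.
 */
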
3818 /* Dumps GRC Big RAM. Returns the dumped size in dwords. */
3819 static u32 ecore_grc_dump_big_ram(struct ecore_hwfn *p_hwfn,
3820 								  struct ecore_ptt *p_ptt,
3821 								  u32 *dump_buf,
3822 								  bool dump,
3823 								  u8 big_ram_id)
3824 {
3825 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3826 	u32 block_size, ram_size, offset = 0, reg_val, i;
3827 	char mem_name[12] = "???_BIG_RAM";
3828 	char type_name[8] = "???_RAM";
3829 	struct big_ram_defs *big_ram;
3830 
3831 	big_ram = &s_big_ram_defs[big_ram_id];
3832 	ram_size = big_ram->ram_size[dev_data->chip_id];
3833 
3834 	reg_val = ecore_rd(p_hwfn, p_ptt, big_ram->is_256b_reg_addr);
3835 	block_size = reg_val & (1 << big_ram->is_256b_bit_offset[dev_data->chip_id]) ? 256 : 128;
3836 
3837 	OSAL_STRNCPY(type_name, big_ram->instance_name, OSAL_STRLEN(big_ram->instance_name));
3838 	OSAL_STRNCPY(mem_name, big_ram->instance_name, OSAL_STRLEN(big_ram->instance_name));
3839 
3840 	/* Dump memory header */
3841 	offset += ecore_grc_dump_mem_hdr(p_hwfn, dump_buf + offset, dump, mem_name, 0, ram_size, block_size * 8, false, type_name, false, 0);
3842 
3843 	/* Read and dump Big RAM data */
3844 	if (!dump)
3845 		return offset + ram_size;
3846 
3847 	/* Dump Big RAM */
3848 	for (i = 0; i < DIV_ROUND_UP(ram_size, BRB_REG_BIG_RAM_DATA_SIZE); i++) {
3849 		ecore_wr(p_hwfn, p_ptt, big_ram->addr_reg_addr, i);
3850 		offset += ecore_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, dump, BYTES_TO_DWORDS(big_ram->data_reg_addr), BRB_REG_BIG_RAM_DATA_SIZE, false);
3851 	}
3852 
3853 	return offset;
3854 }
3855 
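/* Note on ecore_grc_dump_big_ram(): the is_256b register bit selects the
 * block width (256 vs. 128 bytes), reported in the header as
 * block_size * 8 bits. The RAM is read indirectly, one
 * BRB_REG_BIG_RAM_DATA_SIZE chunk per address-register write, and the
 * "???" prefixes of the name templates are overwritten with the RAM's
 * instance name (e.g. an instance name of "BRB" yields "BRB_BIG_RAM").
 */
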
3856 static u32 ecore_grc_dump_mcp(struct ecore_hwfn *p_hwfn,
3857 							  struct ecore_ptt *p_ptt,
3858 							  u32 *dump_buf,
3859 							  bool dump)
3860 {
3861 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3862 	bool block_enable[MAX_BLOCK_ID] = { 0 };
3863 	bool halted = false;
3864 	u32 offset = 0;
3865 
3866 	/* Halt MCP */
3867 	if (dump && dev_data->platform_id == PLATFORM_ASIC && !ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) {
3868 		halted = !ecore_mcp_halt(p_hwfn, p_ptt);
3869 		if (!halted)
3870 			DP_NOTICE(p_hwfn, false, "MCP halt failed!\n");
3871 	}
3872 
3873 	/* Dump MCP scratchpad */
3874 	offset += ecore_grc_dump_mem(p_hwfn, p_ptt, dump_buf + offset, dump, OSAL_NULL, BYTES_TO_DWORDS(MCP_REG_SCRATCH),
3875 		ECORE_IS_E5(p_hwfn->p_dev) ? MCP_REG_SCRATCH_SIZE_E5 : MCP_REG_SCRATCH_SIZE_BB_K2, false, 0, false, "MCP", false, 0);
3876 
3877 	/* Dump MCP cpu_reg_file */
3878 	offset += ecore_grc_dump_mem(p_hwfn, p_ptt, dump_buf + offset, dump, OSAL_NULL, BYTES_TO_DWORDS(MCP_REG_CPU_REG_FILE),
3879 		MCP_REG_CPU_REG_FILE_SIZE, false, 0, false, "MCP", false, 0);
3880 
3881 	/* Dump MCP registers */
3882 	block_enable[BLOCK_MCP] = true;
3883 	offset += ecore_grc_dump_registers(p_hwfn, p_ptt, dump_buf + offset, dump, block_enable, "block", "MCP");
3884 
3885 	/* Dump required non-MCP registers */
3886 	offset += ecore_grc_dump_regs_hdr(dump_buf + offset, dump, 1, "eng", -1, "block", "MCP");
3887 	offset += ecore_grc_dump_reg_entry(p_hwfn, p_ptt, dump_buf + offset, dump, BYTES_TO_DWORDS(MISC_REG_SHARED_MEM_ADDR), 1, false);
3888 
3889 	/* Release MCP */
3890 	if (halted && ecore_mcp_resume(p_hwfn, p_ptt))
3891 		DP_NOTICE(p_hwfn, false, "Failed to resume MCP after halt!\n");
3892 
3893 	return offset;
3894 }
3895 
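/* Note on ecore_grc_dump_mcp(): on ASIC (and unless DBG_GRC_PARAM_NO_MCP
 * is set) the MCP is halted around the dump so the scratchpad and
 * cpu_reg_file contents are stable; a failed halt only logs a notice and
 * the dump proceeds anyway. The MCP is resumed only if it was actually
 * halted here.
 */
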
3896 /* Dumps the tbus indirect memory for all PHYs. */
3897 static u32 ecore_grc_dump_phy(struct ecore_hwfn *p_hwfn,
3898 							  struct ecore_ptt *p_ptt,
3899 							  u32 *dump_buf,
3900 							  bool dump)
3901 {
3902 	u32 offset = 0, tbus_lo_offset, tbus_hi_offset;
3903 	char mem_name[32];
3904 	u8 phy_id;
3905 
3906 	for (phy_id = 0; phy_id < OSAL_ARRAY_SIZE(s_phy_defs); phy_id++) {
3907 		u32 addr_lo_addr, addr_hi_addr, data_lo_addr, data_hi_addr;
3908 		struct phy_defs *phy_defs;
3909 		u8 *bytes_buf;
3910 
3911 		phy_defs = &s_phy_defs[phy_id];
3912 		addr_lo_addr = phy_defs->base_addr + phy_defs->tbus_addr_lo_addr;
3913 		addr_hi_addr = phy_defs->base_addr + phy_defs->tbus_addr_hi_addr;
3914 		data_lo_addr = phy_defs->base_addr + phy_defs->tbus_data_lo_addr;
3915 		data_hi_addr = phy_defs->base_addr + phy_defs->tbus_data_hi_addr;
3916 
3917 		if (OSAL_SNPRINTF(mem_name, sizeof(mem_name), "tbus_%s", phy_defs->phy_name) < 0)
3918 			DP_NOTICE(p_hwfn, true, "Unexpected debug error: invalid PHY memory name\n");
3919 
3920 		offset += ecore_grc_dump_mem_hdr(p_hwfn, dump_buf + offset, dump, mem_name, 0, PHY_DUMP_SIZE_DWORDS, 16, true, mem_name, false, 0);
3921 
3922 		if (!dump) {
3923 			offset += PHY_DUMP_SIZE_DWORDS;
3924 			continue;
3925 		}
3926 
3927 		bytes_buf = (u8 *)(dump_buf + offset);
3928 		for (tbus_hi_offset = 0; tbus_hi_offset < (NUM_PHY_TBUS_ADDRESSES >> 8); tbus_hi_offset++) {
3929 			ecore_wr(p_hwfn, p_ptt, addr_hi_addr, tbus_hi_offset);
3930 			for (tbus_lo_offset = 0; tbus_lo_offset < 256; tbus_lo_offset++) {
3931 				ecore_wr(p_hwfn, p_ptt, addr_lo_addr, tbus_lo_offset);
3932 				*(bytes_buf++) = (u8)ecore_rd(p_hwfn, p_ptt, data_lo_addr);
3933 				*(bytes_buf++) = (u8)ecore_rd(p_hwfn, p_ptt, data_hi_addr);
3934 			}
3935 		}
3936 
3937 		offset += PHY_DUMP_SIZE_DWORDS;
3938 	}
3939 
3940 	return offset;
3941 }
3942 
3943 static void ecore_config_dbg_line(struct ecore_hwfn *p_hwfn,
3944 								  struct ecore_ptt *p_ptt,
3945 								  enum block_id block_id,
3946 								  u8 line_id,
3947 								  u8 enable_mask,
3948 								  u8 right_shift,
3949 								  u8 force_valid_mask,
3950 								  u8 force_frame_mask)
3951 {
3952 	struct block_defs *block = s_block_defs[block_id];
3953 
3954 	ecore_wr(p_hwfn, p_ptt, block->dbg_select_addr, line_id);
3955 	ecore_wr(p_hwfn, p_ptt, block->dbg_enable_addr, enable_mask);
3956 	ecore_wr(p_hwfn, p_ptt, block->dbg_shift_addr, right_shift);
3957 	ecore_wr(p_hwfn, p_ptt, block->dbg_force_valid_addr, force_valid_mask);
3958 	ecore_wr(p_hwfn, p_ptt, block->dbg_force_frame_addr, force_frame_mask);
3959 }
3960 
3961 /* Dumps Static Debug data. Returns the dumped size in dwords. */
3962 static u32 ecore_grc_dump_static_debug(struct ecore_hwfn *p_hwfn,
3963 									   struct ecore_ptt *p_ptt,
3964 									   u32 *dump_buf,
3965 									   bool dump)
3966 {
3967 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3968 	u32 block_id, line_id, offset = 0;
3969 
3970 	/* don't dump static debug if a debug bus recording is in progress */
3971 	if (dump && ecore_rd(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON))
3972 		return 0;
3973 
3974 	if (dump) {
3975 		/* Disable all blocks debug output */
3976 		for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
3977 			struct block_defs *block = s_block_defs[block_id];
3978 
3979 			if (block->dbg_client_id[dev_data->chip_id] != MAX_DBG_BUS_CLIENTS)
3980 				ecore_wr(p_hwfn, p_ptt, block->dbg_enable_addr, 0);
3981 		}
3982 
3983 		ecore_bus_reset_dbg_block(p_hwfn, p_ptt);
3984 		ecore_bus_set_framing_mode(p_hwfn, p_ptt, DBG_BUS_FRAME_MODE_8HW_0ST);
3985 		ecore_wr(p_hwfn, p_ptt, DBG_REG_DEBUG_TARGET, DBG_BUS_TARGET_ID_INT_BUF);
3986 		ecore_wr(p_hwfn, p_ptt, DBG_REG_FULL_MODE, 1);
3987 		ecore_bus_enable_dbg_block(p_hwfn, p_ptt, true);
3988 	}
3989 
3990 	/* Dump all static debug lines for each relevant block */
3991 	for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
3992 		struct block_defs *block = s_block_defs[block_id];
3993 		const struct dbg_bus_block *block_desc;
3994 		u32 block_dwords;
3995 
3996 		if (block->dbg_client_id[dev_data->chip_id] == MAX_DBG_BUS_CLIENTS)
3997 			continue;
3998 
3999 		block_desc = get_dbg_bus_block_desc(p_hwfn, (enum block_id)block_id);
4000 		block_dwords = NUM_DBG_LINES(block_desc) * STATIC_DEBUG_LINE_DWORDS;
4001 
4002 		/* Dump static section params */
4003 		offset += ecore_grc_dump_mem_hdr(p_hwfn, dump_buf + offset, dump, block->name, 0, block_dwords, 32, false, "STATIC", false, 0);
4004 
4005 		if (!dump) {
4006 			offset += block_dwords;
4007 			continue;
4008 		}
4009 
4010 		/* If all lines are invalid - dump zeros */
4011 		if (dev_data->block_in_reset[block_id]) {
4012 			OSAL_MEMSET(dump_buf + offset, 0, DWORDS_TO_BYTES(block_dwords));
4013 			offset += block_dwords;
4014 			continue;
4015 		}
4016 
4017 		/* Enable block's client */
4018 		ecore_bus_enable_clients(p_hwfn, p_ptt, 1 << block->dbg_client_id[dev_data->chip_id]);
4019 		for (line_id = 0; line_id < (u32)NUM_DBG_LINES(block_desc); line_id++) {
4020 			/* Configure debug line ID */
4021 			ecore_config_dbg_line(p_hwfn, p_ptt, (enum block_id)block_id, (u8)line_id, 0xf, 0, 0, 0);
4022 
4023 			/* Read debug line info */
4024 			offset += ecore_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, dump, BYTES_TO_DWORDS(DBG_REG_CALENDAR_OUT_DATA), STATIC_DEBUG_LINE_DWORDS, true);
4025 		}
4026 
4027 		/* Disable block's client and debug output */
4028 		ecore_bus_enable_clients(p_hwfn, p_ptt, 0);
4029 		ecore_wr(p_hwfn, p_ptt, block->dbg_enable_addr, 0);
4030 	}
4031 
4032 	if (dump) {
4033 		ecore_bus_enable_dbg_block(p_hwfn, p_ptt, false);
4034 		ecore_bus_enable_clients(p_hwfn, p_ptt, 0);
4035 	}
4036 
4037 	return offset;
4038 }
4039 
4040 /* Performs GRC Dump to the specified buffer.
4041  * Returns the dumped size in dwords.
4042  */
4043 static enum dbg_status ecore_grc_dump(struct ecore_hwfn *p_hwfn,
4044 									  struct ecore_ptt *p_ptt,
4045 									  u32 *dump_buf,
4046 									  bool dump,
4047 									  u32 *num_dumped_dwords)
4048 {
4049 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4050 	bool is_asic, parities_masked = false;
4051 	u8 i, port_mode = 0;
4052 	u32 offset = 0;
4053 
4054 	is_asic = dev_data->platform_id == PLATFORM_ASIC;
4055 
4056 	*num_dumped_dwords = 0;
4057 
4058 	if (dump) {
4059 		/* Find port mode */
4060 		switch (ecore_rd(p_hwfn, p_ptt, MISC_REG_PORT_MODE)) {
4061 		case 0: port_mode = 1; break;
4062 		case 1: port_mode = 2; break;
4063 		case 2: port_mode = 4; break;
4064 		}
4065 
4066 		/* Update reset state */
4067 		ecore_update_blocks_reset_state(p_hwfn, p_ptt);
4068 	}
4069 
4070 	/* Dump global params */
4071 	offset += ecore_dump_common_global_params(p_hwfn, p_ptt, dump_buf + offset, dump, 4);
4072 	offset += ecore_dump_str_param(dump_buf + offset, dump, "dump-type", "grc-dump");
4073 	offset += ecore_dump_num_param(dump_buf + offset, dump, "num-lcids", ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NUM_LCIDS));
4074 	offset += ecore_dump_num_param(dump_buf + offset, dump, "num-ltids", ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NUM_LTIDS));
4075 	offset += ecore_dump_num_param(dump_buf + offset, dump, "num-ports", port_mode);
4076 
4077 	/* Dump reset registers (dumped before taking blocks out of reset) */
4078 	if (ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS))
4079 		offset += ecore_grc_dump_reset_regs(p_hwfn, p_ptt, dump_buf + offset, dump);
4080 
4081 	/* Take all blocks out of reset (using reset registers) */
4082 	if (dump) {
4083 		ecore_grc_unreset_blocks(p_hwfn, p_ptt);
4084 		ecore_update_blocks_reset_state(p_hwfn, p_ptt);
4085 	}
4086 
4087 	/* Disable all parities using MFW command */
4088 	if (dump && is_asic && !ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) {
4089 		parities_masked = !ecore_mcp_mask_parities(p_hwfn, p_ptt, 1);
4090 		if (!parities_masked) {
4091 			DP_NOTICE(p_hwfn, false, "Failed to mask parities using MFW\n");
4092 			if (ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_PARITY_SAFE))
4093 				return DBG_STATUS_MCP_COULD_NOT_MASK_PRTY;
4094 		}
4095 	}
4096 
4097 	/* Dump modified registers (dumped before modifying them) */
4098 	if (ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS))
4099 		offset += ecore_grc_dump_modified_regs(p_hwfn, p_ptt, dump_buf + offset, dump);
4100 
4101 	/* Stall storms */
4102 	if (dump && (ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IOR) || ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_VFC)))
4103 		ecore_grc_stall_storms(p_hwfn, p_ptt, true);
4104 
4105 	/* Dump all regs  */
4106 	if (ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS)) {
4107 		bool block_enable[MAX_BLOCK_ID];
4108 
4109 		/* Dump all blocks except MCP */
4110 		for (i = 0; i < MAX_BLOCK_ID; i++)
4111 			block_enable[i] = true;
4112 		block_enable[BLOCK_MCP] = false;
4113 		offset += ecore_grc_dump_registers(p_hwfn, p_ptt, dump_buf + offset, dump, block_enable, OSAL_NULL, OSAL_NULL);
4114 
4115 		/* Dump special registers */
4116 		offset += ecore_grc_dump_special_regs(p_hwfn, p_ptt, dump_buf + offset, dump);
4117 	}
4118 
4119 	/* Dump memories */
4120 	offset += ecore_grc_dump_memories(p_hwfn, p_ptt, dump_buf + offset, dump);
4121 
4122 	/* Dump MCP */
4123 	if (ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MCP))
4124 		offset += ecore_grc_dump_mcp(p_hwfn, p_ptt, dump_buf + offset, dump);
4125 
4126 	/* Dump context */
4127 	if (ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM_CTX))
4128 		offset += ecore_grc_dump_ctx(p_hwfn, p_ptt, dump_buf + offset, dump);
4129 
4130 	/* Dump RSS memories */
4131 	if (ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_RSS))
4132 		offset += ecore_grc_dump_rss(p_hwfn, p_ptt, dump_buf + offset, dump);
4133 
4134 	/* Dump Big RAM */
4135 	for (i = 0; i < NUM_BIG_RAM_TYPES; i++)
4136 		if (ecore_grc_is_included(p_hwfn, s_big_ram_defs[i].grc_param))
4137 			offset += ecore_grc_dump_big_ram(p_hwfn, p_ptt, dump_buf + offset, dump, i);
4138 
4139 	/* Dump IORs */
4140 	if (ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IOR))
4141 		offset += ecore_grc_dump_iors(p_hwfn, p_ptt, dump_buf + offset, dump);
4142 
4143 	/* Dump VFC */
4144 	if (ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_VFC))
4145 		offset += ecore_grc_dump_vfc(p_hwfn, p_ptt, dump_buf + offset, dump);
4146 
4147 	/* Dump PHY tbus */
4148 	if (ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PHY) && dev_data->chip_id == CHIP_K2 && dev_data->platform_id == PLATFORM_ASIC)
4149 		offset += ecore_grc_dump_phy(p_hwfn, p_ptt, dump_buf + offset, dump);
4150 
4151 	/* Dump static debug data  */
4152 	if (ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_STATIC) && dev_data->bus.state == DBG_BUS_STATE_IDLE)
4153 		offset += ecore_grc_dump_static_debug(p_hwfn, p_ptt, dump_buf + offset, dump);
4154 
4155 	/* Dump last section */
4156 	offset += ecore_dump_last_section(dump_buf, offset, dump);
4157 
4158 	if (dump) {
4159 		/* Unstall storms */
4160 		if (ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_UNSTALL))
4161 			ecore_grc_stall_storms(p_hwfn, p_ptt, false);
4162 
4163 		/* Clear parity status */
4164 		if (is_asic)
4165 			ecore_grc_clear_all_prty(p_hwfn, p_ptt);
4166 
4167 		/* Enable all parities using MFW command */
4168 		if (parities_masked)
4169 			ecore_mcp_mask_parities(p_hwfn, p_ptt, 0);
4170 	}
4171 
4172 	*num_dumped_dwords = offset;
4173 
4174 	return DBG_STATUS_OK;
4175 }
4176 
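/* Typical two-phase use of ecore_grc_dump() (a sketch; the exported
 * wrappers that implement this live elsewhere in this file):
 *
 *     u32 needed, written;
 *
 *     ecore_grc_dump(p_hwfn, p_ptt, OSAL_NULL, false, &needed);  size pass
 *     ... allocate 'needed' dwords into buf, then ...
 *     ecore_grc_dump(p_hwfn, p_ptt, buf, true, &written);        real dump
 *
 * With dump == false nothing is written to the chip or the buffer; every
 * helper above simply returns the size it would have produced.
 */
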
4177 /* Writes the specified failing Idle Check rule to the specified buffer.
4178  * Returns the dumped size in dwords.
4179  */
4180 static u32 ecore_idle_chk_dump_failure(struct ecore_hwfn *p_hwfn,
4181 									   struct ecore_ptt *p_ptt,
4182 									   u32 *dump_buf,
4183 									   bool dump,
4184 									   u16 rule_id,
4185 									   const struct dbg_idle_chk_rule *rule,
4186 									   u16 fail_entry_id,
4187 									   u32 *cond_reg_values)
4188 {
4189 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4190 	const struct dbg_idle_chk_cond_reg *cond_regs;
4191 	const struct dbg_idle_chk_info_reg *info_regs;
4192 	u32 i, next_reg_offset = 0, offset = 0;
4193 	struct dbg_idle_chk_result_hdr *hdr;
4194 	const union dbg_idle_chk_reg *regs;
4195 	u8 reg_id;
4196 
4197 	hdr = (struct dbg_idle_chk_result_hdr *)dump_buf;
4198 	regs = &((const union dbg_idle_chk_reg *)s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr)[rule->reg_offset];
4199 	cond_regs = &regs[0].cond_reg;
4200 	info_regs = &regs[rule->num_cond_regs].info_reg;
4201 
4202 	/* Dump rule data */
4203 	if (dump) {
4204 		OSAL_MEMSET(hdr, 0, sizeof(*hdr));
4205 		hdr->rule_id = rule_id;
4206 		hdr->mem_entry_id = fail_entry_id;
4207 		hdr->severity = rule->severity;
4208 		hdr->num_dumped_cond_regs = rule->num_cond_regs;
4209 	}
4210 
4211 	offset += IDLE_CHK_RESULT_HDR_DWORDS;
4212 
4213 	/* Dump condition register values */
4214 	for (reg_id = 0; reg_id < rule->num_cond_regs; reg_id++) {
4215 		const struct dbg_idle_chk_cond_reg *reg = &cond_regs[reg_id];
4216 		struct dbg_idle_chk_result_reg_hdr *reg_hdr;
4217 
4218 		reg_hdr = (struct dbg_idle_chk_result_reg_hdr *)(dump_buf + offset);
4219 
4220 		/* Write register header */
4221 		if (!dump) {
4222 			offset += IDLE_CHK_RESULT_REG_HDR_DWORDS + reg->entry_size;
4223 			continue;
4224 		}
4225 
4226 		offset += IDLE_CHK_RESULT_REG_HDR_DWORDS;
4227 		OSAL_MEMSET(reg_hdr, 0, sizeof(*reg_hdr));
4228 		reg_hdr->start_entry = reg->start_entry;
4229 		reg_hdr->size = reg->entry_size;
4230 		SET_FIELD(reg_hdr->data, DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM, reg->num_entries > 1 || reg->start_entry > 0 ? 1 : 0);
4231 		SET_FIELD(reg_hdr->data, DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID, reg_id);
4232 
4233 		/* Write register values */
4234 		for (i = 0; i < reg_hdr->size; i++, next_reg_offset++, offset++)
4235 			dump_buf[offset] = cond_reg_values[next_reg_offset];
4236 	}
4237 
4238 	/* Dump info register values */
4239 	for (reg_id = 0; reg_id < rule->num_info_regs; reg_id++) {
4240 		const struct dbg_idle_chk_info_reg *reg = &info_regs[reg_id];
4241 		u32 block_id;
4242 
4243 		/* Check if register's block is in reset */
4244 		if (!dump) {
4245 			offset += IDLE_CHK_RESULT_REG_HDR_DWORDS + reg->size;
4246 			continue;
4247 		}
4248 
4249 		block_id = GET_FIELD(reg->data, DBG_IDLE_CHK_INFO_REG_BLOCK_ID);
4250 		if (block_id >= MAX_BLOCK_ID) {
4251 			DP_NOTICE(p_hwfn, true, "Invalid block_id\n");
4252 			return 0;
4253 		}
4254 
4255 		if (!dev_data->block_in_reset[block_id]) {
4256 			struct dbg_idle_chk_result_reg_hdr *reg_hdr;
4257 			bool wide_bus, eval_mode, mode_match = true;
4258 			u16 modes_buf_offset;
4259 			u32 addr;
4260 
4261 			reg_hdr = (struct dbg_idle_chk_result_reg_hdr *)(dump_buf + offset);
4262 
4263 			/* Check mode */
4264 			eval_mode = GET_FIELD(reg->mode.data, DBG_MODE_HDR_EVAL_MODE) > 0;
4265 			if (eval_mode) {
4266 				modes_buf_offset = GET_FIELD(reg->mode.data, DBG_MODE_HDR_MODES_BUF_OFFSET);
4267 				mode_match = ecore_is_mode_match(p_hwfn, &modes_buf_offset);
4268 			}
4269 
4270 			if (!mode_match)
4271 				continue;
4272 
4273 			addr = GET_FIELD(reg->data, DBG_IDLE_CHK_INFO_REG_ADDRESS);
4274 			wide_bus = GET_FIELD(reg->data, DBG_IDLE_CHK_INFO_REG_WIDE_BUS);
4275 
4276 			/* Write register header */
4277 			offset += IDLE_CHK_RESULT_REG_HDR_DWORDS;
4278 			hdr->num_dumped_info_regs++;
4279 			OSAL_MEMSET(reg_hdr, 0, sizeof(*reg_hdr));
4280 			reg_hdr->size = reg->size;
4281 			SET_FIELD(reg_hdr->data, DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID, rule->num_cond_regs + reg_id);
4282 
4283 			/* Write register values */
4284 			offset += ecore_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, dump, addr, reg->size, wide_bus);
4285 		}
4286 	}
4287 
4288 	return offset;
4289 }
4290 
4291 /* Dumps idle check rule entries. Returns the dumped size in dwords. */
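/* When dump is false, a worst-case size is returned instead: every rule
 * is assumed to fail on all of its register entries.
 */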
4292 static u32 ecore_idle_chk_dump_rule_entries(struct ecore_hwfn *p_hwfn,
4293 											struct ecore_ptt *p_ptt,
4294 											u32 *dump_buf,
4295 											bool dump,
4296 											const struct dbg_idle_chk_rule *input_rules,
4297 											u32 num_input_rules,
4298 											u32 *num_failing_rules)
4299 {
4300 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4301 	u32 cond_reg_values[IDLE_CHK_MAX_ENTRIES_SIZE];
4302 	u32 i, offset = 0;
4303 	u16 entry_id;
4304 	u8 reg_id;
4305 
4306 	*num_failing_rules = 0;
4307 
4308 	for (i = 0; i < num_input_rules; i++) {
4309 		const struct dbg_idle_chk_cond_reg *cond_regs;
4310 		const struct dbg_idle_chk_rule *rule;
4311 		const union dbg_idle_chk_reg *regs;
4312 		u16 num_reg_entries = 1;
4313 		bool check_rule = true;
4314 		const u32 *imm_values;
4315 
4316 		rule = &input_rules[i];
4317 		regs = &((const union dbg_idle_chk_reg *)s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr)[rule->reg_offset];
4318 		cond_regs = &regs[0].cond_reg;
4319 		imm_values = &s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr[rule->imm_offset];
4320 
4321 		/* Check if all condition register blocks are out of reset, and
4322 		 * find the maximal number of entries (all condition registers that
4323 		 * are memories must have the same size, which is > 1).
4324 		 */
4325 		for (reg_id = 0; reg_id < rule->num_cond_regs && check_rule; reg_id++) {
4326 			u32 block_id = GET_FIELD(cond_regs[reg_id].data, DBG_IDLE_CHK_COND_REG_BLOCK_ID);
4327 
4328 			if (block_id >= MAX_BLOCK_ID) {
4329 				DP_NOTICE(p_hwfn, true, "Invalid block_id\n");
4330 				return 0;
4331 			}
4332 
4333 			check_rule = !dev_data->block_in_reset[block_id];
4334 			if (cond_regs[reg_id].num_entries > num_reg_entries)
4335 				num_reg_entries = cond_regs[reg_id].num_entries;
4336 		}
4337 
4338 		if (!check_rule && dump)
4339 			continue;
4340 
4341 		if (!dump) {
4342 			u32 entry_dump_size = ecore_idle_chk_dump_failure(p_hwfn, p_ptt, dump_buf + offset, false, rule->rule_id, rule, 0, OSAL_NULL);
4343 
4344 			offset += num_reg_entries * entry_dump_size;
4345 			(*num_failing_rules) += num_reg_entries;
4346 			continue;
4347 		}
4348 
4349 		/* Go over all register entries (the number of entries is the same
4350 		 * for all condition registers).
4351 		 */
4352 		for (entry_id = 0; entry_id < num_reg_entries; entry_id++) {
4353 			u32 next_reg_offset = 0;
4354 
4355 			/* Read current entry of all condition registers */
4356 			for (reg_id = 0; reg_id < rule->num_cond_regs; reg_id++) {
4357 				const struct dbg_idle_chk_cond_reg *reg = &cond_regs[reg_id];
4358 				u32 padded_entry_size, addr;
4359 				bool wide_bus;
4360 
4361 				/* Find GRC address (if it's a memory, the address of the
4362 				 * specific entry is calculated).
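				 * Memory entries are padded to a power-of-two
				 * size, so the entry address is base +
				 * (start_entry + entry_id) * padded_entry_size.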
4363 				 */
4364 				addr = GET_FIELD(reg->data, DBG_IDLE_CHK_COND_REG_ADDRESS);
4365 				wide_bus = GET_FIELD(reg->data, DBG_IDLE_CHK_COND_REG_WIDE_BUS);
4366 				if (reg->num_entries > 1 || reg->start_entry > 0) {
4367 					padded_entry_size = reg->entry_size > 1 ? OSAL_ROUNDUP_POW_OF_TWO(reg->entry_size) : 1;
4368 					addr += (reg->start_entry + entry_id) * padded_entry_size;
4369 				}
4370 
4371 				/* Read registers */
4372 				if (next_reg_offset + reg->entry_size >= IDLE_CHK_MAX_ENTRIES_SIZE) {
4373 					DP_NOTICE(p_hwfn, true, "idle check registers entry is too large\n");
4374 					return 0;
4375 				}
4376 
4377 				next_reg_offset += ecore_grc_dump_addr_range(p_hwfn, p_ptt, cond_reg_values + next_reg_offset, dump, addr, reg->entry_size, wide_bus);
4378 			}
4379 
4380 			/* Call the rule's condition function. If it returns true, it's a failure. */
4381 			if ((*cond_arr[rule->cond_id])(cond_reg_values, imm_values)) {
4382 				offset += ecore_idle_chk_dump_failure(p_hwfn, p_ptt, dump_buf + offset, dump, rule->rule_id, rule, entry_id, cond_reg_values);
4383 				(*num_failing_rules)++;
4384 			}
4385 		}
4386 	}
4387 
4388 	return offset;
4389 }
4390 
4391 /* Performs Idle Check Dump to the specified buffer.
4392  * Returns the dumped size in dwords.
4393  */
4394 static u32 ecore_idle_chk_dump(struct ecore_hwfn *p_hwfn,
4395 							   struct ecore_ptt *p_ptt,
4396 							   u32 *dump_buf,
4397 							   bool dump)
4398 {
4399 	u32 num_failing_rules_offset, offset = 0, input_offset = 0, num_failing_rules = 0;
4400 
4401 	/* Dump global params */
4402 	offset += ecore_dump_common_global_params(p_hwfn, p_ptt, dump_buf + offset, dump, 1);
4403 	offset += ecore_dump_str_param(dump_buf + offset, dump, "dump-type", "idle-chk");
4404 
4405 	/* Dump idle check section header with a single parameter */
4406 	offset += ecore_dump_section_hdr(dump_buf + offset, dump, "idle_chk", 1);
4407 	num_failing_rules_offset = offset;
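	/* Write num_rules = 0 as a placeholder; it is overwritten below with
	 * the actual number of failing rules.
	 */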
4408 	offset += ecore_dump_num_param(dump_buf + offset, dump, "num_rules", 0);
4409 
4410 	while (input_offset < s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].size_in_dwords) {
4411 		const struct dbg_idle_chk_cond_hdr *cond_hdr = (const struct dbg_idle_chk_cond_hdr *)&s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr[input_offset++];
4412 		bool eval_mode, mode_match = true;
4413 		u32 curr_failing_rules;
4414 		u16 modes_buf_offset;
4415 
4416 		/* Check mode */
4417 		eval_mode = GET_FIELD(cond_hdr->mode.data, DBG_MODE_HDR_EVAL_MODE) > 0;
4418 		if (eval_mode) {
4419 			modes_buf_offset = GET_FIELD(cond_hdr->mode.data, DBG_MODE_HDR_MODES_BUF_OFFSET);
4420 			mode_match = ecore_is_mode_match(p_hwfn, &modes_buf_offset);
4421 		}
4422 
4423 		if (mode_match) {
4424 			offset += ecore_idle_chk_dump_rule_entries(p_hwfn, p_ptt, dump_buf + offset, dump, (const struct dbg_idle_chk_rule *)&s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr[input_offset], cond_hdr->data_size / IDLE_CHK_RULE_SIZE_DWORDS, &curr_failing_rules);
4425 			num_failing_rules += curr_failing_rules;
4426 		}
4427 
4428 		input_offset += cond_hdr->data_size;
4429 	}
4430 
4431 	/* Overwrite num_rules parameter */
4432 	if (dump)
4433 		ecore_dump_num_param(dump_buf + num_failing_rules_offset, dump, "num_rules", num_failing_rules);
4434 
4435 	/* Dump last section */
4436 	offset += ecore_dump_last_section(dump_buf, offset, dump);
4437 
4438 	return offset;
4439 }
4440 
4441 /* Finds the meta data image in NVRAM */
4442 static enum dbg_status ecore_find_nvram_image(struct ecore_hwfn *p_hwfn,
4443 											  struct ecore_ptt *p_ptt,
4444 											  u32 image_type,
4445 											  u32 *nvram_offset_bytes,
4446 											  u32 *nvram_size_bytes)
4447 {
4448 	u32 ret_mcp_resp, ret_mcp_param, ret_txn_size;
4449 	struct mcp_file_att file_att;
4450 	int nvm_result;
4451 
4452 	/* Call NVRAM get file command */
4453 	nvm_result = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_GET_FILE_ATT, image_type, &ret_mcp_resp, &ret_mcp_param, &ret_txn_size, (u32 *)&file_att);
4454 
4455 	/* Check response */
4456 	if (nvm_result || (ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
4457 		return DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
4458 
4459 	/* Update return values */
4460 	*nvram_offset_bytes = file_att.nvm_start_addr;
4461 	*nvram_size_bytes = file_att.len;
4462 
4463 	DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "find_nvram_image: found NVRAM image of type %d in NVRAM offset %d bytes with size %d bytes\n", image_type, *nvram_offset_bytes, *nvram_size_bytes);
4464 
4465 	/* Check alignment */
4466 	if (*nvram_size_bytes & 0x3)
4467 		return DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE;
4468 
4469 	return DBG_STATUS_OK;
4470 }
4471 
4472 /* Reads data from NVRAM */
4473 static enum dbg_status ecore_nvram_read(struct ecore_hwfn *p_hwfn,
4474 										struct ecore_ptt *p_ptt,
4475 										u32 nvram_offset_bytes,
4476 										u32 nvram_size_bytes,
4477 										u32 *ret_buf)
4478 {
4479 	u32 ret_mcp_resp, ret_mcp_param, ret_read_size, bytes_to_copy;
4480 	s32 bytes_left = nvram_size_bytes;
4481 	u32 read_offset = 0;
4482 
4483 	DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "nvram_read: reading image of size %d bytes from NVRAM\n", nvram_size_bytes);
4484 
4485 	do {
4486 		bytes_to_copy = (bytes_left > MCP_DRV_NVM_BUF_LEN) ? MCP_DRV_NVM_BUF_LEN : bytes_left;
4487 
4488 		/* Call NVRAM read command */
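		/* The command param packs the byte offset in its low bits with
		 * the chunk length shifted left by DRV_MB_PARAM_NVM_LEN_OFFSET.
		 */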
4489 		if (ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_READ_NVRAM, (nvram_offset_bytes + read_offset) | (bytes_to_copy << DRV_MB_PARAM_NVM_LEN_OFFSET), &ret_mcp_resp, &ret_mcp_param, &ret_read_size, (u32 *)((u8 *)ret_buf + read_offset)))
4490 			return DBG_STATUS_NVRAM_READ_FAILED;
4491 
4492 		/* Check response */
4493 		if ((ret_mcp_resp  & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
4494 			return DBG_STATUS_NVRAM_READ_FAILED;
4495 
4496 		/* Update read offset */
4497 		read_offset += ret_read_size;
4498 		bytes_left -= ret_read_size;
4499 	} while (bytes_left > 0);
4500 
4501 	return DBG_STATUS_OK;
4502 }
4503 
4504 /* Get info on the MCP Trace data in the scratchpad:
4505  * - trace_data_grc_addr (OUT): trace data GRC address in bytes
4506  * - trace_data_size (OUT): trace data size in bytes (without the header)
4507  */
4508 static enum dbg_status ecore_mcp_trace_get_data_info(struct ecore_hwfn *p_hwfn,
4509 													 struct ecore_ptt *p_ptt,
4510 													 u32 *trace_data_grc_addr,
4511 													 u32 *trace_data_size)
4512 {
4513 	u32 spad_trace_offsize, signature;
4514 
4515 	/* Read trace section offsize structure from MCP scratchpad */
4516 	spad_trace_offsize = ecore_rd(p_hwfn, p_ptt, MCP_SPAD_TRACE_OFFSIZE_ADDR);
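	/* The offsize dword packs both the section offset and its size; the
	 * SECTION_OFFSET/SECTION_SIZE macros extract the two fields.
	 */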
4517 
4518 	/* Extract trace section address from offsize (in scratchpad) */
4519 	*trace_data_grc_addr = MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize);
4520 
4521 	/* Read signature from MCP trace section */
4522 	signature = ecore_rd(p_hwfn, p_ptt, *trace_data_grc_addr + OFFSETOF(struct mcp_trace, signature));
4523 
4524 	if (signature != MFW_TRACE_SIGNATURE)
4525 		return DBG_STATUS_INVALID_TRACE_SIGNATURE;
4526 
4527 	/* Read trace size from MCP trace section */
4528 	*trace_data_size = ecore_rd(p_hwfn, p_ptt, *trace_data_grc_addr + OFFSETOF(struct mcp_trace, size));
4529 
4530 	return DBG_STATUS_OK;
4531 }
4532 
4533 /* Reads MCP trace meta data image from NVRAM
4534  * - running_bundle_id (OUT): running bundle ID (invalid when loaded from file)
4535  * - trace_meta_offset (OUT): trace meta offset in NVRAM in bytes (invalid when
4536  *			      loaded from file).
4537  * - trace_meta_size (OUT):   size in bytes of the trace meta data.
4538  */
4539 static enum dbg_status ecore_mcp_trace_get_meta_info(struct ecore_hwfn *p_hwfn,
4540 													 struct ecore_ptt *p_ptt,
4541 													 u32 trace_data_size_bytes,
4542 													 u32 *running_bundle_id,
4543 													 u32 *trace_meta_offset,
4544 													 u32 *trace_meta_size)
4545 {
4546 	u32 spad_trace_offsize, nvram_image_type, running_mfw_addr;
4547 
4548 	/* Read MCP trace section offsize structure from MCP scratchpad */
4549 	spad_trace_offsize = ecore_rd(p_hwfn, p_ptt, MCP_SPAD_TRACE_OFFSIZE_ADDR);
4550 
4551 	/* Find running bundle ID */
4552 	running_mfw_addr = MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize) + SECTION_SIZE(spad_trace_offsize) + trace_data_size_bytes;
4553 	*running_bundle_id = ecore_rd(p_hwfn, p_ptt, running_mfw_addr);
4554 	if (*running_bundle_id > 1)
4555 		return DBG_STATUS_INVALID_NVRAM_BUNDLE;
4556 
4557 	/* Find image in NVRAM */
4558 	nvram_image_type = (*running_bundle_id == DIR_ID_1) ? NVM_TYPE_MFW_TRACE1 : NVM_TYPE_MFW_TRACE2;
4559 	return ecore_find_nvram_image(p_hwfn, p_ptt, nvram_image_type, trace_meta_offset, trace_meta_size);
4560 }
4561 
4562 /* Reads the MCP Trace meta data from NVRAM into the specified buffer */
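/* The meta image layout, as parsed below: a signature dword, a module
 * count byte, length-prefixed module strings, and a second signature
 * dword. Both signatures must equal NVM_MAGIC_VALUE.
 */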
4563 static enum dbg_status ecore_mcp_trace_read_meta(struct ecore_hwfn *p_hwfn,
4564 												 struct ecore_ptt *p_ptt,
4565 												 u32 nvram_offset_in_bytes,
4566 												 u32 size_in_bytes,
4567 												 u32 *buf)
4568 {
4569 	u8 modules_num, module_len, i, *byte_buf = (u8 *)buf;
4570 	enum dbg_status status;
4571 	u32 signature;
4572 
4573 	/* Read meta data from NVRAM */
4574 	status = ecore_nvram_read(p_hwfn, p_ptt, nvram_offset_in_bytes, size_in_bytes, buf);
4575 	if (status != DBG_STATUS_OK)
4576 		return status;
4577 
4578 	/* Extract and check first signature */
4579 	signature = ecore_read_unaligned_dword(byte_buf);
4580 	byte_buf += sizeof(signature);
4581 	if (signature != NVM_MAGIC_VALUE)
4582 		return DBG_STATUS_INVALID_TRACE_SIGNATURE;
4583 
4584 	/* Extract number of modules */
4585 	modules_num = *(byte_buf++);
4586 
4587 	/* Skip all modules */
4588 	for (i = 0; i < modules_num; i++) {
4589 		module_len = *(byte_buf++);
4590 		byte_buf += module_len;
4591 	}
4592 
4593 	/* Extract and check second signature */
4594 	signature = ecore_read_unaligned_dword(byte_buf);
4595 	byte_buf += sizeof(signature);
4596 	if (signature != NVM_MAGIC_VALUE)
4597 		return DBG_STATUS_INVALID_TRACE_SIGNATURE;
4598 
4599 	return DBG_STATUS_OK;
4600 }
4601 
4602 /* Dump MCP Trace */
4603 static enum dbg_status ecore_mcp_trace_dump(struct ecore_hwfn *p_hwfn,
4604 											struct ecore_ptt *p_ptt,
4605 											u32 *dump_buf,
4606 											bool dump,
4607 											u32 *num_dumped_dwords)
4608 {
4609 	u32 trace_meta_offset_bytes = 0, trace_meta_size_bytes = 0, trace_meta_size_dwords = 0;
4610 	u32 trace_data_grc_addr, trace_data_size_bytes, trace_data_size_dwords;
4611 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4612 	u32 running_bundle_id, offset = 0;
4613 	enum dbg_status status;
4614 	bool mcp_access;
4615 	int halted = 0;
4616 
4617 	*num_dumped_dwords = 0;
4618 
4619 	mcp_access = dev_data->platform_id == PLATFORM_ASIC && !ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP);
4620 
4621 	/* Get trace data info */
4622 	status = ecore_mcp_trace_get_data_info(p_hwfn, p_ptt, &trace_data_grc_addr, &trace_data_size_bytes);
4623 	if (status != DBG_STATUS_OK)
4624 		return status;
4625 
4626 	/* Dump global params */
4627 	offset += ecore_dump_common_global_params(p_hwfn, p_ptt, dump_buf + offset, dump, 1);
4628 	offset += ecore_dump_str_param(dump_buf + offset, dump, "dump-type", "mcp-trace");
4629 
4630 	/* Halt MCP while reading from scratchpad so the read data will be
4631 	 * consistent. If the halt fails, the MCP trace is taken anyway, with
4632 	 * a small risk that it may be corrupt.
4633 	 */
4634 	if (dump && mcp_access) {
4635 		halted = !ecore_mcp_halt(p_hwfn, p_ptt);
4636 		if (!halted)
4637 			DP_NOTICE(p_hwfn, false, "MCP halt failed!\n");
4638 	}
4639 
4640 	/* Find trace data size */
4641 	trace_data_size_dwords = DIV_ROUND_UP(trace_data_size_bytes + sizeof(struct mcp_trace), BYTES_IN_DWORD);
4642 
4643 	/* Dump trace data section header and param */
4644 	offset += ecore_dump_section_hdr(dump_buf + offset, dump, "mcp_trace_data", 1);
4645 	offset += ecore_dump_num_param(dump_buf + offset, dump, "size", trace_data_size_dwords);
4646 
4647 	/* Read trace data from scratchpad into dump buffer */
4648 	offset += ecore_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, dump, BYTES_TO_DWORDS(trace_data_grc_addr), trace_data_size_dwords, false);
4649 
4650 	/* Resume MCP (only if halt succeeded) */
4651 	if (halted && ecore_mcp_resume(p_hwfn, p_ptt))
4652 		DP_NOTICE(p_hwfn, false, "Failed to resume MCP after halt!\n");
4653 
4654 	/* Dump trace meta section header */
4655 	offset += ecore_dump_section_hdr(dump_buf + offset, dump, "mcp_trace_meta", 1);
4656 
4657 	/* Read trace meta only if NVRAM access is enabled
4658 	 * (trace_meta_size_bytes is dword-aligned).
4659 	 */
4660 	if (OSAL_NVM_IS_ACCESS_ENABLED(p_hwfn) && mcp_access) {
4661 		status = ecore_mcp_trace_get_meta_info(p_hwfn, p_ptt, trace_data_size_bytes, &running_bundle_id, &trace_meta_offset_bytes, &trace_meta_size_bytes);
4662 		if (status == DBG_STATUS_OK)
4663 			trace_meta_size_dwords = BYTES_TO_DWORDS(trace_meta_size_bytes);
4664 	}
4665 
4666 	/* Dump trace meta size param */
4667 	offset += ecore_dump_num_param(dump_buf + offset, dump, "size", trace_meta_size_dwords);
4668 
4669 	/* Read trace meta image into dump buffer */
4670 	if (dump && trace_meta_size_dwords)
4671 		status = ecore_mcp_trace_read_meta(p_hwfn, p_ptt, trace_meta_offset_bytes, trace_meta_size_bytes, dump_buf + offset);
4672 	if (status == DBG_STATUS_OK)
4673 		offset += trace_meta_size_dwords;
4674 
4675 	/* Dump last section */
4676 	offset += ecore_dump_last_section(dump_buf, offset, dump);
4677 
4678 	*num_dumped_dwords = offset;
4679 
4680 	/* If no mcp access, indicate that the dump doesn't contain the meta
4681 	 * data from NVRAM.
4682 	 */
4683 	return mcp_access ? status : DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
4684 }
4685 
4686 /* Dump GRC FIFO */
4687 static enum dbg_status ecore_reg_fifo_dump(struct ecore_hwfn *p_hwfn,
4688 										   struct ecore_ptt *p_ptt,
4689 										   u32 *dump_buf,
4690 										   bool dump,
4691 										   u32 *num_dumped_dwords)
4692 {
4693 	u32 dwords_read, size_param_offset, offset = 0;
4694 	bool fifo_has_data;
4695 
4696 	*num_dumped_dwords = 0;
4697 
4698 	/* Dump global params */
4699 	offset += ecore_dump_common_global_params(p_hwfn, p_ptt, dump_buf + offset, dump, 1);
4700 	offset += ecore_dump_str_param(dump_buf + offset, dump, "dump-type", "reg-fifo");
4701 
4702 	/* Dump fifo data section header and param. The size param is 0 for
4703 	 * now, and is overwritten after reading the FIFO.
4704 	 */
4705 	offset += ecore_dump_section_hdr(dump_buf + offset, dump, "reg_fifo_data", 1);
4706 	size_param_offset = offset;
4707 	offset += ecore_dump_num_param(dump_buf + offset, dump, "size", 0);
4708 
4709 	if (dump) {
4710 		fifo_has_data = ecore_rd(p_hwfn, p_ptt, GRC_REG_TRACE_FIFO_VALID_DATA) > 0;
4711 
4712 		/* Pull available data from fifo. Use DMAE since this is
4713 		 * widebus memory and must be accessed atomically. Test for
4714 		 * dwords_read not passing buffer size since more entries could
4715 		 * be added to the buffer as we are emptying it.
4717 		 */
4718 		for (dwords_read = 0; fifo_has_data && dwords_read < REG_FIFO_DEPTH_DWORDS; dwords_read += REG_FIFO_ELEMENT_DWORDS) {
4719 			offset += ecore_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, true, BYTES_TO_DWORDS(GRC_REG_TRACE_FIFO), REG_FIFO_ELEMENT_DWORDS, true);
4720 			fifo_has_data = ecore_rd(p_hwfn, p_ptt, GRC_REG_TRACE_FIFO_VALID_DATA) > 0;
4721 		}
4722 
4723 		ecore_dump_num_param(dump_buf + size_param_offset, dump, "size", dwords_read);
4724 	}
4725 	else {
4726 		/* FIFO max size is REG_FIFO_DEPTH_DWORDS. There is no way to
4727 		 * test how much data is available, except for reading it.
4728 		 */
4729 		offset += REG_FIFO_DEPTH_DWORDS;
4730 	}
4731 
4732 	/* Dump last section */
4733 	offset += ecore_dump_last_section(dump_buf, offset, dump);
4734 
4735 	*num_dumped_dwords = offset;
4736 
4737 	return DBG_STATUS_OK;
4738 }
4739 
4740 /* Dump IGU FIFO */
4741 static enum dbg_status ecore_igu_fifo_dump(struct ecore_hwfn *p_hwfn,
4742 										   struct ecore_ptt *p_ptt,
4743 										   u32 *dump_buf,
4744 										   bool dump,
4745 										   u32 *num_dumped_dwords)
4746 {
4747 	u32 dwords_read, size_param_offset, offset = 0;
4748 	bool fifo_has_data;
4749 
4750 	*num_dumped_dwords = 0;
4751 
4752 	/* Dump global params */
4753 	offset += ecore_dump_common_global_params(p_hwfn, p_ptt, dump_buf + offset, dump, 1);
4754 	offset += ecore_dump_str_param(dump_buf + offset, dump, "dump-type", "igu-fifo");
4755 
4756 	/* Dump fifo data section header and param. The size param is 0 for
4757 	 * now, and is overwritten after reading the FIFO.
4758 	 */
4759 	offset += ecore_dump_section_hdr(dump_buf + offset, dump, "igu_fifo_data", 1);
4760 	size_param_offset = offset;
4761 	offset += ecore_dump_num_param(dump_buf + offset, dump, "size", 0);
4762 
4763 	if (dump) {
4764 		fifo_has_data = ecore_rd(p_hwfn, p_ptt, IGU_REG_ERROR_HANDLING_DATA_VALID) > 0;
4765 
4766 		/* Pull available data from fifo. Use DMAE since this is
4767 		 * widebus memory and must be accessed atomically. Test for
4768 		 * dwords_read not passing buffer size since more entries could
4769 		 * be added to the buffer as we are emptying it.
4770 		 */
4771 		for (dwords_read = 0; fifo_has_data && dwords_read < IGU_FIFO_DEPTH_DWORDS; dwords_read += IGU_FIFO_ELEMENT_DWORDS) {
4772 			offset += ecore_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, true, BYTES_TO_DWORDS(IGU_REG_ERROR_HANDLING_MEMORY), IGU_FIFO_ELEMENT_DWORDS, true);
4773 			fifo_has_data = ecore_rd(p_hwfn, p_ptt, IGU_REG_ERROR_HANDLING_DATA_VALID) > 0;
4774 		}
4775 
4776 		ecore_dump_num_param(dump_buf + size_param_offset, dump, "size", dwords_read);
4777 	}
4778 	else {
4779 		/* FIFO max size is IGU_FIFO_DEPTH_DWORDS. There is no way to
4780 		 * test how much data is available, except for reading it.
4781 		 */
4782 		offset += IGU_FIFO_DEPTH_DWORDS;
4783 	}
4784 
4785 	/* Dump last section */
4786 	offset += ecore_dump_last_section(dump_buf, offset, dump);
4787 
4788 	*num_dumped_dwords = offset;
4789 
4790 	return DBG_STATUS_OK;
4791 }
4792 
4793 /* Protection Override dump */
4794 static enum dbg_status ecore_protection_override_dump(struct ecore_hwfn *p_hwfn,
4795 													  struct ecore_ptt *p_ptt,
4796 													  u32 *dump_buf,
4797 													  bool dump,
4798 													  u32 *num_dumped_dwords)
4799 {
4800 	u32 size_param_offset, override_window_dwords, offset = 0;
4801 
4802 	*num_dumped_dwords = 0;
4803 
4804 	/* Dump global params */
4805 	offset += ecore_dump_common_global_params(p_hwfn, p_ptt, dump_buf + offset, dump, 1);
4806 	offset += ecore_dump_str_param(dump_buf + offset, dump, "dump-type", "protection-override");
4807 
4808 	/* Dump data section header and param. The size param is 0 for now,
4809 	 * and is overwritten after reading the data.
4810 	 */
4811 	offset += ecore_dump_section_hdr(dump_buf + offset, dump, "protection_override_data", 1);
4812 	size_param_offset = offset;
4813 	offset += ecore_dump_num_param(dump_buf + offset, dump, "size", 0);
4814 
4815 	if (dump) {
4816 		/* Add override window info to buffer */
4817 		override_window_dwords = ecore_rd(p_hwfn, p_ptt, GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) * PROTECTION_OVERRIDE_ELEMENT_DWORDS;
4818 		offset += ecore_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, true, BYTES_TO_DWORDS(GRC_REG_PROTECTION_OVERRIDE_WINDOW), override_window_dwords, true);
4819 		ecore_dump_num_param(dump_buf + size_param_offset, dump, "size", override_window_dwords);
4820 	}
4821 	else {
4822 		offset += PROTECTION_OVERRIDE_DEPTH_DWORDS;
4823 	}
4824 
4825 	/* Dump last section */
4826 	offset += ecore_dump_last_section(dump_buf, offset, dump);
4827 
4828 	*num_dumped_dwords = offset;
4829 
4830 	return DBG_STATUS_OK;
4831 }
4832 
4833 /* Performs FW Asserts Dump to the specified buffer.
4834  * Returns the dumped size in dwords.
4835  */
4836 static u32 ecore_fw_asserts_dump(struct ecore_hwfn *p_hwfn,
4837 								 struct ecore_ptt *p_ptt,
4838 								 u32 *dump_buf,
4839 								 bool dump)
4840 {
4841 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4842 	struct fw_asserts_ram_section *asserts;
4843 	char storm_letter_str[2] = "?";
4844 	struct fw_info fw_info;
4845 	u32 offset = 0;
4846 	u8 storm_id;
4847 
4848 	/* Dump global params */
4849 	offset += ecore_dump_common_global_params(p_hwfn, p_ptt, dump_buf + offset, dump, 1);
4850 	offset += ecore_dump_str_param(dump_buf + offset, dump, "dump-type", "fw-asserts");
4851 
4852 	/* Find Storm dump size */
4853 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
4854 		u32 fw_asserts_section_addr, next_list_idx_addr, next_list_idx, last_list_idx, addr;
4855 		struct storm_defs *storm = &s_storm_defs[storm_id];
4856 
4857 		if (dev_data->block_in_reset[storm->block_id])
4858 			continue;
4859 
4860 		/* Read FW info for the current Storm */
4861 		ecore_read_fw_info(p_hwfn, p_ptt, storm_id, &fw_info);
4862 
4863 		asserts = &fw_info.fw_asserts_section;
4864 
4865 		/* Dump FW Asserts section header and params */
4866 		storm_letter_str[0] = storm->letter;
4867 		offset += ecore_dump_section_hdr(dump_buf + offset, dump, "fw_asserts", 2);
4868 		offset += ecore_dump_str_param(dump_buf + offset, dump, "storm", storm_letter_str);
4869 		offset += ecore_dump_num_param(dump_buf + offset, dump, "size", asserts->list_element_dword_size);
4870 
4871 		/* Read and dump FW Asserts data */
4872 		if (!dump) {
4873 			offset += asserts->list_element_dword_size;
4874 			continue;
4875 		}
4876 
4877 		fw_asserts_section_addr = storm->sem_fast_mem_addr + SEM_FAST_REG_INT_RAM +
4878 			RAM_LINES_TO_BYTES(asserts->section_ram_line_offset);
4879 		next_list_idx_addr = fw_asserts_section_addr + DWORDS_TO_BYTES(asserts->list_next_index_dword_offset);
4880 		next_list_idx = ecore_rd(p_hwfn, p_ptt, next_list_idx_addr);
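		/* The asserts list is cyclic: dump the most recently written
		 * element, i.e. the one before the next-write index (wrapping
		 * to the last element when the index is 0).
		 */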
4881 		last_list_idx = (next_list_idx > 0 ? next_list_idx : asserts->list_num_elements) - 1;
4882 		addr = BYTES_TO_DWORDS(fw_asserts_section_addr) + asserts->list_dword_offset +
4883 					last_list_idx * asserts->list_element_dword_size;
4884 		offset += ecore_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, dump, addr, asserts->list_element_dword_size, false);
4885 	}
4886 
4887 	/* Dump last section */
4888 	offset += ecore_dump_last_section(dump_buf, offset, dump);
4889 
4890 	return offset;
4891 }
4892 
4893 /***************************** Public Functions *******************************/
4894 
4895 enum dbg_status ecore_dbg_set_bin_ptr(const u8 * const bin_ptr)
4896 {
4897 	const struct bin_buffer_hdr *buf_array = (const struct bin_buffer_hdr *)bin_ptr;
4898 	u8 buf_id;
4899 
4900 	/* Convert binary data to debug arrays */
4901 	for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++) {
4902 		s_dbg_arrays[buf_id].ptr = (const u32 *)(bin_ptr + buf_array[buf_id].offset);
4903 		s_dbg_arrays[buf_id].size_in_dwords = BYTES_TO_DWORDS(buf_array[buf_id].length);
4904 	}
4905 
4906 	return DBG_STATUS_OK;
4907 }
4908 
4909 enum dbg_status ecore_dbg_set_app_ver(u32 ver)
4910 {
4911 	if (ver < TOOLS_VERSION)
4912 		return DBG_STATUS_UNSUPPORTED_APP_VERSION;
4913 
4914 	s_app_ver = ver;
4915 
4916 	return DBG_STATUS_OK;
4917 }
4918 
4919 u32 ecore_dbg_get_fw_func_ver(void)
4920 {
4921 	return TOOLS_VERSION;
4922 }
4923 
4924 enum chip_ids ecore_dbg_get_chip_id(struct ecore_hwfn *p_hwfn)
4925 {
4926 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4927 
4928 	return (enum chip_ids)dev_data->chip_id;
4929 }
4930 
4931 enum dbg_status ecore_dbg_bus_reset(struct ecore_hwfn *p_hwfn,
4932 									struct ecore_ptt *p_ptt,
4933 									bool one_shot_en,
4934 									u8 force_hw_dwords,
4935 									bool unify_inputs,
4936 									bool grc_input_en)
4937 {
4938 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4939 	enum dbg_status status;
4940 
4941 	status = ecore_dbg_dev_init(p_hwfn, p_ptt);
4942 	if (status != DBG_STATUS_OK)
4943 		return status;
4944 
4945 	DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_reset: one_shot_en = %d, force_hw_dwords = %d, unify_inputs = %d, grc_input_en = %d\n", one_shot_en, force_hw_dwords, unify_inputs, grc_input_en);
4946 
4947 	if (force_hw_dwords &&
4948 		force_hw_dwords != 4 &&
4949 		force_hw_dwords != 8)
4950 		return DBG_STATUS_INVALID_ARGS;
4951 
4952 	if (ecore_rd(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON))
4953 		return DBG_STATUS_DBG_BUS_IN_USE;
4954 
4955 	/* Update reset state of all blocks */
4956 	ecore_update_blocks_reset_state(p_hwfn, p_ptt);
4957 
4958 	/* Disable all debug inputs */
4959 	status = ecore_bus_disable_inputs(p_hwfn, p_ptt, false);
4960 	if (status != DBG_STATUS_OK)
4961 		return status;
4962 
4963 	/* Reset DBG block */
4964 	ecore_bus_reset_dbg_block(p_hwfn, p_ptt);
4965 
4966 	/* Set one-shot / wrap-around */
4967 	ecore_wr(p_hwfn, p_ptt, DBG_REG_FULL_MODE, one_shot_en ? 0 : 1);
4968 
4969 	/* Init state params */
4970 	OSAL_MEMSET(&dev_data->bus, 0, sizeof(dev_data->bus));
4971 	dev_data->bus.target = DBG_BUS_TARGET_ID_INT_BUF;
4972 	dev_data->bus.state = DBG_BUS_STATE_READY;
4973 	dev_data->bus.one_shot_en = one_shot_en;
4974 	dev_data->bus.hw_dwords = force_hw_dwords;
4975 	dev_data->bus.grc_input_en = grc_input_en;
4976 	dev_data->bus.unify_inputs = unify_inputs;
4977 	dev_data->bus.num_enabled_blocks = grc_input_en ? 1 : 0;
4978 
4979 	/* Init special DBG block */
4980 	if (grc_input_en)
4981 		SET_FIELD(dev_data->bus.blocks[BLOCK_DBG].data, DBG_BUS_BLOCK_DATA_ENABLE_MASK, 0x1);
4982 
4983 	return DBG_STATUS_OK;
4984 }
4985 
4986 enum dbg_status ecore_dbg_bus_set_pci_output(struct ecore_hwfn *p_hwfn,
4987 											 struct ecore_ptt *p_ptt,
4988 											 u16 buf_size_kb)
4989 {
4990 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4991 	dma_addr_t pci_buf_phys_addr;
4992 	void *pci_buf;
4993 
4994 	DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_set_pci_output: buf_size_kb = %d\n", buf_size_kb);
4995 
4996 	if (dev_data->bus.target != DBG_BUS_TARGET_ID_INT_BUF)
4997 		return DBG_STATUS_OUTPUT_ALREADY_SET;
4998 	if (dev_data->bus.state != DBG_BUS_STATE_READY || dev_data->bus.pci_buf.size > 0)
4999 		return DBG_STATUS_DBG_BLOCK_NOT_RESET;
5000 
5001 	dev_data->bus.target = DBG_BUS_TARGET_ID_PCI;
5002 	dev_data->bus.pci_buf.size = buf_size_kb * 1024;
5003 	if (dev_data->bus.pci_buf.size % PCI_PKT_SIZE_IN_BYTES)
5004 		return DBG_STATUS_INVALID_ARGS;
5005 
5006 	pci_buf = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, &pci_buf_phys_addr, dev_data->bus.pci_buf.size);
5007 	if (!pci_buf)
5008 		return DBG_STATUS_PCI_BUF_ALLOC_FAILED;
5009 
5010 	OSAL_MEMCPY(&dev_data->bus.pci_buf.phys_addr, &pci_buf_phys_addr, sizeof(pci_buf_phys_addr));
5011 
5012 	dev_data->bus.pci_buf.virt_addr.lo = (u32)((u64)(osal_uintptr_t)pci_buf);
5013 	dev_data->bus.pci_buf.virt_addr.hi = (u32)((u64)(osal_uintptr_t)pci_buf >> 32);
5014 
5015 	ecore_wr(p_hwfn, p_ptt, DBG_REG_PCI_EXT_BUFFER_STRT_ADDR_LSB, dev_data->bus.pci_buf.phys_addr.lo);
5016 	ecore_wr(p_hwfn, p_ptt, DBG_REG_PCI_EXT_BUFFER_STRT_ADDR_MSB, dev_data->bus.pci_buf.phys_addr.hi);
5017 	ecore_wr(p_hwfn, p_ptt, DBG_REG_TARGET_PACKET_SIZE, PCI_PKT_SIZE_IN_CHUNKS);
5018 	ecore_wr(p_hwfn, p_ptt, DBG_REG_PCI_EXT_BUFFER_SIZE, dev_data->bus.pci_buf.size / PCI_PKT_SIZE_IN_BYTES);
5019 	ecore_wr(p_hwfn, p_ptt, DBG_REG_PCI_FUNC_NUM, OPAQUE_FID(p_hwfn->rel_pf_id));
5020 	ecore_wr(p_hwfn, p_ptt, DBG_REG_PCI_LOGIC_ADDR, PCI_PHYS_ADDR_TYPE);
5021 	ecore_wr(p_hwfn, p_ptt, DBG_REG_PCI_REQ_CREDIT, PCI_REQ_CREDIT);
5022 	ecore_wr(p_hwfn, p_ptt, DBG_REG_DEBUG_TARGET, DBG_BUS_TARGET_ID_PCI);
5023 	ecore_wr(p_hwfn, p_ptt, DBG_REG_OUTPUT_ENABLE, TARGET_EN_MASK_PCI);
5024 
5025 	return DBG_STATUS_OK;
5026 }
5027 
5028 enum dbg_status ecore_dbg_bus_set_nw_output(struct ecore_hwfn *p_hwfn,
5029 											struct ecore_ptt *p_ptt,
5030 											u8 port_id,
5031 											u32 dest_addr_lo32,
5032 											u16 dest_addr_hi16,
5033 											u16 data_limit_size_kb,
5034 											bool send_to_other_engine,
5035 											bool rcv_from_other_engine)
5036 {
5037 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5038 
5039 	DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_set_nw_output: port_id = %d, dest_addr_lo32 = 0x%x, dest_addr_hi16 = 0x%x, data_limit_size_kb = %d, send_to_other_engine = %d, rcv_from_other_engine = %d\n", port_id, dest_addr_lo32, dest_addr_hi16, data_limit_size_kb, send_to_other_engine, rcv_from_other_engine);
5040 
5041 	if (dev_data->bus.target != DBG_BUS_TARGET_ID_INT_BUF)
5042 		return DBG_STATUS_OUTPUT_ALREADY_SET;
5043 	if (dev_data->bus.state != DBG_BUS_STATE_READY)
5044 		return DBG_STATUS_DBG_BLOCK_NOT_RESET;
5045 	if (port_id >= s_chip_defs[dev_data->chip_id].per_platform[dev_data->platform_id].num_ports || (send_to_other_engine && rcv_from_other_engine))
5046 		return DBG_STATUS_INVALID_ARGS;
5047 
5048 	dev_data->bus.target = DBG_BUS_TARGET_ID_NIG;
5049 	dev_data->bus.rcv_from_other_engine = rcv_from_other_engine;
5050 
5051 	ecore_wr(p_hwfn, p_ptt, DBG_REG_OUTPUT_ENABLE, TARGET_EN_MASK_NIG);
5052 	ecore_wr(p_hwfn, p_ptt, DBG_REG_DEBUG_TARGET, DBG_BUS_TARGET_ID_NIG);
5053 
5054 	if (send_to_other_engine)
5055 		ecore_wr(p_hwfn, p_ptt, DBG_REG_OTHER_ENGINE_MODE_BB_K2, DBG_BUS_OTHER_ENGINE_MODE_CROSS_ENGINE_TX);
5056 	else
5057 		ecore_wr(p_hwfn, p_ptt, NIG_REG_DEBUG_PORT, port_id);
5058 
5059 	if (rcv_from_other_engine) {
5060 		ecore_wr(p_hwfn, p_ptt, DBG_REG_OTHER_ENGINE_MODE_BB_K2, DBG_BUS_OTHER_ENGINE_MODE_CROSS_ENGINE_RX);
5061 	}
5062 	else {
5063 		/* Configure ethernet header of 14 bytes */
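		/* The 14 bytes are the 6-byte destination MAC
		 * (dest_addr_lo32/hi16), the 6-byte source MAC
		 * (SRC_MAC_ADDR_LO16/HI32) and the 2-byte EtherType, spread
		 * across header registers 7..4.
		 */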
5064 		ecore_wr(p_hwfn, p_ptt, DBG_REG_ETHERNET_HDR_WIDTH, 0);
5065 		ecore_wr(p_hwfn, p_ptt, DBG_REG_ETHERNET_HDR_7, dest_addr_lo32);
5066 		ecore_wr(p_hwfn, p_ptt, DBG_REG_ETHERNET_HDR_6, (u32)SRC_MAC_ADDR_LO16 | ((u32)dest_addr_hi16 << 16));
5067 		ecore_wr(p_hwfn, p_ptt, DBG_REG_ETHERNET_HDR_5, SRC_MAC_ADDR_HI32);
5068 		ecore_wr(p_hwfn, p_ptt, DBG_REG_ETHERNET_HDR_4, (u32)ETH_TYPE << 16);
5069 		ecore_wr(p_hwfn, p_ptt, DBG_REG_TARGET_PACKET_SIZE, NIG_PKT_SIZE_IN_CHUNKS);
5070 		if (data_limit_size_kb)
5071 			ecore_wr(p_hwfn, p_ptt, DBG_REG_NIG_DATA_LIMIT_SIZE, (data_limit_size_kb * 1024) / CHUNK_SIZE_IN_BYTES);
5072 	}
5073 
5074 	return DBG_STATUS_OK;
5075 }
5076 
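/* Checks whether the given enable mask, shifted by right_shift, overlaps
 * the shifted enable mask of any already-enabled block.
 */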
5077 static bool ecore_is_overlapping_enable_mask(struct ecore_hwfn *p_hwfn,
5078 									  u8 enable_mask,
5079 									  u8 right_shift)
5080 {
5081 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5082 	u8 curr_shifted_enable_mask, shifted_enable_mask;
5083 	u32 block_id;
5084 
5085 	shifted_enable_mask = SHR(enable_mask, VALUES_PER_CYCLE, right_shift);
5086 
5087 	if (dev_data->bus.num_enabled_blocks) {
5088 		for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
5089 			struct dbg_bus_block_data *block_bus = &dev_data->bus.blocks[block_id];
5090 
5091 			if (!GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK))
5092 				continue;
5093 
5094 			curr_shifted_enable_mask =
5095 				SHR(GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK),
5096 					VALUES_PER_CYCLE,
5097 					GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_RIGHT_SHIFT));
5098 			if (shifted_enable_mask & curr_shifted_enable_mask)
5099 				return true;
5100 		}
5101 	}
5102 
5103 	return false;
5104 }
5105 
5106 enum dbg_status ecore_dbg_bus_enable_block(struct ecore_hwfn *p_hwfn,
5107 										   enum block_id block_id,
5108 										   u8 line_num,
5109 										   u8 enable_mask,
5110 										   u8 right_shift,
5111 										   u8 force_valid_mask,
5112 										   u8 force_frame_mask)
5113 {
5114 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5115 	struct block_defs *block = s_block_defs[block_id];
5116 	struct dbg_bus_block_data *block_bus;
5117 	const struct dbg_bus_block *block_desc;
5118 
5119 	block_bus = &dev_data->bus.blocks[block_id];
5120 	block_desc = get_dbg_bus_block_desc(p_hwfn, block_id);
5121 
5122 	DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_enable_block: block = %d, line_num = %d, enable_mask = 0x%x, right_shift = %d, force_valid_mask = 0x%x, force_frame_mask = 0x%x\n", block_id, line_num, enable_mask, right_shift, force_valid_mask, force_frame_mask);
5123 
5124 	if (dev_data->bus.state != DBG_BUS_STATE_READY)
5125 		return DBG_STATUS_DBG_BLOCK_NOT_RESET;
5126 	if (block_id >= MAX_BLOCK_ID)
5127 		return DBG_STATUS_INVALID_ARGS;
5128 	if (GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK))
5129 		return DBG_STATUS_BLOCK_ALREADY_ENABLED;
5130 	if (block->dbg_client_id[dev_data->chip_id] == MAX_DBG_BUS_CLIENTS ||
5131 		line_num >= NUM_DBG_LINES(block_desc) ||
5132 		!enable_mask ||
5133 		enable_mask > MAX_CYCLE_VALUES_MASK ||
5134 		force_valid_mask > MAX_CYCLE_VALUES_MASK ||
5135 		force_frame_mask > MAX_CYCLE_VALUES_MASK ||
5136 		right_shift > VALUES_PER_CYCLE - 1)
5137 		return DBG_STATUS_INVALID_ARGS;
5138 	if (dev_data->block_in_reset[block_id])
5139 		return DBG_STATUS_BLOCK_IN_RESET;
5140 	if (!dev_data->bus.unify_inputs && ecore_is_overlapping_enable_mask(p_hwfn, enable_mask, right_shift))
5141 		return DBG_STATUS_INPUT_OVERLAP;
5142 
5143 	dev_data->bus.blocks[block_id].line_num = line_num;
5144 	SET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK, enable_mask);
5145 	SET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_RIGHT_SHIFT, right_shift);
5146 	SET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_FORCE_VALID_MASK, force_valid_mask);
5147 	SET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_FORCE_FRAME_MASK, force_frame_mask);
5148 
5149 	dev_data->bus.num_enabled_blocks++;
5150 
5151 	return DBG_STATUS_OK;
5152 }
5153 
5154 enum dbg_status ecore_dbg_bus_enable_storm(struct ecore_hwfn *p_hwfn,
5155 										   enum dbg_storms storm_id,
5156 										   enum dbg_bus_storm_modes storm_mode)
5157 {
5158 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5159 	struct dbg_bus_data *bus = &dev_data->bus;
5160 	struct dbg_bus_storm_data *storm_bus;
5161 	struct storm_defs *storm;
5162 
5163 	DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_enable_storm: storm = %d, storm_mode = %d\n", storm_id, storm_mode);
5164 
5165 	if (bus->state != DBG_BUS_STATE_READY)
5166 		return DBG_STATUS_DBG_BLOCK_NOT_RESET;
5167 	if (bus->hw_dwords >= 4)
5168 		return DBG_STATUS_HW_ONLY_RECORDING;
5169 	if (storm_id >= MAX_DBG_STORMS)
5170 		return DBG_STATUS_INVALID_ARGS;
5171 	if (storm_mode >= MAX_DBG_BUS_STORM_MODES)
5172 		return DBG_STATUS_INVALID_ARGS;
5173 	if (bus->unify_inputs)
5174 		return DBG_STATUS_INVALID_ARGS;
5175 	if (bus->storms[storm_id].enabled)
5176 		return DBG_STATUS_STORM_ALREADY_ENABLED;
5177 
5178 	storm = &s_storm_defs[storm_id];
5179 	storm_bus = &bus->storms[storm_id];
5180 
5181 	if (dev_data->block_in_reset[storm->block_id])
5182 		return DBG_STATUS_BLOCK_IN_RESET;
5183 
5184 	storm_bus->enabled = true;
5185 	storm_bus->mode = (u8)storm_mode;
5186 	storm_bus->hw_id = bus->num_enabled_storms;
5187 
5188 	bus->num_enabled_storms++;
5189 
5190 	return DBG_STATUS_OK;
5191 }
5192 
5193 enum dbg_status ecore_dbg_bus_enable_timestamp(struct ecore_hwfn *p_hwfn,
5194 											   struct ecore_ptt *p_ptt,
5195 											   u8 valid_mask,
5196 											   u8 frame_mask,
5197 											   u32 tick_len)
5198 {
5199 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5200 
5201 	DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_enable_timestamp: valid_mask = 0x%x, frame_mask = 0x%x, tick_len = %d\n", valid_mask, frame_mask, tick_len);
5202 
5203 	if (dev_data->bus.state != DBG_BUS_STATE_READY)
5204 		return DBG_STATUS_DBG_BLOCK_NOT_RESET;
5205 	if (valid_mask > 0x7 || frame_mask > 0x7)
5206 		return DBG_STATUS_INVALID_ARGS;
5207 	if (!dev_data->bus.unify_inputs && ecore_is_overlapping_enable_mask(p_hwfn, 0x1, 0))
5208 		return DBG_STATUS_INPUT_OVERLAP;
5209 
5210 	dev_data->bus.timestamp_input_en = true;
5211 	dev_data->bus.num_enabled_blocks++;
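	/* The timestamp input occupies dword 0 of the recorded cycle, like a
	 * block input; hence the 0x1 enable mask below and the overlap check
	 * above.
	 */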
5212 
5213 	SET_FIELD(dev_data->bus.blocks[BLOCK_DBG].data, DBG_BUS_BLOCK_DATA_ENABLE_MASK, 0x1);
5214 
5215 	ecore_wr(p_hwfn, p_ptt, DBG_REG_TIMESTAMP_VALID_EN, valid_mask);
5216 	ecore_wr(p_hwfn, p_ptt, DBG_REG_TIMESTAMP_FRAME_EN, frame_mask);
5217 	ecore_wr(p_hwfn, p_ptt, DBG_REG_TIMESTAMP_TICK, tick_len);
5218 
5219 	return DBG_STATUS_OK;
5220 }
5221 
5222 enum dbg_status ecore_dbg_bus_add_eid_range_sem_filter(struct ecore_hwfn *p_hwfn,
5223 													   enum dbg_storms storm_id,
5224 													   u8 min_eid,
5225 													   u8 max_eid)
5226 {
5227 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5228 	struct dbg_bus_storm_data *storm_bus;
5229 
5230 	storm_bus = &dev_data->bus.storms[storm_id];
5231 
5232 	DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_add_eid_range_sem_filter: storm = %d, min_eid = 0x%x, max_eid = 0x%x\n", storm_id, min_eid, max_eid);
5233 
5234 	if (storm_id >= MAX_DBG_STORMS)
5235 		return DBG_STATUS_INVALID_ARGS;
5236 	if (min_eid > max_eid)
5237 		return DBG_STATUS_INVALID_ARGS;
5238 	if (!storm_bus->enabled)
5239 		return DBG_STATUS_STORM_NOT_ENABLED;
5240 
5241 	storm_bus->eid_filter_en = 1;
5242 	storm_bus->eid_range_not_mask = 1;
5243 	storm_bus->eid_filter_params.range.min = min_eid;
5244 	storm_bus->eid_filter_params.range.max = max_eid;
5245 
5246 	return DBG_STATUS_OK;
5247 }
5248 
5249 enum dbg_status ecore_dbg_bus_add_eid_mask_sem_filter(struct ecore_hwfn *p_hwfn,
5250 													  enum dbg_storms storm_id,
5251 													  u8 eid_val,
5252 													  u8 eid_mask)
5253 {
5254 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5255 	struct dbg_bus_storm_data *storm_bus;
5256 
5257 	storm_bus = &dev_data->bus.storms[storm_id];
5258 
5259 	DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_add_eid_mask_sem_filter: storm = %d, eid_val = 0x%x, eid_mask = 0x%x\n", storm_id, eid_val, eid_mask);
5260 
5261 	if (storm_id >= MAX_DBG_STORMS)
5262 		return DBG_STATUS_INVALID_ARGS;
5263 	if (!storm_bus->enabled)
5264 		return DBG_STATUS_STORM_NOT_ENABLED;
5265 
5266 	storm_bus->eid_filter_en = 1;
5267 	storm_bus->eid_range_not_mask = 0;
5268 	storm_bus->eid_filter_params.mask.val = eid_val;
5269 	storm_bus->eid_filter_params.mask.mask = eid_mask;
5270 
5271 	return DBG_STATUS_OK;
5272 }
5273 
5274 enum dbg_status ecore_dbg_bus_add_cid_sem_filter(struct ecore_hwfn *p_hwfn,
5275 												 enum dbg_storms storm_id,
5276 												 u32 cid)
5277 {
5278 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5279 	struct dbg_bus_storm_data *storm_bus;
5280 
5281 	storm_bus = &dev_data->bus.storms[storm_id];
5282 
5283 	DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_add_cid_sem_filter: storm = %d, cid = 0x%x\n", storm_id, cid);
5284 
5285 	if (storm_id >= MAX_DBG_STORMS)
5286 		return DBG_STATUS_INVALID_ARGS;
5287 	if (!storm_bus->enabled)
5288 		return DBG_STATUS_STORM_NOT_ENABLED;
5289 
5290 	storm_bus->cid_filter_en = 1;
5291 	storm_bus->cid = cid;
5292 
5293 	return DBG_STATUS_OK;
5294 }
5295 
5296 enum dbg_status ecore_dbg_bus_enable_filter(struct ecore_hwfn *p_hwfn,
5297 											struct ecore_ptt *p_ptt,
5298 											enum block_id block_id,
5299 											u8 const_msg_len)
5300 {
5301 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5302 
5303 	DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_enable_filter: block = %d, const_msg_len = %d\n", block_id, const_msg_len);
5304 
5305 	if (dev_data->bus.state != DBG_BUS_STATE_READY)
5306 		return DBG_STATUS_DBG_BLOCK_NOT_RESET;
5307 	if (dev_data->bus.filter_en)
5308 		return DBG_STATUS_FILTER_ALREADY_ENABLED;
5309 	if (block_id >= MAX_BLOCK_ID)
5310 		return DBG_STATUS_INVALID_ARGS;
5311 	if (!GET_FIELD(dev_data->bus.blocks[block_id].data, DBG_BUS_BLOCK_DATA_ENABLE_MASK))
5312 		return DBG_STATUS_BLOCK_NOT_ENABLED;
5313 	if (!dev_data->bus.unify_inputs)
5314 		return DBG_STATUS_FILTER_BUG;
5315 
5316 	dev_data->bus.filter_en = true;
5317 	dev_data->bus.next_constraint_id = 0;
5318 	dev_data->bus.adding_filter = true;
5319 
5320 	/* HW ID is set to 0 since unify_inputs is required for filtering */
5321 	ecore_wr(p_hwfn, p_ptt, DBG_REG_FILTER_ID_NUM, 0);
5322 	ecore_wr(p_hwfn, p_ptt, DBG_REG_FILTER_MSG_LENGTH_ENABLE, const_msg_len > 0 ? 1 : 0);
5323 	if (const_msg_len > 0)
5324 		ecore_wr(p_hwfn, p_ptt, DBG_REG_FILTER_MSG_LENGTH, const_msg_len - 1);
5325 
5326 	return DBG_STATUS_OK;
5327 }
5328 
5329 enum dbg_status ecore_dbg_bus_enable_trigger(struct ecore_hwfn *p_hwfn,
5330 											 struct ecore_ptt *p_ptt,
5331 											 bool rec_pre_trigger,
5332 											 u8 pre_chunks,
5333 											 bool rec_post_trigger,
5334 											 u32 post_cycles,
5335 											 bool filter_pre_trigger,
5336 											 bool filter_post_trigger)
5337 {
5338 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5339 	enum dbg_bus_post_trigger_types post_trigger_type;
5340 	enum dbg_bus_pre_trigger_types pre_trigger_type;
5341 	struct dbg_bus_data *bus = &dev_data->bus;
5342 
5343 	DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_enable_trigger: rec_pre_trigger = %d, pre_chunks = %d, rec_post_trigger = %d, post_cycles = %d, filter_pre_trigger = %d, filter_post_trigger = %d\n", rec_pre_trigger, pre_chunks, rec_post_trigger, post_cycles, filter_pre_trigger, filter_post_trigger);
5344 
5345 	if (bus->state != DBG_BUS_STATE_READY)
5346 		return DBG_STATUS_DBG_BLOCK_NOT_RESET;
5347 	if (bus->trigger_en)
5348 		return DBG_STATUS_TRIGGER_ALREADY_ENABLED;
5349 	if (rec_pre_trigger && pre_chunks >= INT_BUF_SIZE_IN_CHUNKS)
5350 		return DBG_STATUS_INVALID_ARGS;
5351 
5352 	bus->trigger_en = true;
5353 	bus->filter_pre_trigger = filter_pre_trigger;
5354 	bus->filter_post_trigger = filter_post_trigger;
5355 
5356 	if (rec_pre_trigger) {
5357 		pre_trigger_type = pre_chunks ? DBG_BUS_PRE_TRIGGER_NUM_CHUNKS : DBG_BUS_PRE_TRIGGER_START_FROM_ZERO;
5358 		ecore_wr(p_hwfn, p_ptt, DBG_REG_RCRD_ON_WINDOW_PRE_NUM_CHUNKS, pre_chunks);
5359 	}
5360 	else {
5361 		pre_trigger_type = DBG_BUS_PRE_TRIGGER_DROP;
5362 	}
5363 
5364 	if (rec_post_trigger) {
5365 		post_trigger_type = DBG_BUS_POST_TRIGGER_RECORD;
5366 		ecore_wr(p_hwfn, p_ptt, DBG_REG_RCRD_ON_WINDOW_POST_NUM_CYCLES, post_cycles ? post_cycles : 0xffffffff);
5367 	}
5368 	else {
5369 		post_trigger_type = DBG_BUS_POST_TRIGGER_DROP;
5370 	}
5371 
5372 	ecore_wr(p_hwfn, p_ptt, DBG_REG_RCRD_ON_WINDOW_PRE_TRGR_EVNT_MODE, pre_trigger_type);
5373 	ecore_wr(p_hwfn, p_ptt, DBG_REG_RCRD_ON_WINDOW_POST_TRGR_EVNT_MODE, post_trigger_type);
5374 	ecore_wr(p_hwfn, p_ptt, DBG_REG_TRIGGER_ENABLE, 1);
5375 
5376 	return DBG_STATUS_OK;
5377 }
5378 
5379 enum dbg_status ecore_dbg_bus_add_trigger_state(struct ecore_hwfn *p_hwfn,
5380 												struct ecore_ptt *p_ptt,
5381 												enum block_id block_id,
5382 												u8 const_msg_len,
5383 												u16 count_to_next)
5384 {
5385 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5386 	struct dbg_bus_data *bus = &dev_data->bus;
5387 	struct dbg_bus_block_data *block_bus;
5388 	u8 reg_offset;
5389 
5390 	DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_add_trigger_state: block = %d, const_msg_len = %d, count_to_next = %d\n", block_id, const_msg_len, count_to_next);
5391 
5392 	block_bus = &bus->blocks[block_id];
5393 
5394 	if (!bus->trigger_en)
5395 		return DBG_STATUS_TRIGGER_NOT_ENABLED;
5396 	if (bus->next_trigger_state == MAX_TRIGGER_STATES)
5397 		return DBG_STATUS_TOO_MANY_TRIGGER_STATES;
5398 	if (block_id >= MAX_BLOCK_ID)
5399 		return DBG_STATUS_INVALID_ARGS;
5400 	if (!GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK))
5401 		return DBG_STATUS_BLOCK_NOT_ENABLED;
5402 	if (!count_to_next)
5403 		return DBG_STATUS_INVALID_ARGS;
5404 
5405 	bus->next_constraint_id = 0;
5406 	bus->adding_filter = false;
5407 
5408 	/* Store block's shifted enable mask */
5409 	SET_FIELD(bus->trigger_states[dev_data->bus.next_trigger_state].data, DBG_BUS_TRIGGER_STATE_DATA_BLOCK_SHIFTED_ENABLE_MASK, SHR(GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK),
5410 					   VALUES_PER_CYCLE,
5411 					   GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_RIGHT_SHIFT)));
5412 
5413 	/* Set trigger state registers */
5414 	reg_offset = bus->next_trigger_state * BYTES_IN_DWORD;
5415 	ecore_wr(p_hwfn, p_ptt, DBG_REG_TRIGGER_STATE_MSG_LENGTH_ENABLE_0 + reg_offset, const_msg_len > 0 ? 1 : 0);
5416 	if (const_msg_len > 0)
5417 		ecore_wr(p_hwfn, p_ptt, DBG_REG_TRIGGER_STATE_MSG_LENGTH_0 + reg_offset, const_msg_len - 1);
5418 
5419 	/* Set trigger set registers */
5420 	reg_offset = bus->next_trigger_state * TRIGGER_SETS_PER_STATE * BYTES_IN_DWORD;
5421 	ecore_wr(p_hwfn, p_ptt, DBG_REG_TRIGGER_STATE_SET_COUNT_0 + reg_offset, count_to_next);
5422 
5423 	/* Set next state to final state, and overwrite previous next state
5424 	 * (if any).
5425 	 */
5426 	ecore_wr(p_hwfn, p_ptt, DBG_REG_TRIGGER_STATE_SET_NXT_STATE_0 + reg_offset, MAX_TRIGGER_STATES);
5427 	if (bus->next_trigger_state > 0) {
5428 		reg_offset = (bus->next_trigger_state - 1) * TRIGGER_SETS_PER_STATE * BYTES_IN_DWORD;
5429 		ecore_wr(p_hwfn, p_ptt, DBG_REG_TRIGGER_STATE_SET_NXT_STATE_0 + reg_offset, bus->next_trigger_state);
5430 	}
5431 
5432 	bus->next_trigger_state++;
5433 
5434 	return DBG_STATUS_OK;
5435 }
5436 
5437 enum dbg_status ecore_dbg_bus_add_constraint(struct ecore_hwfn *p_hwfn,
5438 											 struct ecore_ptt *p_ptt,
5439 											 enum dbg_bus_constraint_ops constraint_op,
5440 											 u32 data_val,
5441 											 u32 data_mask,
5442 											 bool compare_frame,
5443 											 u8 frame_bit,
5444 											 u8 cycle_offset,
5445 											 u8 dword_offset_in_cycle,
5446 											 bool is_mandatory)
5447 {
5448 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5449 	struct dbg_bus_data *bus = &dev_data->bus;
5450 	u16 dword_offset, range = 0;
5451 
5452 	DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_add_constraint: op = %d, data_val = 0x%x, data_mask = 0x%x, compare_frame = %d, frame_bit = %d, cycle_offset = %d, dword_offset_in_cycle = %d, is_mandatory = %d\n", constraint_op, data_val, data_mask, compare_frame, frame_bit, cycle_offset, dword_offset_in_cycle, is_mandatory);
5453 
5454 	if (!bus->filter_en && !dev_data->bus.trigger_en)
5455 		return DBG_STATUS_CANT_ADD_CONSTRAINT;
5456 	if (bus->trigger_en && !bus->adding_filter && !bus->next_trigger_state)
5457 		return DBG_STATUS_CANT_ADD_CONSTRAINT;
5458 	if (bus->next_constraint_id >= MAX_CONSTRAINTS)
5459 		return DBG_STATUS_TOO_MANY_CONSTRAINTS;
5460 	if (constraint_op >= MAX_DBG_BUS_CONSTRAINT_OPS || frame_bit > 1 || dword_offset_in_cycle > 3 || (bus->adding_filter && cycle_offset > 3))
5461 		return DBG_STATUS_INVALID_ARGS;
5462 	if (compare_frame &&
5463 		constraint_op != DBG_BUS_CONSTRAINT_OP_EQ &&
5464 		constraint_op != DBG_BUS_CONSTRAINT_OP_NE)
5465 		return DBG_STATUS_INVALID_ARGS;
5466 
5467 	dword_offset = cycle_offset * VALUES_PER_CYCLE + dword_offset_in_cycle;
5468 
5469 	if (!bus->adding_filter) {
5470 		u8 curr_trigger_state_id = bus->next_trigger_state - 1;
5471 		struct dbg_bus_trigger_state_data *trigger_state;
5472 
5473 		trigger_state = &bus->trigger_states[curr_trigger_state_id];
5474 
5475 		/* Check if the selected dword is enabled in the block */
5476 		if (!(GET_FIELD(trigger_state->data, DBG_BUS_TRIGGER_STATE_DATA_BLOCK_SHIFTED_ENABLE_MASK) & (u8)(1 << dword_offset_in_cycle)))
5477 			return DBG_STATUS_INVALID_TRIGGER_DWORD_OFFSET;
5478 
5479 		/* Add selected dword to trigger state's dword mask */
5480 		SET_FIELD(trigger_state->data, DBG_BUS_TRIGGER_STATE_DATA_CONSTRAINT_DWORD_MASK, GET_FIELD(trigger_state->data, DBG_BUS_TRIGGER_STATE_DATA_CONSTRAINT_DWORD_MASK) | (u8)(1 << dword_offset_in_cycle));
5481 	}
5482 
5483 	/* Prepare data mask and range */
5484 	if (constraint_op == DBG_BUS_CONSTRAINT_OP_EQ ||
5485 		constraint_op == DBG_BUS_CONSTRAINT_OP_NE) {
5486 		data_mask = ~data_mask;
5487 	}
5488 	else {
5489 		u8 lsb, width;
5490 
5491 		/* Extract lsb and width from mask */
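		/* The mask must contain a single contiguous run of set bits;
		 * any other pattern is rejected below.
		 */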
5492 		if (!data_mask)
5493 			return DBG_STATUS_INVALID_ARGS;
5494 
5495 		for (lsb = 0; lsb < 32 && !(data_mask & 1); lsb++, data_mask >>= 1);
5496 		for (width = 0; width < 32 - lsb && (data_mask & 1); width++, data_mask >>= 1);
5497 		if (data_mask)
5498 			return DBG_STATUS_INVALID_ARGS;
5499 		range = (lsb << 5) | (width - 1);
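		/* range packs the run's LSB position into bits 9:5 and
		 * (width - 1) into bits 4:0.
		 */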
5500 	}
5501 
5502 	/* Add constraint */
5503 	ecore_bus_set_constraint(p_hwfn, p_ptt, dev_data->bus.adding_filter ? 1 : 0,
5504 		dev_data->bus.next_constraint_id,
5505 		s_constraint_op_defs[constraint_op].hw_op_val,
5506 		data_val, data_mask, frame_bit,
5507 		compare_frame ? 0 : 1, dword_offset, range,
5508 		s_constraint_op_defs[constraint_op].is_cyclic ? 1 : 0,
5509 		is_mandatory ? 1 : 0);
5510 
5511 	/* If first constraint, fill other 3 constraints with dummy constraints
5512 	 * that always match (using the same offset).
5513 	 */
5514 	if (!dev_data->bus.next_constraint_id) {
5515 		u8 i;
5516 
5517 		for (i = 1; i < MAX_CONSTRAINTS; i++)
5518 			ecore_bus_set_constraint(p_hwfn, p_ptt, bus->adding_filter ? 1 : 0,
5519 				i, DBG_BUS_CONSTRAINT_OP_EQ, 0, 0xffffffff,
5520 				0, 1, dword_offset, 0, 0, 1);
5521 	}
5522 
5523 	bus->next_constraint_id++;
5524 
5525 	return DBG_STATUS_OK;
5526 }
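
/* Example (illustrative, not part of the driver flow): how the cyclic
 * constraint "range" computed above encodes a contiguous data_mask as
 * (lsb << 5) | (width - 1). The helper name is hypothetical, and it
 * assumes a non-zero, contiguous mask (as validated above).
 */
static u16 example_encode_constraint_range(u32 data_mask)
{
	u8 lsb, width;

	for (lsb = 0; lsb < 32 && !(data_mask & 1); lsb++, data_mask >>= 1);
	for (width = 0; width < 32 - lsb && (data_mask & 1); width++, data_mask >>= 1);

	/* e.g. data_mask = 0x00ff0000 -> lsb = 16, width = 8 -> 0x207 */
	return (u16)((lsb << 5) | (width - 1));
}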
5527 
5528 /* Configure the DBG block client mask */
5529 static void ecore_config_dbg_block_client_mask(struct ecore_hwfn *p_hwfn,
5530 										struct ecore_ptt *p_ptt)
5531 {
5532 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5533 	struct dbg_bus_data *bus = &dev_data->bus;
5534 	u32 block_id, client_mask = 0;
5535 	u8 storm_id;
5536 
5537 	/* Update client mask for Storm inputs */
5538 	if (bus->num_enabled_storms)
5539 		for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
5540 			struct storm_defs *storm = &s_storm_defs[storm_id];
5541 
5542 			if (bus->storms[storm_id].enabled)
5543 				client_mask |= (1 << storm->dbg_client_id[dev_data->chip_id]);
5544 		}
5545 
5546 	/* Update client mask for block inputs */
5547 	if (bus->num_enabled_blocks) {
5548 		for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
5549 			struct dbg_bus_block_data *block_bus = &bus->blocks[block_id];
5550 			struct block_defs *block = s_block_defs[block_id];
5551 
5552 			if (GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK) && block_id != BLOCK_DBG)
5553 				client_mask |= (1 << block->dbg_client_id[dev_data->chip_id]);
5554 		}
5555 	}
5556 
5557 	/* Update client mask for GRC input */
5558 	if (bus->grc_input_en)
5559 		client_mask |= (1 << DBG_BUS_CLIENT_CPU);
5560 
5561 	/* Update client mask for timestamp input */
5562 	if (bus->timestamp_input_en)
5563 		client_mask |= (1 << DBG_BUS_CLIENT_TIMESTAMP);
5564 
5565 	ecore_bus_enable_clients(p_hwfn, p_ptt, client_mask);
5566 }
5567 
5568 /* Configure the DBG block framing mode */
5569 static enum dbg_status ecore_config_dbg_block_framing_mode(struct ecore_hwfn *p_hwfn,
5570 													struct ecore_ptt *p_ptt)
5571 {
5572 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5573 	struct dbg_bus_data *bus = &dev_data->bus;
5574 	enum dbg_bus_frame_modes dbg_framing_mode;
5575 	u32 block_id;
5576 
5577 	if (!bus->hw_dwords && bus->num_enabled_blocks) {
5578 		const struct dbg_bus_line *line_desc;
5579 		u8 hw_dwords;
5580 
5581 		/* Choose either 4 HW dwords (128-bit mode) or 8 HW dwords
5582 		 * (256-bit mode).
5583 		 */
5584 		for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
5585 			struct dbg_bus_block_data *block_bus = &bus->blocks[block_id];
5586 
5587 			if (!GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK))
5588 				continue;
5589 
5590 			line_desc = get_dbg_bus_line_desc(p_hwfn, (enum block_id)block_id);
5591 			hw_dwords = line_desc && GET_FIELD(line_desc->data, DBG_BUS_LINE_IS_256B) ? 8 : 4;
5592 
5593 			if (bus->hw_dwords > 0 && bus->hw_dwords != hw_dwords)
5594 				return DBG_STATUS_NON_MATCHING_LINES;
5595 
5596 			/* The DBG block doesn't support triggers and
5597 			 * filters on 256b debug lines.
5598 			 */
5599 			if (hw_dwords == 8 && (bus->trigger_en || bus->filter_en))
5600 				return DBG_STATUS_NO_FILTER_TRIGGER_64B;
5601 
5602 			bus->hw_dwords = hw_dwords;
5603 		}
5604 	}
5605 
5606 	switch (bus->hw_dwords) {
5607 	case 0: dbg_framing_mode = DBG_BUS_FRAME_MODE_0HW_4ST; break;
5608 	case 4: dbg_framing_mode = DBG_BUS_FRAME_MODE_4HW_0ST; break;
5609 	case 8: dbg_framing_mode = DBG_BUS_FRAME_MODE_8HW_0ST; break;
5610 	default: dbg_framing_mode = DBG_BUS_FRAME_MODE_0HW_4ST; break;
5611 	}
5612 	ecore_bus_set_framing_mode(p_hwfn, p_ptt, dbg_framing_mode);
5613 
5614 	return DBG_STATUS_OK;
5615 }
5616 
5617 /* Configure the DBG block Storm data */
5618 static enum dbg_status ecore_config_storm_inputs(struct ecore_hwfn *p_hwfn,
5619 										  struct ecore_ptt *p_ptt)
5620 {
5621 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5622 	struct dbg_bus_data *bus = &dev_data->bus;
5623 	u8 storm_id, i, next_storm_id = 0;
5624 	u32 storm_id_mask = 0;
5625 
5626 	/* Check if SEMI sync FIFO is empty */
5627 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
5628 		struct dbg_bus_storm_data *storm_bus = &bus->storms[storm_id];
5629 		struct storm_defs *storm = &s_storm_defs[storm_id];
5630 
5631 		if (storm_bus->enabled && !ecore_rd(p_hwfn, p_ptt, storm->sem_sync_dbg_empty_addr))
5632 			return DBG_STATUS_SEMI_FIFO_NOT_EMPTY;
5633 	}
5634 
5635 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
5636 		struct dbg_bus_storm_data *storm_bus = &bus->storms[storm_id];
5637 
5638 		if (storm_bus->enabled)
5639 			storm_id_mask |= (storm_bus->hw_id << (storm_id * HW_ID_BITS));
5640 	}
5641 
5642 	ecore_wr(p_hwfn, p_ptt, DBG_REG_STORM_ID_NUM, storm_id_mask);
5643 
5644 	/* Disable storm stall if recording to internal buffer in one-shot */
5645 	ecore_wr(p_hwfn, p_ptt, DBG_REG_NO_GRANT_ON_FULL, (dev_data->bus.target == DBG_BUS_TARGET_ID_INT_BUF && bus->one_shot_en) ? 0 : 1);
5646 
5647 	/* Configure calendar */
5648 	for (i = 0; i < NUM_CALENDAR_SLOTS; i++, next_storm_id = (next_storm_id + 1) % MAX_DBG_STORMS) {
5649 		/* Find next enabled Storm */
5650 		for (; !dev_data->bus.storms[next_storm_id].enabled; next_storm_id = (next_storm_id + 1) % MAX_DBG_STORMS);
5651 
5652 		/* Configure calendar slot */
5653 		ecore_wr(p_hwfn, p_ptt, DBG_REG_CALENDAR_SLOT0 + DWORDS_TO_BYTES(i), next_storm_id);
5654 	}
5655 
5656 	return DBG_STATUS_OK;
5657 }
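
/* Example (illustrative): the calendar fill above is a plain round-robin
 * over the enabled Storms. A standalone sketch with hypothetical names;
 * it assumes at least one entry in enabled[] is true. With Storms
 * {0, 2, 5} enabled, the slots are written 0, 2, 5, 0, 2, 5, ...
 */
static void example_fill_calendar(const bool *enabled, u8 num_storms,
				  u8 *slots, u8 num_slots)
{
	u8 i, next = 0;

	for (i = 0; i < num_slots; i++, next = (next + 1) % num_storms) {
		/* Advance to the next enabled Storm */
		while (!enabled[next])
			next = (next + 1) % num_storms;
		slots[i] = next;
	}
}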
5658 
5659 /* Assign a HW ID to each dword/qword:
5660  * If the inputs are unified, HW ID 0 is assigned to all dwords/qwords.
5661  * Otherwise, we would like to assign a different HW ID to each dword, to
5662  * avoid data synchronization issues. However, we must first check whether
5663  * any trigger state has constraints on more than one dword. If one does,
5664  * we cannot assign a different HW ID to each dword (a trigger state has a
5665  * single HW ID), so we assign a different HW ID to each block instead.
5666  */
5667 static void ecore_assign_hw_ids(struct ecore_hwfn *p_hwfn,
5668 						 u8 hw_ids[VALUES_PER_CYCLE])
5669 {
5670 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5671 	struct dbg_bus_data *bus = &dev_data->bus;
5672 	bool hw_id_per_dword = true;
5673 	u8 val_id, state_id;
5674 	u32 block_id;
5675 
5676 	OSAL_MEMSET(hw_ids, 0, VALUES_PER_CYCLE);
5677 
5678 	if (bus->unify_inputs)
5679 		return;
5680 
5681 	if (bus->trigger_en) {
5682 		for (state_id = 0; state_id < bus->next_trigger_state && hw_id_per_dword; state_id++) {
5683 			u8 num_dwords = 0;
5684 
5685 			for (val_id = 0; val_id < VALUES_PER_CYCLE; val_id++)
5686 				if (GET_FIELD(bus->trigger_states[state_id].data, DBG_BUS_TRIGGER_STATE_DATA_CONSTRAINT_DWORD_MASK) & (1 << val_id))
5687 					num_dwords++;
5688 
5689 			if (num_dwords > 1)
5690 				hw_id_per_dword = false;
5691 		}
5692 	}
5693 
5694 	if (hw_id_per_dword) {
5695 		/* Assign a different HW ID for each dword */
5696 		for (val_id = 0; val_id < VALUES_PER_CYCLE; val_id++)
5697 			hw_ids[val_id] = val_id;
5698 	}
5699 	else {
5700 		u8 shifted_enable_mask, next_hw_id = 0;
5701 
5702 		/* Assign a HW ID per block, mapped to dwords via the block's enable mask / right shift */
5703 		for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
5704 			struct dbg_bus_block_data *block_bus = &bus->blocks[block_id];
5705 
5706 			if (!GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK))
5707 				continue;
5708 
5709 			block_bus->hw_id = next_hw_id++;
5710 			if (!block_bus->hw_id)
5711 				continue;
5712 
5713 			shifted_enable_mask =
5714 				SHR(GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK),
5715 					VALUES_PER_CYCLE,
5716 					GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_RIGHT_SHIFT));
5717 
5718 			for (val_id = 0; val_id < VALUES_PER_CYCLE; val_id++)
5719 				if (shifted_enable_mask & (1 << val_id))
5720 					hw_ids[val_id] = block_bus->hw_id;
5721 		}
5722 	}
5723 }
5724 
5725 /* Configure the DBG block HW blocks data */
5726 static void ecore_config_block_inputs(struct ecore_hwfn *p_hwfn,
5727 							   struct ecore_ptt *p_ptt)
5728 {
5729 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5730 	struct dbg_bus_data *bus = &dev_data->bus;
5731 	u8 hw_ids[VALUES_PER_CYCLE];
5732 	u8 val_id, state_id;
5733 
5734 	ecore_assign_hw_ids(p_hwfn, hw_ids);
5735 
5736 	/* Assign a HW ID to each trigger state */
5737 	if (dev_data->bus.trigger_en) {
5738 		for (state_id = 0; state_id < bus->next_trigger_state; state_id++) {
5739 			for (val_id = 0; val_id < VALUES_PER_CYCLE; val_id++) {
5740 				u8 state_data = bus->trigger_states[state_id].data;
5741 
5742 				if (GET_FIELD(state_data, DBG_BUS_TRIGGER_STATE_DATA_CONSTRAINT_DWORD_MASK) & (1 << val_id)) {
5743 					ecore_wr(p_hwfn, p_ptt, DBG_REG_TRIGGER_STATE_ID_0 + state_id * BYTES_IN_DWORD, hw_ids[val_id]);
5744 					break;
5745 				}
5746 			}
5747 		}
5748 	}
5749 
5750 	/* Configure HW ID mask */
5751 	bus->hw_id_mask = 0;
5752 	for (val_id = 0; val_id < VALUES_PER_CYCLE; val_id++)
5753 		bus->hw_id_mask |= (hw_ids[val_id] << (val_id * HW_ID_BITS));
5754 	ecore_wr(p_hwfn, p_ptt, DBG_REG_HW_ID_NUM, bus->hw_id_mask);
5755 
5756 	/* Configure additional K2 PCIE registers */
5757 	if (dev_data->chip_id == CHIP_K2 &&
5758 		(GET_FIELD(bus->blocks[BLOCK_PCIE].data, DBG_BUS_BLOCK_DATA_ENABLE_MASK) ||
5759 			GET_FIELD(bus->blocks[BLOCK_PHY_PCIE].data, DBG_BUS_BLOCK_DATA_ENABLE_MASK))) {
5760 		ecore_wr(p_hwfn, p_ptt, PCIE_REG_DBG_REPEAT_THRESHOLD_COUNT_K2_E5, 1);
5761 		ecore_wr(p_hwfn, p_ptt, PCIE_REG_DBG_FW_TRIGGER_ENABLE_K2_E5, 1);
5762 	}
5763 }
5764 
5765 enum dbg_status ecore_dbg_bus_start(struct ecore_hwfn *p_hwfn,
5766 									struct ecore_ptt *p_ptt)
5767 {
5768 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5769 	struct dbg_bus_data *bus = &dev_data->bus;
5770 	enum dbg_bus_filter_types filter_type;
5771 	enum dbg_status status;
5772 	u32 block_id;
5773 	u8 storm_id;
5774 
5775 	DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_start\n");
5776 
5777 	if (bus->state != DBG_BUS_STATE_READY)
5778 		return DBG_STATUS_DBG_BLOCK_NOT_RESET;
5779 
5780 	/* Check if any input was enabled */
5781 	if (!bus->num_enabled_storms &&
5782 		!bus->num_enabled_blocks &&
5783 		!bus->rcv_from_other_engine)
5784 		return DBG_STATUS_NO_INPUT_ENABLED;
5785 
5786 	/* Check if too many input types were enabled (storm+dbgmux) */
5787 	if (bus->num_enabled_storms && bus->num_enabled_blocks)
5788 		return DBG_STATUS_TOO_MANY_INPUTS;
5789 
5790 	/* Configure framing mode */
5791 	if ((status = ecore_config_dbg_block_framing_mode(p_hwfn, p_ptt)) != DBG_STATUS_OK)
5792 		return status;
5793 
5794 	/* Configure DBG block for Storm inputs */
5795 	if (bus->num_enabled_storms)
5796 		if ((status = ecore_config_storm_inputs(p_hwfn, p_ptt)) != DBG_STATUS_OK)
5797 			return status;
5798 
5799 	/* Configure DBG block for block inputs */
5800 	if (bus->num_enabled_blocks)
5801 		ecore_config_block_inputs(p_hwfn, p_ptt);
5802 
5803 	/* Configure filter type */
5804 	if (bus->filter_en) {
5805 		if (bus->trigger_en) {
5806 			if (bus->filter_pre_trigger)
5807 				filter_type = bus->filter_post_trigger ? DBG_BUS_FILTER_TYPE_ON : DBG_BUS_FILTER_TYPE_PRE;
5808 			else
5809 				filter_type = bus->filter_post_trigger ? DBG_BUS_FILTER_TYPE_POST : DBG_BUS_FILTER_TYPE_OFF;
5810 		}
5811 		else {
5812 			filter_type = DBG_BUS_FILTER_TYPE_ON;
5813 		}
5814 	}
5815 	else {
5816 		filter_type = DBG_BUS_FILTER_TYPE_OFF;
5817 	}
5818 	ecore_wr(p_hwfn, p_ptt, DBG_REG_FILTER_ENABLE, filter_type);
5819 
5820 	/* Restart timestamp */
5821 	ecore_wr(p_hwfn, p_ptt, DBG_REG_TIMESTAMP, 0);
5822 
5823 	/* Enable debug block */
5824 	ecore_bus_enable_dbg_block(p_hwfn, p_ptt, 1);
5825 
5826 	/* Configure enabled blocks - must be done before the DBG block
5827 	 * clients are enabled below.
5828 	 */
5829 	if (dev_data->bus.num_enabled_blocks) {
5830 		for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
5831 			if (!GET_FIELD(dev_data->bus.blocks[block_id].data, DBG_BUS_BLOCK_DATA_ENABLE_MASK) || block_id == BLOCK_DBG)
5832 				continue;
5833 
5834 			ecore_config_dbg_line(p_hwfn, p_ptt, (enum block_id)block_id,
5835 				dev_data->bus.blocks[block_id].line_num,
5836 				GET_FIELD(dev_data->bus.blocks[block_id].data, DBG_BUS_BLOCK_DATA_ENABLE_MASK),
5837 				GET_FIELD(dev_data->bus.blocks[block_id].data, DBG_BUS_BLOCK_DATA_RIGHT_SHIFT),
5838 				GET_FIELD(dev_data->bus.blocks[block_id].data, DBG_BUS_BLOCK_DATA_FORCE_VALID_MASK),
5839 				GET_FIELD(dev_data->bus.blocks[block_id].data, DBG_BUS_BLOCK_DATA_FORCE_FRAME_MASK));
5840 		}
5841 	}
5842 
5843 	/* Configure client mask */
5844 	ecore_config_dbg_block_client_mask(p_hwfn, p_ptt);
5845 
5846 	/* Configure enabled Storms - must be done after the DBG block is
5847 	 * enabled.
5848 	 */
5849 	if (dev_data->bus.num_enabled_storms)
5850 		for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++)
5851 			if (dev_data->bus.storms[storm_id].enabled)
5852 				ecore_bus_enable_storm(p_hwfn, p_ptt, (enum dbg_storms)storm_id);
5853 
5854 	dev_data->bus.state = DBG_BUS_STATE_RECORDING;
5855 
5856 	return DBG_STATUS_OK;
5857 }
5858 
5859 enum dbg_status ecore_dbg_bus_stop(struct ecore_hwfn *p_hwfn,
5860 								   struct ecore_ptt *p_ptt)
5861 {
5862 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5863 	struct dbg_bus_data *bus = &dev_data->bus;
5864 	enum dbg_status status = DBG_STATUS_OK;
5865 
5866 	DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_stop\n");
5867 
5868 	if (bus->state != DBG_BUS_STATE_RECORDING)
5869 		return DBG_STATUS_RECORDING_NOT_STARTED;
5870 
5871 	status = ecore_bus_disable_inputs(p_hwfn, p_ptt, true);
5872 	if (status != DBG_STATUS_OK)
5873 		return status;
5874 
5875 	ecore_wr(p_hwfn, p_ptt, DBG_REG_CPU_TIMEOUT, 1);
5876 
5877 	OSAL_MSLEEP(FLUSH_DELAY_MS);
5878 
5879 	ecore_bus_enable_dbg_block(p_hwfn, p_ptt, false);
5880 
5881 	/* Check if trigger worked */
5882 	if (bus->trigger_en) {
5883 		u32 trigger_state = ecore_rd(p_hwfn, p_ptt, DBG_REG_TRIGGER_STATUS_CUR_STATE);
5884 
5885 		if (trigger_state != MAX_TRIGGER_STATES)
5886 			return DBG_STATUS_DATA_DIDNT_TRIGGER;
5887 	}
5888 
5889 	bus->state = DBG_BUS_STATE_STOPPED;
5890 
5891 	return status;
5892 }
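
/* Usage sketch (illustrative; function name hypothetical, error paths
 * elided): a typical capture session arms the recorder with
 * ecore_dbg_bus_start() and later stops it with ecore_dbg_bus_stop().
 * Target/input selection is assumed to have been done beforehand via the
 * other ecore_dbg_bus_* APIs, leaving the bus in the READY state.
 */
static enum dbg_status example_bus_capture(struct ecore_hwfn *p_hwfn,
					   struct ecore_ptt *p_ptt)
{
	enum dbg_status status;

	status = ecore_dbg_bus_start(p_hwfn, p_ptt);
	if (status != DBG_STATUS_OK)
		return status;

	/* ... let the hardware record, or wait for the trigger ... */

	return ecore_dbg_bus_stop(p_hwfn, p_ptt);
}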
5893 
5894 enum dbg_status ecore_dbg_bus_get_dump_buf_size(struct ecore_hwfn *p_hwfn,
5895 												struct ecore_ptt *p_ptt,
5896 												u32 *buf_size)
5897 {
5898 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5899 	struct dbg_bus_data *bus = &dev_data->bus;
5900 	enum dbg_status status;
5901 
5902 	status = ecore_dbg_dev_init(p_hwfn, p_ptt);
5903 
5904 	*buf_size = 0;
5905 
5906 	if (status != DBG_STATUS_OK)
5907 		return status;
5908 
5909 	/* Add dump header */
5910 	*buf_size = (u32)ecore_bus_dump_hdr(p_hwfn, p_ptt, OSAL_NULL, false);
5911 
5912 	switch (bus->target) {
5913 	case DBG_BUS_TARGET_ID_INT_BUF:
5914 		*buf_size += INT_BUF_SIZE_IN_DWORDS; break;
5915 	case DBG_BUS_TARGET_ID_PCI:
5916 		*buf_size += BYTES_TO_DWORDS(bus->pci_buf.size); break;
5917 	default:
5918 		break;
5919 	}
5920 
5921 	/* Dump last section */
5922 	*buf_size += ecore_dump_last_section(OSAL_NULL, 0, false);
5923 
5924 	return DBG_STATUS_OK;
5925 }
5926 
5927 enum dbg_status ecore_dbg_bus_dump(struct ecore_hwfn *p_hwfn,
5928 								   struct ecore_ptt *p_ptt,
5929 								   u32 *dump_buf,
5930 								   u32 buf_size_in_dwords,
5931 								   u32 *num_dumped_dwords)
5932 {
5933 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5934 	u32 min_buf_size_in_dwords, block_id, offset = 0;
5935 	struct dbg_bus_data *bus = &dev_data->bus;
5936 	enum dbg_status status;
5937 	u8 storm_id;
5938 
5939 	*num_dumped_dwords = 0;
5940 
5941 	status = ecore_dbg_bus_get_dump_buf_size(p_hwfn, p_ptt, &min_buf_size_in_dwords);
5942 	if (status != DBG_STATUS_OK)
5943 		return status;
5944 
5945 	DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_dump: dump_buf = 0x%p, buf_size_in_dwords = %d\n", dump_buf, buf_size_in_dwords);
5946 
5947 	if (bus->state != DBG_BUS_STATE_RECORDING && bus->state != DBG_BUS_STATE_STOPPED)
5948 		return DBG_STATUS_RECORDING_NOT_STARTED;
5949 
5950 	if (bus->state == DBG_BUS_STATE_RECORDING) {
5951 		enum dbg_status stop_status = ecore_dbg_bus_stop(p_hwfn, p_ptt);
5952 		if (stop_status != DBG_STATUS_OK)
5953 			return stop_status;
5954 	}
5955 
5956 	if (buf_size_in_dwords < min_buf_size_in_dwords)
5957 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5958 
5959 	if (bus->target == DBG_BUS_TARGET_ID_PCI && !bus->pci_buf.size)
5960 		return DBG_STATUS_PCI_BUF_NOT_ALLOCATED;
5961 
5962 	/* Dump header */
5963 	offset += ecore_bus_dump_hdr(p_hwfn, p_ptt, dump_buf + offset, true);
5964 
5965 	/* Dump recorded data */
5966 	if (bus->target != DBG_BUS_TARGET_ID_NIG) {
5967 		u32 recorded_dwords = ecore_bus_dump_data(p_hwfn, p_ptt, dump_buf + offset, true);
5968 
5969 		if (!recorded_dwords)
5970 			return DBG_STATUS_NO_DATA_RECORDED;
5971 		if (recorded_dwords % CHUNK_SIZE_IN_DWORDS)
5972 			return DBG_STATUS_DUMP_NOT_CHUNK_ALIGNED;
5973 		offset += recorded_dwords;
5974 	}
5975 
5976 	/* Dump last section */
5977 	offset += ecore_dump_last_section(dump_buf, offset, true);
5978 
5979 	/* If recorded to PCI buffer - free the buffer */
5980 	ecore_bus_free_pci_buf(p_hwfn);
5981 
5982 	/* Clear debug bus parameters */
5983 	bus->state = DBG_BUS_STATE_IDLE;
5984 	bus->num_enabled_blocks = 0;
5985 	bus->num_enabled_storms = 0;
5986 	bus->filter_en = bus->trigger_en = 0;
5987 
5988 	for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++)
5989 		SET_FIELD(bus->blocks[block_id].data, DBG_BUS_BLOCK_DATA_ENABLE_MASK, 0);
5990 
5991 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
5992 		struct dbg_bus_storm_data *storm_bus = &bus->storms[storm_id];
5993 
5994 		storm_bus->enabled = false;
5995 		storm_bus->eid_filter_en = storm_bus->cid_filter_en = 0;
5996 	}
5997 
5998 	*num_dumped_dwords = offset;
5999 
6000 	return DBG_STATUS_OK;
6001 }
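
/* Usage sketch (illustrative): every dump API in this file follows the
 * same two-call pattern - query the required size, allocate, then dump.
 * The function name is hypothetical; the OSAL_ALLOC flags and the exact
 * status returned on allocation failure are assumptions here.
 */
static enum dbg_status example_bus_dump_to_buf(struct ecore_hwfn *p_hwfn,
					       struct ecore_ptt *p_ptt)
{
	u32 size_in_dwords, num_dumped_dwords;
	enum dbg_status status;
	u32 *buf;

	status = ecore_dbg_bus_get_dump_buf_size(p_hwfn, p_ptt, &size_in_dwords);
	if (status != DBG_STATUS_OK)
		return status;

	buf = (u32 *)OSAL_ALLOC(p_hwfn->p_dev, GFP_KERNEL,
				DWORDS_TO_BYTES(size_in_dwords));
	if (!buf)
		return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;

	status = ecore_dbg_bus_dump(p_hwfn, p_ptt, buf, size_in_dwords,
				    &num_dumped_dwords);

	/* ... consume buf[0 .. num_dumped_dwords - 1] ... */

	OSAL_FREE(p_hwfn->p_dev, buf);

	return status;
}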
6002 
6003 enum dbg_status ecore_dbg_grc_config(struct ecore_hwfn *p_hwfn,
6004 									 enum dbg_grc_params grc_param,
6005 									 u32 val)
6006 {
6007 	int i;
6008 
6009 	DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_grc_config: paramId = %d, val = %d\n", grc_param, val);
6010 
6011 	/* Initialize the GRC parameters (if not already initialized). Needed to
6012 	 * set the default parameter values the first time this is called.
6013 	 */
6014 	ecore_dbg_grc_init_params(p_hwfn);
6015 
6016 	if (grc_param >= MAX_DBG_GRC_PARAMS)
6017 		return DBG_STATUS_INVALID_ARGS;
6018 	if (val < s_grc_param_defs[grc_param].min ||
6019 		val > s_grc_param_defs[grc_param].max)
6020 		return DBG_STATUS_INVALID_ARGS;
6021 
6022 	if (s_grc_param_defs[grc_param].is_preset) {
6023 		/* Preset param */
6024 
6025 		/* Disabling a preset is not allowed. Call
6026 		 * dbg_grc_set_params_default instead.
6027 		 */
6028 		if (!val)
6029 			return DBG_STATUS_INVALID_ARGS;
6030 
6031 		/* Update all params with the preset values */
6032 		for (i = 0; i < MAX_DBG_GRC_PARAMS; i++) {
6033 			u32 preset_val;
6034 
6035 			if (grc_param == DBG_GRC_PARAM_EXCLUDE_ALL)
6036 				preset_val = s_grc_param_defs[i].exclude_all_preset_val;
6037 			else if (grc_param == DBG_GRC_PARAM_CRASH)
6038 				preset_val = s_grc_param_defs[i].crash_preset_val;
6039 			else
6040 				return DBG_STATUS_INVALID_ARGS;
6041 
6042 			ecore_grc_set_param(p_hwfn, (enum dbg_grc_params)i, preset_val);
6043 		}
6044 	}
6045 	else {
6046 		/* Regular param - set its value */
6047 		ecore_grc_set_param(p_hwfn, grc_param, val);
6048 	}
6049 
6050 	return DBG_STATUS_OK;
6051 }
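
/* Example (illustrative; helper name hypothetical): excluding the RAM
 * memory group from a subsequent GRC dump by clearing its GRC parameter.
 * The value is range-checked against s_grc_param_defs[] by
 * ecore_dbg_grc_config() above.
 */
static enum dbg_status example_exclude_ram_from_grc_dump(struct ecore_hwfn *p_hwfn)
{
	/* 0 = don't dump RAM memories in the next GRC dump */
	return ecore_dbg_grc_config(p_hwfn, DBG_GRC_PARAM_DUMP_RAM, 0);
}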
6052 
6053 /* Assign default GRC param values */
6054 void ecore_dbg_grc_set_params_default(struct ecore_hwfn *p_hwfn)
6055 {
6056 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
6057 	u32 i;
6058 
6059 	for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
6060 		dev_data->grc.param_val[i] = s_grc_param_defs[i].default_val[dev_data->chip_id];
6061 }
6062 
6063 enum dbg_status ecore_dbg_grc_get_dump_buf_size(struct ecore_hwfn *p_hwfn,
6064 												struct ecore_ptt *p_ptt,
6065 												u32 *buf_size)
6066 {
6067 	enum dbg_status status = ecore_dbg_dev_init(p_hwfn, p_ptt);
6068 
6069 	*buf_size = 0;
6070 
6071 	if (status != DBG_STATUS_OK)
6072 		return status;
6073 
6074 	if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr || !s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr || !s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr ||
6075 		!s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr || !s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)
6076 		return DBG_STATUS_DBG_ARRAY_NOT_SET;
6077 
6078 	return ecore_grc_dump(p_hwfn, p_ptt, OSAL_NULL, false, buf_size);
6079 }
6080 
6081 enum dbg_status ecore_dbg_grc_dump(struct ecore_hwfn *p_hwfn,
6082 								   struct ecore_ptt *p_ptt,
6083 								   u32 *dump_buf,
6084 								   u32 buf_size_in_dwords,
6085 								   u32 *num_dumped_dwords)
6086 {
6087 	u32 needed_buf_size_in_dwords;
6088 	enum dbg_status status;
6089 
6090 	*num_dumped_dwords = 0;
6091 
6092 	status = ecore_dbg_grc_get_dump_buf_size(p_hwfn, p_ptt, &needed_buf_size_in_dwords);
6093 	if (status != DBG_STATUS_OK)
6094 		return status;
6095 
6096 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
6097 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
6098 
6099 	/* Has no runtime effect; needed only for compile-time asserts */
6100 	ecore_static_asserts();
6101 
6102 	/* GRC Dump */
6103 	status = ecore_grc_dump(p_hwfn, p_ptt, dump_buf, true, num_dumped_dwords);
6104 
6105 	/* Revert GRC params to their defaults */
6106 	ecore_dbg_grc_set_params_default(p_hwfn);
6107 
6108 	return status;
6109 }
6110 
6111 enum dbg_status ecore_dbg_idle_chk_get_dump_buf_size(struct ecore_hwfn *p_hwfn,
6112 													 struct ecore_ptt *p_ptt,
6113 													 u32 *buf_size)
6114 {
6115 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
6116 	struct idle_chk_data *idle_chk = &dev_data->idle_chk;
6117 	enum dbg_status status;
6118 
6119 	*buf_size = 0;
6120 
6121 	status = ecore_dbg_dev_init(p_hwfn, p_ptt);
6122 	if (status != DBG_STATUS_OK)
6123 		return status;
6124 
6125 	if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr || !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr ||
6126 		!s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr || !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr)
6127 		return DBG_STATUS_DBG_ARRAY_NOT_SET;
6128 
6129 	if (!idle_chk->buf_size_set) {
6130 		idle_chk->buf_size = ecore_idle_chk_dump(p_hwfn, p_ptt, OSAL_NULL, false);
6131 		idle_chk->buf_size_set = true;
6132 	}
6133 
6134 	*buf_size = idle_chk->buf_size;
6135 
6136 	return DBG_STATUS_OK;
6137 }
6138 
6139 enum dbg_status ecore_dbg_idle_chk_dump(struct ecore_hwfn *p_hwfn,
6140 										struct ecore_ptt *p_ptt,
6141 										u32 *dump_buf,
6142 										u32 buf_size_in_dwords,
6143 										u32 *num_dumped_dwords)
6144 {
6145 	u32 needed_buf_size_in_dwords;
6146 	enum dbg_status status;
6147 
6148 	*num_dumped_dwords = 0;
6149 
6150 	status = ecore_dbg_idle_chk_get_dump_buf_size(p_hwfn, p_ptt, &needed_buf_size_in_dwords);
6151 	if (status != DBG_STATUS_OK)
6152 		return status;
6153 
6154 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
6155 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
6156 
6157 	/* Update reset state */
6158 	ecore_update_blocks_reset_state(p_hwfn, p_ptt);
6159 
6160 	/* Idle Check Dump */
6161 	*num_dumped_dwords = ecore_idle_chk_dump(p_hwfn, p_ptt, dump_buf, true);
6162 
6163 	/* Revert GRC params to their defaults */
6164 	ecore_dbg_grc_set_params_default(p_hwfn);
6165 
6166 	return DBG_STATUS_OK;
6167 }
6168 
6169 enum dbg_status ecore_dbg_mcp_trace_get_dump_buf_size(struct ecore_hwfn *p_hwfn,
6170 													  struct ecore_ptt *p_ptt,
6171 													  u32 *buf_size)
6172 {
6173 	enum dbg_status status = ecore_dbg_dev_init(p_hwfn, p_ptt);
6174 
6175 	*buf_size = 0;
6176 
6177 	if (status != DBG_STATUS_OK)
6178 		return status;
6179 
6180 	return ecore_mcp_trace_dump(p_hwfn, p_ptt, OSAL_NULL, false, buf_size);
6181 }
6182 
6183 enum dbg_status ecore_dbg_mcp_trace_dump(struct ecore_hwfn *p_hwfn,
6184 										 struct ecore_ptt *p_ptt,
6185 										 u32 *dump_buf,
6186 										 u32 buf_size_in_dwords,
6187 										 u32 *num_dumped_dwords)
6188 {
6189 	u32 needed_buf_size_in_dwords;
6190 	enum dbg_status status;
6191 
6192 	status = ecore_dbg_mcp_trace_get_dump_buf_size(p_hwfn, p_ptt, &needed_buf_size_in_dwords);
6193 	if (status != DBG_STATUS_OK && status != DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
6194 		return status;
6195 
6196 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
6197 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
6198 
6199 	/* Update reset state */
6200 	ecore_update_blocks_reset_state(p_hwfn, p_ptt);
6201 
6202 	/* Perform dump */
6203 	status = ecore_mcp_trace_dump(p_hwfn, p_ptt, dump_buf, true, num_dumped_dwords);
6204 
6205 	/* Revert GRC params to their defaults */
6206 	ecore_dbg_grc_set_params_default(p_hwfn);
6207 
6208 	return status;
6209 }
6210 
6211 enum dbg_status ecore_dbg_reg_fifo_get_dump_buf_size(struct ecore_hwfn *p_hwfn,
6212 													 struct ecore_ptt *p_ptt,
6213 													 u32 *buf_size)
6214 {
6215 	enum dbg_status status = ecore_dbg_dev_init(p_hwfn, p_ptt);
6216 
6217 	*buf_size = 0;
6218 
6219 	if (status != DBG_STATUS_OK)
6220 		return status;
6221 
6222 	return ecore_reg_fifo_dump(p_hwfn, p_ptt, OSAL_NULL, false, buf_size);
6223 }
6224 
6225 enum dbg_status ecore_dbg_reg_fifo_dump(struct ecore_hwfn *p_hwfn,
6226 										struct ecore_ptt *p_ptt,
6227 										u32 *dump_buf,
6228 										u32 buf_size_in_dwords,
6229 										u32 *num_dumped_dwords)
6230 {
6231 	u32 needed_buf_size_in_dwords;
6232 	enum dbg_status status;
6233 
6234 	*num_dumped_dwords = 0;
6235 
6236 	status = ecore_dbg_reg_fifo_get_dump_buf_size(p_hwfn, p_ptt, &needed_buf_size_in_dwords);
6237 	if (status != DBG_STATUS_OK)
6238 		return status;
6239 
6240 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
6241 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
6242 
6243 	/* Update reset state */
6244 	ecore_update_blocks_reset_state(p_hwfn, p_ptt);
6245 
6246 	status = ecore_reg_fifo_dump(p_hwfn, p_ptt, dump_buf, true, num_dumped_dwords);
6247 
6248 	/* Revert GRC params to their defaults */
6249 	ecore_dbg_grc_set_params_default(p_hwfn);
6250 
6251 	return status;
6252 }
6253 
6254 enum dbg_status ecore_dbg_igu_fifo_get_dump_buf_size(struct ecore_hwfn *p_hwfn,
6255 													 struct ecore_ptt *p_ptt,
6256 													 u32 *buf_size)
6257 {
6258 	enum dbg_status status = ecore_dbg_dev_init(p_hwfn, p_ptt);
6259 
6260 	*buf_size = 0;
6261 
6262 	if (status != DBG_STATUS_OK)
6263 		return status;
6264 
6265 	return ecore_igu_fifo_dump(p_hwfn, p_ptt, OSAL_NULL, false, buf_size);
6266 }
6267 
6268 enum dbg_status ecore_dbg_igu_fifo_dump(struct ecore_hwfn *p_hwfn,
6269 										struct ecore_ptt *p_ptt,
6270 										u32 *dump_buf,
6271 										u32 buf_size_in_dwords,
6272 										u32 *num_dumped_dwords)
6273 {
6274 	u32 needed_buf_size_in_dwords;
6275 	enum dbg_status status;
6276 
6277 	*num_dumped_dwords = 0;
6278 
6279 	status = ecore_dbg_igu_fifo_get_dump_buf_size(p_hwfn, p_ptt, &needed_buf_size_in_dwords);
6280 	if (status != DBG_STATUS_OK)
6281 		return status;
6282 
6283 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
6284 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
6285 
6286 	/* Update reset state */
6287 	ecore_update_blocks_reset_state(p_hwfn, p_ptt);
6288 
6289 	status = ecore_igu_fifo_dump(p_hwfn, p_ptt, dump_buf, true, num_dumped_dwords);
6290 
6291 	/* Revert GRC params to their defaults */
6292 	ecore_dbg_grc_set_params_default(p_hwfn);
6293 
6294 	return status;
6295 }
6296 
6297 enum dbg_status ecore_dbg_protection_override_get_dump_buf_size(struct ecore_hwfn *p_hwfn,
6298 																struct ecore_ptt *p_ptt,
6299 																u32 *buf_size)
6300 {
6301 	enum dbg_status status = ecore_dbg_dev_init(p_hwfn, p_ptt);
6302 
6303 	*buf_size = 0;
6304 
6305 	if (status != DBG_STATUS_OK)
6306 		return status;
6307 
6308 	return ecore_protection_override_dump(p_hwfn, p_ptt, OSAL_NULL, false, buf_size);
6309 }
6310 
6311 enum dbg_status ecore_dbg_protection_override_dump(struct ecore_hwfn *p_hwfn,
6312 												   struct ecore_ptt *p_ptt,
6313 												   u32 *dump_buf,
6314 												   u32 buf_size_in_dwords,
6315 												   u32 *num_dumped_dwords)
6316 {
6317 	u32 needed_buf_size_in_dwords;
6318 	enum dbg_status status;
6319 
6320 	*num_dumped_dwords = 0;
6321 
6322 	status = ecore_dbg_protection_override_get_dump_buf_size(p_hwfn, p_ptt, &needed_buf_size_in_dwords);
6323 	if (status != DBG_STATUS_OK)
6324 		return status;
6325 
6326 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
6327 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
6328 
6329 	/* Update reset state */
6330 	ecore_update_blocks_reset_state(p_hwfn, p_ptt);
6331 
6332 	status = ecore_protection_override_dump(p_hwfn, p_ptt, dump_buf, true, num_dumped_dwords);
6333 
6334 	/* Revert GRC params to their defaults */
6335 	ecore_dbg_grc_set_params_default(p_hwfn);
6336 
6337 	return status;
6338 }
6339 
6340 enum dbg_status ecore_dbg_fw_asserts_get_dump_buf_size(struct ecore_hwfn *p_hwfn,
6341 													   struct ecore_ptt *p_ptt,
6342 													   u32 *buf_size)
6343 {
6344 	enum dbg_status status = ecore_dbg_dev_init(p_hwfn, p_ptt);
6345 
6346 	*buf_size = 0;
6347 
6348 	if (status != DBG_STATUS_OK)
6349 		return status;
6350 
6351 	/* Update reset state */
6352 	ecore_update_blocks_reset_state(p_hwfn, p_ptt);
6353 
6354 	*buf_size = ecore_fw_asserts_dump(p_hwfn, p_ptt, OSAL_NULL, false);
6355 
6356 	return DBG_STATUS_OK;
6357 }
6358 
6359 enum dbg_status ecore_dbg_fw_asserts_dump(struct ecore_hwfn *p_hwfn,
6360 										  struct ecore_ptt *p_ptt,
6361 										  u32 *dump_buf,
6362 										  u32 buf_size_in_dwords,
6363 										  u32 *num_dumped_dwords)
6364 {
6365 	u32 needed_buf_size_in_dwords;
6366 	enum dbg_status status;
6367 
6368 	*num_dumped_dwords = 0;
6369 
6370 	status = ecore_dbg_fw_asserts_get_dump_buf_size(p_hwfn, p_ptt, &needed_buf_size_in_dwords);
6371 	if (status != DBG_STATUS_OK)
6372 		return status;
6373 
6374 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
6375 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
6376 
6377 	*num_dumped_dwords = ecore_fw_asserts_dump(p_hwfn, p_ptt, dump_buf, true);
6378 
6379 	/* Revert GRC params to their defaults */
6380 	ecore_dbg_grc_set_params_default(p_hwfn);
6381 
6382 	return DBG_STATUS_OK;
6383 }
6384 
6385 enum dbg_status ecore_dbg_read_attn(struct ecore_hwfn *p_hwfn,
6386 									struct ecore_ptt *p_ptt,
6387 									enum block_id block_id,
6388 									enum dbg_attn_type attn_type,
6389 									bool clear_status,
6390 									struct dbg_attn_block_result *results)
6391 {
6392 	enum dbg_status status = ecore_dbg_dev_init(p_hwfn, p_ptt);
6393 	u8 reg_idx, num_attn_regs, num_result_regs = 0;
6394 	const struct dbg_attn_reg *attn_reg_arr;
6395 
6396 	if (status != DBG_STATUS_OK)
6397 		return status;
6398 
6399 	if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr || !s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr || !s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)
6400 		return DBG_STATUS_DBG_ARRAY_NOT_SET;
6401 
6402 	attn_reg_arr = ecore_get_block_attn_regs(block_id, attn_type, &num_attn_regs);
6403 
6404 	for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
6405 		const struct dbg_attn_reg *reg_data = &attn_reg_arr[reg_idx];
6406 		struct dbg_attn_reg_result *reg_result;
6407 		u32 sts_addr, sts_val;
6408 		u16 modes_buf_offset;
6409 		bool eval_mode;
6410 
6411 		/* Check mode */
6412 		eval_mode = GET_FIELD(reg_data->mode.data, DBG_MODE_HDR_EVAL_MODE) > 0;
6413 		modes_buf_offset = GET_FIELD(reg_data->mode.data, DBG_MODE_HDR_MODES_BUF_OFFSET);
6414 		if (eval_mode && !ecore_is_mode_match(p_hwfn, &modes_buf_offset))
6415 			continue;
6416 
6417 		/* Mode match - read attention status register */
6418 		sts_addr = DWORDS_TO_BYTES(clear_status ? reg_data->sts_clr_address : GET_FIELD(reg_data->data, DBG_ATTN_REG_STS_ADDRESS));
6419 		sts_val = ecore_rd(p_hwfn, p_ptt, sts_addr);
6420 		if (!sts_val)
6421 			continue;
6422 
6423 		/* Non-zero attention status - add to results */
6424 		reg_result = &results->reg_results[num_result_regs];
6425 		SET_FIELD(reg_result->data, DBG_ATTN_REG_RESULT_STS_ADDRESS, sts_addr);
6426 		SET_FIELD(reg_result->data, DBG_ATTN_REG_RESULT_NUM_REG_ATTN, GET_FIELD(reg_data->data, DBG_ATTN_REG_NUM_REG_ATTN));
6427 		reg_result->block_attn_offset = reg_data->block_attn_offset;
6428 		reg_result->sts_val = sts_val;
6429 		reg_result->mask_val = ecore_rd(p_hwfn, p_ptt, DWORDS_TO_BYTES(reg_data->mask_address));
6430 		num_result_regs++;
6431 	}
6432 
6433 	results->block_id = (u8)block_id;
6434 	results->names_offset = ecore_get_block_attn_data(block_id, attn_type)->names_offset;
6435 	SET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_ATTN_TYPE, attn_type);
6436 	SET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_NUM_REGS, num_result_regs);
6437 
6438 	return DBG_STATUS_OK;
6439 }
6440 
6441 enum dbg_status ecore_dbg_print_attn(struct ecore_hwfn *p_hwfn,
6442 									 struct dbg_attn_block_result *results)
6443 {
6444 	enum dbg_attn_type attn_type;
6445 	u8 num_regs, i;
6446 
6447 	num_regs = GET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_NUM_REGS);
6448 	attn_type = (enum dbg_attn_type)GET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_ATTN_TYPE);
6449 
6450 	for (i = 0; i < num_regs; i++) {
6451 		struct dbg_attn_reg_result *reg_result;
6452 		const char *attn_type_str;
6453 		u32 sts_addr;
6454 
6455 		reg_result = &results->reg_results[i];
6456 		attn_type_str = (attn_type == ATTN_TYPE_INTERRUPT ? "interrupt" : "parity");
6457 		sts_addr = GET_FIELD(reg_result->data, DBG_ATTN_REG_RESULT_STS_ADDRESS);
6458 		DP_NOTICE(p_hwfn, false, "%s: address 0x%08x, status 0x%08x, mask 0x%08x\n", attn_type_str, sts_addr, reg_result->sts_val, reg_result->mask_val);
6459 	}
6460 
6461 	return DBG_STATUS_OK;
6462 }
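
/* Usage sketch (illustrative; function name and block choice are
 * hypothetical): reading a block's parity attention status without
 * clearing it, then logging any non-zero registers via
 * ecore_dbg_print_attn().
 */
static enum dbg_status example_log_parity_attn(struct ecore_hwfn *p_hwfn,
					       struct ecore_ptt *p_ptt)
{
	struct dbg_attn_block_result results;
	enum dbg_status status;

	status = ecore_dbg_read_attn(p_hwfn, p_ptt, BLOCK_BRB,
				     ATTN_TYPE_PARITY, false /* clear_status */,
				     &results);
	if (status != DBG_STATUS_OK)
		return status;

	return ecore_dbg_print_attn(p_hwfn, &results);
}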
6463 
6464 bool ecore_is_block_in_reset(struct ecore_hwfn *p_hwfn,
6465 							 struct ecore_ptt *p_ptt,
6466 							 enum block_id block_id)
6467 {
6468 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
6469 	struct block_defs *block = s_block_defs[block_id];
6470 	u32 reset_reg;
6471 
6472 	if (!block->has_reset_bit)
6473 		return false;
6474 
6475 	reset_reg = block->reset_reg;
6476 
6477 	return s_reset_regs_defs[reset_reg].exists[dev_data->chip_id] ?
5478 		!(ecore_rd(p_hwfn, p_ptt, s_reset_regs_defs[reset_reg].addr) & (1 << block->reset_bit_offset)) : true;
6479 }
6480