/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, v.1, (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2014-2017 Cavium, Inc.
 * The contents of this file are subject to the terms of the Common Development
 * and Distribution License, v.1, (the "License").
 *
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the License at
 * http://opensource.org/licenses/CDDL-1.0
 *
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "bcm_osal.h"
#include "ecore.h"
#include "ecore_hw.h"
#include "ecore_mcp.h"
#include "spad_layout.h"
#include "nvm_map.h"
#include "reg_addr.h"
#include "ecore_hsi_common.h"
#include "ecore_hsi_debug_tools.h"
#include "mcp_public.h"
46 #include "nvm_map.h"
#ifndef USE_DBG_BIN_FILE
#include "ecore_dbg_values.h"
#endif
#include "ecore_dbg_fw_funcs.h"

/* Memory groups enum */
enum mem_groups {
    MEM_GROUP_PXP_MEM,
    MEM_GROUP_DMAE_MEM,
    MEM_GROUP_CM_MEM,
    MEM_GROUP_QM_MEM,
    MEM_GROUP_TM_MEM,
    MEM_GROUP_BRB_RAM,
    MEM_GROUP_BRB_MEM,
    MEM_GROUP_PRS_MEM,
    MEM_GROUP_SDM_MEM,
    MEM_GROUP_IOR,
    MEM_GROUP_RAM,
    MEM_GROUP_BTB_RAM,
    MEM_GROUP_RDIF_CTX,
    MEM_GROUP_TDIF_CTX,
    MEM_GROUP_CFC_MEM,
    MEM_GROUP_CONN_CFC_MEM,
    MEM_GROUP_TASK_CFC_MEM,
    MEM_GROUP_CAU_PI,
    MEM_GROUP_CAU_MEM,
    MEM_GROUP_PXP_ILT,
    MEM_GROUP_PBUF,
    MEM_GROUP_MULD_MEM,
    MEM_GROUP_BTB_MEM,
    MEM_GROUP_IGU_MEM,
    MEM_GROUP_IGU_MSIX,
    MEM_GROUP_CAU_SB,
    MEM_GROUP_BMB_RAM,
    MEM_GROUP_BMB_MEM,
    MEM_GROUPS_NUM
};

/* Memory groups names */
static const char* s_mem_group_names[] = {
    "PXP_MEM",
    "DMAE_MEM",
    "CM_MEM",
    "QM_MEM",
    "TM_MEM",
    "BRB_RAM",
    "BRB_MEM",
    "PRS_MEM",
    "SDM_MEM",
    "IOR",
    "RAM",
    "BTB_RAM",
    "RDIF_CTX",
    "TDIF_CTX",
    "CFC_MEM",
    "CONN_CFC_MEM",
    "TASK_CFC_MEM",
    "CAU_PI",
    "CAU_MEM",
    "PXP_ILT",
    "PBUF",
    "MULD_MEM",
    "BTB_MEM",
    "IGU_MEM",
    "IGU_MSIX",
    "CAU_SB",
    "BMB_RAM",
    "BMB_MEM",
};
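
/*
 * Note (added): s_mem_group_names is indexed by enum mem_groups, so the two
 * lists must stay in the same order. A size check in the spirit of the
 * CHECK_ARR_SIZE() calls further below would catch a missing entry, e.g.:
 *
 *    CHECK_ARR_SIZE(s_mem_group_names, MEM_GROUPS_NUM);
 */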

/* Idle check conditions */

#ifndef __PREVENT_COND_ARR__

static u32 cond5(const u32 *r, const u32 *imm) {
    return (((r[0] & imm[0]) != imm[1]) && ((r[1] & imm[2]) != imm[3]));
}

static u32 cond7(const u32 *r, const u32 *imm) {
    return (((r[0] >> imm[0]) & imm[1]) != imm[2]);
}

static u32 cond14(const u32 *r, const u32 *imm) {
    return ((r[0] != imm[0]) && (((r[1] >> imm[1]) & imm[2]) == imm[3]));
}

static u32 cond6(const u32 *r, const u32 *imm) {
    return ((r[0] & imm[0]) != imm[1]);
}

static u32 cond9(const u32 *r, const u32 *imm) {
    return ((r[0] & imm[0]) >> imm[1]) != (((r[0] & imm[2]) >> imm[3]) | ((r[1] & imm[4]) << imm[5]));
}

static u32 cond10(const u32 *r, const u32 *imm) {
    return ((r[0] & imm[0]) >> imm[1]) != (r[0] & imm[2]);
}

static u32 cond4(const u32 *r, const u32 *imm) {
    return ((r[0] & ~imm[0]) != imm[1]);
}

static u32 cond0(const u32 *r, const u32 *imm) {
    return ((r[0] & ~r[1]) != imm[0]);
}

static u32 cond1(const u32 *r, const u32 *imm) {
    return (r[0] != imm[0]);
}

static u32 cond11(const u32 *r, const u32 *imm) {
    return (r[0] != r[1] && r[2] == imm[0]);
}

static u32 cond12(const u32 *r, const u32 *imm) {
    return (r[0] != r[1] && r[2] > imm[0]);
}

static u32 cond3(const u32 *r, const u32 *imm) {
    return (r[0] != r[1]);
}

static u32 cond13(const u32 *r, const u32 *imm) {
    return (r[0] & imm[0]);
}

static u32 cond8(const u32 *r, const u32 *imm) {
    return (r[0] < (r[1] - imm[0]));
}

static u32 cond2(const u32 *r, const u32 *imm) {
    return (r[0] > imm[0]);
}

/* Array of Idle Check conditions */
static u32 (*cond_arr[])(const u32 *r, const u32 *imm) = {
    cond0,
    cond1,
    cond2,
    cond3,
    cond4,
    cond5,
    cond6,
    cond7,
    cond8,
    cond9,
    cond10,
    cond11,
    cond12,
    cond13,
    cond14,
};

#endif /* __PREVENT_COND_ARR__ */
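
/*
 * Usage sketch (added, hedged): each idle-check rule carries a condition
 * index and is evaluated by dispatching through cond_arr with the register
 * values and immediates collected for that rule. The variable names below
 * are illustrative, not the actual parser code:
 *
 *    if (cond_arr[rule->cond_id](reg_values, imm_values))
 *        record_failed_rule_in_dump(rule);
 */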

/******************************* Data Types **********************************/

enum platform_ids {
    PLATFORM_ASIC,
    PLATFORM_EMUL_FULL,
    PLATFORM_EMUL_REDUCED,
    PLATFORM_FPGA,
    MAX_PLATFORM_IDS
};

struct chip_platform_defs {
    u8 num_ports;
    u8 num_pfs;
    u8 num_vfs;
};

/* Chip constant definitions */
struct chip_defs {
    const char *name;
    struct chip_platform_defs per_platform[MAX_PLATFORM_IDS];
};

/* Platform constant definitions */
struct platform_defs {
    const char *name;
    u32 delay_factor;
};

/* Storm constant definitions.
 * Addresses are in bytes, sizes are in quad-regs.
 */
struct storm_defs {
    char letter;
    enum block_id block_id;
    enum dbg_bus_clients dbg_client_id[MAX_CHIP_IDS];
    bool has_vfc;
    u32 sem_fast_mem_addr;
    u32 sem_frame_mode_addr;
    u32 sem_slow_enable_addr;
    u32 sem_slow_mode_addr;
    u32 sem_slow_mode1_conf_addr;
    u32 sem_sync_dbg_empty_addr;
    u32 sem_slow_dbg_empty_addr;
    u32 cm_ctx_wr_addr;
    u32 cm_conn_ag_ctx_lid_size;
    u32 cm_conn_ag_ctx_rd_addr;
    u32 cm_conn_st_ctx_lid_size;
    u32 cm_conn_st_ctx_rd_addr;
    u32 cm_task_ag_ctx_lid_size;
    u32 cm_task_ag_ctx_rd_addr;
    u32 cm_task_st_ctx_lid_size;
    u32 cm_task_st_ctx_rd_addr;
};

/* Block constant definitions */
struct block_defs {
    const char *name;
    bool has_dbg_bus[MAX_CHIP_IDS];
    bool associated_to_storm;

    /* Valid only if associated_to_storm is true */
    u32 storm_id;
    enum dbg_bus_clients dbg_client_id[MAX_CHIP_IDS];
    u32 dbg_select_addr;
    u32 dbg_enable_addr;
    u32 dbg_shift_addr;
    u32 dbg_force_valid_addr;
    u32 dbg_force_frame_addr;
    bool has_reset_bit;

    /* If true, block is taken out of reset before dump */
    bool unreset;
    enum dbg_reset_regs reset_reg;

    /* Bit offset in reset register */
    u8 reset_bit_offset;
};

/* Reset register definitions */
struct reset_reg_defs {
    u32 addr;
    u32 unreset_val;
    bool exists[MAX_CHIP_IDS];
};

/* Debug Bus Constraint operation constant definitions */
struct dbg_bus_constraint_op_defs {
    u8 hw_op_val;
    bool is_cyclic;
};

/* Storm Mode definitions */
struct storm_mode_defs {
    const char *name;
    bool is_fast_dbg;
    u8 id_in_hw;
};

struct grc_param_defs {
    u32 default_val[MAX_CHIP_IDS];
    u32 min;
    u32 max;
    bool is_preset;
    u32 exclude_all_preset_val;
    u32 crash_preset_val;
};

/* address is in 128b units. Width is in bits. */
struct rss_mem_defs {
    const char *mem_name;
    const char *type_name;
    u32 addr;
    u32 num_entries[MAX_CHIP_IDS];
    u32 entry_width[MAX_CHIP_IDS];
};

struct vfc_ram_defs {
    const char *mem_name;
    const char *type_name;
    u32 base_row;
    u32 num_rows;
};

struct big_ram_defs {
    const char *instance_name;
    enum mem_groups mem_group_id;
    enum mem_groups ram_mem_group_id;
    enum dbg_grc_params grc_param;
    u32 addr_reg_addr;
    u32 data_reg_addr;
    u32 num_of_blocks[MAX_CHIP_IDS];
};

struct phy_defs {
    const char *phy_name;

    /* PHY base GRC address */
    u32 base_addr;

    /* Relative address of indirect TBUS address register (bits 0..7) */
    u32 tbus_addr_lo_addr;

    /* Relative address of indirect TBUS address register (bits 8..10) */
    u32 tbus_addr_hi_addr;

    /* Relative address of indirect TBUS data register (bits 0..7) */
    u32 tbus_data_lo_addr;

    /* Relative address of indirect TBUS data register (bits 8..11) */
    u32 tbus_data_hi_addr;
};

/******************************** Constants **********************************/

#define MAX_LCIDS 320
#define MAX_LTIDS 320

#define NUM_IOR_SETS 2
#define IORS_PER_SET 176
#define IOR_SET_OFFSET(set_id) ((set_id) * 256)

#define BYTES_IN_DWORD sizeof(u32)

/* Cyclic right shift (rotation) of val, which is val_width bits wide */
#define SHR(val, val_width, amount) ((((val) | ((val) << (val_width))) >> (amount)) & ((1 << (val_width)) - 1))
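
/*
 * Worked example (added for illustration): with val_width = 4,
 * SHR(0b0011, 4, 1) first duplicates the value into 0b00110011, shifts
 * right by 1 to get 0b0011001, then masks with 0b1111, yielding 0b1001 -
 * i.e. the low bit rotated around to the top.
 */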

/* In the macros below, size and offset are specified in bits */
#define CEIL_DWORDS(size) DIV_ROUND_UP(size, 32)
#define FIELD_BIT_OFFSET(type, field) type##_##field##_##OFFSET
#define FIELD_BIT_SIZE(type, field) type##_##field##_##SIZE
#define FIELD_DWORD_OFFSET(type, field) (int)(FIELD_BIT_OFFSET(type, field) / 32)
#define FIELD_DWORD_SHIFT(type, field) (FIELD_BIT_OFFSET(type, field) % 32)
#define FIELD_BIT_MASK(type, field) (((1 << FIELD_BIT_SIZE(type, field)) - 1) << FIELD_DWORD_SHIFT(type, field))

#define SET_VAR_FIELD(var, type, field, val) do { var[FIELD_DWORD_OFFSET(type, field)] &= (~FIELD_BIT_MASK(type, field)); var[FIELD_DWORD_OFFSET(type, field)] |= (val) << FIELD_DWORD_SHIFT(type, field); } while (0)
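
/*
 * Usage sketch (added, hedged): given the VFC_CAM_CMD_* field definitions
 * below, setting the row field in a command buffer would look like:
 *
 *    u32 cam_cmd[VFC_CAM_CMD_DWORDS] = { 0 };
 *    SET_VAR_FIELD(cam_cmd, VFC_CAM_CMD, ROW, row_id);
 *
 * which clears bits 48..56 of the 64-bit command and writes row_id there.
 */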

#define ARR_REG_WR(dev, ptt, addr, arr, arr_size) for (i = 0; i < (arr_size); i++) ecore_wr(dev, ptt, addr, (arr)[i])

#define ARR_REG_RD(dev, ptt, addr, arr, arr_size) for (i = 0; i < (arr_size); i++) (arr)[i] = ecore_rd(dev, ptt, addr)
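
/*
 * Note (added): ARR_REG_WR/ARR_REG_RD deliberately use a loop variable
 * named i that the calling function is expected to declare; they are not
 * self-contained statements.
 */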

#define CHECK_ARR_SIZE(arr, size) OSAL_BUILD_BUG_ON(!(OSAL_ARRAY_SIZE(arr) == size))

#ifndef DWORDS_TO_BYTES
#define DWORDS_TO_BYTES(dwords) ((dwords) * BYTES_IN_DWORD)
#endif
#ifndef BYTES_TO_DWORDS
#define BYTES_TO_DWORDS(bytes) ((bytes) / BYTES_IN_DWORD)
#endif

/* extra lines include a signature line + optional latency events line */
#ifndef NUM_DBG_LINES
#define NUM_EXTRA_DBG_LINES(block_desc) (1 + (block_desc->has_latency_events ? 1 : 0))
#define NUM_DBG_LINES(block_desc) (block_desc->num_of_lines + NUM_EXTRA_DBG_LINES(block_desc))
#endif

#define RAM_LINES_TO_DWORDS(lines) ((lines) * 2)
#define RAM_LINES_TO_BYTES(lines) DWORDS_TO_BYTES(RAM_LINES_TO_DWORDS(lines))

#define REG_DUMP_LEN_SHIFT 24
#define MEM_DUMP_ENTRY_SIZE_DWORDS BYTES_TO_DWORDS(sizeof(struct dbg_dump_mem))

#define IDLE_CHK_RULE_SIZE_DWORDS BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_rule))

#define IDLE_CHK_RESULT_HDR_DWORDS BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_hdr))

#define IDLE_CHK_RESULT_REG_HDR_DWORDS BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_reg_hdr))

#define IDLE_CHK_MAX_ENTRIES_SIZE 32

/* The sizes and offsets below are specified in bits */
#define VFC_CAM_CMD_STRUCT_SIZE 64
#define VFC_CAM_CMD_ROW_OFFSET 48
#define VFC_CAM_CMD_ROW_SIZE 9
#define VFC_CAM_ADDR_STRUCT_SIZE 16
#define VFC_CAM_ADDR_OP_OFFSET 0
#define VFC_CAM_ADDR_OP_SIZE 4
#define VFC_CAM_RESP_STRUCT_SIZE 256
#define VFC_RAM_ADDR_STRUCT_SIZE 16
#define VFC_RAM_ADDR_OP_OFFSET 0
#define VFC_RAM_ADDR_OP_SIZE 2
#define VFC_RAM_ADDR_ROW_OFFSET 2
#define VFC_RAM_ADDR_ROW_SIZE 10
#define VFC_RAM_RESP_STRUCT_SIZE 256

#define VFC_CAM_CMD_DWORDS CEIL_DWORDS(VFC_CAM_CMD_STRUCT_SIZE)
#define VFC_CAM_ADDR_DWORDS CEIL_DWORDS(VFC_CAM_ADDR_STRUCT_SIZE)
#define VFC_CAM_RESP_DWORDS CEIL_DWORDS(VFC_CAM_RESP_STRUCT_SIZE)
#define VFC_RAM_CMD_DWORDS VFC_CAM_CMD_DWORDS
#define VFC_RAM_ADDR_DWORDS CEIL_DWORDS(VFC_RAM_ADDR_STRUCT_SIZE)
#define VFC_RAM_RESP_DWORDS CEIL_DWORDS(VFC_RAM_RESP_STRUCT_SIZE)

#define NUM_VFC_RAM_TYPES 4

#define VFC_CAM_NUM_ROWS 512

#define VFC_OPCODE_CAM_RD 14
#define VFC_OPCODE_RAM_RD 0

#define NUM_RSS_MEM_TYPES 5

#define NUM_BIG_RAM_TYPES 3
#define BIG_RAM_BLOCK_SIZE_BYTES 128
#define BIG_RAM_BLOCK_SIZE_DWORDS BYTES_TO_DWORDS(BIG_RAM_BLOCK_SIZE_BYTES)

#define NUM_PHY_TBUS_ADDRESSES 2048
#define PHY_DUMP_SIZE_DWORDS (NUM_PHY_TBUS_ADDRESSES / 2)
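
/*
 * Sizing note (added, hedged): each TBUS address yields one low data byte
 * (bits 0..7) and one high data nibble (bits 8..11) per the phy_defs
 * comments above, i.e. the recorded data for an address fits in 16 bits,
 * so two addresses pack into each dump dword - hence the divide by 2.
 */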

#define SEM_FAST_MODE6_SRC_ENABLE 0x10
#define SEM_FAST_MODE6_SRC_DISABLE 0x3f

#define SEM_SLOW_MODE1_DATA_ENABLE 0x1

#define VALUES_PER_CYCLE 4
#define MAX_CYCLE_VALUES_MASK ((1 << VALUES_PER_CYCLE) - 1)

#define MAX_DWORDS_PER_CYCLE 8

#define HW_ID_BITS 3

#define NUM_CALENDAR_SLOTS 16

#define MAX_TRIGGER_STATES 3
#define TRIGGER_SETS_PER_STATE 2
#define MAX_CONSTRAINTS 4

#define SEM_FILTER_CID_EN_MASK 0x008
#define SEM_FILTER_EID_MASK_EN_MASK 0x010
#define SEM_FILTER_EID_RANGE_EN_MASK 0x110

#define CHUNK_SIZE_IN_DWORDS 64
#define CHUNK_SIZE_IN_BYTES DWORDS_TO_BYTES(CHUNK_SIZE_IN_DWORDS)

#define INT_BUF_NUM_OF_LINES 192
#define INT_BUF_LINE_SIZE_IN_DWORDS 16
#define INT_BUF_SIZE_IN_DWORDS (INT_BUF_NUM_OF_LINES * INT_BUF_LINE_SIZE_IN_DWORDS)
#define INT_BUF_SIZE_IN_CHUNKS (INT_BUF_SIZE_IN_DWORDS / CHUNK_SIZE_IN_DWORDS)

#define PCI_BUF_LINE_SIZE_IN_DWORDS 8
#define PCI_BUF_LINE_SIZE_IN_BYTES DWORDS_TO_BYTES(PCI_BUF_LINE_SIZE_IN_DWORDS)

#define TARGET_EN_MASK_PCI 0x3
#define TARGET_EN_MASK_NIG 0x4

#define PCI_REQ_CREDIT 1
#define PCI_PHYS_ADDR_TYPE 0

#define OPAQUE_FID(pci_func) (((pci_func) << 4) | 0xff00)
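
/*
 * Example (added for illustration): OPAQUE_FID(2) == 0xff20 - the PCI
 * function id is placed in bits 4..7 and the upper byte is the fixed
 * 0xff marker.
 */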

#define RESET_REG_UNRESET_OFFSET 4

#define PCI_PKT_SIZE_IN_CHUNKS 1
#define PCI_PKT_SIZE_IN_BYTES (PCI_PKT_SIZE_IN_CHUNKS * CHUNK_SIZE_IN_BYTES)

#define NIG_PKT_SIZE_IN_CHUNKS 4

#define FLUSH_DELAY_MS 500
#define STALL_DELAY_MS 500

#define SRC_MAC_ADDR_LO16 0x0a0b
#define SRC_MAC_ADDR_HI32 0x0c0d0e0f
#define ETH_TYPE 0x1000

#define STATIC_DEBUG_LINE_DWORDS 9

#define NUM_COMMON_GLOBAL_PARAMS 8

#define FW_IMG_KUKU 0
#define FW_IMG_MAIN 1
#define FW_IMG_L2B 2

#ifndef REG_FIFO_ELEMENT_DWORDS
#define REG_FIFO_ELEMENT_DWORDS 2
#endif
#define REG_FIFO_DEPTH_ELEMENTS 32
#define REG_FIFO_DEPTH_DWORDS (REG_FIFO_ELEMENT_DWORDS * REG_FIFO_DEPTH_ELEMENTS)

#ifndef IGU_FIFO_ELEMENT_DWORDS
#define IGU_FIFO_ELEMENT_DWORDS 4
#endif
#define IGU_FIFO_DEPTH_ELEMENTS 64
#define IGU_FIFO_DEPTH_DWORDS (IGU_FIFO_ELEMENT_DWORDS * IGU_FIFO_DEPTH_ELEMENTS)

#define SEMI_SYNC_FIFO_POLLING_DELAY_MS 5
#define SEMI_SYNC_FIFO_POLLING_COUNT 20

#ifndef PROTECTION_OVERRIDE_ELEMENT_DWORDS
#define PROTECTION_OVERRIDE_ELEMENT_DWORDS 2
#endif
#define PROTECTION_OVERRIDE_DEPTH_ELEMENTS 20
#define PROTECTION_OVERRIDE_DEPTH_DWORDS (PROTECTION_OVERRIDE_DEPTH_ELEMENTS * PROTECTION_OVERRIDE_ELEMENT_DWORDS)

#define MCP_SPAD_TRACE_OFFSIZE_ADDR (MCP_REG_SCRATCH + offsetof(struct static_init, sections[SPAD_SECTION_TRACE]))

#define EMPTY_FW_VERSION_STR "???_???_???_???"
#define EMPTY_FW_IMAGE_STR "???????????????"


/***************************** Constant Arrays *******************************/

struct dbg_array {
    const u32 *ptr;
    u32 size_in_dwords;
};

/* Debug arrays */
#ifdef USE_DBG_BIN_FILE
static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { { OSAL_NULL } };
#else
static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = {

    /* BIN_BUF_DBG_MODE_TREE */
    { (u32*)dbg_modes_tree_buf, OSAL_ARRAY_SIZE(dbg_modes_tree_buf) },

    /* BIN_BUF_DBG_DUMP_REG */
    { dump_reg, OSAL_ARRAY_SIZE(dump_reg) },

    /* BIN_BUF_DBG_DUMP_MEM */
    { dump_mem, OSAL_ARRAY_SIZE(dump_mem) },

    /* BIN_BUF_DBG_IDLE_CHK_REGS */
    { idle_chk_regs, OSAL_ARRAY_SIZE(idle_chk_regs) },

    /* BIN_BUF_DBG_IDLE_CHK_IMMS */
    { idle_chk_imms, OSAL_ARRAY_SIZE(idle_chk_imms) },

    /* BIN_BUF_DBG_IDLE_CHK_RULES */
    { idle_chk_rules, OSAL_ARRAY_SIZE(idle_chk_rules) },

    /* BIN_BUF_DBG_IDLE_CHK_PARSING_DATA */
    { OSAL_NULL, 0 },

    /* BIN_BUF_DBG_ATTN_BLOCKS */
    { attn_block, OSAL_ARRAY_SIZE(attn_block) },

    /* BIN_BUF_DBG_ATTN_REGS */
    { attn_reg, OSAL_ARRAY_SIZE(attn_reg) },

    /* BIN_BUF_DBG_ATTN_INDEXES */
    { OSAL_NULL, 0 },

    /* BIN_BUF_DBG_ATTN_NAME_OFFSETS */
    { OSAL_NULL, 0 },

    /* BIN_BUF_DBG_BUS_BLOCKS */
    { dbg_bus_blocks, OSAL_ARRAY_SIZE(dbg_bus_blocks) },

    /* BIN_BUF_DBG_BUS_LINES */
    { dbg_bus_lines, OSAL_ARRAY_SIZE(dbg_bus_lines) },

    /* BIN_BUF_DBG_BUS_BLOCKS_USER_DATA */
    { OSAL_NULL, 0 },

    /* BIN_BUF_DBG_BUS_LINE_NAME_OFFSETS */
    { OSAL_NULL, 0 },

    /* BIN_BUF_DBG_PARSING_STRINGS */
    { OSAL_NULL, 0 }
};
#endif

/* Chip constant definitions array */
static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = {
    { "bb",

      /* ASIC */
      { { MAX_NUM_PORTS_BB, MAX_NUM_PFS_BB, MAX_NUM_VFS_BB },

        /* EMUL_FULL */
        { MAX_NUM_PORTS_BB, MAX_NUM_PFS_BB, MAX_NUM_VFS_BB },

        /* EMUL_REDUCED */
        { MAX_NUM_PORTS_BB, MAX_NUM_PFS_BB, MAX_NUM_VFS_BB },

        /* FPGA */
        { MAX_NUM_PORTS_BB, MAX_NUM_PFS_BB, MAX_NUM_VFS_BB } } },

    { "ah",

      /* ASIC */
      { { MAX_NUM_PORTS_K2, MAX_NUM_PFS_K2, MAX_NUM_VFS_K2 },

        /* EMUL_FULL */
        { MAX_NUM_PORTS_K2, MAX_NUM_PFS_K2, MAX_NUM_VFS_K2 },

        /* EMUL_REDUCED */
        { MAX_NUM_PORTS_K2, MAX_NUM_PFS_K2, MAX_NUM_VFS_K2 },

        /* FPGA */
        { MAX_NUM_PORTS_K2, 8, MAX_NUM_VFS_K2 } } }
};

/* Storm constant definitions array */
static struct storm_defs s_storm_defs[] = {

    /* Tstorm */
    { 'T', BLOCK_TSEM,
      { DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT }, true,
      TSEM_REG_FAST_MEMORY,
      TSEM_REG_DBG_FRAME_MODE_BB_K2, TSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
      TSEM_REG_SLOW_DBG_MODE_BB_K2, TSEM_REG_DBG_MODE1_CFG_BB_K2,
      TSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_SLOW_DBG_EMPTY_BB_K2,
      TCM_REG_CTX_RBC_ACCS,
      4, TCM_REG_AGG_CON_CTX,
      16, TCM_REG_SM_CON_CTX,
      2, TCM_REG_AGG_TASK_CTX,
      4, TCM_REG_SM_TASK_CTX },

    /* Mstorm */
    { 'M', BLOCK_MSEM,
      { DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM }, false,
      MSEM_REG_FAST_MEMORY,
      MSEM_REG_DBG_FRAME_MODE_BB_K2, MSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
      MSEM_REG_SLOW_DBG_MODE_BB_K2, MSEM_REG_DBG_MODE1_CFG_BB_K2,
      MSEM_REG_SYNC_DBG_EMPTY, MSEM_REG_SLOW_DBG_EMPTY_BB_K2,
      MCM_REG_CTX_RBC_ACCS,
      1, MCM_REG_AGG_CON_CTX,
      10, MCM_REG_SM_CON_CTX,
      2, MCM_REG_AGG_TASK_CTX,
      7, MCM_REG_SM_TASK_CTX },

    /* Ustorm */
    { 'U', BLOCK_USEM,
      { DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU }, false,
      USEM_REG_FAST_MEMORY,
      USEM_REG_DBG_FRAME_MODE_BB_K2, USEM_REG_SLOW_DBG_ACTIVE_BB_K2,
      USEM_REG_SLOW_DBG_MODE_BB_K2, USEM_REG_DBG_MODE1_CFG_BB_K2,
      USEM_REG_SYNC_DBG_EMPTY, USEM_REG_SLOW_DBG_EMPTY_BB_K2,
      UCM_REG_CTX_RBC_ACCS,
      2, UCM_REG_AGG_CON_CTX,
      13, UCM_REG_SM_CON_CTX,
      3, UCM_REG_AGG_TASK_CTX,
      3, UCM_REG_SM_TASK_CTX },

    /* Xstorm */
    { 'X', BLOCK_XSEM,
      { DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX }, false,
      XSEM_REG_FAST_MEMORY,
      XSEM_REG_DBG_FRAME_MODE_BB_K2, XSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
      XSEM_REG_SLOW_DBG_MODE_BB_K2, XSEM_REG_DBG_MODE1_CFG_BB_K2,
      XSEM_REG_SYNC_DBG_EMPTY, XSEM_REG_SLOW_DBG_EMPTY_BB_K2,
      XCM_REG_CTX_RBC_ACCS,
      9, XCM_REG_AGG_CON_CTX,
      15, XCM_REG_SM_CON_CTX,
      0, 0,
      0, 0 },

    /* Ystorm */
    { 'Y', BLOCK_YSEM,
      { DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY }, false,
      YSEM_REG_FAST_MEMORY,
      YSEM_REG_DBG_FRAME_MODE_BB_K2, YSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
      YSEM_REG_SLOW_DBG_MODE_BB_K2, YSEM_REG_DBG_MODE1_CFG_BB_K2,
      YSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_SLOW_DBG_EMPTY_BB_K2,
      YCM_REG_CTX_RBC_ACCS,
      2, YCM_REG_AGG_CON_CTX,
      3, YCM_REG_SM_CON_CTX,
      2, YCM_REG_AGG_TASK_CTX,
      12, YCM_REG_SM_TASK_CTX },

    /* Pstorm */
    { 'P', BLOCK_PSEM,
      { DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS }, true,
      PSEM_REG_FAST_MEMORY,
      PSEM_REG_DBG_FRAME_MODE_BB_K2, PSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
      PSEM_REG_SLOW_DBG_MODE_BB_K2, PSEM_REG_DBG_MODE1_CFG_BB_K2,
      PSEM_REG_SYNC_DBG_EMPTY, PSEM_REG_SLOW_DBG_EMPTY_BB_K2,
      PCM_REG_CTX_RBC_ACCS,
      0, 0,
      10, PCM_REG_SM_CON_CTX,
      0, 0,
      0, 0 }
};
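
/*
 * Note (added): the entries above are indexed by the storm id enum
 * (DBG_TSTORM_ID, DBG_MSTORM_ID, ...), so their order must match that
 * enum; the CHECK_ARR_SIZE(s_storm_defs, MAX_DBG_STORMS) assertion below
 * guards only the count, not the ordering.
 */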

/* Block definitions array */

static struct block_defs block_grc_defs = {
    "grc", { true, true }, false, 0,
    { DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN },
    GRC_REG_DBG_SELECT, GRC_REG_DBG_DWORD_ENABLE,
    GRC_REG_DBG_SHIFT, GRC_REG_DBG_FORCE_VALID,
    GRC_REG_DBG_FORCE_FRAME,
    true, false, DBG_RESET_REG_MISC_PL_UA, 1 };

static struct block_defs block_miscs_defs = {
    "miscs", { false, false }, false, 0,
    { MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
    0, 0, 0, 0, 0,
    false, false, MAX_DBG_RESET_REGS, 0 };

static struct block_defs block_misc_defs = {
    "misc", { false, false }, false, 0,
    { MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
    0, 0, 0, 0, 0,
    false, false, MAX_DBG_RESET_REGS, 0 };

static struct block_defs block_dbu_defs = {
    "dbu", { false, false }, false, 0,
    { MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
    0, 0, 0, 0, 0,
    false, false, MAX_DBG_RESET_REGS, 0 };

static struct block_defs block_pglue_b_defs = {
    "pglue_b", { true, true }, false, 0,
    { DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH },
    PGLUE_B_REG_DBG_SELECT, PGLUE_B_REG_DBG_DWORD_ENABLE,
    PGLUE_B_REG_DBG_SHIFT, PGLUE_B_REG_DBG_FORCE_VALID,
    PGLUE_B_REG_DBG_FORCE_FRAME,
    true, false, DBG_RESET_REG_MISCS_PL_HV, 1 };

static struct block_defs block_cnig_defs = {
    "cnig", { false, true }, false, 0,
    { MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW },
    CNIG_REG_DBG_SELECT_K2_E5, CNIG_REG_DBG_DWORD_ENABLE_K2_E5,
    CNIG_REG_DBG_SHIFT_K2_E5, CNIG_REG_DBG_FORCE_VALID_K2_E5,
    CNIG_REG_DBG_FORCE_FRAME_K2_E5,
    true, false, DBG_RESET_REG_MISCS_PL_HV, 0 };

static struct block_defs block_cpmu_defs = {
    "cpmu", { false, false }, false, 0,
    { MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
    0, 0, 0, 0, 0,
    true, false, DBG_RESET_REG_MISCS_PL_HV, 8 };

static struct block_defs block_ncsi_defs = {
    "ncsi", { true, true }, false, 0,
    { DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ },
    NCSI_REG_DBG_SELECT, NCSI_REG_DBG_DWORD_ENABLE,
    NCSI_REG_DBG_SHIFT, NCSI_REG_DBG_FORCE_VALID,
    NCSI_REG_DBG_FORCE_FRAME,
    true, false, DBG_RESET_REG_MISCS_PL_HV, 5 };

static struct block_defs block_opte_defs = {
    "opte", { false, false }, false, 0,
    { MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
    0, 0, 0, 0, 0,
    true, false, DBG_RESET_REG_MISCS_PL_HV, 4 };

static struct block_defs block_bmb_defs = {
    "bmb", { true, true }, false, 0,
    { DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCB },
    BMB_REG_DBG_SELECT, BMB_REG_DBG_DWORD_ENABLE,
    BMB_REG_DBG_SHIFT, BMB_REG_DBG_FORCE_VALID,
    BMB_REG_DBG_FORCE_FRAME,
    true, false, DBG_RESET_REG_MISCS_PL_UA, 7 };

static struct block_defs block_pcie_defs = {
    "pcie", { false, true }, false, 0,
    { MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH },
    PCIE_REG_DBG_COMMON_SELECT_K2_E5, PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2_E5,
    PCIE_REG_DBG_COMMON_SHIFT_K2_E5, PCIE_REG_DBG_COMMON_FORCE_VALID_K2_E5,
    PCIE_REG_DBG_COMMON_FORCE_FRAME_K2_E5,
    false, false, MAX_DBG_RESET_REGS, 0 };

static struct block_defs block_mcp_defs = {
    "mcp", { false, false }, false, 0,
    { MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
    0, 0, 0, 0, 0,
    false, false, MAX_DBG_RESET_REGS, 0 };

static struct block_defs block_mcp2_defs = {
    "mcp2", { true, true }, false, 0,
    { DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ },
    MCP2_REG_DBG_SELECT, MCP2_REG_DBG_DWORD_ENABLE,
    MCP2_REG_DBG_SHIFT, MCP2_REG_DBG_FORCE_VALID,
    MCP2_REG_DBG_FORCE_FRAME,
    false, false, MAX_DBG_RESET_REGS, 0 };

static struct block_defs block_pswhst_defs = {
    "pswhst", { true, true }, false, 0,
    { DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP },
    PSWHST_REG_DBG_SELECT, PSWHST_REG_DBG_DWORD_ENABLE,
    PSWHST_REG_DBG_SHIFT, PSWHST_REG_DBG_FORCE_VALID,
    PSWHST_REG_DBG_FORCE_FRAME,
    true, false, DBG_RESET_REG_MISC_PL_HV, 0 };

static struct block_defs block_pswhst2_defs = {
    "pswhst2", { true, true }, false, 0,
    { DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP },
    PSWHST2_REG_DBG_SELECT, PSWHST2_REG_DBG_DWORD_ENABLE,
    PSWHST2_REG_DBG_SHIFT, PSWHST2_REG_DBG_FORCE_VALID,
    PSWHST2_REG_DBG_FORCE_FRAME,
    true, false, DBG_RESET_REG_MISC_PL_HV, 0 };

static struct block_defs block_pswrd_defs = {
    "pswrd", { true, true }, false, 0,
    { DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP },
    PSWRD_REG_DBG_SELECT, PSWRD_REG_DBG_DWORD_ENABLE,
    PSWRD_REG_DBG_SHIFT, PSWRD_REG_DBG_FORCE_VALID,
    PSWRD_REG_DBG_FORCE_FRAME,
    true, false, DBG_RESET_REG_MISC_PL_HV, 2 };

static struct block_defs block_pswrd2_defs = {
    "pswrd2", { true, true }, false, 0,
    { DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP },
    PSWRD2_REG_DBG_SELECT, PSWRD2_REG_DBG_DWORD_ENABLE,
    PSWRD2_REG_DBG_SHIFT, PSWRD2_REG_DBG_FORCE_VALID,
    PSWRD2_REG_DBG_FORCE_FRAME,
    true, false, DBG_RESET_REG_MISC_PL_HV, 2 };

static struct block_defs block_pswwr_defs = {
    "pswwr", { true, true }, false, 0,
    { DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP },
    PSWWR_REG_DBG_SELECT, PSWWR_REG_DBG_DWORD_ENABLE,
    PSWWR_REG_DBG_SHIFT, PSWWR_REG_DBG_FORCE_VALID,
    PSWWR_REG_DBG_FORCE_FRAME,
    true, false, DBG_RESET_REG_MISC_PL_HV, 3 };

static struct block_defs block_pswwr2_defs = {
    "pswwr2", { false, false }, false, 0,
    { MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
    0, 0, 0, 0, 0,
    true, false, DBG_RESET_REG_MISC_PL_HV, 3 };

static struct block_defs block_pswrq_defs = {
    "pswrq", { true, true }, false, 0,
    { DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP },
    PSWRQ_REG_DBG_SELECT, PSWRQ_REG_DBG_DWORD_ENABLE,
    PSWRQ_REG_DBG_SHIFT, PSWRQ_REG_DBG_FORCE_VALID,
    PSWRQ_REG_DBG_FORCE_FRAME,
    true, false, DBG_RESET_REG_MISC_PL_HV, 1 };

static struct block_defs block_pswrq2_defs = {
    "pswrq2", { true, true }, false, 0,
    { DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP },
    PSWRQ2_REG_DBG_SELECT, PSWRQ2_REG_DBG_DWORD_ENABLE,
    PSWRQ2_REG_DBG_SHIFT, PSWRQ2_REG_DBG_FORCE_VALID,
    PSWRQ2_REG_DBG_FORCE_FRAME,
    true, false, DBG_RESET_REG_MISC_PL_HV, 1 };

static struct block_defs block_pglcs_defs = {
    "pglcs", { false, true }, false, 0,
    { MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH },
    PGLCS_REG_DBG_SELECT_K2_E5, PGLCS_REG_DBG_DWORD_ENABLE_K2_E5,
    PGLCS_REG_DBG_SHIFT_K2_E5, PGLCS_REG_DBG_FORCE_VALID_K2_E5,
    PGLCS_REG_DBG_FORCE_FRAME_K2_E5,
    true, false, DBG_RESET_REG_MISCS_PL_HV, 2 };

static struct block_defs block_ptu_defs = {
    "ptu", { true, true }, false, 0,
    { DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP },
    PTU_REG_DBG_SELECT, PTU_REG_DBG_DWORD_ENABLE,
    PTU_REG_DBG_SHIFT, PTU_REG_DBG_FORCE_VALID,
    PTU_REG_DBG_FORCE_FRAME,
    true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 20 };

static struct block_defs block_dmae_defs = {
    "dmae", { true, true }, false, 0,
    { DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP },
    DMAE_REG_DBG_SELECT, DMAE_REG_DBG_DWORD_ENABLE,
    DMAE_REG_DBG_SHIFT, DMAE_REG_DBG_FORCE_VALID,
    DMAE_REG_DBG_FORCE_FRAME,
    true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 28 };

static struct block_defs block_tcm_defs = {
    "tcm", { true, true }, true, DBG_TSTORM_ID,
    { DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT },
    TCM_REG_DBG_SELECT, TCM_REG_DBG_DWORD_ENABLE,
    TCM_REG_DBG_SHIFT, TCM_REG_DBG_FORCE_VALID,
    TCM_REG_DBG_FORCE_FRAME,
    true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 5 };

static struct block_defs block_mcm_defs = {
    "mcm", { true, true }, true, DBG_MSTORM_ID,
    { DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM },
    MCM_REG_DBG_SELECT, MCM_REG_DBG_DWORD_ENABLE,
    MCM_REG_DBG_SHIFT, MCM_REG_DBG_FORCE_VALID,
    MCM_REG_DBG_FORCE_FRAME,
    true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 3 };

static struct block_defs block_ucm_defs = {
    "ucm", { true, true }, true, DBG_USTORM_ID,
    { DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU },
    UCM_REG_DBG_SELECT, UCM_REG_DBG_DWORD_ENABLE,
    UCM_REG_DBG_SHIFT, UCM_REG_DBG_FORCE_VALID,
    UCM_REG_DBG_FORCE_FRAME,
    true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 8 };

static struct block_defs block_xcm_defs = {
    "xcm", { true, true }, true, DBG_XSTORM_ID,
    { DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX },
    XCM_REG_DBG_SELECT, XCM_REG_DBG_DWORD_ENABLE,
    XCM_REG_DBG_SHIFT, XCM_REG_DBG_FORCE_VALID,
    XCM_REG_DBG_FORCE_FRAME,
    true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 19 };

static struct block_defs block_ycm_defs = {
    "ycm", { true, true }, true, DBG_YSTORM_ID,
    { DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY },
    YCM_REG_DBG_SELECT, YCM_REG_DBG_DWORD_ENABLE,
    YCM_REG_DBG_SHIFT, YCM_REG_DBG_FORCE_VALID,
    YCM_REG_DBG_FORCE_FRAME,
    true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 5 };

static struct block_defs block_pcm_defs = {
    "pcm", { true, true }, true, DBG_PSTORM_ID,
    { DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS },
    PCM_REG_DBG_SELECT, PCM_REG_DBG_DWORD_ENABLE,
    PCM_REG_DBG_SHIFT, PCM_REG_DBG_FORCE_VALID,
    PCM_REG_DBG_FORCE_FRAME,
    true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 4 };

static struct block_defs block_qm_defs = {
    "qm", { true, true }, false, 0,
    { DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCQ },
    QM_REG_DBG_SELECT, QM_REG_DBG_DWORD_ENABLE,
    QM_REG_DBG_SHIFT, QM_REG_DBG_FORCE_VALID,
    QM_REG_DBG_FORCE_FRAME,
    true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 16 };

static struct block_defs block_tm_defs = {
    "tm", { true, true }, false, 0,
    { DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS },
    TM_REG_DBG_SELECT, TM_REG_DBG_DWORD_ENABLE,
    TM_REG_DBG_SHIFT, TM_REG_DBG_FORCE_VALID,
    TM_REG_DBG_FORCE_FRAME,
    true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 17 };

static struct block_defs block_dorq_defs = {
    "dorq", { true, true }, false, 0,
    { DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY },
    DORQ_REG_DBG_SELECT, DORQ_REG_DBG_DWORD_ENABLE,
    DORQ_REG_DBG_SHIFT, DORQ_REG_DBG_FORCE_VALID,
    DORQ_REG_DBG_FORCE_FRAME,
    true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 18 };

static struct block_defs block_brb_defs = {
    "brb", { true, true }, false, 0,
    { DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR },
    BRB_REG_DBG_SELECT, BRB_REG_DBG_DWORD_ENABLE,
    BRB_REG_DBG_SHIFT, BRB_REG_DBG_FORCE_VALID,
    BRB_REG_DBG_FORCE_FRAME,
    true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 0 };

static struct block_defs block_src_defs = {
    "src", { true, true }, false, 0,
    { DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF },
    SRC_REG_DBG_SELECT, SRC_REG_DBG_DWORD_ENABLE,
    SRC_REG_DBG_SHIFT, SRC_REG_DBG_FORCE_VALID,
    SRC_REG_DBG_FORCE_FRAME,
    true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 2 };

static struct block_defs block_prs_defs = {
    "prs", { true, true }, false, 0,
    { DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR },
    PRS_REG_DBG_SELECT, PRS_REG_DBG_DWORD_ENABLE,
    PRS_REG_DBG_SHIFT, PRS_REG_DBG_FORCE_VALID,
    PRS_REG_DBG_FORCE_FRAME,
    true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 1 };

static struct block_defs block_tsdm_defs = {
    "tsdm", { true, true }, true, DBG_TSTORM_ID,
    { DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT },
    TSDM_REG_DBG_SELECT, TSDM_REG_DBG_DWORD_ENABLE,
    TSDM_REG_DBG_SHIFT, TSDM_REG_DBG_FORCE_VALID,
    TSDM_REG_DBG_FORCE_FRAME,
    true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 3 };

static struct block_defs block_msdm_defs = {
    "msdm", { true, true }, true, DBG_MSTORM_ID,
    { DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM },
    MSDM_REG_DBG_SELECT, MSDM_REG_DBG_DWORD_ENABLE,
    MSDM_REG_DBG_SHIFT, MSDM_REG_DBG_FORCE_VALID,
    MSDM_REG_DBG_FORCE_FRAME,
    true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 6 };

static struct block_defs block_usdm_defs = {
    "usdm", { true, true }, true, DBG_USTORM_ID,
    { DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU },
    USDM_REG_DBG_SELECT, USDM_REG_DBG_DWORD_ENABLE,
    USDM_REG_DBG_SHIFT, USDM_REG_DBG_FORCE_VALID,
    USDM_REG_DBG_FORCE_FRAME,
    true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 7 };

static struct block_defs block_xsdm_defs = {
    "xsdm", { true, true }, true, DBG_XSTORM_ID,
    { DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX },
    XSDM_REG_DBG_SELECT, XSDM_REG_DBG_DWORD_ENABLE,
    XSDM_REG_DBG_SHIFT, XSDM_REG_DBG_FORCE_VALID,
    XSDM_REG_DBG_FORCE_FRAME,
    true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 20 };

static struct block_defs block_ysdm_defs = {
    "ysdm", { true, true }, true, DBG_YSTORM_ID,
    { DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY },
    YSDM_REG_DBG_SELECT, YSDM_REG_DBG_DWORD_ENABLE,
    YSDM_REG_DBG_SHIFT, YSDM_REG_DBG_FORCE_VALID,
    YSDM_REG_DBG_FORCE_FRAME,
    true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 8 };

static struct block_defs block_psdm_defs = {
    "psdm", { true, true }, true, DBG_PSTORM_ID,
    { DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS },
    PSDM_REG_DBG_SELECT, PSDM_REG_DBG_DWORD_ENABLE,
    PSDM_REG_DBG_SHIFT, PSDM_REG_DBG_FORCE_VALID,
    PSDM_REG_DBG_FORCE_FRAME,
    true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 7 };

static struct block_defs block_tsem_defs = {
    "tsem", { true, true }, true, DBG_TSTORM_ID,
    { DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT },
    TSEM_REG_DBG_SELECT, TSEM_REG_DBG_DWORD_ENABLE,
    TSEM_REG_DBG_SHIFT, TSEM_REG_DBG_FORCE_VALID,
    TSEM_REG_DBG_FORCE_FRAME,
    true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 4 };

static struct block_defs block_msem_defs = {
    "msem", { true, true }, true, DBG_MSTORM_ID,
    { DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM },
    MSEM_REG_DBG_SELECT, MSEM_REG_DBG_DWORD_ENABLE,
    MSEM_REG_DBG_SHIFT, MSEM_REG_DBG_FORCE_VALID,
    MSEM_REG_DBG_FORCE_FRAME,
    true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 9 };

static struct block_defs block_usem_defs = {
    "usem", { true, true }, true, DBG_USTORM_ID,
    { DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU },
    USEM_REG_DBG_SELECT, USEM_REG_DBG_DWORD_ENABLE,
    USEM_REG_DBG_SHIFT, USEM_REG_DBG_FORCE_VALID,
    USEM_REG_DBG_FORCE_FRAME,
    true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 9 };

static struct block_defs block_xsem_defs = {
    "xsem", { true, true }, true, DBG_XSTORM_ID,
    { DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX },
    XSEM_REG_DBG_SELECT, XSEM_REG_DBG_DWORD_ENABLE,
    XSEM_REG_DBG_SHIFT, XSEM_REG_DBG_FORCE_VALID,
    XSEM_REG_DBG_FORCE_FRAME,
    true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 21 };

static struct block_defs block_ysem_defs = {
    "ysem", { true, true }, true, DBG_YSTORM_ID,
    { DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY },
    YSEM_REG_DBG_SELECT, YSEM_REG_DBG_DWORD_ENABLE,
    YSEM_REG_DBG_SHIFT, YSEM_REG_DBG_FORCE_VALID,
    YSEM_REG_DBG_FORCE_FRAME,
    true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 11 };

static struct block_defs block_psem_defs = {
    "psem", { true, true }, true, DBG_PSTORM_ID,
    { DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS },
    PSEM_REG_DBG_SELECT, PSEM_REG_DBG_DWORD_ENABLE,
    PSEM_REG_DBG_SHIFT, PSEM_REG_DBG_FORCE_VALID,
    PSEM_REG_DBG_FORCE_FRAME,
    true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 10 };

static struct block_defs block_rss_defs = {
    "rss", { true, true }, false, 0,
    { DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT },
    RSS_REG_DBG_SELECT, RSS_REG_DBG_DWORD_ENABLE,
    RSS_REG_DBG_SHIFT, RSS_REG_DBG_FORCE_VALID,
    RSS_REG_DBG_FORCE_FRAME,
    true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 18 };

static struct block_defs block_tmld_defs = {
    "tmld", { true, true }, false, 0,
    { DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM },
    TMLD_REG_DBG_SELECT, TMLD_REG_DBG_DWORD_ENABLE,
    TMLD_REG_DBG_SHIFT, TMLD_REG_DBG_FORCE_VALID,
    TMLD_REG_DBG_FORCE_FRAME,
    true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 13 };

static struct block_defs block_muld_defs = {
    "muld", { true, true }, false, 0,
    { DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU },
    MULD_REG_DBG_SELECT, MULD_REG_DBG_DWORD_ENABLE,
    MULD_REG_DBG_SHIFT, MULD_REG_DBG_FORCE_VALID,
    MULD_REG_DBG_FORCE_FRAME,
    true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 14 };

static struct block_defs block_yuld_defs = {
    "yuld", { true, true }, false, 0,
    { DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU },
    YULD_REG_DBG_SELECT_BB_K2, YULD_REG_DBG_DWORD_ENABLE_BB_K2,
    YULD_REG_DBG_SHIFT_BB_K2, YULD_REG_DBG_FORCE_VALID_BB_K2,
    YULD_REG_DBG_FORCE_FRAME_BB_K2,
    true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 15 };

static struct block_defs block_xyld_defs = {
    "xyld", { true, true }, false, 0,
    { DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX },
    XYLD_REG_DBG_SELECT, XYLD_REG_DBG_DWORD_ENABLE,
    XYLD_REG_DBG_SHIFT, XYLD_REG_DBG_FORCE_VALID,
    XYLD_REG_DBG_FORCE_FRAME,
    true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 12 };

static struct block_defs block_prm_defs = {
    "prm", { true, true }, false, 0,
    { DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM },
    PRM_REG_DBG_SELECT, PRM_REG_DBG_DWORD_ENABLE,
    PRM_REG_DBG_SHIFT, PRM_REG_DBG_FORCE_VALID,
    PRM_REG_DBG_FORCE_FRAME,
    true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 21 };

static struct block_defs block_pbf_pb1_defs = {
    "pbf_pb1", { true, true }, false, 0,
    { DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV },
    PBF_PB1_REG_DBG_SELECT, PBF_PB1_REG_DBG_DWORD_ENABLE,
    PBF_PB1_REG_DBG_SHIFT, PBF_PB1_REG_DBG_FORCE_VALID,
    PBF_PB1_REG_DBG_FORCE_FRAME,
    true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 11 };

static struct block_defs block_pbf_pb2_defs = {
    "pbf_pb2", { true, true }, false, 0,
    { DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV },
    PBF_PB2_REG_DBG_SELECT, PBF_PB2_REG_DBG_DWORD_ENABLE,
    PBF_PB2_REG_DBG_SHIFT, PBF_PB2_REG_DBG_FORCE_VALID,
    PBF_PB2_REG_DBG_FORCE_FRAME,
    true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 12 };

static struct block_defs block_rpb_defs = {
    "rpb", { true, true }, false, 0,
    { DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM },
    RPB_REG_DBG_SELECT, RPB_REG_DBG_DWORD_ENABLE,
    RPB_REG_DBG_SHIFT, RPB_REG_DBG_FORCE_VALID,
    RPB_REG_DBG_FORCE_FRAME,
    true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 13 };

static struct block_defs block_btb_defs = {
    "btb", { true, true }, false, 0,
    { DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCV },
    BTB_REG_DBG_SELECT, BTB_REG_DBG_DWORD_ENABLE,
    BTB_REG_DBG_SHIFT, BTB_REG_DBG_FORCE_VALID,
    BTB_REG_DBG_FORCE_FRAME,
    true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 10 };

static struct block_defs block_pbf_defs = {
    "pbf", { true, true }, false, 0,
    { DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV },
    PBF_REG_DBG_SELECT, PBF_REG_DBG_DWORD_ENABLE,
    PBF_REG_DBG_SHIFT, PBF_REG_DBG_FORCE_VALID,
    PBF_REG_DBG_FORCE_FRAME,
    true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 15 };

static struct block_defs block_rdif_defs = {
    "rdif", { true, true }, false, 0,
    { DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM },
    RDIF_REG_DBG_SELECT, RDIF_REG_DBG_DWORD_ENABLE,
    RDIF_REG_DBG_SHIFT, RDIF_REG_DBG_FORCE_VALID,
    RDIF_REG_DBG_FORCE_FRAME,
    true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 16 };

static struct block_defs block_tdif_defs = {
    "tdif", { true, true }, false, 0,
    { DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS },
    TDIF_REG_DBG_SELECT, TDIF_REG_DBG_DWORD_ENABLE,
    TDIF_REG_DBG_SHIFT, TDIF_REG_DBG_FORCE_VALID,
    TDIF_REG_DBG_FORCE_FRAME,
    true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 17 };

static struct block_defs block_cdu_defs = {
    "cdu", { true, true }, false, 0,
    { DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF },
    CDU_REG_DBG_SELECT, CDU_REG_DBG_DWORD_ENABLE,
    CDU_REG_DBG_SHIFT, CDU_REG_DBG_FORCE_VALID,
    CDU_REG_DBG_FORCE_FRAME,
    true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 23 };

static struct block_defs block_ccfc_defs = {
    "ccfc", { true, true }, false, 0,
    { DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF },
    CCFC_REG_DBG_SELECT, CCFC_REG_DBG_DWORD_ENABLE,
    CCFC_REG_DBG_SHIFT, CCFC_REG_DBG_FORCE_VALID,
    CCFC_REG_DBG_FORCE_FRAME,
    true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 24 };

static struct block_defs block_tcfc_defs = {
    "tcfc", { true, true }, false, 0,
    { DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF },
    TCFC_REG_DBG_SELECT, TCFC_REG_DBG_DWORD_ENABLE,
    TCFC_REG_DBG_SHIFT, TCFC_REG_DBG_FORCE_VALID,
    TCFC_REG_DBG_FORCE_FRAME,
    true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 25 };

static struct block_defs block_igu_defs = {
    "igu", { true, true }, false, 0,
    { DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP },
    IGU_REG_DBG_SELECT, IGU_REG_DBG_DWORD_ENABLE,
    IGU_REG_DBG_SHIFT, IGU_REG_DBG_FORCE_VALID,
    IGU_REG_DBG_FORCE_FRAME,
    true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 27 };

static struct block_defs block_cau_defs = {
    "cau", { true, true }, false, 0,
    { DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP },
    CAU_REG_DBG_SELECT, CAU_REG_DBG_DWORD_ENABLE,
    CAU_REG_DBG_SHIFT, CAU_REG_DBG_FORCE_VALID,
    CAU_REG_DBG_FORCE_FRAME,
    true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 19 };

static struct block_defs block_umac_defs = {
    "umac", { false, true }, false, 0,
    { MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ },
    UMAC_REG_DBG_SELECT_K2_E5, UMAC_REG_DBG_DWORD_ENABLE_K2_E5,
    UMAC_REG_DBG_SHIFT_K2_E5, UMAC_REG_DBG_FORCE_VALID_K2_E5,
    UMAC_REG_DBG_FORCE_FRAME_K2_E5,
    true, false, DBG_RESET_REG_MISCS_PL_HV, 6 };

static struct block_defs block_xmac_defs = {
    "xmac", { false, false }, false, 0,
    { MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
    0, 0, 0, 0, 0,
    false, false, MAX_DBG_RESET_REGS, 0 };

static struct block_defs block_dbg_defs = {
    "dbg", { false, false }, false, 0,
    { MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
    0, 0, 0, 0, 0,
    true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 3 };

static struct block_defs block_nig_defs = {
    "nig", { true, true }, false, 0,
    { DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN },
    NIG_REG_DBG_SELECT, NIG_REG_DBG_DWORD_ENABLE,
    NIG_REG_DBG_SHIFT, NIG_REG_DBG_FORCE_VALID,
    NIG_REG_DBG_FORCE_FRAME,
    true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 0 };

static struct block_defs block_wol_defs = {
    "wol", { false, true }, false, 0,
    { MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ },
    WOL_REG_DBG_SELECT_K2_E5, WOL_REG_DBG_DWORD_ENABLE_K2_E5,
    WOL_REG_DBG_SHIFT_K2_E5, WOL_REG_DBG_FORCE_VALID_K2_E5,
    WOL_REG_DBG_FORCE_FRAME_K2_E5,
    true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 7 };

static struct block_defs block_bmbn_defs = {
    "bmbn", { false, true }, false, 0,
    { MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCB },
    BMBN_REG_DBG_SELECT_K2_E5, BMBN_REG_DBG_DWORD_ENABLE_K2_E5,
    BMBN_REG_DBG_SHIFT_K2_E5, BMBN_REG_DBG_FORCE_VALID_K2_E5,
    BMBN_REG_DBG_FORCE_FRAME_K2_E5,
    false, false, MAX_DBG_RESET_REGS, 0 };

static struct block_defs block_ipc_defs = {
    "ipc", { false, false }, false, 0,
    { MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
    0, 0, 0, 0, 0,
    true, false, DBG_RESET_REG_MISCS_PL_UA, 8 };

static struct block_defs block_nwm_defs = {
    "nwm", { false, true }, false, 0,
    { MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW },
    NWM_REG_DBG_SELECT_K2_E5, NWM_REG_DBG_DWORD_ENABLE_K2_E5,
    NWM_REG_DBG_SHIFT_K2_E5, NWM_REG_DBG_FORCE_VALID_K2_E5,
    NWM_REG_DBG_FORCE_FRAME_K2_E5,
    true, false, DBG_RESET_REG_MISCS_PL_HV_2, 0 };

static struct block_defs block_nws_defs = {
    "nws", { false, true }, false, 0,
    { MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW },
    NWS_REG_DBG_SELECT_K2_E5, NWS_REG_DBG_DWORD_ENABLE_K2_E5,
    NWS_REG_DBG_SHIFT_K2_E5, NWS_REG_DBG_FORCE_VALID_K2_E5,
    NWS_REG_DBG_FORCE_FRAME_K2_E5,
    true, false, DBG_RESET_REG_MISCS_PL_HV, 12 };

static struct block_defs block_ms_defs = {
    "ms", { false, true }, false, 0,
    { MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ },
    MS_REG_DBG_SELECT_K2_E5, MS_REG_DBG_DWORD_ENABLE_K2_E5,
    MS_REG_DBG_SHIFT_K2_E5, MS_REG_DBG_FORCE_VALID_K2_E5,
    MS_REG_DBG_FORCE_FRAME_K2_E5,
    true, false, DBG_RESET_REG_MISCS_PL_HV, 13 };

static struct block_defs block_phy_pcie_defs = {
    "phy_pcie", { false, true }, false, 0,
    { MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH },
    PCIE_REG_DBG_COMMON_SELECT_K2_E5, PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2_E5,
    PCIE_REG_DBG_COMMON_SHIFT_K2_E5, PCIE_REG_DBG_COMMON_FORCE_VALID_K2_E5,
    PCIE_REG_DBG_COMMON_FORCE_FRAME_K2_E5,
    false, false, MAX_DBG_RESET_REGS, 0 };

static struct block_defs block_led_defs = {
    "led", { false, false }, false, 0,
    { MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
    0, 0, 0, 0, 0,
    true, false, DBG_RESET_REG_MISCS_PL_HV, 14 };

static struct block_defs block_avs_wrap_defs = {
    "avs_wrap", { false, false }, false, 0,
    { MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
    0, 0, 0, 0, 0,
    true, false, DBG_RESET_REG_MISCS_PL_UA, 11 };

static struct block_defs block_rgfs_defs = {
    "rgfs", { false, false }, false, 0,
    { MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
    0, 0, 0, 0, 0,
    false, false, MAX_DBG_RESET_REGS, 0 };

static struct block_defs block_rgsrc_defs = {
    "rgsrc", { false, false }, false, 0,
    { MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
    0, 0, 0, 0, 0,
    false, false, MAX_DBG_RESET_REGS, 0 };

static struct block_defs block_tgfs_defs = {
    "tgfs", { false, false }, false, 0,
    { MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
    0, 0, 0, 0, 0,
    false, false, MAX_DBG_RESET_REGS, 0 };

static struct block_defs block_tgsrc_defs = {
    "tgsrc", { false, false }, false, 0,
    { MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
    0, 0, 0, 0, 0,
    false, false, MAX_DBG_RESET_REGS, 0 };

static struct block_defs block_ptld_defs = {
    "ptld", { false, false }, false, 0,
    { MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
    0, 0, 0, 0, 0,
    false, false, MAX_DBG_RESET_REGS, 0 };

static struct block_defs block_ypld_defs = {
    "ypld", { false, false }, false, 0,
    { MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
    0, 0, 0, 0, 0,
    false, false, MAX_DBG_RESET_REGS, 0 };

static struct block_defs block_misc_aeu_defs = {
    "misc_aeu", { false, false }, false, 0,
    { MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
    0, 0, 0, 0, 0,
    false, false, MAX_DBG_RESET_REGS, 0 };

static struct block_defs block_bar0_map_defs = {
    "bar0_map", { false, false }, false, 0,
    { MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
    0, 0, 0, 0, 0,
    false, false, MAX_DBG_RESET_REGS, 0 };

static struct block_defs* s_block_defs[MAX_BLOCK_ID] = {
    &block_grc_defs,
    &block_miscs_defs,
    &block_misc_defs,
    &block_dbu_defs,
    &block_pglue_b_defs,
    &block_cnig_defs,
    &block_cpmu_defs,
    &block_ncsi_defs,
    &block_opte_defs,
    &block_bmb_defs,
    &block_pcie_defs,
    &block_mcp_defs,
    &block_mcp2_defs,
    &block_pswhst_defs,
    &block_pswhst2_defs,
    &block_pswrd_defs,
    &block_pswrd2_defs,
    &block_pswwr_defs,
    &block_pswwr2_defs,
    &block_pswrq_defs,
    &block_pswrq2_defs,
    &block_pglcs_defs,
    &block_dmae_defs,
    &block_ptu_defs,
    &block_tcm_defs,
    &block_mcm_defs,
    &block_ucm_defs,
    &block_xcm_defs,
    &block_ycm_defs,
    &block_pcm_defs,
    &block_qm_defs,
    &block_tm_defs,
    &block_dorq_defs,
    &block_brb_defs,
    &block_src_defs,
    &block_prs_defs,
    &block_tsdm_defs,
    &block_msdm_defs,
    &block_usdm_defs,
    &block_xsdm_defs,
    &block_ysdm_defs,
    &block_psdm_defs,
    &block_tsem_defs,
    &block_msem_defs,
    &block_usem_defs,
    &block_xsem_defs,
    &block_ysem_defs,
    &block_psem_defs,
    &block_rss_defs,
    &block_tmld_defs,
    &block_muld_defs,
    &block_yuld_defs,
    &block_xyld_defs,
    &block_ptld_defs,
    &block_ypld_defs,
    &block_prm_defs,
    &block_pbf_pb1_defs,
    &block_pbf_pb2_defs,
    &block_rpb_defs,
    &block_btb_defs,
    &block_pbf_defs,
    &block_rdif_defs,
    &block_tdif_defs,
    &block_cdu_defs,
    &block_ccfc_defs,
    &block_tcfc_defs,
    &block_igu_defs,
    &block_cau_defs,
    &block_rgfs_defs,
    &block_rgsrc_defs,
    &block_tgfs_defs,
    &block_tgsrc_defs,
    &block_umac_defs,
    &block_xmac_defs,
    &block_dbg_defs,
    &block_nig_defs,
    &block_wol_defs,
    &block_bmbn_defs,
    &block_ipc_defs,
    &block_nwm_defs,
    &block_nws_defs,
    &block_ms_defs,
    &block_phy_pcie_defs,
    &block_led_defs,
    &block_avs_wrap_defs,
    &block_misc_aeu_defs,
    &block_bar0_map_defs,
};
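
/*
 * Note (added): s_block_defs is indexed by enum block_id, so the pointer
 * order above must follow that enum exactly; a misplaced entry would make
 * the dump attribute data to the wrong block.
 */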

/* Constraint operation types */
static struct dbg_bus_constraint_op_defs s_constraint_op_defs[] = {

    /* DBG_BUS_CONSTRAINT_OP_EQ */
    { 0, false },

    /* DBG_BUS_CONSTRAINT_OP_NE */
    { 5, false },

    /* DBG_BUS_CONSTRAINT_OP_LT */
    { 1, false },

    /* DBG_BUS_CONSTRAINT_OP_LTC */
    { 1, true },

    /* DBG_BUS_CONSTRAINT_OP_LE */
    { 2, false },

    /* DBG_BUS_CONSTRAINT_OP_LEC */
    { 2, true },

    /* DBG_BUS_CONSTRAINT_OP_GT */
    { 4, false },

    /* DBG_BUS_CONSTRAINT_OP_GTC */
    { 4, true },

    /* DBG_BUS_CONSTRAINT_OP_GE */
    { 3, false },

    /* DBG_BUS_CONSTRAINT_OP_GEC */
    { 3, true }
};

static const char* s_dbg_target_names[] = {

    /* DBG_BUS_TARGET_ID_INT_BUF */
    "int-buf",

    /* DBG_BUS_TARGET_ID_NIG */
    "nw",

    /* DBG_BUS_TARGET_ID_PCI */
    "pci-buf"
};

static struct storm_mode_defs s_storm_mode_defs[] = {

    /* DBG_BUS_STORM_MODE_PRINTF */
    { "printf", true, 0 },

    /* DBG_BUS_STORM_MODE_PRAM_ADDR */
    { "pram_addr", true, 1 },

    /* DBG_BUS_STORM_MODE_DRA_RW */
    { "dra_rw", true, 2 },

    /* DBG_BUS_STORM_MODE_DRA_W */
    { "dra_w", true, 3 },

    /* DBG_BUS_STORM_MODE_LD_ST_ADDR */
    { "ld_st_addr", true, 4 },

    /* DBG_BUS_STORM_MODE_DRA_FSM */
    { "dra_fsm", true, 5 },

    /* DBG_BUS_STORM_MODE_RH */
    { "rh", true, 6 },

    /* DBG_BUS_STORM_MODE_FOC */
    { "foc", false, 1 },

    /* DBG_BUS_STORM_MODE_EXT_STORE */
    { "ext_store", false, 3 }
};

static struct platform_defs s_platform_defs[] = {

    /* PLATFORM_ASIC */
    { "asic", 1 },

    /* PLATFORM_EMUL_FULL */
    { "emul_full", 2000 },

    /* PLATFORM_EMUL_REDUCED */
    { "emul_reduced", 2000 },

    /* PLATFORM_FPGA */
    { "fpga", 200 }
};

static struct grc_param_defs s_grc_param_defs[] = {

    /* DBG_GRC_PARAM_DUMP_TSTORM */
    { { 1, 1 }, 0, 1, false, 1, 1 },

    /* DBG_GRC_PARAM_DUMP_MSTORM */
    { { 1, 1 }, 0, 1, false, 1, 1 },

    /* DBG_GRC_PARAM_DUMP_USTORM */
    { { 1, 1 }, 0, 1, false, 1, 1 },

    /* DBG_GRC_PARAM_DUMP_XSTORM */
    { { 1, 1 }, 0, 1, false, 1, 1 },

    /* DBG_GRC_PARAM_DUMP_YSTORM */
    { { 1, 1 }, 0, 1, false, 1, 1 },

    /* DBG_GRC_PARAM_DUMP_PSTORM */
    { { 1, 1 }, 0, 1, false, 1, 1 },

    /* DBG_GRC_PARAM_DUMP_REGS */
    { { 1, 1 }, 0, 1, false, 0, 1 },

    /* DBG_GRC_PARAM_DUMP_RAM */
    { { 1, 1 }, 0, 1, false, 0, 1 },

    /* DBG_GRC_PARAM_DUMP_PBUF */
    { { 1, 1 }, 0, 1, false, 0, 1 },

    /* DBG_GRC_PARAM_DUMP_IOR */
    { { 0, 0 }, 0, 1, false, 0, 1 },

    /* DBG_GRC_PARAM_DUMP_VFC */
    { { 0, 0 }, 0, 1, false, 0, 1 },

    /* DBG_GRC_PARAM_DUMP_CM_CTX */
    { { 1, 1 }, 0, 1, false, 0, 1 },

    /* DBG_GRC_PARAM_DUMP_ILT */
    { { 1, 1 }, 0, 1, false, 0, 1 },

    /* DBG_GRC_PARAM_DUMP_RSS */
    { { 1, 1 }, 0, 1, false, 0, 1 },

    /* DBG_GRC_PARAM_DUMP_CAU */
    { { 1, 1 }, 0, 1, false, 0, 1 },

    /* DBG_GRC_PARAM_DUMP_QM */
    { { 1, 1 }, 0, 1, false, 0, 1 },

    /* DBG_GRC_PARAM_DUMP_MCP */
    { { 1, 1 }, 0, 1, false, 0, 1 },

    /* DBG_GRC_PARAM_RESERVED */
    { { 1, 1 }, 0, 1, false, 0, 1 },

    /* DBG_GRC_PARAM_DUMP_CFC */
    { { 1, 1 }, 0, 1, false, 0, 1 },

    /* DBG_GRC_PARAM_DUMP_IGU */
    { { 1, 1 }, 0, 1, false, 0, 1 },

    /* DBG_GRC_PARAM_DUMP_BRB */
    { { 0, 0 }, 0, 1, false, 0, 1 },

    /* DBG_GRC_PARAM_DUMP_BTB */
    { { 0, 0 }, 0, 1, false, 0, 1 },

    /* DBG_GRC_PARAM_DUMP_BMB */
    { { 0, 0 }, 0, 1, false, 0, 1 },

    /* DBG_GRC_PARAM_DUMP_NIG */
    { { 1, 1 }, 0, 1, false, 0, 1 },

    /* DBG_GRC_PARAM_DUMP_MULD */
    { { 1, 1 }, 0, 1, false, 0, 1 },

    /* DBG_GRC_PARAM_DUMP_PRS */
    { { 1, 1 }, 0, 1, false, 0, 1 },

    /* DBG_GRC_PARAM_DUMP_DMAE */
    { { 1, 1 }, 0, 1, false, 0, 1 },

    /* DBG_GRC_PARAM_DUMP_TM */
    { { 1, 1 }, 0, 1, false, 0, 1 },

    /* DBG_GRC_PARAM_DUMP_SDM */
    { { 1, 1 }, 0, 1, false, 0, 1 },

    /* DBG_GRC_PARAM_DUMP_DIF */
    { { 1, 1 }, 0, 1, false, 0, 1 },

    /* DBG_GRC_PARAM_DUMP_STATIC */
    { { 1, 1 }, 0, 1, false, 0, 1 },

    /* DBG_GRC_PARAM_UNSTALL */
    { { 0, 0 }, 0, 1, false, 0, 0 },

    /* DBG_GRC_PARAM_NUM_LCIDS */
    { { MAX_LCIDS, MAX_LCIDS }, 1, MAX_LCIDS, false, MAX_LCIDS, MAX_LCIDS },

    /* DBG_GRC_PARAM_NUM_LTIDS */
    { { MAX_LTIDS, MAX_LTIDS }, 1, MAX_LTIDS, false, MAX_LTIDS, MAX_LTIDS },

    /* DBG_GRC_PARAM_EXCLUDE_ALL */
    { { 0, 0 }, 0, 1, true, 0, 0 },

    /* DBG_GRC_PARAM_CRASH */
    { { 0, 0 }, 0, 1, true, 0, 0 },

    /* DBG_GRC_PARAM_PARITY_SAFE */
    { { 0, 0 }, 0, 1, false, 1, 0 },

    /* DBG_GRC_PARAM_DUMP_CM */
    { { 1, 1 }, 0, 1, false, 0, 1 },

    /* DBG_GRC_PARAM_DUMP_PHY */
    { { 1, 1 }, 0, 1, false, 0, 1 },

    /* DBG_GRC_PARAM_NO_MCP */
    { { 0, 0 }, 0, 1, false, 0, 0 },

    /* DBG_GRC_PARAM_NO_FW_VER */
    { { 0, 0 }, 0, 1, false, 0, 0 }
};

static struct rss_mem_defs s_rss_mem_defs[] = {
    { "rss_mem_cid", "rss_cid", 0,
      { 256, 320 },
      { 32, 32 } },

    { "rss_mem_key_msb", "rss_key", 1024,
      { 128, 208 },
      { 256, 256 } },

    { "rss_mem_key_lsb", "rss_key", 2048,
      { 128, 208 },
      { 64, 64 } },

    { "rss_mem_info", "rss_info", 3072,
      { 128, 208 },
      { 16, 16 } },

    { "rss_mem_ind", "rss_ind", 4096,
      { 16384, 26624 },
      { 16, 16 } }
};

static struct vfc_ram_defs s_vfc_ram_defs[] = {
    { "vfc_ram_tt1", "vfc_ram", 0, 512 },
    { "vfc_ram_mtt2", "vfc_ram", 512, 128 },
    { "vfc_ram_stt2", "vfc_ram", 640, 32 },
    { "vfc_ram_ro_vect", "vfc_ram", 672, 32 }
};

static struct big_ram_defs s_big_ram_defs[] = {
    { "BRB", MEM_GROUP_BRB_MEM, MEM_GROUP_BRB_RAM, DBG_GRC_PARAM_DUMP_BRB, BRB_REG_BIG_RAM_ADDRESS, BRB_REG_BIG_RAM_DATA,
      { 4800, 5632 } },

    { "BTB", MEM_GROUP_BTB_MEM, MEM_GROUP_BTB_RAM, DBG_GRC_PARAM_DUMP_BTB, BTB_REG_BIG_RAM_ADDRESS, BTB_REG_BIG_RAM_DATA,
      { 2880, 3680 } },

    { "BMB", MEM_GROUP_BMB_MEM, MEM_GROUP_BMB_RAM, DBG_GRC_PARAM_DUMP_BMB, BMB_REG_BIG_RAM_ADDRESS, BMB_REG_BIG_RAM_DATA,
      { 1152, 1152 } }
};
1723
1724 static struct reset_reg_defs s_reset_regs_defs[] = {
1725
1726 /* DBG_RESET_REG_MISCS_PL_UA */
1727 { MISCS_REG_RESET_PL_UA, 0x0, { true, true } },
1728
1729 /* DBG_RESET_REG_MISCS_PL_HV */
1730 { MISCS_REG_RESET_PL_HV, 0x0, { true, true } },
1731
1732 /* DBG_RESET_REG_MISCS_PL_HV_2 */
1733 { MISCS_REG_RESET_PL_HV_2_K2_E5, 0x0, { false, true } },
1734
1735 /* DBG_RESET_REG_MISC_PL_UA */
1736 { MISC_REG_RESET_PL_UA, 0x0, { true, true } },
1737
1738 /* DBG_RESET_REG_MISC_PL_HV */
1739 { MISC_REG_RESET_PL_HV, 0x0, { true, true } },
1740
1741 /* DBG_RESET_REG_MISC_PL_PDA_VMAIN_1 */
1742 { MISC_REG_RESET_PL_PDA_VMAIN_1, 0x4404040, { true, true } },
1743
1744 /* DBG_RESET_REG_MISC_PL_PDA_VMAIN_2 */
1745 { MISC_REG_RESET_PL_PDA_VMAIN_2, 0x7c00007, { true, true } },
1746
1747 /* DBG_RESET_REG_MISC_PL_PDA_VAUX */
1748 { MISC_REG_RESET_PL_PDA_VAUX, 0x2, { true, true } },
1749 };
1750
1751 static struct phy_defs s_phy_defs[] = {
1752 { "nw_phy", NWS_REG_NWS_CMU_K2_E5, PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2_E5, PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2_E5, PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2_E5, PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2_E5 },
1753 { "sgmii_phy", MS_REG_MS_CMU_K2_E5, PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2_E5, PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2_E5, PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2_E5, PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2_E5 },
1754 { "pcie_phy0", PHY_PCIE_REG_PHY0_K2_E5, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2_E5, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2_E5, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2_E5, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5 },
1755 { "pcie_phy1", PHY_PCIE_REG_PHY1_K2_E5, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2_E5, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2_E5, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2_E5, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5 },
1756 };
1757
1758 /* The order of indexes that should be applied to a PCI buffer line */
1759 static const u8 s_pci_buf_line_ind[PCI_BUF_LINE_SIZE_IN_DWORDS] = { 1, 0, 3, 2, 5, 4, 7, 6 };
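
/* Illustration: ecore_bus_dump_pci_buf_range() below stores each line as
 * dump_buf[s_pci_buf_line_ind[i]] = pci_buf[i], so a line read as dwords
 * d0..d7 lands in the dump as { d1, d0, d3, d2, d5, d4, d7, d6 }, i.e. each
 * pair of dwords is swapped (presumably undoing the 64-bit order in which
 * the HW writes the buffer).
 */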
1760
1761 /******************************** Variables **********************************/
1762
1763 /* The version of the calling app */
1764 static u32 s_app_ver;
1765
1766 /**************************** Private Functions ******************************/
1767
1768 static void ecore_static_asserts(void)
1769 {
1770 CHECK_ARR_SIZE(s_dbg_arrays, MAX_BIN_DBG_BUFFER_TYPE);
1771 CHECK_ARR_SIZE(s_big_ram_defs, NUM_BIG_RAM_TYPES);
1772 CHECK_ARR_SIZE(s_vfc_ram_defs, NUM_VFC_RAM_TYPES);
1773 CHECK_ARR_SIZE(s_rss_mem_defs, NUM_RSS_MEM_TYPES);
1774 CHECK_ARR_SIZE(s_chip_defs, MAX_CHIP_IDS);
1775 CHECK_ARR_SIZE(s_platform_defs, MAX_PLATFORM_IDS);
1776 CHECK_ARR_SIZE(s_storm_defs, MAX_DBG_STORMS);
1777 CHECK_ARR_SIZE(s_constraint_op_defs, MAX_DBG_BUS_CONSTRAINT_OPS);
1778 CHECK_ARR_SIZE(s_dbg_target_names, MAX_DBG_BUS_TARGETS);
1779 CHECK_ARR_SIZE(s_storm_mode_defs, MAX_DBG_BUS_STORM_MODES);
1780 CHECK_ARR_SIZE(s_grc_param_defs, MAX_DBG_GRC_PARAMS);
1781 CHECK_ARR_SIZE(s_reset_regs_defs, MAX_DBG_RESET_REGS);
1782 }
1783
1784 /* Reads and returns a single dword from the specified unaligned buffer. */
1785 static u32 ecore_read_unaligned_dword(u8 *buf)
1786 {
1787 u32 dword;
1788
1789 OSAL_MEMCPY((u8*)&dword, buf, sizeof(dword));
1790 return dword;
1791 }
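
/* Example usage (illustrative, little-endian host assumed):
 *
 *   u8 buf[7] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77 };
 *   u32 val = ecore_read_unaligned_dword(buf + 1);
 *
 * val is then 0x55443322. The OSAL_MEMCPY avoids an unaligned 32-bit load,
 * which faults on some architectures.
 */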
1792
1793 /* Returns the difference in bytes between the specified physical addresses.
1794 * Assumes that the first address is bigger than the second, and that the
1795 * difference is a 32-bit value.
1796 */
1797 static u32 ecore_phys_addr_diff(struct dbg_bus_mem_addr *a,
1798 struct dbg_bus_mem_addr *b)
1799 {
1800 return a->hi == b->hi ? a->lo - b->lo : b->lo - a->lo;
1801 }
1802
1803 /* Sets the value of the specified GRC param */
1804 static void ecore_grc_set_param(struct ecore_hwfn *p_hwfn,
1805 enum dbg_grc_params grc_param,
1806 u32 val)
1807 {
1808 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1809
1810 dev_data->grc.param_val[grc_param] = val;
1811 }
1812
1813 /* Returns the value of the specified GRC param */
1814 static u32 ecore_grc_get_param(struct ecore_hwfn *p_hwfn,
1815 enum dbg_grc_params grc_param)
1816 {
1817 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1818
1819 return dev_data->grc.param_val[grc_param];
1820 }
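
/* Example usage (illustrative): force QM memories into the next GRC dump,
 * then read the setting back:
 *
 *   ecore_grc_set_param(p_hwfn, DBG_GRC_PARAM_DUMP_QM, 1);
 *   if (ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_DUMP_QM))
 *           DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "QM dump enabled\n");
 */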
1821
1822 /* Initializes the GRC parameters */
1823 static void ecore_dbg_grc_init_params(struct ecore_hwfn *p_hwfn)
1824 {
1825 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1826
1827 if (!dev_data->grc.params_initialized) {
1828 ecore_dbg_grc_set_params_default(p_hwfn);
1829 dev_data->grc.params_initialized = 1;
1830 }
1831 }
1832
1833 /* Initializes debug data for the specified device */
1834 static enum dbg_status ecore_dbg_dev_init(struct ecore_hwfn *p_hwfn,
1835 struct ecore_ptt *p_ptt)
1836 {
1837 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1838
1839 if (dev_data->initialized)
1840 return DBG_STATUS_OK;
1841
1842 if (!s_app_ver)
1843 return DBG_STATUS_APP_VERSION_NOT_SET;
1844
1845 if (ECORE_IS_K2(p_hwfn->p_dev)) {
1846 dev_data->chip_id = CHIP_K2;
1847 dev_data->mode_enable[MODE_K2] = 1;
1848 }
1849 else if (ECORE_IS_BB_B0(p_hwfn->p_dev)) {
1850 dev_data->chip_id = CHIP_BB;
1851 dev_data->mode_enable[MODE_BB] = 1;
1852 }
1853 else {
1854 return DBG_STATUS_UNKNOWN_CHIP;
1855 }
1856
1857 #ifdef ASIC_ONLY
1858 dev_data->platform_id = PLATFORM_ASIC;
1859 dev_data->mode_enable[MODE_ASIC] = 1;
1860 #else
1861 if (CHIP_REV_IS_ASIC(p_hwfn->p_dev)) {
1862 dev_data->platform_id = PLATFORM_ASIC;
1863 dev_data->mode_enable[MODE_ASIC] = 1;
1864 }
1865 else if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
1866 if (ecore_rd(p_hwfn, p_ptt, MISCS_REG_ECO_RESERVED) & 0x20000000) {
1867 dev_data->platform_id = PLATFORM_EMUL_FULL;
1868 dev_data->mode_enable[MODE_EMUL_FULL] = 1;
1869 }
1870 else {
1871 dev_data->platform_id = PLATFORM_EMUL_REDUCED;
1872 dev_data->mode_enable[MODE_EMUL_REDUCED] = 1;
1873 }
1874 }
1875 else if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
1876 dev_data->platform_id = PLATFORM_FPGA;
1877 dev_data->mode_enable[MODE_FPGA] = 1;
1878 }
1879 else {
1880 return DBG_STATUS_UNKNOWN_CHIP;
1881 }
1882 #endif
1883
1884 /* Initializes the GRC parameters */
1885 ecore_dbg_grc_init_params(p_hwfn);
1886
1887 dev_data->initialized = true;
1888
1889 return DBG_STATUS_OK;
1890 }
1891
1892 static struct dbg_bus_block* get_dbg_bus_block_desc(struct ecore_hwfn *p_hwfn,
1893 enum block_id block_id)
1894 {
1895 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1896
1897 return (struct dbg_bus_block*)&dbg_bus_blocks[block_id * MAX_CHIP_IDS + dev_data->chip_id];
1898 }
1899
1900 /* Returns OSAL_NULL for signature line, latency line and non-existing lines */
1901 static struct dbg_bus_line* get_dbg_bus_line_desc(struct ecore_hwfn *p_hwfn,
1902 enum block_id block_id)
1903 {
1904 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1905 struct dbg_bus_block_data *block_bus;
1906 struct dbg_bus_block *block_desc;
1907
1908 block_bus = &dev_data->bus.blocks[block_id];
1909 block_desc = get_dbg_bus_block_desc(p_hwfn, block_id);
1910
1911 if (!block_bus->line_num ||
1912 (block_bus->line_num == 1 && block_desc->has_latency_events) ||
1913 block_bus->line_num >= NUM_DBG_LINES(block_desc))
1914 return OSAL_NULL;
1915
1916 return (struct dbg_bus_line*)&dbg_bus_lines[block_desc->lines_offset + block_bus->line_num - NUM_EXTRA_DBG_LINES(block_desc)];
1917 }
1918
1919 /* Reads the FW info structure for the specified Storm from the chip,
1920 * and writes it to the specified fw_info pointer.
1921 */
1922 static void ecore_read_fw_info(struct ecore_hwfn *p_hwfn,
1923 struct ecore_ptt *p_ptt,
1924 u8 storm_id,
1925 struct fw_info *fw_info)
1926 {
1927 struct storm_defs *storm = &s_storm_defs[storm_id];
1928 struct fw_info_location fw_info_location;
1929 u32 addr, i, *dest;
1930
1931 OSAL_MEMSET(&fw_info_location, 0, sizeof(fw_info_location));
1932 OSAL_MEMSET(fw_info, 0, sizeof(*fw_info));
1933
1934 /* First, read the address that points to the fw_info location.
1935 * The address is located in the last line of the Storm RAM.
1936 */
1937 addr = storm->sem_fast_mem_addr + SEM_FAST_REG_INT_RAM + DWORDS_TO_BYTES(SEM_FAST_REG_INT_RAM_SIZE) - sizeof(fw_info_location);
1938 dest = (u32*)&fw_info_location;
1939
1940 for (i = 0; i < BYTES_TO_DWORDS(sizeof(fw_info_location)); i++, addr += BYTES_IN_DWORD)
1941 dest[i] = ecore_rd(p_hwfn, p_ptt, addr);
1942
1943 /* Read FW version info from Storm RAM */
1944 if (fw_info_location.size > 0 && fw_info_location.size <= sizeof(*fw_info)) {
1945 addr = fw_info_location.grc_addr;
1946 dest = (u32*)fw_info;
1947 for (i = 0; i < BYTES_TO_DWORDS(fw_info_location.size); i++, addr += BYTES_IN_DWORD)
1948 dest[i] = ecore_rd(p_hwfn, p_ptt, addr);
1949 }
1950 }
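
/* Illustration of the Storm RAM layout read above: the last line of the
 * Storm's INT_RAM holds a fw_info_location descriptor ({ grc_addr, size })
 * that points at the fw_info structure elsewhere in GRC space. The size
 * check guards against a garbage descriptor in an uninitialized RAM.
 */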
1951
1952 /* Dumps the specified string to the specified buffer.
1953 * Returns the dumped size in bytes.
1954 */
1955 static u32 ecore_dump_str(char *dump_buf,
1956 bool dump,
1957 const char *str)
1958 {
1959 if (dump)
1960 OSAL_STRCPY(dump_buf, str);
1961
1962 return (u32)OSAL_STRLEN(str) + 1;
1963 }
1964
1965 /* Dumps zeros to align the specified buffer to dwords.
1966 * Returns the dumped size in bytes.
1967 */
1968 static u32 ecore_dump_align(char *dump_buf,
1969 bool dump,
1970 u32 byte_offset)
1971 {
1972 u8 offset_in_dword, align_size;
1973
1974 offset_in_dword = (u8)(byte_offset & 0x3);
1975 align_size = offset_in_dword ? BYTES_IN_DWORD - offset_in_dword : 0;
1976
1977 if (dump && align_size)
1978 OSAL_MEMSET(dump_buf, 0, align_size);
1979
1980 return align_size;
1981 }
1982
1983 /* Writes the specified string param to the specified buffer.
1984 * Returns the dumped size in dwords.
1985 */
1986 static u32 ecore_dump_str_param(u32 *dump_buf,
1987 bool dump,
1988 const char *param_name,
1989 const char *param_val)
1990 {
1991 char *char_buf = (char*)dump_buf;
1992 u32 offset = 0;
1993
1994 /* Dump param name */
1995 offset += ecore_dump_str(char_buf + offset, dump, param_name);
1996
1997 /* Indicate a string param value */
1998 if (dump)
1999 *(char_buf + offset) = 1;
2000 offset++;
2001
2002 /* Dump param value */
2003 offset += ecore_dump_str(char_buf + offset, dump, param_val);
2004
2005 /* Align buffer to next dword */
2006 offset += ecore_dump_align(char_buf + offset, dump, offset);
2007
2008 return BYTES_TO_DWORDS(offset);
2009 }
2010
2011 /* Writes the specified numeric param to the specified buffer.
2012 * Returns the dumped size in dwords.
2013 */
2014 static u32 ecore_dump_num_param(u32 *dump_buf,
2015 bool dump,
2016 const char *param_name,
2017 u32 param_val)
2018 {
2019 char *char_buf = (char*)dump_buf;
2020 u32 offset = 0;
2021
2022 /* Dump param name */
2023 offset += ecore_dump_str(char_buf + offset, dump, param_name);
2024
2025 /* Indicate a numeric param value */
2026 if (dump)
2027 *(char_buf + offset) = 0;
2028 offset++;
2029
2030 /* Align buffer to next dword */
2031 offset += ecore_dump_align(char_buf + offset, dump, offset);
2032
2033 /* Dump param value (and change offset from bytes to dwords) */
2034 offset = BYTES_TO_DWORDS(offset);
2035 if (dump)
2036 *(dump_buf + offset) = param_val;
2037 offset++;
2038
2039 return offset;
2040 }
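
/* Illustration of the param encoding produced by the two functions above
 * (byte layout; a one-byte type flag follows the name - 0 for numeric,
 * 1 for string):
 *
 *   numeric: "pci-func\0" 0x00 <pad to dword> <u32 value>
 *   string:  "chip\0"     0x01 "bb\0" <pad to dword>
 *
 * The padding keeps every param dword-aligned, so the dump can be parsed as
 * a stream of dwords.
 */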
2041
2042 /* Reads the FW version and writes it as a param to the specified buffer.
2043 * Returns the dumped size in dwords.
2044 */
2045 static u32 ecore_dump_fw_ver_param(struct ecore_hwfn *p_hwfn,
2046 struct ecore_ptt *p_ptt,
2047 u32 *dump_buf,
2048 bool dump)
2049 {
2050 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2051 char fw_ver_str[16] = EMPTY_FW_VERSION_STR;
2052 char fw_img_str[16] = EMPTY_FW_IMAGE_STR;
2053 struct fw_info fw_info = { { 0 }, { 0 } };
2054 u32 offset = 0;
2055
2056 if (dump && !ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
2057 /* Read FW image/version from PRAM in a non-reset SEMI */
2058 bool found = false;
2059 u8 storm_id;
2060
2061 for (storm_id = 0; storm_id < MAX_DBG_STORMS && !found; storm_id++) {
2062 struct storm_defs *storm = &s_storm_defs[storm_id];
2063
2064 /* Read FW version/image */
2065 if (dev_data->block_in_reset[storm->block_id])
2066 continue;
2067
2068 /* Read FW info for the current Storm */
2069 ecore_read_fw_info(p_hwfn, p_ptt, storm_id, &fw_info);
2070
2071 /* Create FW version/image strings */
2072 if (OSAL_SNPRINTF(fw_ver_str, sizeof(fw_ver_str), "%d_%d_%d_%d", fw_info.ver.num.major, fw_info.ver.num.minor, fw_info.ver.num.rev, fw_info.ver.num.eng) < 0)
2073 DP_NOTICE(p_hwfn, true, "Unexpected debug error: invalid FW version string\n");
2074 switch (fw_info.ver.image_id) {
2075 case FW_IMG_KUKU: OSAL_STRCPY(fw_img_str, "kuku"); break;
2076 case FW_IMG_MAIN: OSAL_STRCPY(fw_img_str, "main"); break;
2077 case FW_IMG_L2B: OSAL_STRCPY(fw_img_str, "l2b"); break;
2078 default: OSAL_STRCPY(fw_img_str, "unknown"); break;
2079 }
2080
2081 found = true;
2082 }
2083 }
2084
2085 /* Dump FW version, image and timestamp */
2086 offset += ecore_dump_str_param(dump_buf + offset, dump, "fw-version", fw_ver_str);
2087 offset += ecore_dump_str_param(dump_buf + offset, dump, "fw-image", fw_img_str);
2088 offset += ecore_dump_num_param(dump_buf + offset, dump, "fw-timestamp", fw_info.ver.timestamp);
2089
2090 return offset;
2091 }
2092
2093 /* Reads the MFW version and writes it as a param to the specified buffer.
2094 * Returns the dumped size in dwords.
2095 */
2096 static u32 ecore_dump_mfw_ver_param(struct ecore_hwfn *p_hwfn,
2097 struct ecore_ptt *p_ptt,
2098 u32 *dump_buf,
2099 bool dump)
2100 {
2101 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2102 char mfw_ver_str[16] = EMPTY_FW_VERSION_STR;
2103 bool is_emul;
2104
2105 is_emul = dev_data->platform_id == PLATFORM_EMUL_FULL || dev_data->platform_id == PLATFORM_EMUL_REDUCED;
2106
2107 if (dump && !is_emul && !ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
2108 u32 public_data_addr, global_section_offsize_addr, global_section_offsize, global_section_addr, mfw_ver;
2109
2110 /* Find MCP public data GRC address. Needs to be ORed with
2111 * MCP_REG_SCRATCH due to a HW bug.
2112 */
2113 public_data_addr = ecore_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR) | MCP_REG_SCRATCH;
2114
2115 /* Find MCP public global section offset */
2116 global_section_offsize_addr = public_data_addr + offsetof(struct mcp_public_data, sections) + sizeof(offsize_t) * PUBLIC_GLOBAL;
2117 global_section_offsize = ecore_rd(p_hwfn, p_ptt, global_section_offsize_addr);
2118 global_section_addr = MCP_REG_SCRATCH + (global_section_offsize & OFFSIZE_OFFSET_MASK) * 4;
2119
2120 /* Read MFW version from MCP public global section */
2121 mfw_ver = ecore_rd(p_hwfn, p_ptt, global_section_addr + offsetof(struct public_global, mfw_ver));
2122
2123 /* Dump MFW version param */
2124 if (OSAL_SNPRINTF(mfw_ver_str, sizeof(mfw_ver_str), "%d_%d_%d_%d", (u8)(mfw_ver >> 24), (u8)(mfw_ver >> 16), (u8)(mfw_ver >> 8), (u8)mfw_ver) < 0)
2125 DP_NOTICE(p_hwfn, true, "Unexpected debug error: invalid MFW version string\n");
2126 }
2127
2128 return ecore_dump_str_param(dump_buf, dump, "mfw-version", mfw_ver_str);
2129 }
2130
2131 /* Writes a section header to the specified buffer.
2132 * Returns the dumped size in dwords.
2133 */
2134 static u32 ecore_dump_section_hdr(u32 *dump_buf,
2135 bool dump,
2136 const char *name,
2137 u32 num_params)
2138 {
2139 return ecore_dump_num_param(dump_buf, dump, name, num_params);
2140 }
2141
2142 /* Writes the common global params to the specified buffer.
2143 * Returns the dumped size in dwords.
2144 */
2145 static u32 ecore_dump_common_global_params(struct ecore_hwfn *p_hwfn,
2146 struct ecore_ptt *p_ptt,
2147 u32 *dump_buf,
2148 bool dump,
2149 u8 num_specific_global_params)
2150 {
2151 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2152 u32 offset = 0;
2153 u8 num_params;
2154
2155 /* Dump global params section header */
2156 num_params = NUM_COMMON_GLOBAL_PARAMS + num_specific_global_params;
2157 offset += ecore_dump_section_hdr(dump_buf + offset, dump, "global_params", num_params);
2158
2159 /* Store params */
2160 offset += ecore_dump_fw_ver_param(p_hwfn, p_ptt, dump_buf + offset, dump);
2161 offset += ecore_dump_mfw_ver_param(p_hwfn, p_ptt, dump_buf + offset, dump);
2162 offset += ecore_dump_num_param(dump_buf + offset, dump, "tools-version", TOOLS_VERSION);
2163 offset += ecore_dump_str_param(dump_buf + offset, dump, "chip", s_chip_defs[dev_data->chip_id].name);
2164 offset += ecore_dump_str_param(dump_buf + offset, dump, "platform", s_platform_defs[dev_data->platform_id].name);
2165 offset += ecore_dump_num_param(dump_buf + offset, dump, "pci-func", p_hwfn->abs_pf_id);
2166
2167 return offset;
2168 }
2169
2170 /* Writes the "last" section (including CRC) to the specified buffer at the
2171 * given offset. Returns the dumped size in dwords.
2172 */
2173 static u32 ecore_dump_last_section(struct ecore_hwfn *p_hwfn,
2174 u32 *dump_buf,
2175 u32 offset,
2176 bool dump)
2177 {
2178 u32 start_offset = offset;
2179
2180 /* Dump CRC section header */
2181 offset += ecore_dump_section_hdr(dump_buf + offset, dump, "last", 0);
2182
2183 /* Calculate CRC32 and add it to the dword after the "last" section */
2184 if (dump)
2185 *(dump_buf + offset) = ~OSAL_CRC32(0xffffffff, (u8*)dump_buf, DWORDS_TO_BYTES(offset));
2186
2187 offset++;
2188
2189 return offset - start_offset;
2190 }
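
/* A complete dump thus ends with an empty "last" section followed by a
 * single CRC32 dword covering everything before it. A consumer can verify
 * integrity by recomputing (illustrative):
 *
 *   u32 crc = ~OSAL_CRC32(0xffffffff, (u8*)dump_buf,
 *                         DWORDS_TO_BYTES(dump_size_in_dwords - 1));
 *
 * and comparing crc against dump_buf[dump_size_in_dwords - 1]; a mismatch
 * indicates a corrupted or truncated dump.
 */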
2191
2192 /* Update blocks reset state */
2193 static void ecore_update_blocks_reset_state(struct ecore_hwfn *p_hwfn,
2194 struct ecore_ptt *p_ptt)
2195 {
2196 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2197 u32 reg_val[MAX_DBG_RESET_REGS] = { 0 };
2198 u32 i;
2199
2200 /* Read reset registers */
2201 for (i = 0; i < MAX_DBG_RESET_REGS; i++)
2202 if (s_reset_regs_defs[i].exists[dev_data->chip_id])
2203 reg_val[i] = ecore_rd(p_hwfn, p_ptt, s_reset_regs_defs[i].addr);
2204
2205 /* Check if blocks are in reset */
2206 for (i = 0; i < MAX_BLOCK_ID; i++) {
2207 struct block_defs *block = s_block_defs[i];
2208
2209 dev_data->block_in_reset[i] = block->has_reset_bit && !(reg_val[block->reset_reg] & (1 << block->reset_bit_offset));
2210 }
2211 }
2212
2213 /* Enable / disable the Debug block */
2214 static void ecore_bus_enable_dbg_block(struct ecore_hwfn *p_hwfn,
2215 struct ecore_ptt *p_ptt,
2216 bool enable)
2217 {
2218 ecore_wr(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON, enable ? 1 : 0);
2219 }
2220
2221 /* Resets the Debug block */
2222 static void ecore_bus_reset_dbg_block(struct ecore_hwfn *p_hwfn,
2223 struct ecore_ptt *p_ptt)
2224 {
2225 u32 dbg_reset_reg_addr, old_reset_reg_val, new_reset_reg_val;
2226 struct block_defs *dbg_block = s_block_defs[BLOCK_DBG];
2227
2228 dbg_reset_reg_addr = s_reset_regs_defs[dbg_block->reset_reg].addr;
2229 old_reset_reg_val = ecore_rd(p_hwfn, p_ptt, dbg_reset_reg_addr);
2230 new_reset_reg_val = old_reset_reg_val & ~(1 << dbg_block->reset_bit_offset);
2231
2232 ecore_wr(p_hwfn, p_ptt, dbg_reset_reg_addr, new_reset_reg_val);
2233 ecore_wr(p_hwfn, p_ptt, dbg_reset_reg_addr, old_reset_reg_val);
2234 }
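
/* Note: the reset is pulsed - clearing the DBG block's reset bit asserts
 * reset, and restoring the original register value releases it.
 */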
2235
2236 static void ecore_bus_set_framing_mode(struct ecore_hwfn *p_hwfn,
2237 struct ecore_ptt *p_ptt,
2238 enum dbg_bus_frame_modes mode)
2239 {
2240 ecore_wr(p_hwfn, p_ptt, DBG_REG_FRAMING_MODE, (u8)mode);
2241 }
2242
2243 /* Enable / disable Debug Bus clients according to the specified mask
2244 * (1 = enable, 0 = disable).
2245 */
2246 static void ecore_bus_enable_clients(struct ecore_hwfn *p_hwfn,
2247 struct ecore_ptt *p_ptt,
2248 u32 client_mask)
2249 {
2250 ecore_wr(p_hwfn, p_ptt, DBG_REG_CLIENT_ENABLE, client_mask);
2251 }
2252
2253 /* Enables the specified Storm for Debug Bus. Assumes a valid Storm ID. */
2254 static void ecore_bus_enable_storm(struct ecore_hwfn *p_hwfn,
2255 struct ecore_ptt *p_ptt,
2256 enum dbg_storms storm_id,
2257 enum dbg_bus_filter_types filter_type)
2258 {
2259 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2260 u32 base_addr, sem_filter_params = filter_type;
2261 struct dbg_bus_storm_data *storm_bus;
2262 struct storm_mode_defs *storm_mode;
2263 struct storm_defs *storm;
2264
2265 storm = &s_storm_defs[storm_id];
2266 storm_bus = &dev_data->bus.storms[storm_id];
2267 storm_mode = &s_storm_mode_defs[storm_bus->mode];
2268 base_addr = storm->sem_fast_mem_addr;
2269
2270 /* Config SEM */
2271 if (storm_mode->is_fast_dbg) {
2272
2273 /* Enable fast debug */
2274 ecore_wr(p_hwfn, p_ptt, storm->sem_frame_mode_addr, DBG_BUS_SEMI_FRAME_MODE_0SLOW_4FAST);
2275 ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_DEBUG_MODE, storm_mode->id_in_hw);
2276 ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_DEBUG_ACTIVE, 1);
2277
2278 /* Enable all messages except STORE. Must be done after
2279 * enabling SEM_FAST_REG_DEBUG_ACTIVE, otherwise messages will
2280 * be dropped after the SEMI sync fifo is filled.
2281 */
2282 ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_DBG_MODE6_SRC_DISABLE, SEM_FAST_MODE6_SRC_ENABLE);
2283 }
2284 else {
2285
2286 /* Enable slow debug */
2287 ecore_wr(p_hwfn, p_ptt, storm->sem_frame_mode_addr, DBG_BUS_SEMI_FRAME_MODE_4SLOW_0FAST);
2288 ecore_wr(p_hwfn, p_ptt, storm->sem_slow_enable_addr, 1);
2289 ecore_wr(p_hwfn, p_ptt, storm->sem_slow_mode_addr, storm_mode->id_in_hw);
2290 ecore_wr(p_hwfn, p_ptt, storm->sem_slow_mode1_conf_addr, SEM_SLOW_MODE1_DATA_ENABLE);
2291 }
2292
2293 /* Config SEM cid filter */
2294 if (storm_bus->cid_filter_en) {
2295 ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_FILTER_CID, storm_bus->cid);
2296 sem_filter_params |= SEM_FILTER_CID_EN_MASK;
2297 }
2298
2299 /* Config SEM eid filter */
2300 if (storm_bus->eid_filter_en) {
2301 const union dbg_bus_storm_eid_params *eid_filter = &storm_bus->eid_filter_params;
2302
2303 if (storm_bus->eid_range_not_mask) {
2304 ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_EVENT_ID_RANGE_STRT, eid_filter->range.min);
2305 ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_EVENT_ID_RANGE_END, eid_filter->range.max);
2306 sem_filter_params |= SEM_FILTER_EID_RANGE_EN_MASK;
2307 }
2308 else {
2309 ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_FILTER_EVENT_ID, eid_filter->mask.val);
2310 ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_EVENT_ID_MASK, ~eid_filter->mask.mask);
2311 sem_filter_params |= SEM_FILTER_EID_MASK_EN_MASK;
2312 }
2313 }
2314
2315 /* Config accumulated SEM filter parameters (if any) */
2316 if (sem_filter_params)
2317 ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_RECORD_FILTER_ENABLE, sem_filter_params);
2318 }
2319
2320 /* Disables Debug Bus block inputs */
2321 static enum dbg_status ecore_bus_disable_inputs(struct ecore_hwfn *p_hwfn,
2322 struct ecore_ptt *p_ptt,
2323 bool empty_semi_fifos)
2324 {
2325 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2326 u8 storm_id, num_fifos_to_empty = MAX_DBG_STORMS;
2327 bool is_fifo_empty[MAX_DBG_STORMS] = { false };
2328 u32 polling_count = 0, block_id;
2329
2330 /* Disable messages output in all Storms */
2331 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
2332 struct storm_defs *storm = &s_storm_defs[storm_id];
2333
2334 if (!dev_data->block_in_reset[storm->block_id])
2335 ecore_wr(p_hwfn, p_ptt, storm->sem_fast_mem_addr + SEM_FAST_REG_DBG_MODE6_SRC_DISABLE, SEM_FAST_MODE6_SRC_DISABLE);
2336 }
2337
2338 /* Try to empty the SEMI sync FIFO. Must be done after message output
2339 * was disabled in all Storms (i.e. SEM_FAST_REG_DBG_MODE6_SRC_DISABLE
2340 * was set to all 1's).
2341 */
2342 while (num_fifos_to_empty) {
2343 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
2344 struct storm_defs *storm = &s_storm_defs[storm_id];
2345
2346 if (is_fifo_empty[storm_id])
2347 continue;
2348
2349 /* Check if sync fifo got empty */
2350 if (dev_data->block_in_reset[storm->block_id] || ecore_rd(p_hwfn, p_ptt, storm->sem_sync_dbg_empty_addr)) {
2351 is_fifo_empty[storm_id] = true;
2352 num_fifos_to_empty--;
2353 }
2354 }
2355
2356 /* Check if need to continue polling */
2357 if (num_fifos_to_empty) {
2358 u32 polling_ms = SEMI_SYNC_FIFO_POLLING_DELAY_MS * s_platform_defs[dev_data->platform_id].delay_factor;
2360
2361 if (empty_semi_fifos && polling_count < SEMI_SYNC_FIFO_POLLING_COUNT) {
2362 OSAL_MSLEEP(polling_ms);
2363 polling_count++;
2364 }
2365 else {
2366 DP_NOTICE(p_hwfn, false, "Warning: failed to empty the SEMI sync FIFO. It means that the last few messages from the SEMI could not be sent to the DBG block. This can happen when the DBG block is blocked (e.g. due to a PCI problem).\n");
2367 break;
2368 }
2369 }
2370 }
2371
2372 /* Disable debug in all Storms */
2373 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
2374 struct storm_defs *storm = &s_storm_defs[storm_id];
2375 u32 base_addr = storm->sem_fast_mem_addr;
2376
2377 if (dev_data->block_in_reset[storm->block_id])
2378 continue;
2379
2380 ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_DEBUG_ACTIVE, 0);
2381 ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_RECORD_FILTER_ENABLE, DBG_BUS_FILTER_TYPE_OFF);
2382 ecore_wr(p_hwfn, p_ptt, storm->sem_frame_mode_addr, DBG_BUS_FRAME_MODE_4HW_0ST);
2383 ecore_wr(p_hwfn, p_ptt, storm->sem_slow_enable_addr, 0);
2384 }
2385
2386 /* Disable all clients */
2387 ecore_bus_enable_clients(p_hwfn, p_ptt, 0);
2388
2389 /* Disable all blocks */
2390 for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
2391 struct block_defs *block = s_block_defs[block_id];
2392
2393 if (block->has_dbg_bus[dev_data->chip_id] && !dev_data->block_in_reset[block_id])
2394 ecore_wr(p_hwfn, p_ptt, block->dbg_enable_addr, 0);
2395 }
2396
2397 /* Disable timestamp */
2398 ecore_wr(p_hwfn, p_ptt, DBG_REG_TIMESTAMP_VALID_EN, 0);
2399
2400 /* Disable filters and triggers */
2401 ecore_wr(p_hwfn, p_ptt, DBG_REG_FILTER_ENABLE, DBG_BUS_FILTER_TYPE_OFF);
2402 ecore_wr(p_hwfn, p_ptt, DBG_REG_TRIGGER_ENABLE, 0);
2403
2404 return DBG_STATUS_OK;
2405 }
2406
2407 /* Sets a Debug Bus trigger/filter constraint */
2408 static void ecore_bus_set_constraint(struct ecore_hwfn *p_hwfn,
2409 struct ecore_ptt *p_ptt,
2410 bool is_filter,
2411 u8 constraint_id,
2412 u8 hw_op_val,
2413 u32 data_val,
2414 u32 data_mask,
2415 u8 frame_bit,
2416 u8 frame_mask,
2417 u16 dword_offset,
2418 u16 range,
2419 u8 cyclic_bit,
2420 u8 must_bit)
2421 {
2422 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2423 u32 reg_offset = constraint_id * BYTES_IN_DWORD;
2424 u8 curr_trigger_state;
2425
2426 /* For trigger only - set register offset according to state */
2427 if (!is_filter) {
2428 curr_trigger_state = dev_data->bus.next_trigger_state - 1;
2429 reg_offset += curr_trigger_state * TRIGGER_SETS_PER_STATE * MAX_CONSTRAINTS * BYTES_IN_DWORD;
2430 }
2431
2432 ecore_wr(p_hwfn, p_ptt, (is_filter ? DBG_REG_FILTER_CNSTR_OPRTN_0 : DBG_REG_TRIGGER_STATE_SET_CNSTR_OPRTN_0) + reg_offset, hw_op_val);
2433 ecore_wr(p_hwfn, p_ptt, (is_filter ? DBG_REG_FILTER_CNSTR_DATA_0 : DBG_REG_TRIGGER_STATE_SET_CNSTR_DATA_0) + reg_offset, data_val);
2434 ecore_wr(p_hwfn, p_ptt, (is_filter ? DBG_REG_FILTER_CNSTR_DATA_MASK_0 : DBG_REG_TRIGGER_STATE_SET_CNSTR_DATA_MASK_0) + reg_offset, data_mask);
2435 ecore_wr(p_hwfn, p_ptt, (is_filter ? DBG_REG_FILTER_CNSTR_FRAME_0 : DBG_REG_TRIGGER_STATE_SET_CNSTR_FRAME_0) + reg_offset, frame_bit);
2436 ecore_wr(p_hwfn, p_ptt, (is_filter ? DBG_REG_FILTER_CNSTR_FRAME_MASK_0 : DBG_REG_TRIGGER_STATE_SET_CNSTR_FRAME_MASK_0) + reg_offset, frame_mask);
2437 ecore_wr(p_hwfn, p_ptt, (is_filter ? DBG_REG_FILTER_CNSTR_OFFSET_0 : DBG_REG_TRIGGER_STATE_SET_CNSTR_OFFSET_0) + reg_offset, dword_offset);
2438 ecore_wr(p_hwfn, p_ptt, (is_filter ? DBG_REG_FILTER_CNSTR_RANGE_0 : DBG_REG_TRIGGER_STATE_SET_CNSTR_RANGE_0) + reg_offset, range);
2439 ecore_wr(p_hwfn, p_ptt, (is_filter ? DBG_REG_FILTER_CNSTR_CYCLIC_0 : DBG_REG_TRIGGER_STATE_SET_CNSTR_CYCLIC_0) + reg_offset, cyclic_bit);
2440 ecore_wr(p_hwfn, p_ptt, (is_filter ? DBG_REG_FILTER_CNSTR_MUST_0 : DBG_REG_TRIGGER_STATE_SET_CNSTR_MUST_0) + reg_offset, must_bit);
2441 }
2442
2443 /* Reads the specified DBG Bus internal buffer range and copies it to the
2444 * specified buffer. Returns the dumped size in dwords.
2445 */
2446 static u32 ecore_bus_dump_int_buf_range(struct ecore_hwfn *p_hwfn,
2447 struct ecore_ptt *p_ptt,
2448 u32 *dump_buf,
2449 bool dump,
2450 u32 start_line,
2451 u32 end_line)
2452 {
2453 u32 line, reg_addr, i, offset = 0;
2454
2455 if (!dump)
2456 return (end_line - start_line + 1) * INT_BUF_LINE_SIZE_IN_DWORDS;
2457
2458 for (line = start_line, reg_addr = DBG_REG_INTR_BUFFER + DWORDS_TO_BYTES(start_line * INT_BUF_LINE_SIZE_IN_DWORDS);
2459 line <= end_line;
2460 line++, offset += INT_BUF_LINE_SIZE_IN_DWORDS)
2461 for (i = 0; i < INT_BUF_LINE_SIZE_IN_DWORDS; i++, reg_addr += BYTES_IN_DWORD)
2462 dump_buf[offset + INT_BUF_LINE_SIZE_IN_DWORDS - 1 - i] = ecore_rd(p_hwfn, p_ptt, reg_addr);
2463
2464 return offset;
2465 }
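
/* Note: each internal buffer line is read in ascending address order but
 * stored into the dump reversed (dwords d0..d7 become d7..d0), presumably
 * matching the order in which the DBG block records a line.
 */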
2466
2467 /* Reads the DBG Bus internal buffer and copies its contents to a buffer.
2468 * Returns the dumped size in dwords.
2469 */
2470 static u32 ecore_bus_dump_int_buf(struct ecore_hwfn *p_hwfn,
2471 struct ecore_ptt *p_ptt,
2472 u32 *dump_buf,
2473 bool dump)
2474 {
2475 u32 last_written_line, offset = 0;
2476
2477 last_written_line = ecore_rd(p_hwfn, p_ptt, DBG_REG_INTR_BUFFER_WR_PTR);
2478
2479 if (ecore_rd(p_hwfn, p_ptt, DBG_REG_WRAP_ON_INT_BUFFER)) {
2480
2481 /* Internal buffer was wrapped: first dump from write pointer
2482 * to buffer end, then dump from buffer start to write pointer.
2483 */
2484 if (last_written_line < INT_BUF_NUM_OF_LINES - 1)
2485 offset += ecore_bus_dump_int_buf_range(p_hwfn, p_ptt, dump_buf + offset, dump, last_written_line + 1, INT_BUF_NUM_OF_LINES - 1);
2486 offset += ecore_bus_dump_int_buf_range(p_hwfn, p_ptt, dump_buf + offset, dump, 0, last_written_line);
2487 }
2488 else if (last_written_line) {
2489
2490 /* Internal buffer wasn't wrapped: dump from buffer start until
2491 * write pointer.
2492 */
2493 if (!ecore_rd(p_hwfn, p_ptt, DBG_REG_INTR_BUFFER_RD_PTR))
2494 offset += ecore_bus_dump_int_buf_range(p_hwfn, p_ptt, dump_buf + offset, dump, 0, last_written_line);
2495 else
2496 DP_NOTICE(p_hwfn, true, "Unexpected Debug Bus error: internal buffer read pointer is not zero\n");
2497 }
2498
2499 return offset;
2500 }
2501
2502 /* Reads the specified DBG Bus PCI buffer range and copies it to the specified
2503 * buffer. Returns the dumped size in dwords.
2504 */
2505 static u32 ecore_bus_dump_pci_buf_range(struct ecore_hwfn *p_hwfn,
2506 u32 *dump_buf,
2507 bool dump,
2508 u32 start_line,
2509 u32 end_line)
2510 {
2511 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2512 u32 offset = 0;
2513
2514 /* Extract PCI buffer pointer from virtual address */
2515 void *virt_addr_lo = &dev_data->bus.pci_buf.virt_addr.lo;
2516 u32 *pci_buf_start = (u32*)(osal_uintptr_t)*((u64*)virt_addr_lo);
2517 u32 *pci_buf, line, i;
2518
2519 if (!dump)
2520 return (end_line - start_line + 1) * PCI_BUF_LINE_SIZE_IN_DWORDS;
2521
2522 for (line = start_line, pci_buf = pci_buf_start + start_line * PCI_BUF_LINE_SIZE_IN_DWORDS;
2523 line <= end_line;
2524 line++, offset += PCI_BUF_LINE_SIZE_IN_DWORDS)
2525 for (i = 0; i < PCI_BUF_LINE_SIZE_IN_DWORDS; i++, pci_buf++)
2526 dump_buf[offset + s_pci_buf_line_ind[i]] = *pci_buf;
2527
2528 return offset;
2529 }
2530
2531 /* Copies the DBG Bus PCI buffer to the specified buffer.
2532 * Returns the dumped size in dwords.
2533 */
2534 static u32 ecore_bus_dump_pci_buf(struct ecore_hwfn *p_hwfn,
2535 struct ecore_ptt *p_ptt,
2536 u32 *dump_buf,
2537 bool dump)
2538 {
2539 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2540 u32 next_wr_byte_offset, next_wr_line_offset;
2541 struct dbg_bus_mem_addr next_wr_phys_addr;
2542 u32 pci_buf_size_in_lines, offset = 0;
2543
2544 pci_buf_size_in_lines = dev_data->bus.pci_buf.size / PCI_BUF_LINE_SIZE_IN_BYTES;
2545
2546 /* Extract write pointer (physical address) */
2547 next_wr_phys_addr.lo = ecore_rd(p_hwfn, p_ptt, DBG_REG_EXT_BUFFER_WR_PTR);
2548 next_wr_phys_addr.hi = ecore_rd(p_hwfn, p_ptt, DBG_REG_EXT_BUFFER_WR_PTR + BYTES_IN_DWORD);
2549
2550 /* Convert write pointer to offset */
2551 next_wr_byte_offset = ecore_phys_addr_diff(&next_wr_phys_addr, &dev_data->bus.pci_buf.phys_addr);
2552 if ((next_wr_byte_offset % PCI_BUF_LINE_SIZE_IN_BYTES) || next_wr_byte_offset > dev_data->bus.pci_buf.size)
2553 return 0;
2554 next_wr_line_offset = next_wr_byte_offset / PCI_BUF_LINE_SIZE_IN_BYTES;
2555
2556 /* If the PCI buffer was wrapped, first dump from write pointer to buffer end */
2557 if (ecore_rd(p_hwfn, p_ptt, DBG_REG_WRAP_ON_EXT_BUFFER))
2558 offset += ecore_bus_dump_pci_buf_range(p_hwfn, dump_buf + offset, dump, next_wr_line_offset, pci_buf_size_in_lines - 1);
2559
2560 /* Dump from buffer start until write pointer */
2561 if (next_wr_line_offset)
2562 offset += ecore_bus_dump_pci_buf_range(p_hwfn, dump_buf + offset, dump, 0, next_wr_line_offset - 1);
2563
2564 return offset;
2565 }
2566
2567 /* Copies the DBG Bus recorded data to the specified buffer.
2568 * Returns the dumped size in dwords.
2569 */
2570 static u32 ecore_bus_dump_data(struct ecore_hwfn *p_hwfn,
2571 struct ecore_ptt *p_ptt,
2572 u32 *dump_buf,
2573 bool dump)
2574 {
2575 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2576
2577 switch (dev_data->bus.target) {
2578 case DBG_BUS_TARGET_ID_INT_BUF:
2579 return ecore_bus_dump_int_buf(p_hwfn, p_ptt, dump_buf, dump);
2580 case DBG_BUS_TARGET_ID_PCI:
2581 return ecore_bus_dump_pci_buf(p_hwfn, p_ptt, dump_buf, dump);
2582 default:
2583 break;
2584 }
2585
2586 return 0;
2587 }
2588
2589 /* Frees the Debug Bus PCI buffer */
2590 static void ecore_bus_free_pci_buf(struct ecore_hwfn *p_hwfn)
2591 {
2592 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2593 dma_addr_t pci_buf_phys_addr;
2594 void *virt_addr_lo;
2595 u32 *pci_buf;
2596
2597 /* Extract PCI buffer pointer from virtual address */
2598 virt_addr_lo = &dev_data->bus.pci_buf.virt_addr.lo;
2599 pci_buf = (u32*)(osal_uintptr_t)*((u64*)virt_addr_lo);
2600
2601 if (!dev_data->bus.pci_buf.size)
2602 return;
2603
2604 OSAL_MEMCPY(&pci_buf_phys_addr, &dev_data->bus.pci_buf.phys_addr, sizeof(pci_buf_phys_addr));
2605
2606 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, pci_buf, pci_buf_phys_addr, dev_data->bus.pci_buf.size);
2607
2608 dev_data->bus.pci_buf.size = 0;
2609 }
2610
2611 /* Dumps the list of DBG Bus inputs (blocks/Storms) to the specified buffer.
2612 * Returns the dumped size in dwords.
2613 */
2614 static u32 ecore_bus_dump_inputs(struct ecore_hwfn *p_hwfn,
2615 u32 *dump_buf,
2616 bool dump)
2617 {
2618 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2619 char storm_name[8] = "?storm";
2620 u32 block_id, offset = 0;
2621 u8 storm_id;
2622
2623 /* Store storms */
2624 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
2625 struct dbg_bus_storm_data *storm_bus = &dev_data->bus.storms[storm_id];
2626 struct storm_defs *storm = &s_storm_defs[storm_id];
2627
2628 if (!dev_data->bus.storms[storm_id].enabled)
2629 continue;
2630
2631 /* Dump section header */
2632 storm_name[0] = storm->letter;
2633 offset += ecore_dump_section_hdr(dump_buf + offset, dump, "bus_input", 3);
2634 offset += ecore_dump_str_param(dump_buf + offset, dump, "name", storm_name);
2635 offset += ecore_dump_num_param(dump_buf + offset, dump, "id", storm_bus->hw_id);
2636 offset += ecore_dump_str_param(dump_buf + offset, dump, "mode", s_storm_mode_defs[storm_bus->mode].name);
2637 }
2638
2639 /* Store blocks */
2640 for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
2641 struct dbg_bus_block_data *block_bus = &dev_data->bus.blocks[block_id];
2642 struct block_defs *block = s_block_defs[block_id];
2643
2644 if (!GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK))
2645 continue;
2646
2647 /* Dump section header */
2648 offset += ecore_dump_section_hdr(dump_buf + offset, dump, "bus_input", 4);
2649 offset += ecore_dump_str_param(dump_buf + offset, dump, "name", block->name);
2650 offset += ecore_dump_num_param(dump_buf + offset, dump, "line", block_bus->line_num);
2651 offset += ecore_dump_num_param(dump_buf + offset, dump, "en", GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK));
2652 offset += ecore_dump_num_param(dump_buf + offset, dump, "shr", GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_RIGHT_SHIFT));
2653 }
2654
2655 return offset;
2656 }
2657
2658 /* Dumps the Debug Bus header (params, inputs, data header) to the specified
2659 * buffer. Returns the dumped size in dwords.
2660 */
2661 static u32 ecore_bus_dump_hdr(struct ecore_hwfn *p_hwfn,
2662 struct ecore_ptt *p_ptt,
2663 u32 *dump_buf,
2664 bool dump)
2665 {
2666 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2667 char hw_id_mask_str[16];
2668 u32 offset = 0;
2669
2670 if (OSAL_SNPRINTF(hw_id_mask_str, sizeof(hw_id_mask_str), "0x%x", dev_data->bus.hw_id_mask) < 0)
2671 DP_NOTICE(p_hwfn, true, "Unexpected debug error: invalid HW ID mask\n");
2672
2673 /* Dump global params */
2674 offset += ecore_dump_common_global_params(p_hwfn, p_ptt, dump_buf + offset, dump, 5);
2675 offset += ecore_dump_str_param(dump_buf + offset, dump, "dump-type", "debug-bus");
2676 offset += ecore_dump_str_param(dump_buf + offset, dump, "wrap-mode", dev_data->bus.one_shot_en ? "one-shot" : "wrap-around");
2677 offset += ecore_dump_num_param(dump_buf + offset, dump, "hw-dwords", dev_data->bus.hw_dwords);
2678 offset += ecore_dump_str_param(dump_buf + offset, dump, "hw-id-mask", hw_id_mask_str);
2679 offset += ecore_dump_str_param(dump_buf + offset, dump, "target", s_dbg_target_names[dev_data->bus.target]);
2680
2681 offset += ecore_bus_dump_inputs(p_hwfn, dump_buf + offset, dump);
2682
2683 if (dev_data->bus.target != DBG_BUS_TARGET_ID_NIG) {
2684 u32 recorded_dwords = 0;
2685
2686 if (dump)
2687 recorded_dwords = ecore_bus_dump_data(p_hwfn, p_ptt, OSAL_NULL, false);
2688
2689 offset += ecore_dump_section_hdr(dump_buf + offset, dump, "bus_data", 1);
2690 offset += ecore_dump_num_param(dump_buf + offset, dump, "size", recorded_dwords);
2691 }
2692
2693 return offset;
2694 }
2695
2696 static bool ecore_is_mode_match(struct ecore_hwfn *p_hwfn,
2697 u16 *modes_buf_offset)
2698 {
2699 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2700 bool arg1, arg2;
2701 u8 tree_val;
2702
2703 /* Get next element from modes tree buffer */
2704 tree_val = ((u8*)s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr)[(*modes_buf_offset)++];
2705
2706 switch (tree_val) {
2707 case INIT_MODE_OP_NOT:
2708 return !ecore_is_mode_match(p_hwfn, modes_buf_offset);
2709 case INIT_MODE_OP_OR:
2710 case INIT_MODE_OP_AND:
2711 arg1 = ecore_is_mode_match(p_hwfn, modes_buf_offset);
2712 arg2 = ecore_is_mode_match(p_hwfn, modes_buf_offset);
2713 return (tree_val == INIT_MODE_OP_OR) ? (arg1 || arg2) : (arg1 && arg2);
2714 default: return dev_data->mode_enable[tree_val - MAX_INIT_MODE_OPS] > 0;
2715 }
2716 }
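
/* Illustration (hypothetical tree contents): the modes tree is a prefix
 * expression over u8 values. Values below MAX_INIT_MODE_OPS are operators;
 * anything else is a mode index biased by MAX_INIT_MODE_OPS. For example,
 *
 *   { INIT_MODE_OP_AND,
 *     MAX_INIT_MODE_OPS + MODE_K2,
 *     INIT_MODE_OP_NOT, MAX_INIT_MODE_OPS + MODE_ASIC }
 *
 * evaluates as "K2 && !ASIC" against dev_data->mode_enable[].
 */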
2717
2718 /* Returns true if the specified entity (indicated by GRC param) should be
2719 * included in the dump, false otherwise.
2720 */
2721 static bool ecore_grc_is_included(struct ecore_hwfn *p_hwfn,
2722 enum dbg_grc_params grc_param)
2723 {
2724 return ecore_grc_get_param(p_hwfn, grc_param) > 0;
2725 }
2726
2727 /* Returns true if the specified Storm should be included in the dump, false
2728 * otherwise.
2729 */
2730 static bool ecore_grc_is_storm_included(struct ecore_hwfn *p_hwfn,
2731 enum dbg_storms storm)
2732 {
2733 return ecore_grc_get_param(p_hwfn, (enum dbg_grc_params)storm) > 0;
2734 }
2735
2736 /* Returns true if the specified memory should be included in the dump, false
2737 * otherwise.
2738 */
2739 static bool ecore_grc_is_mem_included(struct ecore_hwfn *p_hwfn,
2740 enum block_id block_id,
2741 u8 mem_group_id)
2742 {
2743 struct block_defs *block = s_block_defs[block_id];
2744 u8 i;
2745
2746 /* Check Storm match */
2747 if (block->associated_to_storm &&
2748 !ecore_grc_is_storm_included(p_hwfn, (enum dbg_storms)block->storm_id))
2749 return false;
2750
2751 for (i = 0; i < NUM_BIG_RAM_TYPES; i++) {
2752 struct big_ram_defs *big_ram = &s_big_ram_defs[i];
2753
2754 if (mem_group_id == big_ram->mem_group_id || mem_group_id == big_ram->ram_mem_group_id)
2755 return ecore_grc_is_included(p_hwfn, big_ram->grc_param);
2756 }
2757
2758 switch (mem_group_id) {
2759 case MEM_GROUP_PXP_ILT:
2760 case MEM_GROUP_PXP_MEM:
2761 return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PXP);
2762 case MEM_GROUP_RAM:
2763 return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_RAM);
2764 case MEM_GROUP_PBUF:
2765 return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PBUF);
2766 case MEM_GROUP_CAU_MEM:
2767 case MEM_GROUP_CAU_SB:
2768 case MEM_GROUP_CAU_PI:
2769 return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CAU);
2770 case MEM_GROUP_QM_MEM:
2771 return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_QM);
2772 case MEM_GROUP_CFC_MEM:
2773 case MEM_GROUP_CONN_CFC_MEM:
2774 case MEM_GROUP_TASK_CFC_MEM:
2775 return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CFC);
2776 case MEM_GROUP_IGU_MEM:
2777 case MEM_GROUP_IGU_MSIX:
2778 return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IGU);
2779 case MEM_GROUP_MULD_MEM:
2780 return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MULD);
2781 case MEM_GROUP_PRS_MEM:
2782 return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PRS);
2783 case MEM_GROUP_DMAE_MEM:
2784 return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DMAE);
2785 case MEM_GROUP_TM_MEM:
2786 return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_TM);
2787 case MEM_GROUP_SDM_MEM:
2788 return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_SDM);
2789 case MEM_GROUP_TDIF_CTX:
2790 case MEM_GROUP_RDIF_CTX:
2791 return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DIF);
2792 case MEM_GROUP_CM_MEM:
2793 return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM);
2794 case MEM_GROUP_IOR:
2795 return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IOR);
2796 default:
2797 return true;
2798 }
2799 }
2800
2801 /* Stalls all Storms */
2802 static void ecore_grc_stall_storms(struct ecore_hwfn *p_hwfn,
2803 struct ecore_ptt *p_ptt,
2804 bool stall)
2805 {
2806 u32 reg_addr;
2807 u8 storm_id;
2808
2809 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
2810 if (!ecore_grc_is_storm_included(p_hwfn, (enum dbg_storms)storm_id))
2811 continue;
2812
2813 reg_addr = s_storm_defs[storm_id].sem_fast_mem_addr + SEM_FAST_REG_STALL_0_BB_K2;
2814 ecore_wr(p_hwfn, p_ptt, reg_addr, stall ? 1 : 0);
2815 }
2816
2817 OSAL_MSLEEP(STALL_DELAY_MS);
2818 }
2819
2820 /* Takes all blocks out of reset */
2821 static void ecore_grc_unreset_blocks(struct ecore_hwfn *p_hwfn,
2822 struct ecore_ptt *p_ptt)
2823 {
2824 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2825 u32 reg_val[MAX_DBG_RESET_REGS] = { 0 };
2826 u32 block_id, i;
2827
2828 /* Fill reset regs values */
2829 for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
2830 struct block_defs *block = s_block_defs[block_id];
2831
2832 if (block->has_reset_bit && block->unreset)
2833 reg_val[block->reset_reg] |= (1 << block->reset_bit_offset);
2834 }
2835
2836 /* Write reset registers */
2837 for (i = 0; i < MAX_DBG_RESET_REGS; i++) {
2838 if (!s_reset_regs_defs[i].exists[dev_data->chip_id])
2839 continue;
2840
2841 reg_val[i] |= s_reset_regs_defs[i].unreset_val;
2842
2843 if (reg_val[i])
2844 ecore_wr(p_hwfn, p_ptt, s_reset_regs_defs[i].addr + RESET_REG_UNRESET_OFFSET, reg_val[i]);
2845 }
2846 }
2847
2848 /* Returns the attention block data of the specified block */
2849 static const struct dbg_attn_block_type_data* ecore_get_block_attn_data(enum block_id block_id,
2850 enum dbg_attn_type attn_type)
2851 {
2852 const struct dbg_attn_block *base_attn_block_arr = (const struct dbg_attn_block*)s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr;
2853
2854 return &base_attn_block_arr[block_id].per_type_data[attn_type];
2855 }
2856
2857 /* Returns the attention registers of the specified block */
2858 static const struct dbg_attn_reg* ecore_get_block_attn_regs(enum block_id block_id,
2859 enum dbg_attn_type attn_type,
2860 u8 *num_attn_regs)
2861 {
2862 const struct dbg_attn_block_type_data *block_type_data = ecore_get_block_attn_data(block_id, attn_type);
2863
2864 *num_attn_regs = block_type_data->num_regs;
2865
2866 return &((const struct dbg_attn_reg*)s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)[block_type_data->regs_offset];
2867 }
2868
2869 /* For each block, clear the status of all parities */
2870 static void ecore_grc_clear_all_prty(struct ecore_hwfn *p_hwfn,
2871 struct ecore_ptt *p_ptt)
2872 {
2873 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2874 const struct dbg_attn_reg *attn_reg_arr;
2875 u8 reg_idx, num_attn_regs;
2876 u32 block_id;
2877
2878 for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
2879 if (dev_data->block_in_reset[block_id])
2880 continue;
2881
2882 attn_reg_arr = ecore_get_block_attn_regs((enum block_id)block_id, ATTN_TYPE_PARITY, &num_attn_regs);
2883
2884 for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
2885 const struct dbg_attn_reg *reg_data = &attn_reg_arr[reg_idx];
2886 u16 modes_buf_offset;
2887 bool eval_mode;
2888
2889 /* Check mode */
2890 eval_mode = GET_FIELD(reg_data->mode.data, DBG_MODE_HDR_EVAL_MODE) > 0;
2891 modes_buf_offset = GET_FIELD(reg_data->mode.data, DBG_MODE_HDR_MODES_BUF_OFFSET);
2892
2893 /* If Mode match: clear parity status */
2894 if (!eval_mode || ecore_is_mode_match(p_hwfn, &modes_buf_offset))
2895 ecore_rd(p_hwfn, p_ptt, DWORDS_TO_BYTES(reg_data->sts_clr_address));
2896 }
2897 }
2898 }
2899
2900 /* Dumps GRC registers section header. Returns the dumped size in dwords.
2901 * The following parameters are dumped:
2902 * - count: no. of dumped entries
2903 * - split: split type
2904 * - id: split ID (dumped only if split_id >= 0)
2905 * - param_name/param_val: user parameter name and value (dumped only if both
2906 * are != OSAL_NULL).
2907 */
2908 static u32 ecore_grc_dump_regs_hdr(u32 *dump_buf,
2909 bool dump,
2910 u32 num_reg_entries,
2911 const char *split_type,
2912 int split_id,
2913 const char *param_name,
2914 const char *param_val)
2915 {
2916 u8 num_params = 2 + (split_id >= 0 ? 1 : 0) + (param_name ? 1 : 0);
2917 u32 offset = 0;
2918
2919 offset += ecore_dump_section_hdr(dump_buf + offset, dump, "grc_regs", num_params);
2920 offset += ecore_dump_num_param(dump_buf + offset, dump, "count", num_reg_entries);
2921 offset += ecore_dump_str_param(dump_buf + offset, dump, "split", split_type);
2922 if (split_id >= 0)
2923 offset += ecore_dump_num_param(dump_buf + offset, dump, "id", split_id);
2924 if (param_name && param_val)
2925 offset += ecore_dump_str_param(dump_buf + offset, dump, param_name, param_val);
2926
2927 return offset;
2928 }
2929
2930 /* Dumps the GRC registers in the specified address range.
2931 * Returns the dumped size in dwords.
2932 * The addr and len arguments are specified in dwords.
2933 */
2934 static u32 ecore_grc_dump_addr_range(struct ecore_hwfn *p_hwfn,
2935 struct ecore_ptt *p_ptt,
2936 u32 *dump_buf,
2937 bool dump,
2938 u32 addr,
2939 u32 len,
2940 bool wide_bus)
2941 {
2942 u32 byte_addr = DWORDS_TO_BYTES(addr), offset = 0, i;
2943
2944 if (!dump)
2945 return len;
2946
2947 for (i = 0; i < len; i++, byte_addr += BYTES_IN_DWORD, offset++)
2948 *(dump_buf + offset) = ecore_rd(p_hwfn, p_ptt, byte_addr);
2949
2950 return offset;
2951 }
2952
2953 /* Dumps GRC registers sequence header. Returns the dumped size in dwords.
2954 * The addr and len arguments are specified in dwords.
2955 */
2956 static u32 ecore_grc_dump_reg_entry_hdr(u32 *dump_buf,
2957 bool dump,
2958 u32 addr,
2959 u32 len)
2960 {
2961 if (dump)
2962 *dump_buf = addr | (len << REG_DUMP_LEN_SHIFT);
2963
2964 return 1;
2965 }
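
/* Illustration: for addr 0x1000 (in dwords) and len 4, the header dword is
 * 0x1000 | (4 << REG_DUMP_LEN_SHIFT), so a parser recovers
 * len = hdr >> REG_DUMP_LEN_SHIFT and
 * addr = hdr & ((1 << REG_DUMP_LEN_SHIFT) - 1).
 */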
2966
2967 /* Dumps GRC registers sequence. Returns the dumped size in dwords.
2968 * The addr and len arguments are specified in dwords.
2969 */
2970 static u32 ecore_grc_dump_reg_entry(struct ecore_hwfn *p_hwfn,
2971 struct ecore_ptt *p_ptt,
2972 u32 *dump_buf,
2973 bool dump,
2974 u32 addr,
2975 u32 len,
2976 bool wide_bus)
2977 {
2978 u32 offset = 0;
2979
2980 offset += ecore_grc_dump_reg_entry_hdr(dump_buf, dump, addr, len);
2981 offset += ecore_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, dump, addr, len, wide_bus);
2982
2983 return offset;
2984 }
2985
2986 /* Dumps GRC registers sequence with skip cycle.
2987 * Returns the dumped size in dwords.
2988 * - addr: start GRC address in dwords
2989 * - total_len: total no. of dwords to dump
2990 * - read_len: no. consecutive dwords to read
2991 * - skip_len: no. of dwords to skip (and fill with zeros)
2992 */
2993 static u32 ecore_grc_dump_reg_entry_skip(struct ecore_hwfn *p_hwfn,
2994 struct ecore_ptt *p_ptt,
2995 u32 *dump_buf,
2996 bool dump,
2997 u32 addr,
2998 u32 total_len,
2999 u32 read_len,
3000 u32 skip_len)
3001 {
3002 u32 offset = 0, reg_offset = 0;
3003
3004 offset += ecore_grc_dump_reg_entry_hdr(dump_buf, dump, addr, total_len);
3005
3006 if (!dump)
3007 return offset + total_len;
3008
3009 while (reg_offset < total_len) {
3010 u32 curr_len = OSAL_MIN_T(u32, read_len, total_len - reg_offset);
3011
3012 offset += ecore_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, dump, addr, curr_len, false);
3013 reg_offset += curr_len;
3014 addr += curr_len;
3015
3016 if (reg_offset < total_len) {
3017 curr_len = OSAL_MIN_T(u32, skip_len, total_len - skip_len);
3018 OSAL_MEMSET(dump_buf + offset, 0, DWORDS_TO_BYTES(curr_len));
3019 offset += curr_len;
3020 reg_offset += curr_len;
3021 addr += curr_len;
3022 }
3023 }
3024
3025 return offset;
3026 }
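
/* Example: with total_len = 24, read_len = 8 and skip_len = 4, the entry is
 * written as 8 dwords read from GRC, 4 zero dwords, 8 read, 4 zero - skipped
 * (unreadable) holes are zero-filled so the entry keeps its nominal size.
 */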
3027
3028 /* Dumps GRC registers entries. Returns the dumped size in dwords. */
3029 static u32 ecore_grc_dump_regs_entries(struct ecore_hwfn *p_hwfn,
3030 struct ecore_ptt *p_ptt,
3031 struct dbg_array input_regs_arr,
3032 u32 *dump_buf,
3033 bool dump,
3034 bool block_enable[MAX_BLOCK_ID],
3035 u32 *num_dumped_reg_entries)
3036 {
3037 u32 i, offset = 0, input_offset = 0;
3038 bool mode_match = true;
3039
3040 *num_dumped_reg_entries = 0;
3041
3042 while (input_offset < input_regs_arr.size_in_dwords) {
3043 const struct dbg_dump_cond_hdr* cond_hdr = (const struct dbg_dump_cond_hdr*)&input_regs_arr.ptr[input_offset++];
3044 u16 modes_buf_offset;
3045 bool eval_mode;
3046
3047 /* Check mode/block */
3048 eval_mode = GET_FIELD(cond_hdr->mode.data, DBG_MODE_HDR_EVAL_MODE) > 0;
3049 if (eval_mode) {
3050 modes_buf_offset = GET_FIELD(cond_hdr->mode.data, DBG_MODE_HDR_MODES_BUF_OFFSET);
3051 mode_match = ecore_is_mode_match(p_hwfn, &modes_buf_offset);
3052 }
3053
3054 if (!mode_match || !block_enable[cond_hdr->block_id]) {
3055 input_offset += cond_hdr->data_size;
3056 continue;
3057 }
3058
3059 for (i = 0; i < cond_hdr->data_size; i++, input_offset++) {
3060 const struct dbg_dump_reg *reg = (const struct dbg_dump_reg*)&input_regs_arr.ptr[input_offset];
3061
3062 offset += ecore_grc_dump_reg_entry(p_hwfn, p_ptt, dump_buf + offset, dump,
3063 GET_FIELD(reg->data, DBG_DUMP_REG_ADDRESS),
3064 GET_FIELD(reg->data, DBG_DUMP_REG_LENGTH),
3065 GET_FIELD(reg->data, DBG_DUMP_REG_WIDE_BUS));
3066 (*num_dumped_reg_entries)++;
3067 }
3068 }
3069
3070 return offset;
3071 }
3072
3073 /* Dumps a GRC registers split data section. Returns the dumped size in dwords. */
3074 static u32 ecore_grc_dump_split_data(struct ecore_hwfn *p_hwfn,
3075 struct ecore_ptt *p_ptt,
3076 struct dbg_array input_regs_arr,
3077 u32 *dump_buf,
3078 bool dump,
3079 bool block_enable[MAX_BLOCK_ID],
3080 const char *split_type_name,
3081 u32 split_id,
3082 const char *param_name,
3083 const char *param_val)
3084 {
3085 u32 num_dumped_reg_entries, offset;
3086
3087 /* Calculate register dump header size (and skip it for now) */
3088 offset = ecore_grc_dump_regs_hdr(dump_buf, false, 0, split_type_name, split_id, param_name, param_val);
3089
3090 /* Dump registers */
3091 offset += ecore_grc_dump_regs_entries(p_hwfn, p_ptt, input_regs_arr, dump_buf + offset, dump, block_enable, &num_dumped_reg_entries);
3092
3093 /* Write register dump header */
3094 if (dump && num_dumped_reg_entries > 0)
3095 ecore_grc_dump_regs_hdr(dump_buf, dump, num_dumped_reg_entries, split_type_name, split_id, param_name, param_val);
3096
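    /* If no register entries were dumped, drop the section (and its header)
     * by returning 0.
     */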
    return num_dumped_reg_entries > 0 ? offset : 0;
}

/* Dumps registers according to the input registers array. Returns the dumped
 * size in dwords.
 */
static u32 ecore_grc_dump_registers(struct ecore_hwfn *p_hwfn,
                                    struct ecore_ptt *p_ptt,
                                    u32 *dump_buf,
                                    bool dump,
                                    bool block_enable[MAX_BLOCK_ID],
                                    const char *param_name,
                                    const char *param_val)
{
    struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
    struct chip_platform_defs *chip_platform;
    u32 offset = 0, input_offset = 0;
    u8 port_id, pf_id, vf_id;

    chip_platform = &s_chip_defs[dev_data->chip_id].per_platform[dev_data->platform_id];

    if (dump)
        DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "Dumping registers...\n");

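    /* The input array is a sequence of split sections: each split header is
     * followed by the register entries that belong to that split.
     */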
    while (input_offset < s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].size_in_dwords) {
        const struct dbg_dump_split_hdr *split_hdr;
        struct dbg_array curr_input_regs_arr;
        u32 split_data_size;
        u8 split_type_id;

        split_hdr = (const struct dbg_dump_split_hdr *)&s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr[input_offset++];
        split_type_id = GET_FIELD(split_hdr->hdr, DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
        split_data_size = GET_FIELD(split_hdr->hdr, DBG_DUMP_SPLIT_HDR_DATA_SIZE);
        curr_input_regs_arr.ptr = &s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr[input_offset];
        curr_input_regs_arr.size_in_dwords = split_data_size;

        switch (split_type_id) {
        case SPLIT_TYPE_NONE:
            offset += ecore_grc_dump_split_data(p_hwfn, p_ptt, curr_input_regs_arr, dump_buf + offset, dump, block_enable, "eng", (u32)(-1), param_name, param_val);
            break;

        case SPLIT_TYPE_PORT:
            for (port_id = 0; port_id < chip_platform->num_ports; port_id++) {
                if (dump)
                    ecore_port_pretend(p_hwfn, p_ptt, port_id);
                offset += ecore_grc_dump_split_data(p_hwfn, p_ptt, curr_input_regs_arr, dump_buf + offset, dump, block_enable, "port", port_id, param_name, param_val);
            }
            break;

        case SPLIT_TYPE_PF:
        case SPLIT_TYPE_PORT_PF:
            for (pf_id = 0; pf_id < chip_platform->num_pfs; pf_id++) {
                if (dump)
                    ecore_fid_pretend(p_hwfn, p_ptt, (pf_id << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT));
                offset += ecore_grc_dump_split_data(p_hwfn, p_ptt, curr_input_regs_arr, dump_buf + offset, dump, block_enable, "pf", pf_id, param_name, param_val);
            }
            break;

        case SPLIT_TYPE_VF:
            for (vf_id = 0; vf_id < chip_platform->num_vfs; vf_id++) {
                if (dump)
                    ecore_fid_pretend(p_hwfn, p_ptt, (1 << PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT) | (vf_id << PXP_PRETEND_CONCRETE_FID_VFID_SHIFT));
                offset += ecore_grc_dump_split_data(p_hwfn, p_ptt, curr_input_regs_arr, dump_buf + offset, dump, block_enable, "vf", vf_id, param_name, param_val);
            }
            break;

        default:
            break;
        }

        input_offset += split_data_size;
    }

    /* Pretend to original PF */
    if (dump)
        ecore_fid_pretend(p_hwfn, p_ptt, (p_hwfn->rel_pf_id << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT));

    return offset;
}

/* Dump reset registers. Returns the dumped size in dwords. */
static u32 ecore_grc_dump_reset_regs(struct ecore_hwfn *p_hwfn,
                                     struct ecore_ptt *p_ptt,
                                     u32 *dump_buf,
                                     bool dump)
{
    struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
    u32 i, offset = 0, num_regs = 0;

    /* Calculate header size */
    offset += ecore_grc_dump_regs_hdr(dump_buf, false, 0, "eng", -1, OSAL_NULL, OSAL_NULL);

    /* Write reset registers */
    for (i = 0; i < MAX_DBG_RESET_REGS; i++) {
        if (!s_reset_regs_defs[i].exists[dev_data->chip_id])
            continue;

        offset += ecore_grc_dump_reg_entry(p_hwfn, p_ptt, dump_buf + offset, dump, BYTES_TO_DWORDS(s_reset_regs_defs[i].addr), 1, false);
        num_regs++;
    }

    /* Write header */
    if (dump)
        ecore_grc_dump_regs_hdr(dump_buf, true, num_regs, "eng", -1, OSAL_NULL, OSAL_NULL);

    return offset;
}

/* Dump registers that are modified during GRC Dump and therefore must be
 * dumped first. Returns the dumped size in dwords.
 */
static u32 ecore_grc_dump_modified_regs(struct ecore_hwfn *p_hwfn,
                                        struct ecore_ptt *p_ptt,
                                        u32 *dump_buf,
                                        bool dump)
{
    struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
    u32 block_id, offset = 0, num_reg_entries = 0;
    const struct dbg_attn_reg *attn_reg_arr;
    u8 storm_id, reg_idx, num_attn_regs;

    /* Calculate header size */
    offset += ecore_grc_dump_regs_hdr(dump_buf, false, 0, "eng", -1, OSAL_NULL, OSAL_NULL);

    /* Write parity registers */
    for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
        if (dev_data->block_in_reset[block_id] && dump)
            continue;

        attn_reg_arr = ecore_get_block_attn_regs((enum block_id)block_id, ATTN_TYPE_PARITY, &num_attn_regs);

        for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
            const struct dbg_attn_reg *reg_data = &attn_reg_arr[reg_idx];
            u16 modes_buf_offset;
            bool eval_mode;

            /* Check mode */
            eval_mode = GET_FIELD(reg_data->mode.data, DBG_MODE_HDR_EVAL_MODE) > 0;
            modes_buf_offset = GET_FIELD(reg_data->mode.data, DBG_MODE_HDR_MODES_BUF_OFFSET);
            if (eval_mode && !ecore_is_mode_match(p_hwfn, &modes_buf_offset))
                continue;

            /* Mode match: read & dump registers */
            offset += ecore_grc_dump_reg_entry(p_hwfn, p_ptt, dump_buf + offset, dump, reg_data->mask_address, 1, false);
            offset += ecore_grc_dump_reg_entry(p_hwfn, p_ptt, dump_buf + offset, dump, GET_FIELD(reg_data->data, DBG_ATTN_REG_STS_ADDRESS), 1, false);
            num_reg_entries += 2;
        }
    }

    /* Write Storm stall status registers */
    for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
        struct storm_defs *storm = &s_storm_defs[storm_id];

        if (dev_data->block_in_reset[storm->block_id] && dump)
            continue;

        offset += ecore_grc_dump_reg_entry(p_hwfn, p_ptt, dump_buf + offset, dump,
                                           BYTES_TO_DWORDS(storm->sem_fast_mem_addr + SEM_FAST_REG_STALLED), 1, false);
        num_reg_entries++;
    }

    /* Write header */
    if (dump)
        ecore_grc_dump_regs_hdr(dump_buf, true, num_reg_entries, "eng", -1, OSAL_NULL, OSAL_NULL);

    return offset;
}

/* Dumps registers that can't be represented in the debug arrays */
static u32 ecore_grc_dump_special_regs(struct ecore_hwfn *p_hwfn,
                                       struct ecore_ptt *p_ptt,
                                       u32 *dump_buf,
                                       bool dump)
{
    u32 offset = 0;

    offset += ecore_grc_dump_regs_hdr(dump_buf, dump, 2, "eng", -1, OSAL_NULL, OSAL_NULL);

    /* Dump R/TDIF_REG_DEBUG_ERROR_INFO_SIZE (every 8th register is
     * skipped).
     */
    offset += ecore_grc_dump_reg_entry_skip(p_hwfn, p_ptt, dump_buf + offset, dump, BYTES_TO_DWORDS(RDIF_REG_DEBUG_ERROR_INFO), RDIF_REG_DEBUG_ERROR_INFO_SIZE, 7, 1);
    offset += ecore_grc_dump_reg_entry_skip(p_hwfn, p_ptt, dump_buf + offset, dump, BYTES_TO_DWORDS(TDIF_REG_DEBUG_ERROR_INFO), TDIF_REG_DEBUG_ERROR_INFO_SIZE, 7, 1);

    return offset;
}

/* Dumps a GRC memory header (section and params). Returns the dumped size in
 * dwords. The following parameters are dumped:
 * - name: dumped only if it's not OSAL_NULL.
 * - addr: in dwords, dumped only if name is OSAL_NULL.
 * - len: in dwords, always dumped.
 * - width: dumped if it's not zero.
 * - packed: dumped only if it's not false.
 * - mem_group: always dumped.
 * - is_storm: true only if the memory is related to a Storm.
 * - storm_letter: valid only if is_storm is true.
 */
static u32 ecore_grc_dump_mem_hdr(struct ecore_hwfn *p_hwfn,
                                  u32 *dump_buf,
                                  bool dump,
                                  const char *name,
                                  u32 addr,
                                  u32 len,
                                  u32 bit_width,
                                  bool packed,
                                  const char *mem_group,
                                  bool is_storm,
                                  char storm_letter)
{
    u8 num_params = 3;
    u32 offset = 0;
    char buf[64];

    if (!len)
        DP_NOTICE(p_hwfn, true, "Unexpected GRC Dump error: dumped memory size must be non-zero\n");

    if (bit_width)
        num_params++;
    if (packed)
        num_params++;

    /* Dump section header */
    offset += ecore_dump_section_hdr(dump_buf + offset, dump, "grc_mem", num_params);

    if (name) {

        /* Dump name */
        if (is_storm) {
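            /* Overwrite the '?' placeholder with the Storm letter */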
            OSAL_STRCPY(buf, "?STORM_");
            buf[0] = storm_letter;
            OSAL_STRCPY(buf + OSAL_STRLEN(buf), name);
        } else {
            OSAL_STRCPY(buf, name);
        }

        offset += ecore_dump_str_param(dump_buf + offset, dump, "name", buf);
        if (dump)
            DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "Dumping %d registers from %s...\n", len, buf);
    } else {

        /* Dump address */
        u32 addr_in_bytes = DWORDS_TO_BYTES(addr);

        offset += ecore_dump_num_param(dump_buf + offset, dump, "addr", addr_in_bytes);
        if (dump && len > 64)
            DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "Dumping %d registers from address 0x%x...\n", len, addr_in_bytes);
    }

    /* Dump len */
    offset += ecore_dump_num_param(dump_buf + offset, dump, "len", len);

    /* Dump bit width */
    if (bit_width)
        offset += ecore_dump_num_param(dump_buf + offset, dump, "width", bit_width);

    /* Dump packed */
    if (packed)
        offset += ecore_dump_num_param(dump_buf + offset, dump, "packed", 1);

    /* Dump reg type */
    if (is_storm) {
        OSAL_STRCPY(buf, "?STORM_");
        buf[0] = storm_letter;
        OSAL_STRCPY(buf + OSAL_STRLEN(buf), mem_group);
    } else {
        OSAL_STRCPY(buf, mem_group);
    }

    offset += ecore_dump_str_param(dump_buf + offset, dump, "type", buf);

    return offset;
}

/* Dumps a single GRC memory. If name is OSAL_NULL, the memory is stored by
 * address. Returns the dumped size in dwords.
 * The addr and len arguments are specified in dwords.
 */
static u32 ecore_grc_dump_mem(struct ecore_hwfn *p_hwfn,
                              struct ecore_ptt *p_ptt,
                              u32 *dump_buf,
                              bool dump,
                              const char *name,
                              u32 addr,
                              u32 len,
                              bool wide_bus,
                              u32 bit_width,
                              bool packed,
                              const char *mem_group,
                              bool is_storm,
                              char storm_letter)
{
    u32 offset = 0;

    offset += ecore_grc_dump_mem_hdr(p_hwfn, dump_buf + offset, dump, name, addr, len, bit_width, packed, mem_group, is_storm, storm_letter);
    offset += ecore_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, dump, addr, len, wide_bus);

    return offset;
}

/* Dumps GRC memories entries. Returns the dumped size in dwords. */
static u32 ecore_grc_dump_mem_entries(struct ecore_hwfn *p_hwfn,
                                      struct ecore_ptt *p_ptt,
                                      struct dbg_array input_mems_arr,
                                      u32 *dump_buf,
                                      bool dump)
{
    u32 i, offset = 0, input_offset = 0;
    bool mode_match = true;

    while (input_offset < input_mems_arr.size_in_dwords) {
        const struct dbg_dump_cond_hdr *cond_hdr;
        u16 modes_buf_offset;
        u32 num_entries;
        bool eval_mode;

        cond_hdr = (const struct dbg_dump_cond_hdr *)&input_mems_arr.ptr[input_offset++];
        num_entries = cond_hdr->data_size / MEM_DUMP_ENTRY_SIZE_DWORDS;

        /* Check required mode */
        eval_mode = GET_FIELD(cond_hdr->mode.data, DBG_MODE_HDR_EVAL_MODE) > 0;
        if (eval_mode) {
            modes_buf_offset = GET_FIELD(cond_hdr->mode.data, DBG_MODE_HDR_MODES_BUF_OFFSET);
            mode_match = ecore_is_mode_match(p_hwfn, &modes_buf_offset);
        }

        if (!mode_match) {
            input_offset += cond_hdr->data_size;
            continue;
        }

        for (i = 0; i < num_entries; i++, input_offset += MEM_DUMP_ENTRY_SIZE_DWORDS) {
            const struct dbg_dump_mem *mem = (const struct dbg_dump_mem *)&input_mems_arr.ptr[input_offset];
            u8 mem_group_id = GET_FIELD(mem->dword0, DBG_DUMP_MEM_MEM_GROUP_ID);
            bool is_storm = false, mem_wide_bus;
            char storm_letter = 'a';
            u32 mem_addr, mem_len;

            if (mem_group_id >= MEM_GROUPS_NUM) {
                DP_NOTICE(p_hwfn, true, "Invalid mem_group_id\n");
                return 0;
            }

            if (!ecore_grc_is_mem_included(p_hwfn, (enum block_id)cond_hdr->block_id, mem_group_id))
                continue;

            mem_addr = GET_FIELD(mem->dword0, DBG_DUMP_MEM_ADDRESS);
            mem_len = GET_FIELD(mem->dword1, DBG_DUMP_MEM_LENGTH);
            mem_wide_bus = GET_FIELD(mem->dword1, DBG_DUMP_MEM_WIDE_BUS);

            /* Update memory length for CCFC/TCFC memories
             * according to number of LCIDs/LTIDs.
             */
            if (mem_group_id == MEM_GROUP_CONN_CFC_MEM) {
                if (mem_len % MAX_LCIDS) {
                    DP_NOTICE(p_hwfn, true, "Invalid CCFC connection memory size\n");
                    return 0;
                }

                mem_len = ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NUM_LCIDS) * (mem_len / MAX_LCIDS);
            } else if (mem_group_id == MEM_GROUP_TASK_CFC_MEM) {
                if (mem_len % MAX_LTIDS) {
                    DP_NOTICE(p_hwfn, true, "Invalid TCFC task memory size\n");
                    return 0;
                }

                mem_len = ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NUM_LTIDS) * (mem_len / MAX_LTIDS);
            }

            /* If the memory is associated with a Storm, update the Storm
             * details.
             */
            if (s_block_defs[cond_hdr->block_id]->associated_to_storm) {
                is_storm = true;
                storm_letter = s_storm_defs[s_block_defs[cond_hdr->block_id]->storm_id].letter;
            }

            /* Dump memory */
            offset += ecore_grc_dump_mem(p_hwfn, p_ptt, dump_buf + offset, dump, OSAL_NULL, mem_addr, mem_len, mem_wide_bus,
                                         0, false, s_mem_group_names[mem_group_id], is_storm, storm_letter);
        }
    }

    return offset;
}

/* Dumps GRC memories according to the input array dump_mem.
 * Returns the dumped size in dwords.
 */
static u32 ecore_grc_dump_memories(struct ecore_hwfn *p_hwfn,
                                   struct ecore_ptt *p_ptt,
                                   u32 *dump_buf,
                                   bool dump)
{
    u32 offset = 0, input_offset = 0;

    while (input_offset < s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].size_in_dwords) {
        const struct dbg_dump_split_hdr *split_hdr;
        struct dbg_array curr_input_mems_arr;
        u32 split_data_size;
        u8 split_type_id;

        split_hdr = (const struct dbg_dump_split_hdr *)&s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr[input_offset++];
        split_type_id = GET_FIELD(split_hdr->hdr, DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
        split_data_size = GET_FIELD(split_hdr->hdr, DBG_DUMP_SPLIT_HDR_DATA_SIZE);
        curr_input_mems_arr.ptr = &s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr[input_offset];
        curr_input_mems_arr.size_in_dwords = split_data_size;

        switch (split_type_id) {
        case SPLIT_TYPE_NONE:
            offset += ecore_grc_dump_mem_entries(p_hwfn, p_ptt, curr_input_mems_arr, dump_buf + offset, dump);
            break;

        default:
            DP_NOTICE(p_hwfn, true, "Dumping split memories is currently not supported\n");
            break;
        }

        input_offset += split_data_size;
    }

    return offset;
}

/* Dumps GRC context data for the specified Storm.
 * Returns the dumped size in dwords.
 * The lid_size argument is specified in quad-regs.
 */
static u32 ecore_grc_dump_ctx_data(struct ecore_hwfn *p_hwfn,
                                   struct ecore_ptt *p_ptt,
                                   u32 *dump_buf,
                                   bool dump,
                                   const char *name,
                                   u32 num_lids,
                                   u32 lid_size,
                                   u32 rd_reg_addr,
                                   u8 storm_id)
{
    struct storm_defs *storm = &s_storm_defs[storm_id];
    u32 i, lid, total_size, offset = 0;

    if (!lid_size)
        return 0;

    lid_size *= BYTES_IN_DWORD;
    total_size = num_lids * lid_size;

    offset += ecore_grc_dump_mem_hdr(p_hwfn, dump_buf + offset, dump, name, 0, total_size, lid_size * 32, false, name, true, storm->letter);

    if (!dump)
        return offset + total_size;

    /* Dump context data */
    for (lid = 0; lid < num_lids; lid++) {
        for (i = 0; i < lid_size; i++, offset++) {
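            /* Select the dword to read: the dword index is written above the
             * 9-bit LID field (assumed CM context read-control layout).
             */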
            ecore_wr(p_hwfn, p_ptt, storm->cm_ctx_wr_addr, (i << 9) | lid);
            *(dump_buf + offset) = ecore_rd(p_hwfn, p_ptt, rd_reg_addr);
        }
    }

    return offset;
}

/* Dumps GRC contexts. Returns the dumped size in dwords. */
static u32 ecore_grc_dump_ctx(struct ecore_hwfn *p_hwfn,
                              struct ecore_ptt *p_ptt,
                              u32 *dump_buf,
                              bool dump)
{
    u32 offset = 0;
    u8 storm_id;

    for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
        struct storm_defs *storm = &s_storm_defs[storm_id];

        if (!ecore_grc_is_storm_included(p_hwfn, (enum dbg_storms)storm_id))
            continue;

        /* Dump Conn AG context */
        offset += ecore_grc_dump_ctx_data(p_hwfn, p_ptt, dump_buf + offset, dump, "CONN_AG_CTX", ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NUM_LCIDS),
                                          storm->cm_conn_ag_ctx_lid_size, storm->cm_conn_ag_ctx_rd_addr, storm_id);

        /* Dump Conn ST context */
        offset += ecore_grc_dump_ctx_data(p_hwfn, p_ptt, dump_buf + offset, dump, "CONN_ST_CTX", ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NUM_LCIDS),
                                          storm->cm_conn_st_ctx_lid_size, storm->cm_conn_st_ctx_rd_addr, storm_id);

        /* Dump Task AG context */
        offset += ecore_grc_dump_ctx_data(p_hwfn, p_ptt, dump_buf + offset, dump, "TASK_AG_CTX", ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NUM_LTIDS),
                                          storm->cm_task_ag_ctx_lid_size, storm->cm_task_ag_ctx_rd_addr, storm_id);

        /* Dump Task ST context */
        offset += ecore_grc_dump_ctx_data(p_hwfn, p_ptt, dump_buf + offset, dump, "TASK_ST_CTX", ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NUM_LTIDS),
                                          storm->cm_task_st_ctx_lid_size, storm->cm_task_st_ctx_rd_addr, storm_id);
    }

    return offset;
}

/* Dumps GRC IORs data. Returns the dumped size in dwords. */
static u32 ecore_grc_dump_iors(struct ecore_hwfn *p_hwfn,
                               struct ecore_ptt *p_ptt,
                               u32 *dump_buf,
                               bool dump)
{
    char buf[10] = "IOR_SET_?";
    u32 addr, offset = 0;
    u8 storm_id, set_id;

    for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
        struct storm_defs *storm = &s_storm_defs[storm_id];

        if (!ecore_grc_is_storm_included(p_hwfn, (enum dbg_storms)storm_id))
            continue;

        for (set_id = 0; set_id < NUM_IOR_SETS; set_id++) {
            addr = BYTES_TO_DWORDS(storm->sem_fast_mem_addr + SEM_FAST_REG_STORM_REG_FILE) + IOR_SET_OFFSET(set_id);
            buf[OSAL_STRLEN(buf) - 1] = '0' + set_id;
            offset += ecore_grc_dump_mem(p_hwfn, p_ptt, dump_buf + offset, dump, buf, addr, IORS_PER_SET, false, 32, false, "ior", true, storm->letter);
        }
    }

    return offset;
}

/* Dump VFC CAM. Returns the dumped size in dwords. */
static u32 ecore_grc_dump_vfc_cam(struct ecore_hwfn *p_hwfn,
                                  struct ecore_ptt *p_ptt,
                                  u32 *dump_buf,
                                  bool dump,
                                  u8 storm_id)
{
    u32 total_size = VFC_CAM_NUM_ROWS * VFC_CAM_RESP_DWORDS;
    struct storm_defs *storm = &s_storm_defs[storm_id];
    u32 cam_addr[VFC_CAM_ADDR_DWORDS] = { 0 };
    u32 cam_cmd[VFC_CAM_CMD_DWORDS] = { 0 };
    u32 row, i, offset = 0;

    offset += ecore_grc_dump_mem_hdr(p_hwfn, dump_buf + offset, dump, "vfc_cam", 0, total_size, 256, false, "vfc_cam", true, storm->letter);

    if (!dump)
        return offset + total_size;

    /* Prepare CAM address */
    SET_VAR_FIELD(cam_addr, VFC_CAM_ADDR, OP, VFC_OPCODE_CAM_RD);

    for (row = 0; row < VFC_CAM_NUM_ROWS; row++, offset += VFC_CAM_RESP_DWORDS) {

        /* Write VFC CAM command */
        SET_VAR_FIELD(cam_cmd, VFC_CAM_CMD, ROW, row);
        ARR_REG_WR(p_hwfn, p_ptt, storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_WR, cam_cmd, VFC_CAM_CMD_DWORDS);

        /* Write VFC CAM address */
        ARR_REG_WR(p_hwfn, p_ptt, storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_ADDR, cam_addr, VFC_CAM_ADDR_DWORDS);

        /* Read VFC CAM read response */
        ARR_REG_RD(p_hwfn, p_ptt, storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_RD, dump_buf + offset, VFC_CAM_RESP_DWORDS);
    }

    return offset;
}

/* Dump VFC RAM. Returns the dumped size in dwords. */
static u32 ecore_grc_dump_vfc_ram(struct ecore_hwfn *p_hwfn,
                                  struct ecore_ptt *p_ptt,
                                  u32 *dump_buf,
                                  bool dump,
                                  u8 storm_id,
                                  struct vfc_ram_defs *ram_defs)
{
    u32 total_size = ram_defs->num_rows * VFC_RAM_RESP_DWORDS;
    struct storm_defs *storm = &s_storm_defs[storm_id];
    u32 ram_addr[VFC_RAM_ADDR_DWORDS] = { 0 };
    u32 ram_cmd[VFC_RAM_CMD_DWORDS] = { 0 };
    u32 row, i, offset = 0;

    offset += ecore_grc_dump_mem_hdr(p_hwfn, dump_buf + offset, dump, ram_defs->mem_name, 0, total_size, 256, false, ram_defs->type_name, true, storm->letter);

    /* Prepare RAM address */
    SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, OP, VFC_OPCODE_RAM_RD);

    if (!dump)
        return offset + total_size;

    for (row = ram_defs->base_row; row < ram_defs->base_row + ram_defs->num_rows; row++, offset += VFC_RAM_RESP_DWORDS) {

        /* Write VFC RAM command */
        ARR_REG_WR(p_hwfn, p_ptt, storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_WR, ram_cmd, VFC_RAM_CMD_DWORDS);

        /* Write VFC RAM address */
        SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, ROW, row);
        ARR_REG_WR(p_hwfn, p_ptt, storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_ADDR, ram_addr, VFC_RAM_ADDR_DWORDS);

        /* Read VFC RAM read response */
        ARR_REG_RD(p_hwfn, p_ptt, storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_RD, dump_buf + offset, VFC_RAM_RESP_DWORDS);
    }

    return offset;
}

/* Dumps GRC VFC data. Returns the dumped size in dwords. */
static u32 ecore_grc_dump_vfc(struct ecore_hwfn *p_hwfn,
                              struct ecore_ptt *p_ptt,
                              u32 *dump_buf,
                              bool dump)
{
    struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
    u8 storm_id, i;
    u32 offset = 0;

    for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
        if (!ecore_grc_is_storm_included(p_hwfn, (enum dbg_storms)storm_id) ||
            !s_storm_defs[storm_id].has_vfc ||
            (storm_id == DBG_PSTORM_ID && dev_data->platform_id != PLATFORM_ASIC))
            continue;

        /* Read CAM */
        offset += ecore_grc_dump_vfc_cam(p_hwfn, p_ptt, dump_buf + offset, dump, storm_id);

        /* Read RAM */
        for (i = 0; i < NUM_VFC_RAM_TYPES; i++)
            offset += ecore_grc_dump_vfc_ram(p_hwfn, p_ptt, dump_buf + offset, dump, storm_id, &s_vfc_ram_defs[i]);
    }

    return offset;
}

/* Dumps GRC RSS data. Returns the dumped size in dwords. */
static u32 ecore_grc_dump_rss(struct ecore_hwfn *p_hwfn,
                              struct ecore_ptt *p_ptt,
                              u32 *dump_buf,
                              bool dump)
{
    struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
    u32 offset = 0;
    u8 rss_mem_id;

    for (rss_mem_id = 0; rss_mem_id < NUM_RSS_MEM_TYPES; rss_mem_id++) {
        u32 rss_addr, num_entries, entry_width, total_dwords, i;
        struct rss_mem_defs *rss_defs;
        bool packed;

        rss_defs = &s_rss_mem_defs[rss_mem_id];
        rss_addr = rss_defs->addr;
        num_entries = rss_defs->num_entries[dev_data->chip_id];
        entry_width = rss_defs->entry_width[dev_data->chip_id];
        total_dwords = (num_entries * entry_width) / 32;
        packed = (entry_width == 16);
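        /* entry_width is in bits; 16-bit entries are marked as packed */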

        offset += ecore_grc_dump_mem_hdr(p_hwfn, dump_buf + offset, dump, rss_defs->mem_name, 0, total_dwords,
                                         entry_width, packed, rss_defs->type_name, false, 0);

        /* Dump RSS data */
        if (!dump) {
            offset += total_dwords;
            continue;
        }

        for (i = 0; i < total_dwords; i += RSS_REG_RSS_RAM_DATA_SIZE, rss_addr++) {
            ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_ADDR, rss_addr);
            offset += ecore_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, dump, BYTES_TO_DWORDS(RSS_REG_RSS_RAM_DATA), RSS_REG_RSS_RAM_DATA_SIZE, false);
        }
    }

    return offset;
}

/* Dumps GRC Big RAM. Returns the dumped size in dwords. */
static u32 ecore_grc_dump_big_ram(struct ecore_hwfn *p_hwfn,
                                  struct ecore_ptt *p_ptt,
                                  u32 *dump_buf,
                                  bool dump,
                                  u8 big_ram_id)
{
    struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
    u32 total_blocks, ram_size, offset = 0, i;
    char mem_name[12] = "???_BIG_RAM";
    char type_name[8] = "???_RAM";
    struct big_ram_defs *big_ram;

    big_ram = &s_big_ram_defs[big_ram_id];
    total_blocks = big_ram->num_of_blocks[dev_data->chip_id];
    ram_size = total_blocks * BIG_RAM_BLOCK_SIZE_DWORDS;

    OSAL_STRNCPY(type_name, big_ram->instance_name, OSAL_STRLEN(big_ram->instance_name));
    OSAL_STRNCPY(mem_name, big_ram->instance_name, OSAL_STRLEN(big_ram->instance_name));

    /* Dump memory header */
    offset += ecore_grc_dump_mem_hdr(p_hwfn, dump_buf + offset, dump, mem_name, 0, ram_size, BIG_RAM_BLOCK_SIZE_BYTES * 8, false, type_name, false, 0);

    /* Read and dump Big RAM data */
    if (!dump)
        return offset + ram_size;

    /* Dump Big RAM */
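    /* Each iteration reads two consecutive blocks through the Big RAM data
     * register window.
     */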
    for (i = 0; i < total_blocks / 2; i++) {
        ecore_wr(p_hwfn, p_ptt, big_ram->addr_reg_addr, i);
        offset += ecore_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, dump, BYTES_TO_DWORDS(big_ram->data_reg_addr), 2 * BIG_RAM_BLOCK_SIZE_DWORDS, false);
    }

    return offset;
}

static u32 ecore_grc_dump_mcp(struct ecore_hwfn *p_hwfn,
                              struct ecore_ptt *p_ptt,
                              u32 *dump_buf,
                              bool dump)
{
    bool block_enable[MAX_BLOCK_ID] = { 0 };
    bool halted = false;
    u32 offset = 0;

    /* Halt MCP */
    if (dump && !ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) {
        halted = !ecore_mcp_halt(p_hwfn, p_ptt);
        if (!halted)
            DP_NOTICE(p_hwfn, false, "MCP halt failed!\n");
    }

    /* Dump MCP scratchpad */
    offset += ecore_grc_dump_mem(p_hwfn, p_ptt, dump_buf + offset, dump, OSAL_NULL, BYTES_TO_DWORDS(MCP_REG_SCRATCH), MCP_REG_SCRATCH_SIZE, false, 0, false, "MCP", false, 0);

    /* Dump MCP cpu_reg_file */
    offset += ecore_grc_dump_mem(p_hwfn, p_ptt, dump_buf + offset, dump, OSAL_NULL, BYTES_TO_DWORDS(MCP_REG_CPU_REG_FILE), MCP_REG_CPU_REG_FILE_SIZE, false, 0, false, "MCP", false, 0);

    /* Dump MCP registers */
    block_enable[BLOCK_MCP] = true;
    offset += ecore_grc_dump_registers(p_hwfn, p_ptt, dump_buf + offset, dump, block_enable, "block", "MCP");

    /* Dump required non-MCP registers */
    offset += ecore_grc_dump_regs_hdr(dump_buf + offset, dump, 1, "eng", -1, "block", "MCP");
    offset += ecore_grc_dump_reg_entry(p_hwfn, p_ptt, dump_buf + offset, dump, BYTES_TO_DWORDS(MISC_REG_SHARED_MEM_ADDR), 1, false);

    /* Release MCP */
    if (halted && ecore_mcp_resume(p_hwfn, p_ptt))
        DP_NOTICE(p_hwfn, false, "Failed to resume MCP after halt!\n");

    return offset;
}

/* Dumps the tbus indirect memory for all PHYs. */
static u32 ecore_grc_dump_phy(struct ecore_hwfn *p_hwfn,
                              struct ecore_ptt *p_ptt,
                              u32 *dump_buf,
                              bool dump)
{
    u32 offset = 0, tbus_lo_offset, tbus_hi_offset;
    char mem_name[32];
    u8 phy_id;

    for (phy_id = 0; phy_id < OSAL_ARRAY_SIZE(s_phy_defs); phy_id++) {
        u32 addr_lo_addr, addr_hi_addr, data_lo_addr, data_hi_addr;
        struct phy_defs *phy_defs;
        u8 *bytes_buf;

        phy_defs = &s_phy_defs[phy_id];
        addr_lo_addr = phy_defs->base_addr + phy_defs->tbus_addr_lo_addr;
        addr_hi_addr = phy_defs->base_addr + phy_defs->tbus_addr_hi_addr;
        data_lo_addr = phy_defs->base_addr + phy_defs->tbus_data_lo_addr;
        data_hi_addr = phy_defs->base_addr + phy_defs->tbus_data_hi_addr;
        bytes_buf = (u8 *)(dump_buf + offset);

        if (OSAL_SNPRINTF(mem_name, sizeof(mem_name), "tbus_%s", phy_defs->phy_name) < 0)
            DP_NOTICE(p_hwfn, true, "Unexpected debug error: invalid PHY memory name\n");

        offset += ecore_grc_dump_mem_hdr(p_hwfn, dump_buf + offset, dump, mem_name, 0, PHY_DUMP_SIZE_DWORDS, 16, true, mem_name, false, 0);

        if (!dump) {
            offset += PHY_DUMP_SIZE_DWORDS;
            continue;
        }

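        /* Sweep the tbus address space: for each high address byte, walk all
         * 256 low addresses and capture the low/high data bytes.
         */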
        for (tbus_hi_offset = 0; tbus_hi_offset < (NUM_PHY_TBUS_ADDRESSES >> 8); tbus_hi_offset++) {
            ecore_wr(p_hwfn, p_ptt, addr_hi_addr, tbus_hi_offset);
            for (tbus_lo_offset = 0; tbus_lo_offset < 256; tbus_lo_offset++) {
                ecore_wr(p_hwfn, p_ptt, addr_lo_addr, tbus_lo_offset);
                *(bytes_buf++) = (u8)ecore_rd(p_hwfn, p_ptt, data_lo_addr);
                *(bytes_buf++) = (u8)ecore_rd(p_hwfn, p_ptt, data_hi_addr);
            }
        }

        offset += PHY_DUMP_SIZE_DWORDS;
    }

    return offset;
}

static void ecore_config_dbg_line(struct ecore_hwfn *p_hwfn,
                                  struct ecore_ptt *p_ptt,
                                  enum block_id block_id,
                                  u8 line_id,
                                  u8 enable_mask,
                                  u8 right_shift,
                                  u8 force_valid_mask,
                                  u8 force_frame_mask)
{
    struct block_defs *block = s_block_defs[block_id];

    ecore_wr(p_hwfn, p_ptt, block->dbg_select_addr, line_id);
    ecore_wr(p_hwfn, p_ptt, block->dbg_enable_addr, enable_mask);
    ecore_wr(p_hwfn, p_ptt, block->dbg_shift_addr, right_shift);
    ecore_wr(p_hwfn, p_ptt, block->dbg_force_valid_addr, force_valid_mask);
    ecore_wr(p_hwfn, p_ptt, block->dbg_force_frame_addr, force_frame_mask);
}

/* Dumps Static Debug data. Returns the dumped size in dwords. */
static u32 ecore_grc_dump_static_debug(struct ecore_hwfn *p_hwfn,
                                       struct ecore_ptt *p_ptt,
                                       u32 *dump_buf,
                                       bool dump)
{
    struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
    u32 block_id, line_id, offset = 0;

    /* Skip static debug if a debug bus recording is in progress */
    if (ecore_rd(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON))
        return 0;

    if (dump) {
        DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "Dumping static debug data...\n");

        /* Disable all blocks debug output */
        for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
            struct block_defs *block = s_block_defs[block_id];

            if (block->has_dbg_bus[dev_data->chip_id])
                ecore_wr(p_hwfn, p_ptt, block->dbg_enable_addr, 0);
        }

        ecore_bus_reset_dbg_block(p_hwfn, p_ptt);
        ecore_bus_set_framing_mode(p_hwfn, p_ptt, DBG_BUS_FRAME_MODE_8HW_0ST);
        ecore_wr(p_hwfn, p_ptt, DBG_REG_DEBUG_TARGET, DBG_BUS_TARGET_ID_INT_BUF);
        ecore_wr(p_hwfn, p_ptt, DBG_REG_FULL_MODE, 1);
        ecore_bus_enable_dbg_block(p_hwfn, p_ptt, true);
    }

    /* Dump all static debug lines for each relevant block */
    for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
        struct block_defs *block = s_block_defs[block_id];
        struct dbg_bus_block *block_desc;
        u32 block_dwords;

        if (!block->has_dbg_bus[dev_data->chip_id])
            continue;

        block_desc = get_dbg_bus_block_desc(p_hwfn, (enum block_id)block_id);
        block_dwords = NUM_DBG_LINES(block_desc) * STATIC_DEBUG_LINE_DWORDS;

        /* Dump static section params */
        offset += ecore_grc_dump_mem_hdr(p_hwfn, dump_buf + offset, dump, block->name, 0, block_dwords, 32, false, "STATIC", false, 0);

        if (!dump) {
            offset += block_dwords;
            continue;
        }

        /* If all lines are invalid - dump zeros */
        if (dev_data->block_in_reset[block_id]) {
            OSAL_MEMSET(dump_buf + offset, 0, DWORDS_TO_BYTES(block_dwords));
            offset += block_dwords;
            continue;
        }

        /* Enable block's client */
        ecore_bus_enable_clients(p_hwfn, p_ptt, 1 << block->dbg_client_id[dev_data->chip_id]);
        for (line_id = 0; line_id < (u32)NUM_DBG_LINES(block_desc); line_id++) {

            /* Configure debug line ID */
            ecore_config_dbg_line(p_hwfn, p_ptt, (enum block_id)block_id, (u8)line_id, 0xf, 0, 0, 0);

            /* Read debug line info */
            offset += ecore_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, dump, BYTES_TO_DWORDS(DBG_REG_CALENDAR_OUT_DATA), STATIC_DEBUG_LINE_DWORDS, true);
        }

        /* Disable block's client and debug output */
        ecore_bus_enable_clients(p_hwfn, p_ptt, 0);
        ecore_wr(p_hwfn, p_ptt, block->dbg_enable_addr, 0);
    }

    if (dump) {
        ecore_bus_enable_dbg_block(p_hwfn, p_ptt, false);
        ecore_bus_enable_clients(p_hwfn, p_ptt, 0);
    }

    return offset;
}

/* Performs GRC Dump to the specified buffer.
 * Returns the dumped size in dwords.
 */
static enum dbg_status ecore_grc_dump(struct ecore_hwfn *p_hwfn,
                                      struct ecore_ptt *p_ptt,
                                      u32 *dump_buf,
                                      bool dump,
                                      u32 *num_dumped_dwords)
{
    struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
    bool is_emul, parities_masked = false;
    u8 i, port_mode = 0;
    u32 offset = 0;

    is_emul = dev_data->platform_id == PLATFORM_EMUL_FULL || dev_data->platform_id == PLATFORM_EMUL_REDUCED;

    *num_dumped_dwords = 0;

    if (dump) {

        /* Find port mode */
        switch (ecore_rd(p_hwfn, p_ptt, MISC_REG_PORT_MODE)) {
        case 0: port_mode = 1; break;
        case 1: port_mode = 2; break;
        case 2: port_mode = 4; break;
        }

        /* Update reset state */
        ecore_update_blocks_reset_state(p_hwfn, p_ptt);
    }

    /* Dump global params */
    offset += ecore_dump_common_global_params(p_hwfn, p_ptt, dump_buf + offset, dump, 4);
    offset += ecore_dump_str_param(dump_buf + offset, dump, "dump-type", "grc-dump");
    offset += ecore_dump_num_param(dump_buf + offset, dump, "num-lcids", ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NUM_LCIDS));
    offset += ecore_dump_num_param(dump_buf + offset, dump, "num-ltids", ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NUM_LTIDS));
    offset += ecore_dump_num_param(dump_buf + offset, dump, "num-ports", port_mode);

    /* Dump reset registers (dumped before taking blocks out of reset) */
    if (ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS))
        offset += ecore_grc_dump_reset_regs(p_hwfn, p_ptt, dump_buf + offset, dump);

    /* Take all blocks out of reset (using reset registers) */
    if (dump) {
        ecore_grc_unreset_blocks(p_hwfn, p_ptt);
        ecore_update_blocks_reset_state(p_hwfn, p_ptt);
    }

    /* Disable all parities using MFW command */
    if (dump && !is_emul && !ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) {
        parities_masked = !ecore_mcp_mask_parities(p_hwfn, p_ptt, 1);
        if (!parities_masked) {
            DP_NOTICE(p_hwfn, false, "Failed to mask parities using MFW\n");
            if (ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_PARITY_SAFE))
                return DBG_STATUS_MCP_COULD_NOT_MASK_PRTY;
        }
    }

    /* Dump modified registers (dumped before modifying them) */
    if (ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS))
        offset += ecore_grc_dump_modified_regs(p_hwfn, p_ptt, dump_buf + offset, dump);

    /* Stall storms */
    if (dump && (ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IOR) || ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_VFC)))
        ecore_grc_stall_storms(p_hwfn, p_ptt, true);

    /* Dump all regs */
    if (ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS)) {
        bool block_enable[MAX_BLOCK_ID];

        /* Dump all blocks except MCP */
        for (i = 0; i < MAX_BLOCK_ID; i++)
            block_enable[i] = true;
        block_enable[BLOCK_MCP] = false;
        offset += ecore_grc_dump_registers(p_hwfn, p_ptt, dump_buf + offset, dump, block_enable, OSAL_NULL, OSAL_NULL);

        /* Dump special registers */
        offset += ecore_grc_dump_special_regs(p_hwfn, p_ptt, dump_buf + offset, dump);
    }

    /* Dump memories */
    offset += ecore_grc_dump_memories(p_hwfn, p_ptt, dump_buf + offset, dump);

    /* Dump MCP */
    if (ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MCP))
        offset += ecore_grc_dump_mcp(p_hwfn, p_ptt, dump_buf + offset, dump);

    /* Dump context */
    if (ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM_CTX))
        offset += ecore_grc_dump_ctx(p_hwfn, p_ptt, dump_buf + offset, dump);

    /* Dump RSS memories */
    if (ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_RSS))
        offset += ecore_grc_dump_rss(p_hwfn, p_ptt, dump_buf + offset, dump);

    /* Dump Big RAM */
    for (i = 0; i < NUM_BIG_RAM_TYPES; i++)
        if (ecore_grc_is_included(p_hwfn, s_big_ram_defs[i].grc_param))
            offset += ecore_grc_dump_big_ram(p_hwfn, p_ptt, dump_buf + offset, dump, i);

    /* Dump IORs */
    if (ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IOR))
        offset += ecore_grc_dump_iors(p_hwfn, p_ptt, dump_buf + offset, dump);

    /* Dump VFC */
    if (ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_VFC))
        offset += ecore_grc_dump_vfc(p_hwfn, p_ptt, dump_buf + offset, dump);

    /* Dump PHY tbus */
    if (ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PHY) && dev_data->chip_id == CHIP_K2 && dev_data->platform_id == PLATFORM_ASIC)
        offset += ecore_grc_dump_phy(p_hwfn, p_ptt, dump_buf + offset, dump);

    /* Dump static debug data */
    if (ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_STATIC) && dev_data->bus.state == DBG_BUS_STATE_IDLE)
        offset += ecore_grc_dump_static_debug(p_hwfn, p_ptt, dump_buf + offset, dump);

    /* Dump last section */
    offset += ecore_dump_last_section(p_hwfn, dump_buf, offset, dump);

    if (dump) {

        /* Unstall storms */
        if (ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_UNSTALL))
            ecore_grc_stall_storms(p_hwfn, p_ptt, false);

        /* Clear parity status */
        if (!is_emul)
            ecore_grc_clear_all_prty(p_hwfn, p_ptt);

        /* Enable all parities using MFW command */
        if (parities_masked)
            ecore_mcp_mask_parities(p_hwfn, p_ptt, 0);
    }

    *num_dumped_dwords = offset;

    return DBG_STATUS_OK;
}

/* Writes the specified failing Idle Check rule to the specified buffer.
 * Returns the dumped size in dwords.
 */
static u32 ecore_idle_chk_dump_failure(struct ecore_hwfn *p_hwfn,
                                       struct ecore_ptt *p_ptt,
                                       u32 *dump_buf,
                                       bool dump,
                                       u16 rule_id,
                                       const struct dbg_idle_chk_rule *rule,
                                       u16 fail_entry_id,
                                       u32 *cond_reg_values)
{
    struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
    const struct dbg_idle_chk_cond_reg *cond_regs;
    const struct dbg_idle_chk_info_reg *info_regs;
    u32 i, next_reg_offset = 0, offset = 0;
    struct dbg_idle_chk_result_hdr *hdr;
    const union dbg_idle_chk_reg *regs;
    u8 reg_id;

    hdr = (struct dbg_idle_chk_result_hdr *)dump_buf;
    regs = &((const union dbg_idle_chk_reg *)s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr)[rule->reg_offset];
    cond_regs = &regs[0].cond_reg;
    info_regs = &regs[rule->num_cond_regs].info_reg;

    /* Dump rule data */
    if (dump) {
        OSAL_MEMSET(hdr, 0, sizeof(*hdr));
        hdr->rule_id = rule_id;
        hdr->mem_entry_id = fail_entry_id;
        hdr->severity = rule->severity;
        hdr->num_dumped_cond_regs = rule->num_cond_regs;
    }

    offset += IDLE_CHK_RESULT_HDR_DWORDS;

    /* Dump condition register values */
    for (reg_id = 0; reg_id < rule->num_cond_regs; reg_id++) {
        const struct dbg_idle_chk_cond_reg *reg = &cond_regs[reg_id];
        struct dbg_idle_chk_result_reg_hdr *reg_hdr;

        reg_hdr = (struct dbg_idle_chk_result_reg_hdr *)(dump_buf + offset);

        /* Write register header */
        if (!dump) {
            offset += IDLE_CHK_RESULT_REG_HDR_DWORDS + reg->entry_size;
            continue;
        }

        offset += IDLE_CHK_RESULT_REG_HDR_DWORDS;
        OSAL_MEMSET(reg_hdr, 0, sizeof(*reg_hdr));
        reg_hdr->start_entry = reg->start_entry;
        reg_hdr->size = reg->entry_size;
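        /* A register is flagged as a memory when it spans multiple entries
         * or starts past entry 0.
         */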
        SET_FIELD(reg_hdr->data, DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM, reg->num_entries > 1 || reg->start_entry > 0 ? 1 : 0);
        SET_FIELD(reg_hdr->data, DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID, reg_id);

        /* Write register values */
        for (i = 0; i < reg_hdr->size; i++, next_reg_offset++, offset++)
            dump_buf[offset] = cond_reg_values[next_reg_offset];
    }

    /* Dump info register values */
    for (reg_id = 0; reg_id < rule->num_info_regs; reg_id++) {
        const struct dbg_idle_chk_info_reg *reg = &info_regs[reg_id];
        u32 block_id;

        /* Check if register's block is in reset */
        if (!dump) {
            offset += IDLE_CHK_RESULT_REG_HDR_DWORDS + reg->size;
            continue;
        }

        block_id = GET_FIELD(reg->data, DBG_IDLE_CHK_INFO_REG_BLOCK_ID);
        if (block_id >= MAX_BLOCK_ID) {
            DP_NOTICE(p_hwfn, true, "Invalid block_id\n");
            return 0;
        }

        if (!dev_data->block_in_reset[block_id]) {
            struct dbg_idle_chk_result_reg_hdr *reg_hdr;
            bool wide_bus, eval_mode, mode_match = true;
            u16 modes_buf_offset;
            u32 addr;

            reg_hdr = (struct dbg_idle_chk_result_reg_hdr *)(dump_buf + offset);

            /* Check mode */
            eval_mode = GET_FIELD(reg->mode.data, DBG_MODE_HDR_EVAL_MODE) > 0;
            if (eval_mode) {
                modes_buf_offset = GET_FIELD(reg->mode.data, DBG_MODE_HDR_MODES_BUF_OFFSET);
                mode_match = ecore_is_mode_match(p_hwfn, &modes_buf_offset);
            }

            if (!mode_match)
                continue;

            addr = GET_FIELD(reg->data, DBG_IDLE_CHK_INFO_REG_ADDRESS);
            wide_bus = GET_FIELD(reg->data, DBG_IDLE_CHK_INFO_REG_WIDE_BUS);

            /* Write register header */
            offset += IDLE_CHK_RESULT_REG_HDR_DWORDS;
            hdr->num_dumped_info_regs++;
            OSAL_MEMSET(reg_hdr, 0, sizeof(*reg_hdr));
            reg_hdr->size = reg->size;
            SET_FIELD(reg_hdr->data, DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID, rule->num_cond_regs + reg_id);

            /* Write register values */
            offset += ecore_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, dump, addr, reg->size, wide_bus);
        }
    }

    return offset;
}

/* Dumps idle check rule entries. Returns the dumped size in dwords. */
static u32 ecore_idle_chk_dump_rule_entries(struct ecore_hwfn *p_hwfn,
                                            struct ecore_ptt *p_ptt,
                                            u32 *dump_buf,
                                            bool dump,
                                            const struct dbg_idle_chk_rule *input_rules,
                                            u32 num_input_rules,
                                            u32 *num_failing_rules)
{
    struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
    u32 cond_reg_values[IDLE_CHK_MAX_ENTRIES_SIZE];
    u32 i, offset = 0;
    u16 entry_id;
    u8 reg_id;

    *num_failing_rules = 0;

    for (i = 0; i < num_input_rules; i++) {
        const struct dbg_idle_chk_cond_reg *cond_regs;
        const struct dbg_idle_chk_rule *rule;
        const union dbg_idle_chk_reg *regs;
        u16 num_reg_entries = 1;
        bool check_rule = true;
        const u32 *imm_values;

        rule = &input_rules[i];
        regs = &((const union dbg_idle_chk_reg *)s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr)[rule->reg_offset];
        cond_regs = &regs[0].cond_reg;
        imm_values = &s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr[rule->imm_offset];

        /* Check if all condition register blocks are out of reset, and
         * find maximal number of entries (all condition registers that
         * are memories must have the same size, which is > 1).
         */
        for (reg_id = 0; reg_id < rule->num_cond_regs && check_rule; reg_id++) {
            u32 block_id = GET_FIELD(cond_regs[reg_id].data, DBG_IDLE_CHK_COND_REG_BLOCK_ID);

            if (block_id >= MAX_BLOCK_ID) {
                DP_NOTICE(p_hwfn, true, "Invalid block_id\n");
                return 0;
            }

            check_rule = !dev_data->block_in_reset[block_id];
            if (cond_regs[reg_id].num_entries > num_reg_entries)
                num_reg_entries = cond_regs[reg_id].num_entries;
        }

        if (!check_rule && dump)
            continue;

        /* Go over all register entries (number of entries is the same for
         * all condition registers).
         */
        for (entry_id = 0; entry_id < num_reg_entries; entry_id++) {
            u32 next_reg_offset = 0;

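            /* Size-calculation mode: account for a single failing entry per
             * rule.
             */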
            if (!dump) {
                offset += ecore_idle_chk_dump_failure(p_hwfn, p_ptt, dump_buf + offset, false, rule->rule_id, rule, entry_id, OSAL_NULL);
                (*num_failing_rules)++;
                break;
            }

            /* Read current entry of all condition registers */
            for (reg_id = 0; reg_id < rule->num_cond_regs; reg_id++) {
                const struct dbg_idle_chk_cond_reg *reg = &cond_regs[reg_id];
                u32 padded_entry_size, addr;
                bool wide_bus;

                /* Find GRC address (if it's a memory, the address of the
                 * specific entry is calculated).
                 */
                addr = GET_FIELD(reg->data, DBG_IDLE_CHK_COND_REG_ADDRESS);
                wide_bus = GET_FIELD(reg->data, DBG_IDLE_CHK_COND_REG_WIDE_BUS);
                if (reg->num_entries > 1 || reg->start_entry > 0) {
                    padded_entry_size = reg->entry_size > 1 ? OSAL_ROUNDUP_POW_OF_TWO(reg->entry_size) : 1;
                    addr += (reg->start_entry + entry_id) * padded_entry_size;
                }

                /* Read registers */
                if (next_reg_offset + reg->entry_size >= IDLE_CHK_MAX_ENTRIES_SIZE) {
                    DP_NOTICE(p_hwfn, true, "idle check registers entry is too large\n");
                    return 0;
                }

                next_reg_offset += ecore_grc_dump_addr_range(p_hwfn, p_ptt, cond_reg_values + next_reg_offset, dump, addr, reg->entry_size, wide_bus);
            }

            /* Call the rule condition function. If it returns true, it's
             * a failure.
             */
            if ((*cond_arr[rule->cond_id])(cond_reg_values, imm_values)) {
                offset += ecore_idle_chk_dump_failure(p_hwfn, p_ptt, dump_buf + offset, dump, rule->rule_id, rule, entry_id, cond_reg_values);
                (*num_failing_rules)++;
                break;
            }
        }
    }

    return offset;
}

/* Performs Idle Check Dump to the specified buffer.
 * Returns the dumped size in dwords.
 */
static u32 ecore_idle_chk_dump(struct ecore_hwfn *p_hwfn,
                               struct ecore_ptt *p_ptt,
                               u32 *dump_buf,
                               bool dump)
{
    u32 num_failing_rules_offset, offset = 0, input_offset = 0, num_failing_rules = 0;

    /* Dump global params */
    offset += ecore_dump_common_global_params(p_hwfn, p_ptt, dump_buf + offset, dump, 1);
    offset += ecore_dump_str_param(dump_buf + offset, dump, "dump-type", "idle-chk");

    /* Dump idle check section header with a single parameter */
    offset += ecore_dump_section_hdr(dump_buf + offset, dump, "idle_chk", 1);
    num_failing_rules_offset = offset;
    offset += ecore_dump_num_param(dump_buf + offset, dump, "num_rules", 0);

    while (input_offset < s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].size_in_dwords) {
        const struct dbg_idle_chk_cond_hdr *cond_hdr = (const struct dbg_idle_chk_cond_hdr *)&s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr[input_offset++];
        bool eval_mode, mode_match = true;
        u32 curr_failing_rules;
        u16 modes_buf_offset;

        /* Check mode */
        eval_mode = GET_FIELD(cond_hdr->mode.data, DBG_MODE_HDR_EVAL_MODE) > 0;
        if (eval_mode) {
            modes_buf_offset = GET_FIELD(cond_hdr->mode.data, DBG_MODE_HDR_MODES_BUF_OFFSET);
            mode_match = ecore_is_mode_match(p_hwfn, &modes_buf_offset);
        }

        if (mode_match) {
            offset += ecore_idle_chk_dump_rule_entries(p_hwfn, p_ptt, dump_buf + offset, dump, (const struct dbg_idle_chk_rule *)&s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr[input_offset], cond_hdr->data_size / IDLE_CHK_RULE_SIZE_DWORDS, &curr_failing_rules);
            num_failing_rules += curr_failing_rules;
        }

        input_offset += cond_hdr->data_size;
    }

    /* Overwrite num_rules parameter */
    if (dump)
        ecore_dump_num_param(dump_buf + num_failing_rules_offset, dump, "num_rules", num_failing_rules);

    /* Dump last section */
    offset += ecore_dump_last_section(p_hwfn, dump_buf, offset, dump);

    return offset;
}

/* Finds the meta data image in NVRAM */
static enum dbg_status ecore_find_nvram_image(struct ecore_hwfn *p_hwfn,
                                              struct ecore_ptt *p_ptt,
                                              u32 image_type,
                                              u32 *nvram_offset_bytes,
                                              u32 *nvram_size_bytes)
{
    u32 ret_mcp_resp, ret_mcp_param, ret_txn_size;
    struct mcp_file_att file_att;
    int nvm_result;

    /* Call NVRAM get file command */
    nvm_result = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_GET_FILE_ATT, image_type, &ret_mcp_resp, &ret_mcp_param, &ret_txn_size, (u32 *)&file_att);

    /* Check response */
    if (nvm_result || (ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
        return DBG_STATUS_NVRAM_GET_IMAGE_FAILED;

    /* Update return values */
    *nvram_offset_bytes = file_att.nvm_start_addr;
    *nvram_size_bytes = file_att.len;

    DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "find_nvram_image: found NVRAM image of type %d in NVRAM offset %d bytes with size %d bytes\n", image_type, *nvram_offset_bytes, *nvram_size_bytes);

    /* Check alignment */
    if (*nvram_size_bytes & 0x3)
        return DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE;

    return DBG_STATUS_OK;
}

/* Reads data from NVRAM */
static enum dbg_status ecore_nvram_read(struct ecore_hwfn *p_hwfn,
                                        struct ecore_ptt *p_ptt,
                                        u32 nvram_offset_bytes,
                                        u32 nvram_size_bytes,
                                        u32 *ret_buf)
{
    u32 ret_mcp_resp, ret_mcp_param, ret_read_size, bytes_to_copy;
    s32 bytes_left = nvram_size_bytes;
    u32 read_offset = 0;

    DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "nvram_read: reading image of size %d bytes from NVRAM\n", nvram_size_bytes);

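    /* Read in chunks of up to MCP_DRV_NVM_BUF_LEN bytes */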
    do {
        bytes_to_copy = (bytes_left > MCP_DRV_NVM_BUF_LEN) ? MCP_DRV_NVM_BUF_LEN : bytes_left;

        /* Call NVRAM read command */
        if (ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_READ_NVRAM, (nvram_offset_bytes + read_offset) | (bytes_to_copy << DRV_MB_PARAM_NVM_LEN_SHIFT), &ret_mcp_resp, &ret_mcp_param, &ret_read_size, (u32 *)((u8 *)ret_buf + read_offset)))
            return DBG_STATUS_NVRAM_READ_FAILED;

        /* Check response */
        if ((ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
            return DBG_STATUS_NVRAM_READ_FAILED;

        /* Update read offset */
        read_offset += ret_read_size;
        bytes_left -= ret_read_size;
    } while (bytes_left > 0);

    return DBG_STATUS_OK;
}

/* Get info on the MCP Trace data in the scratchpad:
 * - trace_data_grc_addr (OUT): trace data GRC address in bytes
 * - trace_data_size (OUT): trace data size in bytes (without the header)
 */
static enum dbg_status ecore_mcp_trace_get_data_info(struct ecore_hwfn *p_hwfn,
                                                     struct ecore_ptt *p_ptt,
                                                     u32 *trace_data_grc_addr,
                                                     u32 *trace_data_size)
{
    u32 spad_trace_offsize, signature;

    /* Read trace section offsize structure from MCP scratchpad */
    spad_trace_offsize = ecore_rd(p_hwfn, p_ptt, MCP_SPAD_TRACE_OFFSIZE_ADDR);

    /* Extract trace section address from offsize (in scratchpad) */
    *trace_data_grc_addr = MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize);

    /* Read signature from MCP trace section */
    signature = ecore_rd(p_hwfn, p_ptt, *trace_data_grc_addr + offsetof(struct mcp_trace, signature));

    if (signature != MFW_TRACE_SIGNATURE)
        return DBG_STATUS_INVALID_TRACE_SIGNATURE;

    /* Read trace size from MCP trace section */
    *trace_data_size = ecore_rd(p_hwfn, p_ptt, *trace_data_grc_addr + offsetof(struct mcp_trace, size));

    return DBG_STATUS_OK;
}

/* Reads MCP trace meta data image from NVRAM
 * - running_bundle_id (OUT): running bundle ID (invalid when loaded from file)
 * - trace_meta_offset (OUT): trace meta offset in NVRAM in bytes (invalid when
 *   loaded from file).
 * - trace_meta_size (OUT): size in bytes of the trace meta data.
 */
static enum dbg_status ecore_mcp_trace_get_meta_info(struct ecore_hwfn *p_hwfn,
                                                     struct ecore_ptt *p_ptt,
                                                     u32 trace_data_size_bytes,
                                                     u32 *running_bundle_id,
                                                     u32 *trace_meta_offset,
                                                     u32 *trace_meta_size)
{
    u32 spad_trace_offsize, nvram_image_type, running_mfw_addr;

    /* Read MCP trace section offsize structure from MCP scratchpad */
    spad_trace_offsize = ecore_rd(p_hwfn, p_ptt, MCP_SPAD_TRACE_OFFSIZE_ADDR);

    /* Find running bundle ID */
    running_mfw_addr = MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize) + SECTION_SIZE(spad_trace_offsize) + trace_data_size_bytes;
    *running_bundle_id = ecore_rd(p_hwfn, p_ptt, running_mfw_addr);
    if (*running_bundle_id > 1)
        return DBG_STATUS_INVALID_NVRAM_BUNDLE;

    /* Find image in NVRAM */
    nvram_image_type = (*running_bundle_id == DIR_ID_1) ? NVM_TYPE_MFW_TRACE1 : NVM_TYPE_MFW_TRACE2;
    return ecore_find_nvram_image(p_hwfn, p_ptt, nvram_image_type, trace_meta_offset, trace_meta_size);
}

/* Reads the MCP Trace meta data from NVRAM into the specified buffer */
static enum dbg_status ecore_mcp_trace_read_meta(struct ecore_hwfn *p_hwfn,
                                                 struct ecore_ptt *p_ptt,
                                                 u32 nvram_offset_in_bytes,
                                                 u32 size_in_bytes,
                                                 u32 *buf)
{
    u8 modules_num, module_len, i, *byte_buf = (u8 *)buf;
    enum dbg_status status;
    u32 signature;

    /* Read meta data from NVRAM */
    status = ecore_nvram_read(p_hwfn, p_ptt, nvram_offset_in_bytes, size_in_bytes, buf);
    if (status != DBG_STATUS_OK)
        return status;

    /* Extract and check first signature */
    signature = ecore_read_unaligned_dword(byte_buf);
    byte_buf += sizeof(signature);
    if (signature != NVM_MAGIC_VALUE)
        return DBG_STATUS_INVALID_TRACE_SIGNATURE;

    /* Extract number of modules */
    modules_num = *(byte_buf++);

    /* Skip all modules */
4541 for (i = 0; i < modules_num; i++) {
4542 module_len = *(byte_buf++);
4543 byte_buf += module_len;
4544 }
4545
4546 /* Extract and check second signature */
4547 signature = ecore_read_unaligned_dword(byte_buf);
4548 byte_buf += sizeof(signature);
4549 if (signature != NVM_MAGIC_VALUE)
4550 return DBG_STATUS_INVALID_TRACE_SIGNATURE;
4551
4552 return DBG_STATUS_OK;
4553 }
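/* Informative sketch of the meta image layout implied by the parser above
 * (only the fields it actually touches; the remainder of the image is not
 * validated here):
 *
 *	u32 signature;			- must equal NVM_MAGIC_VALUE
 *	u8  modules_num;
 *	repeated modules_num times:
 *		u8 module_len;
 *		u8 module_data[module_len];	- skipped, not parsed
 *	u32 signature;			- must equal NVM_MAGIC_VALUE again
 */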
4554
4555 /* Dump MCP Trace */
4556 static enum dbg_status ecore_mcp_trace_dump(struct ecore_hwfn *p_hwfn,
4557 struct ecore_ptt *p_ptt,
4558 u32 *dump_buf,
4559 bool dump,
4560 u32 *num_dumped_dwords)
4561 {
4562 u32 trace_meta_offset_bytes = 0, trace_meta_size_bytes = 0, trace_meta_size_dwords = 0;
4563 u32 trace_data_grc_addr, trace_data_size_bytes, trace_data_size_dwords;
4564 u32 running_bundle_id, offset = 0;
4565 enum dbg_status status;
4566 bool mcp_access;
4567 int halted = 0;
4568
4569 *num_dumped_dwords = 0;
4570
4571 mcp_access = !ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP);
4572
4573 /* Get trace data info */
4574 status = ecore_mcp_trace_get_data_info(p_hwfn, p_ptt, &trace_data_grc_addr, &trace_data_size_bytes);
4575 if (status != DBG_STATUS_OK)
4576 return status;
4577
4578 /* Dump global params */
4579 offset += ecore_dump_common_global_params(p_hwfn, p_ptt, dump_buf + offset, dump, 1);
4580 offset += ecore_dump_str_param(dump_buf + offset, dump, "dump-type", "mcp-trace");
4581
4582 /* Halt the MCP while reading from the scratchpad so the read data will
4583 * be consistent. If the halt fails, the MCP trace is taken anyway, with
4584 * a small risk that it may be corrupt.
4585 */
4586 if (dump && mcp_access) {
4587 halted = !ecore_mcp_halt(p_hwfn, p_ptt);
4588 if (!halted)
4589 DP_NOTICE(p_hwfn, false, "MCP halt failed!\n");
4590 }
4591
4592 /* Find trace data size */
4593 trace_data_size_dwords = DIV_ROUND_UP(trace_data_size_bytes + sizeof(struct mcp_trace), BYTES_IN_DWORD);
4594
4595 /* Dump trace data section header and param */
4596 offset += ecore_dump_section_hdr(dump_buf + offset, dump, "mcp_trace_data", 1);
4597 offset += ecore_dump_num_param(dump_buf + offset, dump, "size", trace_data_size_dwords);
4598
4599 /* Read trace data from scratchpad into dump buffer */
4600 offset += ecore_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, dump, BYTES_TO_DWORDS(trace_data_grc_addr), trace_data_size_dwords, false);
4601
4602 /* Resume MCP (only if halt succeeded) */
4603 if (halted && ecore_mcp_resume(p_hwfn, p_ptt))
4604 DP_NOTICE(p_hwfn, false, "Failed to resume MCP after halt!\n");
4605
4606 /* Dump trace meta section header */
4607 offset += ecore_dump_section_hdr(dump_buf + offset, dump, "mcp_trace_meta", 1);
4608
4609 /* Read trace meta only if NVRAM access is enabled
4610 * (trace_meta_size_bytes is dword-aligned).
4611 */
4612 if (OSAL_NVM_IS_ACCESS_ENABLED(p_hwfn) && mcp_access) {
4613 status = ecore_mcp_trace_get_meta_info(p_hwfn, p_ptt, trace_data_size_bytes, &running_bundle_id, &trace_meta_offset_bytes, &trace_meta_size_bytes);
4614 if (status == DBG_STATUS_OK)
4615 trace_meta_size_dwords = BYTES_TO_DWORDS(trace_meta_size_bytes);
4616 }
4617
4618 /* Dump trace meta size param */
4619 offset += ecore_dump_num_param(dump_buf + offset, dump, "size", trace_meta_size_dwords);
4620
4621 /* Read trace meta image into dump buffer */
4622 if (dump && trace_meta_size_dwords)
4623 status = ecore_mcp_trace_read_meta(p_hwfn, p_ptt, trace_meta_offset_bytes, trace_meta_size_bytes, dump_buf + offset);
4624 if (status == DBG_STATUS_OK)
4625 offset += trace_meta_size_dwords;
4626
4627 /* Dump last section */
4628 offset += ecore_dump_last_section(p_hwfn, dump_buf, offset, dump);
4629
4630 *num_dumped_dwords = offset;
4631
4632 /* If no mcp access, indicate that the dump doesn't contain the meta
4633 * data from NVRAM.
4634 */
4635 return mcp_access ? status : DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
4636 }
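/* Usage sketch (illustrative only): like the other dump routines in this
 * file, ecore_mcp_trace_dump() supports a sizing pass with dump=false, in
 * which offsets are accumulated but nothing is written. Variable names are
 * hypothetical; allocation is platform-specific and omitted:
 *
 *	u32 needed, written;
 *	u32 *buf;
 *
 *	if (ecore_mcp_trace_dump(p_hwfn, p_ptt, OSAL_NULL, false,
 *	    &needed) != DBG_STATUS_OK)
 *		return;
 *	buf = ...;	allocate 'needed' dwords
 *	ecore_mcp_trace_dump(p_hwfn, p_ptt, buf, true, &written);
 */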
4637
4638 /* Dump GRC FIFO */
4639 static enum dbg_status ecore_reg_fifo_dump(struct ecore_hwfn *p_hwfn,
4640 struct ecore_ptt *p_ptt,
4641 u32 *dump_buf,
4642 bool dump,
4643 u32 *num_dumped_dwords)
4644 {
4645 u32 dwords_read, size_param_offset, offset = 0;
4646 bool fifo_has_data;
4647
4648 *num_dumped_dwords = 0;
4649
4650 /* Dump global params */
4651 offset += ecore_dump_common_global_params(p_hwfn, p_ptt, dump_buf + offset, dump, 1);
4652 offset += ecore_dump_str_param(dump_buf + offset, dump, "dump-type", "reg-fifo");
4653
4654 /* Dump fifo data section header and param. The size param is 0 for
4655 * now, and is overwritten after reading the FIFO.
4656 */
4657 offset += ecore_dump_section_hdr(dump_buf + offset, dump, "reg_fifo_data", 1);
4658 size_param_offset = offset;
4659 offset += ecore_dump_num_param(dump_buf + offset, dump, "size", 0);
4660
4661 if (dump) {
4662 fifo_has_data = ecore_rd(p_hwfn, p_ptt, GRC_REG_TRACE_FIFO_VALID_DATA) > 0;
4663
4664 /* Pull available data from fifo. Use DMAE since this is
4665 * widebus memory and must be accessed atomically. Test
4666 * for dwords_read not passing buffer size since more
4667 * entries could be added to the buffer as we are
4668 * emptying it.
4669 */
4670 for (dwords_read = 0; fifo_has_data && dwords_read < REG_FIFO_DEPTH_DWORDS; dwords_read += REG_FIFO_ELEMENT_DWORDS, offset += REG_FIFO_ELEMENT_DWORDS) {
4671 if (ecore_dmae_grc2host(p_hwfn, p_ptt, GRC_REG_TRACE_FIFO, (u64)(osal_uintptr_t)(&dump_buf[offset]), REG_FIFO_ELEMENT_DWORDS, 0))
4672 return DBG_STATUS_DMAE_FAILED;
4673 fifo_has_data = ecore_rd(p_hwfn, p_ptt, GRC_REG_TRACE_FIFO_VALID_DATA) > 0;
4674 }
4675
4676 ecore_dump_num_param(dump_buf + size_param_offset, dump, "size", dwords_read);
4677 }
4678 else {
4679
4680 /* FIFO max size is REG_FIFO_DEPTH_DWORDS. There is no way to
4681 * test how much data is available, except for reading it.
4682 */
4683 offset += REG_FIFO_DEPTH_DWORDS;
4684 }
4685
4686 /* Dump last section */
4687 offset += ecore_dump_last_section(p_hwfn, dump_buf, offset, dump);
4688
4689 *num_dumped_dwords = offset;
4690
4691 return DBG_STATUS_OK;
4692 }
4693
4694 /* Dump IGU FIFO */
4695 static enum dbg_status ecore_igu_fifo_dump(struct ecore_hwfn *p_hwfn,
4696 struct ecore_ptt *p_ptt,
4697 u32 *dump_buf,
4698 bool dump,
4699 u32 *num_dumped_dwords)
4700 {
4701 u32 dwords_read, size_param_offset, offset = 0;
4702 bool fifo_has_data;
4703
4704 *num_dumped_dwords = 0;
4705
4706 /* Dump global params */
4707 offset += ecore_dump_common_global_params(p_hwfn, p_ptt, dump_buf + offset, dump, 1);
4708 offset += ecore_dump_str_param(dump_buf + offset, dump, "dump-type", "igu-fifo");
4709
4710 /* Dump fifo data section header and param. The size param is 0 for
4711 * now, and is overwritten after reading the FIFO.
4712 */
4713 offset += ecore_dump_section_hdr(dump_buf + offset, dump, "igu_fifo_data", 1);
4714 size_param_offset = offset;
4715 offset += ecore_dump_num_param(dump_buf + offset, dump, "size", 0);
4716
4717 if (dump) {
4718 fifo_has_data = ecore_rd(p_hwfn, p_ptt, IGU_REG_ERROR_HANDLING_DATA_VALID) > 0;
4719
4720 /* Pull available data from fifo. Use DMAE since this is
4721 * widebus memory and must be accessed atomically. Test for
4722 * dwords_read not passing buffer size since more entries could
4723 * be added to the buffer as we are emptying it.
4724 */
4725 for (dwords_read = 0; fifo_has_data && dwords_read < IGU_FIFO_DEPTH_DWORDS; dwords_read += IGU_FIFO_ELEMENT_DWORDS, offset += IGU_FIFO_ELEMENT_DWORDS) {
4726 if (ecore_dmae_grc2host(p_hwfn, p_ptt, IGU_REG_ERROR_HANDLING_MEMORY, (u64)(osal_uintptr_t)(&dump_buf[offset]), IGU_FIFO_ELEMENT_DWORDS, 0))
4727 return DBG_STATUS_DMAE_FAILED;
4728 fifo_has_data = ecore_rd(p_hwfn, p_ptt, IGU_REG_ERROR_HANDLING_DATA_VALID) > 0;
4729 }
4730
4731 ecore_dump_num_param(dump_buf + size_param_offset, dump, "size", dwords_read);
4732 }
4733 else {
4734
4735 /* FIFO max size is IGU_FIFO_DEPTH_DWORDS. There is no way to
4736 * test how much data is available, except for reading it.
4737 */
4738 offset += IGU_FIFO_DEPTH_DWORDS;
4739 }
4740
4741 /* Dump last section */
4742 offset += ecore_dump_last_section(p_hwfn, dump_buf, offset, dump);
4743
4744 *num_dumped_dwords = offset;
4745
4746 return DBG_STATUS_OK;
4747 }
4748
4749 /* Protection Override dump */
4750 static enum dbg_status ecore_protection_override_dump(struct ecore_hwfn *p_hwfn,
4751 struct ecore_ptt *p_ptt,
4752 u32 *dump_buf,
4753 bool dump,
4754 u32 *num_dumped_dwords)
4755 {
4756 u32 size_param_offset, override_window_dwords, offset = 0;
4757
4758 *num_dumped_dwords = 0;
4759
4760 /* Dump global params */
4761 offset += ecore_dump_common_global_params(p_hwfn, p_ptt, dump_buf + offset, dump, 1);
4762 offset += ecore_dump_str_param(dump_buf + offset, dump, "dump-type", "protection-override");
4763
4764 /* Dump data section header and param. The size param is 0 for now,
4765 * and is overwritten after reading the data.
4766 */
4767 offset += ecore_dump_section_hdr(dump_buf + offset, dump, "protection_override_data", 1);
4768 size_param_offset = offset;
4769 offset += ecore_dump_num_param(dump_buf + offset, dump, "size", 0);
4770
4771 if (dump) {
4772 /* Add override window info to buffer */
4773 override_window_dwords = ecore_rd(p_hwfn, p_ptt, GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) * PROTECTION_OVERRIDE_ELEMENT_DWORDS;
4774 if (ecore_dmae_grc2host(p_hwfn, p_ptt, GRC_REG_PROTECTION_OVERRIDE_WINDOW, (u64)(osal_uintptr_t)(dump_buf + offset), override_window_dwords, 0))
4775 return DBG_STATUS_DMAE_FAILED;
4776 offset += override_window_dwords;
4777 ecore_dump_num_param(dump_buf + size_param_offset, dump, "size", override_window_dwords);
4778 }
4779 else {
4780 offset += PROTECTION_OVERRIDE_DEPTH_DWORDS;
4781 }
4782
4783 /* Dump last section */
4784 offset += ecore_dump_last_section(p_hwfn, dump_buf, offset, dump);
4785
4786 *num_dumped_dwords = offset;
4787
4788 return DBG_STATUS_OK;
4789 }
4790
4791 /* Performs FW Asserts Dump to the specified buffer.
4792 * Returns the dumped size in dwords.
4793 */
4794 static u32 ecore_fw_asserts_dump(struct ecore_hwfn *p_hwfn,
4795 struct ecore_ptt *p_ptt,
4796 u32 *dump_buf,
4797 bool dump)
4798 {
4799 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4800 struct fw_asserts_ram_section *asserts;
4801 char storm_letter_str[2] = "?";
4802 struct fw_info fw_info;
4803 u32 offset = 0;
4804 u8 storm_id;
4805
4806 /* Dump global params */
4807 offset += ecore_dump_common_global_params(p_hwfn, p_ptt, dump_buf + offset, dump, 1);
4808 offset += ecore_dump_str_param(dump_buf + offset, dump, "dump-type", "fw-asserts");
4809
4810 /* Find Storm dump size */
4811 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
4812 u32 fw_asserts_section_addr, next_list_idx_addr, next_list_idx, last_list_idx, addr;
4813 struct storm_defs *storm = &s_storm_defs[storm_id];
4814
4815 if (dev_data->block_in_reset[storm->block_id])
4816 continue;
4817
4818 /* Read FW info for the current Storm */
4819 ecore_read_fw_info(p_hwfn, p_ptt, storm_id, &fw_info);
4820
4821 asserts = &fw_info.fw_asserts_section;
4822
4823 /* Dump FW Asserts section header and params */
4824 storm_letter_str[0] = storm->letter;
4825 offset += ecore_dump_section_hdr(dump_buf + offset, dump, "fw_asserts", 2);
4826 offset += ecore_dump_str_param(dump_buf + offset, dump, "storm", storm_letter_str);
4827 offset += ecore_dump_num_param(dump_buf + offset, dump, "size", asserts->list_element_dword_size);
4828
4829 /* Read and dump FW Asserts data */
4830 if (!dump) {
4831 offset += asserts->list_element_dword_size;
4832 continue;
4833 }
4834
4835 fw_asserts_section_addr = storm->sem_fast_mem_addr + SEM_FAST_REG_INT_RAM +
4836 RAM_LINES_TO_BYTES(asserts->section_ram_line_offset);
4837 next_list_idx_addr = fw_asserts_section_addr + DWORDS_TO_BYTES(asserts->list_next_index_dword_offset);
4838 next_list_idx = ecore_rd(p_hwfn, p_ptt, next_list_idx_addr);
4839 last_list_idx = (next_list_idx > 0 ? next_list_idx : asserts->list_num_elements) - 1;
4840 addr = BYTES_TO_DWORDS(fw_asserts_section_addr) + asserts->list_dword_offset +
4841 last_list_idx * asserts->list_element_dword_size;
4842 offset += ecore_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, dump, addr, asserts->list_element_dword_size, false);
4843 }
4844
4845 /* Dump last section */
4846 offset += ecore_dump_last_section(p_hwfn, dump_buf, offset, dump);
4847
4848 return offset;
4849 }
4850
4851 /***************************** Public Functions *******************************/
4852
4853 enum dbg_status ecore_dbg_set_bin_ptr(const u8 * const bin_ptr)
4854 {
4855 struct bin_buffer_hdr *buf_array = (struct bin_buffer_hdr*)bin_ptr;
4856 u8 buf_id;
4857
4858 /* convert binary data to debug arrays */
4859 for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++) {
4860 s_dbg_arrays[buf_id].ptr = (u32*)(bin_ptr + buf_array[buf_id].offset);
4861 s_dbg_arrays[buf_id].size_in_dwords = BYTES_TO_DWORDS(buf_array[buf_id].length);
4862 }
4863
4864 return DBG_STATUS_OK;
4865 }
4866
4867 enum dbg_status ecore_dbg_set_app_ver(u32 ver)
4868 {
4869 if (ver < TOOLS_VERSION)
4870 return DBG_STATUS_UNSUPPORTED_APP_VERSION;
4871
4872 s_app_ver = ver;
4873
4874 return DBG_STATUS_OK;
4875 }
4876
4877 u32 ecore_dbg_get_fw_func_ver(void)
4878 {
4879 return TOOLS_VERSION;
4880 }
4881
4882 enum chip_ids ecore_dbg_get_chip_id(struct ecore_hwfn *p_hwfn)
4883 {
4884 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4885
4886 return (enum chip_ids)dev_data->chip_id;
4887 }
4888
4889 enum dbg_status ecore_dbg_bus_reset(struct ecore_hwfn *p_hwfn,
4890 struct ecore_ptt *p_ptt,
4891 bool one_shot_en,
4892 u8 force_hw_dwords,
4893 bool unify_inputs,
4894 bool grc_input_en)
4895 {
4896 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4897 enum dbg_status status;
4898
4899 status = ecore_dbg_dev_init(p_hwfn, p_ptt);
4900 if (status != DBG_STATUS_OK)
4901 return status;
4902
4903 DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_reset: one_shot_en = %d, force_hw_dwords = %d, unify_inputs = %d, grc_input_en = %d\n", one_shot_en, force_hw_dwords, unify_inputs, grc_input_en);
4904
4905 if (force_hw_dwords &&
4906 force_hw_dwords != 4 &&
4907 force_hw_dwords != 8)
4908 return DBG_STATUS_INVALID_ARGS;
4909
4910 if (ecore_rd(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON))
4911 return DBG_STATUS_DBG_BUS_IN_USE;
4912
4913 /* Update reset state of all blocks */
4914 ecore_update_blocks_reset_state(p_hwfn, p_ptt);
4915
4916 /* Disable all debug inputs */
4917 status = ecore_bus_disable_inputs(p_hwfn, p_ptt, false);
4918 if (status != DBG_STATUS_OK)
4919 return status;
4920
4921 /* Reset DBG block */
4922 ecore_bus_reset_dbg_block(p_hwfn, p_ptt);
4923
4924 /* Set one-shot / wrap-around */
4925 ecore_wr(p_hwfn, p_ptt, DBG_REG_FULL_MODE, one_shot_en ? 0 : 1);
4926
4927 /* Init state params */
4928 OSAL_MEMSET(&dev_data->bus, 0, sizeof(dev_data->bus));
4929 dev_data->bus.target = DBG_BUS_TARGET_ID_INT_BUF;
4930 dev_data->bus.state = DBG_BUS_STATE_READY;
4931 dev_data->bus.one_shot_en = one_shot_en;
4932 dev_data->bus.hw_dwords = force_hw_dwords;
4933 dev_data->bus.grc_input_en = grc_input_en;
4934 dev_data->bus.unify_inputs = unify_inputs;
4935 dev_data->bus.num_enabled_blocks = grc_input_en ? 1 : 0;
4936
4937 /* Init special DBG block */
4938 if (grc_input_en)
4939 SET_FIELD(dev_data->bus.blocks[BLOCK_DBG].data, DBG_BUS_BLOCK_DATA_ENABLE_MASK, 0x1);
4940
4941 return DBG_STATUS_OK;
4942 }
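/* Sketch of a minimal recording session built from the public calls in this
 * section (illustrative; BLOCK_PGLUE_B, the line number and the masks are
 * hypothetical example values):
 *
 *	ecore_dbg_bus_reset(p_hwfn, p_ptt, true, 0, false, false);
 *	ecore_dbg_bus_enable_block(p_hwfn, p_ptt, BLOCK_PGLUE_B, 1,
 *	    0xf, 0, 0, 0);
 *	ecore_dbg_bus_start(p_hwfn, p_ptt);
 *	... workload runs while data is recorded ...
 *	ecore_dbg_bus_stop(p_hwfn, p_ptt);
 */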
4943
4944 enum dbg_status ecore_dbg_bus_set_pci_output(struct ecore_hwfn *p_hwfn,
4945 struct ecore_ptt *p_ptt,
4946 u16 buf_size_kb)
4947 {
4948 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4949 dma_addr_t pci_buf_phys_addr;
4950 void *pci_buf;
4951
4952 DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_set_pci_output: buf_size_kb = %d\n", buf_size_kb);
4953
4954 if (dev_data->bus.target != DBG_BUS_TARGET_ID_INT_BUF)
4955 return DBG_STATUS_OUTPUT_ALREADY_SET;
4956 if (dev_data->bus.state != DBG_BUS_STATE_READY || dev_data->bus.pci_buf.size > 0)
4957 return DBG_STATUS_DBG_BLOCK_NOT_RESET;
4958
4959 dev_data->bus.target = DBG_BUS_TARGET_ID_PCI;
4960 dev_data->bus.pci_buf.size = buf_size_kb * 1024;
4961 if (dev_data->bus.pci_buf.size % PCI_PKT_SIZE_IN_BYTES)
4962 return DBG_STATUS_INVALID_ARGS;
4963
4964 pci_buf = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, &pci_buf_phys_addr, dev_data->bus.pci_buf.size);
4965 if (!pci_buf)
4966 return DBG_STATUS_PCI_BUF_ALLOC_FAILED;
4967
4968 OSAL_MEMCPY(&dev_data->bus.pci_buf.phys_addr, &pci_buf_phys_addr, sizeof(pci_buf_phys_addr));
4969
4970 dev_data->bus.pci_buf.virt_addr.lo = (u32)((u64)(osal_uintptr_t)pci_buf);
4971 dev_data->bus.pci_buf.virt_addr.hi = (u32)((u64)(osal_uintptr_t)pci_buf >> 32);
4972
4973 ecore_wr(p_hwfn, p_ptt, DBG_REG_PCI_EXT_BUFFER_STRT_ADDR_LSB, dev_data->bus.pci_buf.phys_addr.lo);
4974 ecore_wr(p_hwfn, p_ptt, DBG_REG_PCI_EXT_BUFFER_STRT_ADDR_MSB, dev_data->bus.pci_buf.phys_addr.hi);
4975 ecore_wr(p_hwfn, p_ptt, DBG_REG_TARGET_PACKET_SIZE, PCI_PKT_SIZE_IN_CHUNKS);
4976 ecore_wr(p_hwfn, p_ptt, DBG_REG_PCI_EXT_BUFFER_SIZE, dev_data->bus.pci_buf.size / PCI_PKT_SIZE_IN_BYTES);
4977 ecore_wr(p_hwfn, p_ptt, DBG_REG_PCI_FUNC_NUM, OPAQUE_FID(p_hwfn->rel_pf_id));
4978 ecore_wr(p_hwfn, p_ptt, DBG_REG_PCI_LOGIC_ADDR, PCI_PHYS_ADDR_TYPE);
4979 ecore_wr(p_hwfn, p_ptt, DBG_REG_PCI_REQ_CREDIT, PCI_REQ_CREDIT);
4980 ecore_wr(p_hwfn, p_ptt, DBG_REG_DEBUG_TARGET, DBG_BUS_TARGET_ID_PCI);
4981 ecore_wr(p_hwfn, p_ptt, DBG_REG_OUTPUT_ENABLE, TARGET_EN_MASK_PCI);
4982
4983 return DBG_STATUS_OK;
4984 }
4985
4986 enum dbg_status ecore_dbg_bus_set_nw_output(struct ecore_hwfn *p_hwfn,
4987 struct ecore_ptt *p_ptt,
4988 u8 port_id,
4989 u32 dest_addr_lo32,
4990 u16 dest_addr_hi16,
4991 u16 data_limit_size_kb,
4992 bool send_to_other_engine,
4993 bool rcv_from_other_engine)
4994 {
4995 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4996
4997 DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_set_nw_output: port_id = %d, dest_addr_lo32 = 0x%x, dest_addr_hi16 = 0x%x, data_limit_size_kb = %d, send_to_other_engine = %d, rcv_from_other_engine = %d\n", port_id, dest_addr_lo32, dest_addr_hi16, data_limit_size_kb, send_to_other_engine, rcv_from_other_engine);
4998
4999 if (dev_data->bus.target != DBG_BUS_TARGET_ID_INT_BUF)
5000 return DBG_STATUS_OUTPUT_ALREADY_SET;
5001 if (dev_data->bus.state != DBG_BUS_STATE_READY)
5002 return DBG_STATUS_DBG_BLOCK_NOT_RESET;
5003 if (port_id >= s_chip_defs[dev_data->chip_id].per_platform[dev_data->platform_id].num_ports || (send_to_other_engine && rcv_from_other_engine))
5004 return DBG_STATUS_INVALID_ARGS;
5005
5006 dev_data->bus.target = DBG_BUS_TARGET_ID_NIG;
5007 dev_data->bus.rcv_from_other_engine = rcv_from_other_engine;
5008
5009 ecore_wr(p_hwfn, p_ptt, DBG_REG_OUTPUT_ENABLE, TARGET_EN_MASK_NIG);
5010 ecore_wr(p_hwfn, p_ptt, DBG_REG_DEBUG_TARGET, DBG_BUS_TARGET_ID_NIG);
5011
5012 if (send_to_other_engine)
5013 ecore_wr(p_hwfn, p_ptt, DBG_REG_OTHER_ENGINE_MODE_BB_K2, DBG_BUS_OTHER_ENGINE_MODE_CROSS_ENGINE_TX);
5014 else
5015 ecore_wr(p_hwfn, p_ptt, NIG_REG_DEBUG_PORT, port_id);
5016
5017 if (rcv_from_other_engine) {
5018 ecore_wr(p_hwfn, p_ptt, DBG_REG_OTHER_ENGINE_MODE_BB_K2, DBG_BUS_OTHER_ENGINE_MODE_CROSS_ENGINE_RX);
5019 }
5020 else {
5021
5022 /* Configure ethernet header of 14 bytes */
5023 ecore_wr(p_hwfn, p_ptt, DBG_REG_ETHERNET_HDR_WIDTH, 0);
5024 ecore_wr(p_hwfn, p_ptt, DBG_REG_ETHERNET_HDR_7, dest_addr_lo32);
5025 ecore_wr(p_hwfn, p_ptt, DBG_REG_ETHERNET_HDR_6, (u32)SRC_MAC_ADDR_LO16 | ((u32)dest_addr_hi16 << 16));
5026 ecore_wr(p_hwfn, p_ptt, DBG_REG_ETHERNET_HDR_5, SRC_MAC_ADDR_HI32);
5027 ecore_wr(p_hwfn, p_ptt, DBG_REG_ETHERNET_HDR_4, (u32)ETH_TYPE << 16);
5028 ecore_wr(p_hwfn, p_ptt, DBG_REG_TARGET_PACKET_SIZE, NIG_PKT_SIZE_IN_CHUNKS);
5029 if (data_limit_size_kb)
5030 ecore_wr(p_hwfn, p_ptt, DBG_REG_NIG_DATA_LIMIT_SIZE, (data_limit_size_kb * 1024) / CHUNK_SIZE_IN_BYTES);
5031 }
5032
5033 return DBG_STATUS_OK;
5034 }
5035
5036 bool ecore_is_overlapping_enable_mask(struct ecore_hwfn *p_hwfn,
5037 u8 enable_mask,
5038 u8 right_shift)
5039 {
5040 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5041 u8 curr_shifted_enable_mask, shifted_enable_mask;
5042 u32 block_id;
5043
5044 shifted_enable_mask = SHR(enable_mask, VALUES_PER_CYCLE, right_shift);
5045
5046 if (dev_data->bus.num_enabled_blocks) {
5047 for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
5048 struct dbg_bus_block_data *block_bus = &dev_data->bus.blocks[block_id];
5049
5050 if (!GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK))
5051 continue;
5052
5053 curr_shifted_enable_mask =
5054 SHR(GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK),
5055 VALUES_PER_CYCLE,
5056 GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_RIGHT_SHIFT));
5057 if (shifted_enable_mask & curr_shifted_enable_mask)
5058 return true;
5059 }
5060 }
5061
5062 return false;
5063 }
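/* Informal example, assuming SHR() performs a cyclic right shift within a
 * VALUES_PER_CYCLE-bit field: a block enabled with mask 0b0011 and
 * right_shift 1 would occupy dword slots 0b1001 after shifting. A second
 * block whose shifted mask is 0b0100 is then accepted, while one whose
 * shifted mask is 0b1000 overlaps, making this function return true and the
 * caller fail with DBG_STATUS_INPUT_OVERLAP.
 */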
5064
5065 enum dbg_status ecore_dbg_bus_enable_block(struct ecore_hwfn *p_hwfn,
5066 struct ecore_ptt *p_ptt,
5067 enum block_id block_id,
5068 u8 line_num,
5069 u8 enable_mask,
5070 u8 right_shift,
5071 u8 force_valid_mask,
5072 u8 force_frame_mask)
5073 {
5074 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5075 struct block_defs *block = s_block_defs[block_id];
5076 struct dbg_bus_block_data *block_bus;
5077 struct dbg_bus_block *block_desc;
5078
5079 block_bus = &dev_data->bus.blocks[block_id];
5080 block_desc = get_dbg_bus_block_desc(p_hwfn, block_id);
5081
5082 DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_enable_block: block = %d, line_num = %d, enable_mask = 0x%x, right_shift = %d, force_valid_mask = 0x%x, force_frame_mask = 0x%x\n", block_id, line_num, enable_mask, right_shift, force_valid_mask, force_frame_mask);
5083
5084 if (dev_data->bus.state != DBG_BUS_STATE_READY)
5085 return DBG_STATUS_DBG_BLOCK_NOT_RESET;
5086 if (block_id >= MAX_BLOCK_ID)
5087 return DBG_STATUS_INVALID_ARGS;
5088 if (GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK))
5089 return DBG_STATUS_BLOCK_ALREADY_ENABLED;
5090 if (!block->has_dbg_bus[dev_data->chip_id] ||
5091 line_num >= NUM_DBG_LINES(block_desc) ||
5092 !enable_mask ||
5093 enable_mask > MAX_CYCLE_VALUES_MASK ||
5094 force_valid_mask > MAX_CYCLE_VALUES_MASK ||
5095 force_frame_mask > MAX_CYCLE_VALUES_MASK ||
5096 right_shift > VALUES_PER_CYCLE - 1)
5097 return DBG_STATUS_INVALID_ARGS;
5098 if (dev_data->block_in_reset[block_id])
5099 return DBG_STATUS_BLOCK_IN_RESET;
5100 if (!dev_data->bus.unify_inputs && ecore_is_overlapping_enable_mask(p_hwfn, enable_mask, right_shift))
5101 return DBG_STATUS_INPUT_OVERLAP;
5102
5103 dev_data->bus.blocks[block_id].line_num = line_num;
5104 SET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK, enable_mask);
5105 SET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_RIGHT_SHIFT, right_shift);
5106 SET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_FORCE_VALID_MASK, force_valid_mask);
5107 SET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_FORCE_FRAME_MASK, force_frame_mask);
5108
5109 dev_data->bus.num_enabled_blocks++;
5110
5111 return DBG_STATUS_OK;
5112 }
5113
5114 enum dbg_status ecore_dbg_bus_enable_storm(struct ecore_hwfn *p_hwfn,
5115 enum dbg_storms storm,
5116 enum dbg_bus_storm_modes storm_mode)
5117 {
5118 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5119
5120 DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_enable_storm: storm = %d, storm_mode = %d\n", storm, storm_mode);
5121
5122 if (dev_data->bus.state != DBG_BUS_STATE_READY)
5123 return DBG_STATUS_DBG_BLOCK_NOT_RESET;
5124 if (dev_data->bus.hw_dwords >= 4)
5125 return DBG_STATUS_HW_ONLY_RECORDING;
5126 if (storm >= MAX_DBG_STORMS)
5127 return DBG_STATUS_INVALID_ARGS;
5128 if (storm_mode >= MAX_DBG_BUS_STORM_MODES)
5129 return DBG_STATUS_INVALID_ARGS;
5130 if (dev_data->bus.unify_inputs)
5131 return DBG_STATUS_INVALID_ARGS;
5132
5133 if (dev_data->bus.storms[storm].enabled)
5134 return DBG_STATUS_STORM_ALREADY_ENABLED;
5135
5136 dev_data->bus.storms[storm].enabled = true;
5137 dev_data->bus.storms[storm].mode = (u8)storm_mode;
5138 dev_data->bus.storms[storm].hw_id = dev_data->bus.num_enabled_storms;
5139
5140 dev_data->bus.num_enabled_storms++;
5141
5142 return DBG_STATUS_OK;
5143 }
5144
5145 enum dbg_status ecore_dbg_bus_enable_timestamp(struct ecore_hwfn *p_hwfn,
5146 struct ecore_ptt *p_ptt,
5147 u8 valid_mask,
5148 u8 frame_mask,
5149 u32 tick_len)
5150 {
5151 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5152
5153 DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_enable_timestamp: valid_mask = 0x%x, frame_mask = 0x%x, tick_len = %d\n", valid_mask, frame_mask, tick_len);
5154
5155 if (dev_data->bus.state != DBG_BUS_STATE_READY)
5156 return DBG_STATUS_DBG_BLOCK_NOT_RESET;
5157 if (valid_mask > 0x7 || frame_mask > 0x7)
5158 return DBG_STATUS_INVALID_ARGS;
5159 if (!dev_data->bus.unify_inputs && ecore_is_overlapping_enable_mask(p_hwfn, 0x1, 0))
5160 return DBG_STATUS_INPUT_OVERLAP;
5161
5162 dev_data->bus.timestamp_input_en = true;
5163 dev_data->bus.num_enabled_blocks++;
5164
5165 SET_FIELD(dev_data->bus.blocks[BLOCK_DBG].data, DBG_BUS_BLOCK_DATA_ENABLE_MASK, 0x1);
5166
5167 ecore_wr(p_hwfn, p_ptt, DBG_REG_TIMESTAMP_VALID_EN, valid_mask);
5168 ecore_wr(p_hwfn, p_ptt, DBG_REG_TIMESTAMP_FRAME_EN, frame_mask);
5169 ecore_wr(p_hwfn, p_ptt, DBG_REG_TIMESTAMP_TICK, tick_len);
5170
5171 return DBG_STATUS_OK;
5172 }
5173
5174 enum dbg_status ecore_dbg_bus_add_eid_range_sem_filter(struct ecore_hwfn *p_hwfn,
5175 enum dbg_storms storm_id,
5176 u8 min_eid,
5177 u8 max_eid)
5178 {
5179 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5180 struct dbg_bus_storm_data *storm_bus;
5181
5182 storm_bus = &dev_data->bus.storms[storm_id];
5183
5184 DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_add_eid_range_sem_filter: storm = %d, min_eid = 0x%x, max_eid = 0x%x\n", storm_id, min_eid, max_eid);
5185
5186 if (storm_id >= MAX_DBG_STORMS)
5187 return DBG_STATUS_INVALID_ARGS;
5188 if (min_eid > max_eid)
5189 return DBG_STATUS_INVALID_ARGS;
5190 if (!storm_bus->enabled)
5191 return DBG_STATUS_STORM_NOT_ENABLED;
5192
5193 storm_bus->eid_filter_en = 1;
5194 storm_bus->eid_range_not_mask = 1;
5195 storm_bus->eid_filter_params.range.min = min_eid;
5196 storm_bus->eid_filter_params.range.max = max_eid;
5197
5198 return DBG_STATUS_OK;
5199 }
5200
5201 enum dbg_status ecore_dbg_bus_add_eid_mask_sem_filter(struct ecore_hwfn *p_hwfn,
5202 enum dbg_storms storm_id,
5203 u8 eid_val,
5204 u8 eid_mask)
5205 {
5206 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5207 struct dbg_bus_storm_data *storm_bus;
5208
5209 storm_bus = &dev_data->bus.storms[storm_id];
5210
5211 DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_add_eid_mask_sem_filter: storm = %d, eid_val = 0x%x, eid_mask = 0x%x\n", storm_id, eid_val, eid_mask);
5212
5213 if (storm_id >= MAX_DBG_STORMS)
5214 return DBG_STATUS_INVALID_ARGS;
5215 if (!storm_bus->enabled)
5216 return DBG_STATUS_STORM_NOT_ENABLED;
5217
5218 storm_bus->eid_filter_en = 1;
5219 storm_bus->eid_range_not_mask = 0;
5220 storm_bus->eid_filter_params.mask.val = eid_val;
5221 storm_bus->eid_filter_params.mask.mask = eid_mask;
5222
5223 return DBG_STATUS_OK;
5224 }
5225
5226 enum dbg_status ecore_dbg_bus_add_cid_sem_filter(struct ecore_hwfn *p_hwfn,
5227 enum dbg_storms storm_id,
5228 u32 cid)
5229 {
5230 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5231 struct dbg_bus_storm_data *storm_bus;
5232
5233 storm_bus = &dev_data->bus.storms[storm_id];
5234
5235 DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_add_cid_sem_filter: storm = %d, cid = 0x%x\n", storm_id, cid);
5236
5237 if (storm_id >= MAX_DBG_STORMS)
5238 return DBG_STATUS_INVALID_ARGS;
5239 if (!storm_bus->enabled)
5240 return DBG_STATUS_STORM_NOT_ENABLED;
5241
5242 storm_bus->cid_filter_en = 1;
5243 storm_bus->cid = cid;
5244
5245 return DBG_STATUS_OK;
5246 }
5247
5248 enum dbg_status ecore_dbg_bus_enable_filter(struct ecore_hwfn *p_hwfn,
5249 struct ecore_ptt *p_ptt,
5250 enum block_id block_id,
5251 u8 const_msg_len)
5252 {
5253 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5254
5255 DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_enable_filter: block = %d, const_msg_len = %d\n", block_id, const_msg_len);
5256
5257 if (dev_data->bus.state != DBG_BUS_STATE_READY)
5258 return DBG_STATUS_DBG_BLOCK_NOT_RESET;
5259 if (dev_data->bus.filter_en)
5260 return DBG_STATUS_FILTER_ALREADY_ENABLED;
5261 if (block_id >= MAX_BLOCK_ID)
5262 return DBG_STATUS_INVALID_ARGS;
5263 if (!GET_FIELD(dev_data->bus.blocks[block_id].data, DBG_BUS_BLOCK_DATA_ENABLE_MASK))
5264 return DBG_STATUS_BLOCK_NOT_ENABLED;
5265 if (!dev_data->bus.unify_inputs)
5266 return DBG_STATUS_FILTER_BUG;
5267
5268 dev_data->bus.filter_en = true;
5269 dev_data->bus.next_constraint_id = 0;
5270 dev_data->bus.adding_filter = true;
5271
5272 /* HW ID is set to 0 due to required unifyInputs */
5273 ecore_wr(p_hwfn, p_ptt, DBG_REG_FILTER_ID_NUM, 0);
5274 ecore_wr(p_hwfn, p_ptt, DBG_REG_FILTER_MSG_LENGTH_ENABLE, const_msg_len > 0 ? 1 : 0);
5275 if (const_msg_len > 0)
5276 ecore_wr(p_hwfn, p_ptt, DBG_REG_FILTER_MSG_LENGTH, const_msg_len - 1);
5277
5278 return DBG_STATUS_OK;
5279 }
5280
5281 enum dbg_status ecore_dbg_bus_enable_trigger(struct ecore_hwfn *p_hwfn,
5282 struct ecore_ptt *p_ptt,
5283 bool rec_pre_trigger,
5284 u8 pre_chunks,
5285 bool rec_post_trigger,
5286 u32 post_cycles,
5287 bool filter_pre_trigger,
5288 bool filter_post_trigger)
5289 {
5290 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5291 enum dbg_bus_post_trigger_types post_trigger_type;
5292 enum dbg_bus_pre_trigger_types pre_trigger_type;
5293 struct dbg_bus_data *bus = &dev_data->bus;
5294
5295 DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_enable_trigger: rec_pre_trigger = %d, pre_chunks = %d, rec_post_trigger = %d, post_cycles = %d, filter_pre_trigger = %d, filter_post_trigger = %d\n", rec_pre_trigger, pre_chunks, rec_post_trigger, post_cycles, filter_pre_trigger, filter_post_trigger);
5296
5297 if (bus->state != DBG_BUS_STATE_READY)
5298 return DBG_STATUS_DBG_BLOCK_NOT_RESET;
5299 if (bus->trigger_en)
5300 return DBG_STATUS_TRIGGER_ALREADY_ENABLED;
5301 if (rec_pre_trigger && pre_chunks >= INT_BUF_SIZE_IN_CHUNKS)
5302 return DBG_STATUS_INVALID_ARGS;
5303
5304 bus->trigger_en = true;
5305 bus->filter_pre_trigger = filter_pre_trigger;
5306 bus->filter_post_trigger = filter_post_trigger;
5307
5308 if (rec_pre_trigger) {
5309 pre_trigger_type = pre_chunks ? DBG_BUS_PRE_TRIGGER_NUM_CHUNKS : DBG_BUS_PRE_TRIGGER_START_FROM_ZERO;
5310 ecore_wr(p_hwfn, p_ptt, DBG_REG_RCRD_ON_WINDOW_PRE_NUM_CHUNKS, pre_chunks);
5311 }
5312 else {
5313 pre_trigger_type = DBG_BUS_PRE_TRIGGER_DROP;
5314 }
5315
5316 if (rec_post_trigger) {
5317 post_trigger_type = DBG_BUS_POST_TRIGGER_RECORD;
5318 ecore_wr(p_hwfn, p_ptt, DBG_REG_RCRD_ON_WINDOW_POST_NUM_CYCLES, post_cycles ? post_cycles : 0xffffffff);
5319 }
5320 else {
5321 post_trigger_type = DBG_BUS_POST_TRIGGER_DROP;
5322 }
5323
5324 ecore_wr(p_hwfn, p_ptt, DBG_REG_RCRD_ON_WINDOW_PRE_TRGR_EVNT_MODE, pre_trigger_type);
5325 ecore_wr(p_hwfn, p_ptt, DBG_REG_RCRD_ON_WINDOW_POST_TRGR_EVNT_MODE, post_trigger_type);
5326 ecore_wr(p_hwfn, p_ptt, DBG_REG_TRIGGER_ENABLE, 1);
5327
5328 return DBG_STATUS_OK;
5329 }
5330
5331 enum dbg_status ecore_dbg_bus_add_trigger_state(struct ecore_hwfn *p_hwfn,
5332 struct ecore_ptt *p_ptt,
5333 enum block_id block_id,
5334 u8 const_msg_len,
5335 u16 count_to_next)
5336 {
5337 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5338 struct dbg_bus_data *bus = &dev_data->bus;
5339 struct dbg_bus_block_data *block_bus;
5340 u8 reg_offset;
5341
5342 DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_add_trigger_state: block = %d, const_msg_len = %d, count_to_next = %d\n", block_id, const_msg_len, count_to_next);
5343
5344 block_bus = &bus->blocks[block_id];
5345
5346 if (!bus->trigger_en)
5347 return DBG_STATUS_TRIGGER_NOT_ENABLED;
5348 if (bus->next_trigger_state == MAX_TRIGGER_STATES)
5349 return DBG_STATUS_TOO_MANY_TRIGGER_STATES;
5350 if (block_id >= MAX_BLOCK_ID)
5351 return DBG_STATUS_INVALID_ARGS;
5352 if (!GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK))
5353 return DBG_STATUS_BLOCK_NOT_ENABLED;
5354 if (!count_to_next)
5355 return DBG_STATUS_INVALID_ARGS;
5356
5357 bus->next_constraint_id = 0;
5358 bus->adding_filter = false;
5359
5360 /* Store block's shifted enable mask */
5361 SET_FIELD(bus->trigger_states[dev_data->bus.next_trigger_state].data, DBG_BUS_TRIGGER_STATE_DATA_BLOCK_SHIFTED_ENABLE_MASK, SHR(GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK),
5362 VALUES_PER_CYCLE,
5363 GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_RIGHT_SHIFT)));
5364
5365 /* Set trigger state registers */
5366 reg_offset = bus->next_trigger_state * BYTES_IN_DWORD;
5367 ecore_wr(p_hwfn, p_ptt, DBG_REG_TRIGGER_STATE_MSG_LENGTH_ENABLE_0 + reg_offset, const_msg_len > 0 ? 1 : 0);
5368 if (const_msg_len > 0)
5369 ecore_wr(p_hwfn, p_ptt, DBG_REG_TRIGGER_STATE_MSG_LENGTH_0 + reg_offset, const_msg_len - 1);
5370
5371 /* Set trigger set registers */
5372 reg_offset = bus->next_trigger_state * TRIGGER_SETS_PER_STATE * BYTES_IN_DWORD;
5373 ecore_wr(p_hwfn, p_ptt, DBG_REG_TRIGGER_STATE_SET_COUNT_0 + reg_offset, count_to_next);
5374
5375 /* Set next state to final state, and overwrite previous next state
5376 * (if any).
5377 */
5378 ecore_wr(p_hwfn, p_ptt, DBG_REG_TRIGGER_STATE_SET_NXT_STATE_0 + reg_offset, MAX_TRIGGER_STATES);
5379 if (bus->next_trigger_state > 0) {
5380 reg_offset = (bus->next_trigger_state - 1) * TRIGGER_SETS_PER_STATE * BYTES_IN_DWORD;
5381 ecore_wr(p_hwfn, p_ptt, DBG_REG_TRIGGER_STATE_SET_NXT_STATE_0 + reg_offset, bus->next_trigger_state);
5382 }
5383
5384 bus->next_trigger_state++;
5385
5386 return DBG_STATUS_OK;
5387 }
5388
5389 enum dbg_status ecore_dbg_bus_add_constraint(struct ecore_hwfn *p_hwfn,
5390 struct ecore_ptt *p_ptt,
5391 enum dbg_bus_constraint_ops constraint_op,
5392 u32 data_val,
5393 u32 data_mask,
5394 bool compare_frame,
5395 u8 frame_bit,
5396 u8 cycle_offset,
5397 u8 dword_offset_in_cycle,
5398 bool is_mandatory)
5399 {
5400 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5401 struct dbg_bus_data *bus = &dev_data->bus;
5402 u16 dword_offset, range = 0;
5403
5404 DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_add_constraint: op = %d, data_val = 0x%x, data_mask = 0x%x, compare_frame = %d, frame_bit = %d, cycle_offset = %d, dword_offset_in_cycle = %d, is_mandatory = %d\n", constraint_op, data_val, data_mask, compare_frame, frame_bit, cycle_offset, dword_offset_in_cycle, is_mandatory);
5405
5406 if (!bus->filter_en && !dev_data->bus.trigger_en)
5407 return DBG_STATUS_CANT_ADD_CONSTRAINT;
5408 if (bus->trigger_en && !bus->adding_filter && !bus->next_trigger_state)
5409 return DBG_STATUS_CANT_ADD_CONSTRAINT;
5410 if (bus->next_constraint_id >= MAX_CONSTRAINTS)
5411 return DBG_STATUS_TOO_MANY_CONSTRAINTS;
5412 if (constraint_op >= MAX_DBG_BUS_CONSTRAINT_OPS || frame_bit > 1 || dword_offset_in_cycle > 3 || (bus->adding_filter && cycle_offset > 3))
5413 return DBG_STATUS_INVALID_ARGS;
5414 if (compare_frame &&
5415 constraint_op != DBG_BUS_CONSTRAINT_OP_EQ &&
5416 constraint_op != DBG_BUS_CONSTRAINT_OP_NE)
5417 return DBG_STATUS_INVALID_ARGS;
5418
5419 dword_offset = cycle_offset * VALUES_PER_CYCLE + dword_offset_in_cycle;
5420
5421 if (!bus->adding_filter) {
5422 u8 curr_trigger_state_id = bus->next_trigger_state - 1;
5423 struct dbg_bus_trigger_state_data *trigger_state;
5424
5425 trigger_state = &bus->trigger_states[curr_trigger_state_id];
5426
5427 /* Check if the selected dword is enabled in the block */
5428 if (!(GET_FIELD(trigger_state->data, DBG_BUS_TRIGGER_STATE_DATA_BLOCK_SHIFTED_ENABLE_MASK) & (u8)(1 << dword_offset_in_cycle)))
5429 return DBG_STATUS_INVALID_TRIGGER_DWORD_OFFSET;
5430
5431 /* Add selected dword to trigger state's dword mask */
5432 SET_FIELD(trigger_state->data, DBG_BUS_TRIGGER_STATE_DATA_CONSTRAINT_DWORD_MASK, GET_FIELD(trigger_state->data, DBG_BUS_TRIGGER_STATE_DATA_CONSTRAINT_DWORD_MASK) | (u8)(1 << dword_offset_in_cycle));
5433 }
5434
5435 /* Prepare data mask and range */
5436 if (constraint_op == DBG_BUS_CONSTRAINT_OP_EQ ||
5437 constraint_op == DBG_BUS_CONSTRAINT_OP_NE) {
5438 data_mask = ~data_mask;
5439 }
5440 else {
5441 u8 lsb, width;
5442
5443 /* Extract lsb and width from mask */
5444 if (!data_mask)
5445 return DBG_STATUS_INVALID_ARGS;
5446
5447 for (lsb = 0; lsb < 32 && !(data_mask & 1); lsb++)
5448 data_mask >>= 1;
5449
5450 for (width = 0; width < 32 - lsb && (data_mask & 1); width++)
5451 data_mask >>= 1;
5452 if (data_mask)
5453 return DBG_STATUS_INVALID_ARGS;
5454 range = (lsb << 5) | (width - 1);
5455 }
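/* Worked example of the range encoding above: data_mask 0x00000ff0 gives
 * lsb = 4 and width = 8, so range = (4 << 5) | (8 - 1) = 0x87. A
 * non-contiguous mask such as 0x00000505 leaves residual bits after the
 * second loop and is rejected with DBG_STATUS_INVALID_ARGS.
 */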
5456
5457 /* Add constraint */
5458 ecore_bus_set_constraint(p_hwfn, p_ptt, dev_data->bus.adding_filter ? 1 : 0,
5459 dev_data->bus.next_constraint_id,
5460 s_constraint_op_defs[constraint_op].hw_op_val,
5461 data_val, data_mask, frame_bit,
5462 compare_frame ? 0 : 1, dword_offset, range,
5463 s_constraint_op_defs[constraint_op].is_cyclic ? 1 : 0,
5464 is_mandatory ? 1 : 0);
5465
5466 /* If first constraint, fill other 3 constraints with dummy constraints
5467 * that always match (using the same offset).
5468 */
5469 if (!dev_data->bus.next_constraint_id) {
5470 u8 i;
5471
5472 for (i = 1; i < MAX_CONSTRAINTS; i++)
5473 ecore_bus_set_constraint(p_hwfn, p_ptt, bus->adding_filter ? 1 : 0,
5474 i, DBG_BUS_CONSTRAINT_OP_EQ, 0, 0xffffffff,
5475 0, 1, dword_offset, 0, 0, 1);
5476 }
5477
5478 bus->next_constraint_id++;
5479
5480 return DBG_STATUS_OK;
5481 }
5482
5483 /* Configure the DBG block client mask */
5484 void ecore_config_dbg_block_client_mask(struct ecore_hwfn *p_hwfn,
5485 struct ecore_ptt *p_ptt)
5486 {
5487 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5488 struct dbg_bus_data *bus = &dev_data->bus;
5489 u32 block_id, client_mask = 0;
5490 u8 storm_id;
5491
5492 /* Update client mask for Storm inputs */
5493 if (bus->num_enabled_storms)
5494 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
5495 struct storm_defs *storm = &s_storm_defs[storm_id];
5496
5497 if (bus->storms[storm_id].enabled)
5498 client_mask |= (1 << storm->dbg_client_id[dev_data->chip_id]);
5499 }
5500
5501 /* Update client mask for block inputs */
5502 if (bus->num_enabled_blocks) {
5503 for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
5504 struct dbg_bus_block_data *block_bus = &bus->blocks[block_id];
5505 struct block_defs *block = s_block_defs[block_id];
5506
5507 if (GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK) && block_id != BLOCK_DBG)
5508 client_mask |= (1 << block->dbg_client_id[dev_data->chip_id]);
5509 }
5510 }
5511
5512 /* Update client mask for GRC input */
5513 if (bus->grc_input_en)
5514 client_mask |= (1 << DBG_BUS_CLIENT_CPU);
5515
5516 /* Update client mask for timestamp input */
5517 if (bus->timestamp_input_en)
5518 client_mask |= (1 << DBG_BUS_CLIENT_TIMESTAMP);
5519
5520 ecore_bus_enable_clients(p_hwfn, p_ptt, client_mask);
5521 }
5522
5523 /* Configure the DBG block framing mode */
5524 enum dbg_status ecore_config_dbg_block_framing_mode(struct ecore_hwfn *p_hwfn,
5525 struct ecore_ptt *p_ptt)
5526 {
5527 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5528 struct dbg_bus_data *bus = &dev_data->bus;
5529 enum dbg_bus_frame_modes dbg_framing_mode;
5530 u32 block_id;
5531
5532 if (!bus->hw_dwords && bus->num_enabled_blocks) {
5533 struct dbg_bus_line *line_desc;
5534 u8 hw_dwords;
5535
5536 /* Choose either 4 HW dwords (128-bit mode) or 8 HW dwords
5537 * (256-bit mode).
5538 */
5539 for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
5540 struct dbg_bus_block_data *block_bus = &bus->blocks[block_id];
5541
5542 if (!GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK))
5543 continue;
5544
5545 line_desc = get_dbg_bus_line_desc(p_hwfn, (enum block_id)block_id);
5546 hw_dwords = line_desc && GET_FIELD(line_desc->data, DBG_BUS_LINE_IS_256B) ? 8 : 4;
5547
5548 if (bus->hw_dwords > 0 && bus->hw_dwords != hw_dwords)
5549 return DBG_STATUS_NON_MATCHING_LINES;
5550
5551 /* The DBG block doesn't support triggers and
5552 * filters on 256b debug lines.
5553 */
5554 if (hw_dwords == 8 && (bus->trigger_en || bus->filter_en))
5555 return DBG_STATUS_NO_FILTER_TRIGGER_64B;
5556
5557 bus->hw_dwords = hw_dwords;
5558 }
5559 }
5560
5561 switch (bus->hw_dwords) {
5562 case 0: dbg_framing_mode = DBG_BUS_FRAME_MODE_0HW_4ST; break;
5563 case 4: dbg_framing_mode = DBG_BUS_FRAME_MODE_4HW_0ST; break;
5564 case 8: dbg_framing_mode = DBG_BUS_FRAME_MODE_8HW_0ST; break;
5565 default: dbg_framing_mode = DBG_BUS_FRAME_MODE_0HW_4ST; break;
5566 }
5567 ecore_bus_set_framing_mode(p_hwfn, p_ptt, dbg_framing_mode);
5568
5569 return DBG_STATUS_OK;
5570 }
5571
5572 /* Configure the DBG block Storm data */
5573 enum dbg_status ecore_config_storm_inputs(struct ecore_hwfn *p_hwfn,
5574 struct ecore_ptt *p_ptt)
5575 {
5576 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5577 struct dbg_bus_data *bus = &dev_data->bus;
5578 u8 storm_id, i, next_storm_id = 0;
5579 u32 storm_id_mask = 0;
5580
5581 /* Check if SEMI sync FIFO is empty */
5582 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
5583 struct dbg_bus_storm_data *storm_bus = &bus->storms[storm_id];
5584 struct storm_defs *storm = &s_storm_defs[storm_id];
5585
5586 if (storm_bus->enabled && !ecore_rd(p_hwfn, p_ptt, storm->sem_sync_dbg_empty_addr))
5587 return DBG_STATUS_SEMI_FIFO_NOT_EMPTY;
5588 }
5589
5590 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
5591 struct dbg_bus_storm_data *storm_bus = &bus->storms[storm_id];
5592
5593 if (storm_bus->enabled)
5594 storm_id_mask |= (storm_bus->hw_id << (storm_id * HW_ID_BITS));
5595 }
5596
5597 ecore_wr(p_hwfn, p_ptt, DBG_REG_STORM_ID_NUM, storm_id_mask);
5598
5599 /* Disable storm stall if recording to internal buffer in one-shot */
5600 ecore_wr(p_hwfn, p_ptt, DBG_REG_NO_GRANT_ON_FULL, (dev_data->bus.target == DBG_BUS_TARGET_ID_INT_BUF && bus->one_shot_en) ? 0 : 1);
5601
5602 /* Configure calendar */
5603 for (i = 0; i < NUM_CALENDAR_SLOTS; i++, next_storm_id = (next_storm_id + 1) % MAX_DBG_STORMS) {
5604
5605 /* Find next enabled Storm */
5606 for (; !dev_data->bus.storms[next_storm_id].enabled; next_storm_id = (next_storm_id + 1) % MAX_DBG_STORMS);
5607
5608 /* Configure calendar slot */
5609 ecore_wr(p_hwfn, p_ptt, DBG_REG_CALENDAR_SLOT0 + DWORDS_TO_BYTES(i), next_storm_id);
5610 }
5611
5612 return DBG_STATUS_OK;
5613 }
5614
5615 /* Assign a HW ID to each dword/qword:
5616 * If the inputs are unified, HW ID 0 is assigned to all dwords/qwords.
5617 * Otherwise, we would like to assign a different HW ID to each dword, to
5618 * avoid data synchronization issues. However, we need to check if there is
5619 * a trigger state for which more than one dword has a constraint. If there
5620 * is, we cannot assign a different HW ID to each dword (since a trigger
5621 * state has a single HW ID), so we assign a different HW ID to each block.
5622 */
5623 void ecore_assign_hw_ids(struct ecore_hwfn *p_hwfn,
5624 u8 hw_ids[VALUES_PER_CYCLE])
5625 {
5626 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5627 struct dbg_bus_data *bus = &dev_data->bus;
5628 bool hw_id_per_dword = true;
5629 u8 val_id, state_id;
5630 u32 block_id;
5631
5632 OSAL_MEMSET(hw_ids, 0, VALUES_PER_CYCLE);
5633
5634 if (bus->unify_inputs)
5635 return;
5636
5637 if (bus->trigger_en) {
5638 for (state_id = 0; state_id < bus->next_trigger_state && hw_id_per_dword; state_id++) {
5639 u8 num_dwords = 0;
5640
5641 for (val_id = 0; val_id < VALUES_PER_CYCLE; val_id++)
5642 if (GET_FIELD(bus->trigger_states[state_id].data, DBG_BUS_TRIGGER_STATE_DATA_CONSTRAINT_DWORD_MASK) & (1 << val_id))
5643 num_dwords++;
5644
5645 if (num_dwords > 1)
5646 hw_id_per_dword = false;
5647 }
5648 }
5649
5650 if (hw_id_per_dword) {
5651
5652 /* Assign a different HW ID for each dword */
5653 for (val_id = 0; val_id < VALUES_PER_CYCLE; val_id++)
5654 hw_ids[val_id] = val_id;
5655 }
5656 else {
5657 u8 shifted_enable_mask, next_hw_id = 0;
5658
5659 /* Assign HW IDs according to the blocks' enable order */
5660 for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
5661 struct dbg_bus_block_data *block_bus = &bus->blocks[block_id];
5662
5663 if (!GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK))
5664 continue;
5665
5666 block_bus->hw_id = next_hw_id++;
5667 if (!block_bus->hw_id)
5668 continue;
5669
5670 shifted_enable_mask =
5671 SHR(GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK),
5672 VALUES_PER_CYCLE,
5673 GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_RIGHT_SHIFT));
5674
5675 for (val_id = 0; val_id < VALUES_PER_CYCLE; val_id++)
5676 if (shifted_enable_mask & (1 << val_id))
5677 hw_ids[val_id] = block_bus->hw_id;
5678 }
5679 }
5680 }
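/* Example of the two assignment modes above (derived from the code): when no
 * trigger state constrains more than one dword, each dword gets its own ID,
 * i.e. hw_ids[] = { 0, 1, 2, 3 }. Otherwise IDs follow the blocks' enable
 * order - the first enabled block keeps ID 0, the second gets ID 1 for all
 * of its dwords, and so on.
 */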
5681
5682 /* Configure the DBG block HW blocks data */
5683 void ecore_config_block_inputs(struct ecore_hwfn *p_hwfn,
5684 struct ecore_ptt *p_ptt)
5685 {
5686 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5687 struct dbg_bus_data *bus = &dev_data->bus;
5688 u8 hw_ids[VALUES_PER_CYCLE];
5689 u8 val_id, state_id;
5690
5691 ecore_assign_hw_ids(p_hwfn, hw_ids);
5692
5693 /* Assign a HW ID to each trigger state */
5694 if (dev_data->bus.trigger_en) {
5695 for (state_id = 0; state_id < bus->next_trigger_state; state_id++) {
5696 for (val_id = 0; val_id < VALUES_PER_CYCLE; val_id++) {
5697 u8 state_data = bus->trigger_states[state_id].data;
5698
5699 if (GET_FIELD(state_data, DBG_BUS_TRIGGER_STATE_DATA_CONSTRAINT_DWORD_MASK) & (1 << val_id)) {
5700 ecore_wr(p_hwfn, p_ptt, DBG_REG_TRIGGER_STATE_ID_0 + state_id * BYTES_IN_DWORD, hw_ids[val_id]);
5701 break;
5702 }
5703 }
5704 }
5705 }
5706
5707 /* Configure HW ID mask */
5708 dev_data->bus.hw_id_mask = 0;
5709 for (val_id = 0; val_id < VALUES_PER_CYCLE; val_id++)
5710 bus->hw_id_mask |= (hw_ids[val_id] << (val_id * HW_ID_BITS));
5711 ecore_wr(p_hwfn, p_ptt, DBG_REG_HW_ID_NUM, bus->hw_id_mask);
5712
5713 /* Configure additional K2 PCIE registers */
5714 if (dev_data->chip_id == CHIP_K2 &&
5715 (GET_FIELD(bus->blocks[BLOCK_PCIE].data, DBG_BUS_BLOCK_DATA_ENABLE_MASK) ||
5716 GET_FIELD(bus->blocks[BLOCK_PHY_PCIE].data, DBG_BUS_BLOCK_DATA_ENABLE_MASK))) {
5717 ecore_wr(p_hwfn, p_ptt, PCIE_REG_DBG_REPEAT_THRESHOLD_COUNT_K2_E5, 1);
5718 ecore_wr(p_hwfn, p_ptt, PCIE_REG_DBG_FW_TRIGGER_ENABLE_K2_E5, 1);
5719 }
5720 }
5721
5722 enum dbg_status ecore_dbg_bus_start(struct ecore_hwfn *p_hwfn,
5723 struct ecore_ptt *p_ptt)
5724 {
5725 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5726 struct dbg_bus_data *bus = &dev_data->bus;
5727 enum dbg_bus_filter_types filter_type;
5728 enum dbg_status status;
5729 u32 block_id;
5730 u8 storm_id;
5731
5732 DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_start\n");
5733
5734 if (bus->state != DBG_BUS_STATE_READY)
5735 return DBG_STATUS_DBG_BLOCK_NOT_RESET;
5736
5737 /* Check if any input was enabled */
5738 if (!bus->num_enabled_storms &&
5739 !bus->num_enabled_blocks &&
5740 !bus->rcv_from_other_engine)
5741 return DBG_STATUS_NO_INPUT_ENABLED;
5742
5743 /* Check if too many input types were enabled (storm+dbgmux) */
5744 if (bus->num_enabled_storms && bus->num_enabled_blocks)
5745 return DBG_STATUS_TOO_MANY_INPUTS;
5746
5747 /* Configure framing mode */
5748 if ((status = ecore_config_dbg_block_framing_mode(p_hwfn, p_ptt)) != DBG_STATUS_OK)
5749 return status;
5750
5751 /* Configure DBG block for Storm inputs */
5752 if (bus->num_enabled_storms)
5753 if ((status = ecore_config_storm_inputs(p_hwfn, p_ptt)) != DBG_STATUS_OK)
5754 return status;
5755
5756 /* Configure DBG block for block inputs */
5757 if (bus->num_enabled_blocks)
5758 ecore_config_block_inputs(p_hwfn, p_ptt);
5759
5760 /* Configure filter type */
5761 if (bus->filter_en) {
5762 if (bus->trigger_en) {
5763 if (bus->filter_pre_trigger)
5764 filter_type = bus->filter_post_trigger ? DBG_BUS_FILTER_TYPE_ON : DBG_BUS_FILTER_TYPE_PRE;
5765 else
5766 filter_type = bus->filter_post_trigger ? DBG_BUS_FILTER_TYPE_POST : DBG_BUS_FILTER_TYPE_OFF;
5767 }
5768 else {
5769 filter_type = DBG_BUS_FILTER_TYPE_ON;
5770 }
5771 }
5772 else {
5773 filter_type = DBG_BUS_FILTER_TYPE_OFF;
5774 }
5775 ecore_wr(p_hwfn, p_ptt, DBG_REG_FILTER_ENABLE, filter_type);
5776
5777 /* Restart timestamp */
5778 ecore_wr(p_hwfn, p_ptt, DBG_REG_TIMESTAMP, 0);
5779
5780 /* Enable debug block */
5781 ecore_bus_enable_dbg_block(p_hwfn, p_ptt, 1);
5782
5783 /* Configure enabled blocks - must be done before the DBG block is
5784 * enabled.
5785 */
5786 if (dev_data->bus.num_enabled_blocks) {
5787 for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
5788 if (!GET_FIELD(dev_data->bus.blocks[block_id].data, DBG_BUS_BLOCK_DATA_ENABLE_MASK) || block_id == BLOCK_DBG)
5789 continue;
5790
5791 ecore_config_dbg_line(p_hwfn, p_ptt, (enum block_id)block_id,
5792 dev_data->bus.blocks[block_id].line_num,
5793 GET_FIELD(dev_data->bus.blocks[block_id].data, DBG_BUS_BLOCK_DATA_ENABLE_MASK),
5794 GET_FIELD(dev_data->bus.blocks[block_id].data, DBG_BUS_BLOCK_DATA_RIGHT_SHIFT),
5795 GET_FIELD(dev_data->bus.blocks[block_id].data, DBG_BUS_BLOCK_DATA_FORCE_VALID_MASK),
5796 GET_FIELD(dev_data->bus.blocks[block_id].data, DBG_BUS_BLOCK_DATA_FORCE_FRAME_MASK));
5797 }
5798 }
5799
5800 /* Configure client mask */
5801 ecore_config_dbg_block_client_mask(p_hwfn, p_ptt);
5802
5803 /* Configure enabled Storms - must be done after the DBG block is
5804 * enabled.
5805 */
5806 if (dev_data->bus.num_enabled_storms)
5807 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++)
5808 if (dev_data->bus.storms[storm_id].enabled)
5809 ecore_bus_enable_storm(p_hwfn, p_ptt, (enum dbg_storms)storm_id, filter_type);
5810
5811 dev_data->bus.state = DBG_BUS_STATE_RECORDING;
5812
5813 return DBG_STATUS_OK;
5814 }
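/* Summary of the filter-type resolution above (restates the code, adds no
 * behavior):
 *
 *	filter_en  trigger_en  pre_trig  post_trig  ->  filter type
 *	false      -           -         -              OFF
 *	true       false       -         -              ON
 *	true       true        true      true           ON
 *	true       true        true      false          PRE
 *	true       true        false     true           POST
 *	true       true        false     false          OFF
 */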
5815
5816 enum dbg_status ecore_dbg_bus_stop(struct ecore_hwfn *p_hwfn,
5817 struct ecore_ptt *p_ptt)
5818 {
5819 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5820 struct dbg_bus_data *bus = &dev_data->bus;
5821 enum dbg_status status = DBG_STATUS_OK;
5822
5823 DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_stop\n");
5824
5825 if (bus->state != DBG_BUS_STATE_RECORDING)
5826 return DBG_STATUS_RECORDING_NOT_STARTED;
5827
5828 status = ecore_bus_disable_inputs(p_hwfn, p_ptt, true);
5829 if (status != DBG_STATUS_OK)
5830 return status;
5831
5832 ecore_wr(p_hwfn, p_ptt, DBG_REG_CPU_TIMEOUT, 1);
5833
5834 OSAL_MSLEEP(FLUSH_DELAY_MS);
5835
5836 ecore_bus_enable_dbg_block(p_hwfn, p_ptt, false);
5837
5838 /* Check if trigger worked */
5839 if (bus->trigger_en) {
5840 u32 trigger_state = ecore_rd(p_hwfn, p_ptt, DBG_REG_TRIGGER_STATUS_CUR_STATE);
5841
5842 if (trigger_state != MAX_TRIGGER_STATES)
5843 return DBG_STATUS_DATA_DIDNT_TRIGGER;
5844 }
5845
5846 bus->state = DBG_BUS_STATE_STOPPED;
5847
5848 return status;
5849 }
5850
5851 enum dbg_status ecore_dbg_bus_get_dump_buf_size(struct ecore_hwfn *p_hwfn,
5852 struct ecore_ptt *p_ptt,
5853 u32 *buf_size)
5854 {
5855 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5856 struct dbg_bus_data *bus = &dev_data->bus;
5857 enum dbg_status status;
5858
5859 status = ecore_dbg_dev_init(p_hwfn, p_ptt);
5860
5861 *buf_size = 0;
5862
5863 if (status != DBG_STATUS_OK)
5864 return status;
5865
5866 /* Add dump header */
5867 *buf_size = (u32)ecore_bus_dump_hdr(p_hwfn, p_ptt, OSAL_NULL, false);
5868
5869 switch (bus->target) {
5870 case DBG_BUS_TARGET_ID_INT_BUF:
5871 *buf_size += INT_BUF_SIZE_IN_DWORDS; break;
5872 case DBG_BUS_TARGET_ID_PCI:
5873 *buf_size += BYTES_TO_DWORDS(bus->pci_buf.size); break;
5874 default:
5875 break;
5876 }
5877
5878 /* Dump last section */
5879 *buf_size += ecore_dump_last_section(p_hwfn, OSAL_NULL, 0, false);
5880
5881 return DBG_STATUS_OK;
5882 }
5883
5884 enum dbg_status ecore_dbg_bus_dump(struct ecore_hwfn *p_hwfn,
5885 struct ecore_ptt *p_ptt,
5886 u32 *dump_buf,
5887 u32 buf_size_in_dwords,
5888 u32 *num_dumped_dwords)
5889 {
5890 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5891 u32 min_buf_size_in_dwords, block_id, offset = 0;
5892 struct dbg_bus_data *bus = &dev_data->bus;
5893 enum dbg_status status;
5894 u8 storm_id;
5895
5896 *num_dumped_dwords = 0;
5897
5898 status = ecore_dbg_bus_get_dump_buf_size(p_hwfn, p_ptt, &min_buf_size_in_dwords);
5899 if (status != DBG_STATUS_OK)
5900 return status;
5901
5902 DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_dump: dump_buf = 0x%p, buf_size_in_dwords = %d\n", dump_buf, buf_size_in_dwords);
5903
5904 if (bus->state != DBG_BUS_STATE_RECORDING && bus->state != DBG_BUS_STATE_STOPPED)
5905 return DBG_STATUS_RECORDING_NOT_STARTED;
5906
5907 if (bus->state == DBG_BUS_STATE_RECORDING) {
5908 enum dbg_status stop_state = ecore_dbg_bus_stop(p_hwfn, p_ptt);
5909 if (stop_state != DBG_STATUS_OK)
5910 return stop_state;
5911 }
5912
5913 if (buf_size_in_dwords < min_buf_size_in_dwords)
5914 return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5915
5916 if (bus->target == DBG_BUS_TARGET_ID_PCI && !bus->pci_buf.size)
5917 return DBG_STATUS_PCI_BUF_NOT_ALLOCATED;
5918
5919 /* Dump header */
5920 offset += ecore_bus_dump_hdr(p_hwfn, p_ptt, dump_buf + offset, true);
5921
5922 /* Dump recorded data */
5923 if (bus->target != DBG_BUS_TARGET_ID_NIG) {
5924 u32 recorded_dwords = ecore_bus_dump_data(p_hwfn, p_ptt, dump_buf + offset, true);
5925
5926 if (!recorded_dwords)
5927 return DBG_STATUS_NO_DATA_RECORDED;
5928 if (recorded_dwords % CHUNK_SIZE_IN_DWORDS)
5929 return DBG_STATUS_DUMP_NOT_CHUNK_ALIGNED;
5930 offset += recorded_dwords;
5931 }
5932
5933 /* Dump last section */
5934 offset += ecore_dump_last_section(p_hwfn, dump_buf, offset, true);
5935
5936 /* If recorded to PCI buffer - free the buffer */
5937 ecore_bus_free_pci_buf(p_hwfn);
5938
5939 /* Clear debug bus parameters */
5940 bus->state = DBG_BUS_STATE_IDLE;
5941 bus->num_enabled_blocks = 0;
5942 bus->num_enabled_storms = 0;
5943 bus->filter_en = bus->trigger_en = 0;
5944
5945 for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++)
5946 SET_FIELD(bus->blocks[BLOCK_PCIE].data, DBG_BUS_BLOCK_DATA_ENABLE_MASK, 0);
5947
5948 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
5949 struct dbg_bus_storm_data *storm_bus = &bus->storms[storm_id];
5950
5951 storm_bus->enabled = false;
5952 storm_bus->eid_filter_en = storm_bus->cid_filter_en = 0;
5953 }
5954
5955 *num_dumped_dwords = offset;
5956
5957 return DBG_STATUS_OK;
5958 }
5959
enum dbg_status ecore_dbg_grc_config(struct ecore_hwfn *p_hwfn,
				     enum dbg_grc_params grc_param,
				     u32 val)
{
	int i;

	DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG,
		   "dbg_grc_config: paramId = %d, val = %d\n", grc_param, val);

	/* Initialize the GRC parameters, if they were not yet initialized, so
	 * that the default values are in place before the first parameter is
	 * set.
	 */
	ecore_dbg_grc_init_params(p_hwfn);

	if (grc_param >= MAX_DBG_GRC_PARAMS)
		return DBG_STATUS_INVALID_ARGS;
	if (val < s_grc_param_defs[grc_param].min ||
	    val > s_grc_param_defs[grc_param].max)
		return DBG_STATUS_INVALID_ARGS;

	if (s_grc_param_defs[grc_param].is_preset) {
		/* Preset param */

		/* Disabling a preset is not allowed. Call
		 * dbg_grc_set_params_default instead.
		 */
		if (!val)
			return DBG_STATUS_INVALID_ARGS;

		/* Update all params with the preset values */
		for (i = 0; i < MAX_DBG_GRC_PARAMS; i++) {
			u32 preset_val;

			if (grc_param == DBG_GRC_PARAM_EXCLUDE_ALL)
				preset_val = s_grc_param_defs[i].exclude_all_preset_val;
			else if (grc_param == DBG_GRC_PARAM_CRASH)
				preset_val = s_grc_param_defs[i].crash_preset_val;
			else
				return DBG_STATUS_INVALID_ARGS;

			ecore_grc_set_param(p_hwfn, (enum dbg_grc_params)i,
					    preset_val);
		}
	} else {
		/* Regular param - set its value */
		ecore_grc_set_param(p_hwfn, grc_param, val);
	}

	return DBG_STATUS_OK;
}

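/*
 * Illustrative sketch, not part of the driver API: a common way to narrow a
 * GRC dump is to apply the EXCLUDE_ALL preset and then re-enable only the
 * features of interest. The ECORE_DBG_EXAMPLES guard is hypothetical, and
 * DBG_GRC_PARAM_DUMP_REGS is assumed to be one of the regular (non-preset)
 * GRC parameters.
 */
#ifdef ECORE_DBG_EXAMPLES
static enum dbg_status ecore_dbg_grc_config_example(struct ecore_hwfn *p_hwfn)
{
	enum dbg_status status;

	/* Apply the "exclude all" preset (presets can only be enabled) */
	status = ecore_dbg_grc_config(p_hwfn, DBG_GRC_PARAM_EXCLUDE_ALL, 1);
	if (status != DBG_STATUS_OK)
		return status;

	/* Re-enable a single regular parameter on top of the preset
	 * (assumed parameter name, for illustration only).
	 */
	return ecore_dbg_grc_config(p_hwfn, DBG_GRC_PARAM_DUMP_REGS, 1);
}
#endif /* ECORE_DBG_EXAMPLES */
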
/* Assign default GRC param values */
void ecore_dbg_grc_set_params_default(struct ecore_hwfn *p_hwfn)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u32 i;

	for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
		dev_data->grc.param_val[i] =
			s_grc_param_defs[i].default_val[dev_data->chip_id];
}

enum dbg_status ecore_dbg_grc_get_dump_buf_size(struct ecore_hwfn *p_hwfn,
						struct ecore_ptt *p_ptt,
						u32 *buf_size)
{
	enum dbg_status status = ecore_dbg_dev_init(p_hwfn, p_ptt);

	*buf_size = 0;

	if (status != DBG_STATUS_OK)
		return status;

	if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
	    !s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr ||
	    !s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr ||
	    !s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr ||
	    !s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)
		return DBG_STATUS_DBG_ARRAY_NOT_SET;

	return ecore_grc_dump(p_hwfn, p_ptt, OSAL_NULL, false, buf_size);
}

enum dbg_status ecore_dbg_grc_dump(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt,
				   u32 *dump_buf,
				   u32 buf_size_in_dwords,
				   u32 *num_dumped_dwords)
{
	u32 needed_buf_size_in_dwords;
	enum dbg_status status;

	*num_dumped_dwords = 0;

	status = ecore_dbg_grc_get_dump_buf_size(p_hwfn, p_ptt,
						 &needed_buf_size_in_dwords);
	if (status != DBG_STATUS_OK)
		return status;

	if (buf_size_in_dwords < needed_buf_size_in_dwords)
		return DBG_STATUS_DUMP_BUF_TOO_SMALL;

	/* Doesn't do anything, needed for compile time asserts */
	ecore_static_asserts();

	/* GRC Dump */
	status = ecore_grc_dump(p_hwfn, p_ptt, dump_buf, true,
				num_dumped_dwords);

	/* Revert GRC params to their default */
	ecore_dbg_grc_set_params_default(p_hwfn);

	return status;
}

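/*
 * Illustrative sketch, not part of the driver API: every dump feature in this
 * file follows the same two-phase pattern - query the required size, allocate
 * a buffer, then dump into it. The ECORE_DBG_EXAMPLES guard is hypothetical,
 * and OSAL_VZALLOC/OSAL_VFREE are assumed to follow the usual bcm_osal
 * allocation conventions; the same shape applies to the bus, idle check, MCP
 * trace, FIFO, protection override and FW asserts dumps below.
 */
#ifdef ECORE_DBG_EXAMPLES
static enum dbg_status ecore_dbg_grc_dump_example(struct ecore_hwfn *p_hwfn,
						  struct ecore_ptt *p_ptt)
{
	u32 buf_size_in_dwords, num_dumped_dwords;
	enum dbg_status status;
	u32 *dump_buf;

	/* Phase 1: compute the required buffer size, in dwords */
	status = ecore_dbg_grc_get_dump_buf_size(p_hwfn, p_ptt,
						 &buf_size_in_dwords);
	if (status != DBG_STATUS_OK)
		return status;

	dump_buf = (u32 *)OSAL_VZALLOC(p_hwfn->p_dev,
				       DWORDS_TO_BYTES(buf_size_in_dwords));
	if (!dump_buf)
		return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;

	/* Phase 2: perform the dump into the allocated buffer */
	status = ecore_dbg_grc_dump(p_hwfn, p_ptt, dump_buf,
				    buf_size_in_dwords, &num_dumped_dwords);

	/* ... hand the first num_dumped_dwords dwords to the consumer ... */

	OSAL_VFREE(p_hwfn->p_dev, dump_buf);

	return status;
}
#endif /* ECORE_DBG_EXAMPLES */
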
enum dbg_status ecore_dbg_idle_chk_get_dump_buf_size(struct ecore_hwfn *p_hwfn,
						     struct ecore_ptt *p_ptt,
						     u32 *buf_size)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	struct idle_chk_data *idle_chk = &dev_data->idle_chk;
	enum dbg_status status;

	*buf_size = 0;

	status = ecore_dbg_dev_init(p_hwfn, p_ptt);
	if (status != DBG_STATUS_OK)
		return status;

	if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
	    !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr ||
	    !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr ||
	    !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr)
		return DBG_STATUS_DBG_ARRAY_NOT_SET;

	if (!idle_chk->buf_size_set) {
		idle_chk->buf_size = ecore_idle_chk_dump(p_hwfn, p_ptt,
							 OSAL_NULL, false);
		idle_chk->buf_size_set = true;
	}

	*buf_size = idle_chk->buf_size;

	return DBG_STATUS_OK;
}

enum dbg_status ecore_dbg_idle_chk_dump(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt,
					u32 *dump_buf,
					u32 buf_size_in_dwords,
					u32 *num_dumped_dwords)
{
	u32 needed_buf_size_in_dwords;
	enum dbg_status status;

	*num_dumped_dwords = 0;

	status = ecore_dbg_idle_chk_get_dump_buf_size(p_hwfn, p_ptt,
						      &needed_buf_size_in_dwords);
	if (status != DBG_STATUS_OK)
		return status;

	if (buf_size_in_dwords < needed_buf_size_in_dwords)
		return DBG_STATUS_DUMP_BUF_TOO_SMALL;

	/* Update reset state */
	ecore_update_blocks_reset_state(p_hwfn, p_ptt);

	/* Idle Check Dump */
	*num_dumped_dwords = ecore_idle_chk_dump(p_hwfn, p_ptt, dump_buf, true);

	/* Revert GRC params to their default */
	ecore_dbg_grc_set_params_default(p_hwfn);

	return DBG_STATUS_OK;
}

enum dbg_status ecore_dbg_mcp_trace_get_dump_buf_size(struct ecore_hwfn *p_hwfn,
						      struct ecore_ptt *p_ptt,
						      u32 *buf_size)
{
	enum dbg_status status = ecore_dbg_dev_init(p_hwfn, p_ptt);

	*buf_size = 0;

	if (status != DBG_STATUS_OK)
		return status;

	return ecore_mcp_trace_dump(p_hwfn, p_ptt, OSAL_NULL, false, buf_size);
}

enum dbg_status ecore_dbg_mcp_trace_dump(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt,
					 u32 *dump_buf,
					 u32 buf_size_in_dwords,
					 u32 *num_dumped_dwords)
{
	u32 needed_buf_size_in_dwords;
	enum dbg_status status;

	/* The required size can be computed even when reading the trace meta
	 * image from NVRAM fails, so that specific status is tolerated here.
	 */
	status = ecore_dbg_mcp_trace_get_dump_buf_size(p_hwfn, p_ptt,
						       &needed_buf_size_in_dwords);
	if (status != DBG_STATUS_OK &&
	    status != DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
		return status;

	if (buf_size_in_dwords < needed_buf_size_in_dwords)
		return DBG_STATUS_DUMP_BUF_TOO_SMALL;

	/* Update reset state */
	ecore_update_blocks_reset_state(p_hwfn, p_ptt);

	/* Perform dump */
	status = ecore_mcp_trace_dump(p_hwfn, p_ptt, dump_buf, true,
				      num_dumped_dwords);

	/* Revert GRC params to their default */
	ecore_dbg_grc_set_params_default(p_hwfn);

	return status;
}

enum dbg_status ecore_dbg_reg_fifo_get_dump_buf_size(struct ecore_hwfn *p_hwfn,
						     struct ecore_ptt *p_ptt,
						     u32 *buf_size)
{
	enum dbg_status status = ecore_dbg_dev_init(p_hwfn, p_ptt);

	*buf_size = 0;

	if (status != DBG_STATUS_OK)
		return status;

	return ecore_reg_fifo_dump(p_hwfn, p_ptt, OSAL_NULL, false, buf_size);
}

enum dbg_status ecore_dbg_reg_fifo_dump(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt,
					u32 *dump_buf,
					u32 buf_size_in_dwords,
					u32 *num_dumped_dwords)
{
	u32 needed_buf_size_in_dwords;
	enum dbg_status status;

	*num_dumped_dwords = 0;

	status = ecore_dbg_reg_fifo_get_dump_buf_size(p_hwfn, p_ptt,
						      &needed_buf_size_in_dwords);
	if (status != DBG_STATUS_OK)
		return status;

	if (buf_size_in_dwords < needed_buf_size_in_dwords)
		return DBG_STATUS_DUMP_BUF_TOO_SMALL;

	/* Update reset state */
	ecore_update_blocks_reset_state(p_hwfn, p_ptt);

	status = ecore_reg_fifo_dump(p_hwfn, p_ptt, dump_buf, true,
				     num_dumped_dwords);

	/* Revert GRC params to their default */
	ecore_dbg_grc_set_params_default(p_hwfn);

	return status;
}

enum dbg_status ecore_dbg_igu_fifo_get_dump_buf_size(struct ecore_hwfn *p_hwfn,
						     struct ecore_ptt *p_ptt,
						     u32 *buf_size)
{
	enum dbg_status status = ecore_dbg_dev_init(p_hwfn, p_ptt);

	*buf_size = 0;

	if (status != DBG_STATUS_OK)
		return status;

	return ecore_igu_fifo_dump(p_hwfn, p_ptt, OSAL_NULL, false, buf_size);
}

enum dbg_status ecore_dbg_igu_fifo_dump(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt,
					u32 *dump_buf,
					u32 buf_size_in_dwords,
					u32 *num_dumped_dwords)
{
	u32 needed_buf_size_in_dwords;
	enum dbg_status status;

	*num_dumped_dwords = 0;

	status = ecore_dbg_igu_fifo_get_dump_buf_size(p_hwfn, p_ptt,
						      &needed_buf_size_in_dwords);
	if (status != DBG_STATUS_OK)
		return status;

	if (buf_size_in_dwords < needed_buf_size_in_dwords)
		return DBG_STATUS_DUMP_BUF_TOO_SMALL;

	/* Update reset state */
	ecore_update_blocks_reset_state(p_hwfn, p_ptt);

	status = ecore_igu_fifo_dump(p_hwfn, p_ptt, dump_buf, true,
				     num_dumped_dwords);

	/* Revert GRC params to their default */
	ecore_dbg_grc_set_params_default(p_hwfn);

	return status;
}

enum dbg_status ecore_dbg_protection_override_get_dump_buf_size(struct ecore_hwfn *p_hwfn,
								struct ecore_ptt *p_ptt,
								u32 *buf_size)
{
	enum dbg_status status = ecore_dbg_dev_init(p_hwfn, p_ptt);

	*buf_size = 0;

	if (status != DBG_STATUS_OK)
		return status;

	return ecore_protection_override_dump(p_hwfn, p_ptt, OSAL_NULL,
					      false, buf_size);
}

enum dbg_status ecore_dbg_protection_override_dump(struct ecore_hwfn *p_hwfn,
						   struct ecore_ptt *p_ptt,
						   u32 *dump_buf,
						   u32 buf_size_in_dwords,
						   u32 *num_dumped_dwords)
{
	u32 needed_buf_size_in_dwords;
	enum dbg_status status;

	*num_dumped_dwords = 0;

	status = ecore_dbg_protection_override_get_dump_buf_size(p_hwfn, p_ptt,
								 &needed_buf_size_in_dwords);
	if (status != DBG_STATUS_OK)
		return status;

	if (buf_size_in_dwords < needed_buf_size_in_dwords)
		return DBG_STATUS_DUMP_BUF_TOO_SMALL;

	/* Update reset state */
	ecore_update_blocks_reset_state(p_hwfn, p_ptt);

	status = ecore_protection_override_dump(p_hwfn, p_ptt, dump_buf, true,
						num_dumped_dwords);

	/* Revert GRC params to their default */
	ecore_dbg_grc_set_params_default(p_hwfn);

	return status;
}

enum dbg_status ecore_dbg_fw_asserts_get_dump_buf_size(struct ecore_hwfn *p_hwfn,
						       struct ecore_ptt *p_ptt,
						       u32 *buf_size)
{
	enum dbg_status status = ecore_dbg_dev_init(p_hwfn, p_ptt);

	*buf_size = 0;

	if (status != DBG_STATUS_OK)
		return status;

	/* Update reset state */
	ecore_update_blocks_reset_state(p_hwfn, p_ptt);

	*buf_size = ecore_fw_asserts_dump(p_hwfn, p_ptt, OSAL_NULL, false);

	return DBG_STATUS_OK;
}

enum dbg_status ecore_dbg_fw_asserts_dump(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  u32 *dump_buf,
					  u32 buf_size_in_dwords,
					  u32 *num_dumped_dwords)
{
	u32 needed_buf_size_in_dwords;
	enum dbg_status status;

	*num_dumped_dwords = 0;

	status = ecore_dbg_fw_asserts_get_dump_buf_size(p_hwfn, p_ptt,
							&needed_buf_size_in_dwords);
	if (status != DBG_STATUS_OK)
		return status;

	if (buf_size_in_dwords < needed_buf_size_in_dwords)
		return DBG_STATUS_DUMP_BUF_TOO_SMALL;

	*num_dumped_dwords = ecore_fw_asserts_dump(p_hwfn, p_ptt, dump_buf,
						   true);

	/* Revert GRC params to their default */
	ecore_dbg_grc_set_params_default(p_hwfn);

	return DBG_STATUS_OK;
}

enum dbg_status ecore_dbg_read_attn(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt,
				    enum block_id block_id,
				    enum dbg_attn_type attn_type,
				    bool clear_status,
				    struct dbg_attn_block_result *results)
{
	enum dbg_status status = ecore_dbg_dev_init(p_hwfn, p_ptt);
	u8 reg_idx, num_attn_regs, num_result_regs = 0;
	const struct dbg_attn_reg *attn_reg_arr;

	if (status != DBG_STATUS_OK)
		return status;

	if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
	    !s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr ||
	    !s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)
		return DBG_STATUS_DBG_ARRAY_NOT_SET;

	attn_reg_arr = ecore_get_block_attn_regs(block_id, attn_type,
						 &num_attn_regs);

	for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
		const struct dbg_attn_reg *reg_data = &attn_reg_arr[reg_idx];
		struct dbg_attn_reg_result *reg_result;
		u32 sts_addr, sts_val;
		u16 modes_buf_offset;
		bool eval_mode;

		/* Check mode */
		eval_mode = GET_FIELD(reg_data->mode.data,
				      DBG_MODE_HDR_EVAL_MODE) > 0;
		modes_buf_offset = GET_FIELD(reg_data->mode.data,
					     DBG_MODE_HDR_MODES_BUF_OFFSET);
		if (eval_mode &&
		    !ecore_is_mode_match(p_hwfn, &modes_buf_offset))
			continue;

		/* Mode match - read attention status register */
		sts_addr = DWORDS_TO_BYTES(clear_status ?
					   reg_data->sts_clr_address :
					   GET_FIELD(reg_data->data,
						     DBG_ATTN_REG_STS_ADDRESS));
		sts_val = ecore_rd(p_hwfn, p_ptt, sts_addr);
		if (!sts_val)
			continue;

		/* Non-zero attention status - add to results */
		reg_result = &results->reg_results[num_result_regs];
		SET_FIELD(reg_result->data,
			  DBG_ATTN_REG_RESULT_STS_ADDRESS, sts_addr);
		SET_FIELD(reg_result->data, DBG_ATTN_REG_RESULT_NUM_REG_ATTN,
			  GET_FIELD(reg_data->data, DBG_ATTN_REG_NUM_REG_ATTN));
		reg_result->block_attn_offset = reg_data->block_attn_offset;
		reg_result->sts_val = sts_val;
		reg_result->mask_val = ecore_rd(p_hwfn, p_ptt,
						DWORDS_TO_BYTES(reg_data->mask_address));
		num_result_regs++;
	}

	results->block_id = (u8)block_id;
	results->names_offset =
		ecore_get_block_attn_data(block_id, attn_type)->names_offset;
	SET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_ATTN_TYPE, attn_type);
	SET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_NUM_REGS,
		  num_result_regs);

	return DBG_STATUS_OK;
}

enum dbg_status ecore_dbg_print_attn(struct ecore_hwfn *p_hwfn,
				     struct dbg_attn_block_result *results)
{
	enum dbg_attn_type attn_type;
	u8 num_regs, i;

	num_regs = GET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_NUM_REGS);
	attn_type = (enum dbg_attn_type)GET_FIELD(results->data,
						  DBG_ATTN_BLOCK_RESULT_ATTN_TYPE);

	for (i = 0; i < num_regs; i++) {
		struct dbg_attn_reg_result *reg_result;
		const char *attn_type_str;
		u32 sts_addr;

		reg_result = &results->reg_results[i];
		attn_type_str = (attn_type == ATTN_TYPE_INTERRUPT ?
				 "interrupt" : "parity");
		sts_addr = GET_FIELD(reg_result->data,
				     DBG_ATTN_REG_RESULT_STS_ADDRESS);
		DP_NOTICE(p_hwfn, false,
			  "%s: address 0x%08x, status 0x%08x, mask 0x%08x\n",
			  attn_type_str, sts_addr, reg_result->sts_val,
			  reg_result->mask_val);
	}

	return DBG_STATUS_OK;
}

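/*
 * Illustrative sketch, not part of the driver API: reading and logging the
 * interrupt attentions of a single block by pairing ecore_dbg_read_attn()
 * with ecore_dbg_print_attn(). The ECORE_DBG_EXAMPLES guard is hypothetical.
 */
#ifdef ECORE_DBG_EXAMPLES
static enum dbg_status ecore_dbg_attn_example(struct ecore_hwfn *p_hwfn,
					      struct ecore_ptt *p_ptt,
					      enum block_id block_id)
{
	struct dbg_attn_block_result results;
	enum dbg_status status;

	OSAL_MEMSET(&results, 0, sizeof(results));

	/* Read the block's interrupt attentions without clearing them */
	status = ecore_dbg_read_attn(p_hwfn, p_ptt, block_id,
				     ATTN_TYPE_INTERRUPT, false, &results);
	if (status != DBG_STATUS_OK)
		return status;

	/* Log each non-zero status register via DP_NOTICE */
	return ecore_dbg_print_attn(p_hwfn, &results);
}
#endif /* ECORE_DBG_EXAMPLES */
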
bool ecore_is_block_in_reset(struct ecore_hwfn *p_hwfn,
			     struct ecore_ptt *p_ptt,
			     enum block_id block_id)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	struct block_defs *block = s_block_defs[block_id];
	u32 reset_reg;

	if (!block->has_reset_bit)
		return false;

	reset_reg = block->reset_reg;

	return s_reset_regs_defs[reset_reg].exists[dev_data->chip_id] ?
	       !(ecore_rd(p_hwfn, p_ptt, s_reset_regs_defs[reset_reg].addr) &
		 (1 << block->reset_bit_offset)) : true;
}