xref: /linux/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c (revision 54fcc7f6ec3944ae7c1b0246a999744e33839cdb)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright(C) 2015-2018 Linaro Limited.
4  *
5  * Author: Tor Jeremiassen <tor@ti.com>
6  * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
7  */
8 
9 #include <asm/bug.h>
10 #include <linux/coresight-pmu.h>
11 #include <linux/err.h>
12 #include <linux/list.h>
13 #include <linux/zalloc.h>
14 #include <stdlib.h>
15 #include <opencsd/c_api/opencsd_c_api.h>
16 
17 #include "cs-etm.h"
18 #include "cs-etm-decoder.h"
19 #include "debug.h"
20 #include "intlist.h"
21 
22 /* use raw logging */
23 #ifdef CS_DEBUG_RAW
24 #define CS_LOG_RAW_FRAMES
25 #define CS_PKT_MON	1
26 #ifdef CS_RAW_PACKED
27 #define CS_RAW_DEBUG_FLAGS (OCSD_DFRMTR_UNPACKED_RAW_OUT | \
28 			    OCSD_DFRMTR_PACKED_RAW_OUT)
29 #else
30 #define CS_RAW_DEBUG_FLAGS (OCSD_DFRMTR_UNPACKED_RAW_OUT)
31 #endif
32 #else
33 #define CS_PKT_MON	0
34 #endif
35 
/*
 * Assume a maximum of 0.1ns elapsed per instruction. This would be the
 * case with a theoretical 10GHz core executing 1 instruction per cycle.
 * Used to estimate the sample time for synthesized instructions because
 * Coresight only emits a timestamp for a range of instructions rather
 * than per instruction.
 */
const u32 INSTR_PER_NS = 10;	/* upper bound of instructions retired per ns */
44 
45 struct cs_etm_decoder {
46 	void *data;
47 	void (*packet_printer)(const char *msg, void *data);
48 	bool suppress_printing;
49 	dcd_tree_handle_t dcd_tree;
50 	cs_etm_mem_cb_type mem_access;
51 	ocsd_datapath_resp_t prev_return;
52 	const char *decoder_name;
53 };
54 
55 static u32
56 cs_etm_decoder__mem_access(const void *context,
57 			   const ocsd_vaddr_t address,
58 			   const ocsd_mem_space_acc_t mem_space,
59 			   const u8 trace_chan_id,
60 			   const u32 req_size,
61 			   u8 *buffer)
62 {
63 	struct cs_etm_decoder *decoder = (struct cs_etm_decoder *) context;
64 
65 	return decoder->mem_access(decoder->data, trace_chan_id, address,
66 				   req_size, buffer, mem_space);
67 }
68 
69 int cs_etm_decoder__add_mem_access_cb(struct cs_etm_decoder *decoder,
70 				      u64 start, u64 end,
71 				      cs_etm_mem_cb_type cb_func)
72 {
73 	decoder->mem_access = cb_func;
74 
75 	if (ocsd_dt_add_callback_trcid_mem_acc(decoder->dcd_tree, start, end,
76 					       OCSD_MEM_SPACE_ANY,
77 					       cs_etm_decoder__mem_access,
78 					       decoder))
79 		return -1;
80 
81 	return 0;
82 }
83 
84 int cs_etm_decoder__reset(struct cs_etm_decoder *decoder)
85 {
86 	ocsd_datapath_resp_t dp_ret;
87 
88 	decoder->prev_return = OCSD_RESP_CONT;
89 	decoder->suppress_printing = true;
90 	dp_ret = ocsd_dt_process_data(decoder->dcd_tree, OCSD_OP_RESET,
91 				      0, 0, NULL, NULL);
92 	decoder->suppress_printing = false;
93 	if (OCSD_DATA_RESP_IS_FATAL(dp_ret))
94 		return -1;
95 
96 	return 0;
97 }
98 
99 int cs_etm_decoder__get_packet(struct cs_etm_packet_queue *packet_queue,
100 			       struct cs_etm_packet *packet)
101 {
102 	if (!packet_queue || !packet)
103 		return -EINVAL;
104 
105 	/* Nothing to do, might as well just return */
106 	if (packet_queue->packet_count == 0)
107 		return 0;
108 	/*
109 	 * The queueing process in function cs_etm_decoder__buffer_packet()
110 	 * increments the tail *before* using it.  This is somewhat counter
111 	 * intuitive but it has the advantage of centralizing tail management
112 	 * at a single location.  Because of that we need to follow the same
113 	 * heuristic with the head, i.e we increment it before using its
114 	 * value.  Otherwise the first element of the packet queue is not
115 	 * used.
116 	 */
117 	packet_queue->head = (packet_queue->head + 1) &
118 			     (CS_ETM_PACKET_MAX_BUFFER - 1);
119 
120 	*packet = packet_queue->packet_buffer[packet_queue->head];
121 
122 	packet_queue->packet_count--;
123 
124 	return 1;
125 }
126 
127 /*
128  * Calculate the number of nanoseconds elapsed.
129  *
130  * instr_count is updated in place with the remainder of the instructions
131  * which didn't make up a whole nanosecond.
132  */
133 static u32 cs_etm_decoder__dec_instr_count_to_ns(u32 *instr_count)
134 {
135 	const u32 instr_copy = *instr_count;
136 
137 	*instr_count %= INSTR_PER_NS;
138 	return instr_copy / INSTR_PER_NS;
139 }
140 
141 static int cs_etm_decoder__gen_etmv3_config(struct cs_etm_trace_params *params,
142 					    ocsd_etmv3_cfg *config)
143 {
144 	config->reg_idr = params->etmv3.reg_idr;
145 	config->reg_ctrl = params->etmv3.reg_ctrl;
146 	config->reg_ccer = params->etmv3.reg_ccer;
147 	config->reg_trc_id = params->etmv3.reg_trc_id;
148 	config->arch_ver = ARCH_V7;
149 	config->core_prof = profile_CortexA;
150 
151 	return 0;
152 }
153 
154 #define TRCIDR1_TRCARCHMIN_SHIFT 4
155 #define TRCIDR1_TRCARCHMIN_MASK  GENMASK(7, 4)
156 #define TRCIDR1_TRCARCHMIN(x)    (((x) & TRCIDR1_TRCARCHMIN_MASK) >> TRCIDR1_TRCARCHMIN_SHIFT)
157 
158 static enum _ocsd_arch_version cs_etm_decoder__get_etmv4_arch_ver(u32 reg_idr1)
159 {
160 	/*
161 	 * For ETMv4 if the trace minor version is 4 or more then we can assume
162 	 * the architecture is ARCH_AA64 rather than just V8.
163 	 * ARCH_V8 = V8 architecture
164 	 * ARCH_AA64 = Min v8r3 plus additional AA64 PE features
165 	 */
166 	return TRCIDR1_TRCARCHMIN(reg_idr1) >= 4 ? ARCH_AA64 : ARCH_V8;
167 }
168 
169 static void cs_etm_decoder__gen_etmv4_config(struct cs_etm_trace_params *params,
170 					     ocsd_etmv4_cfg *config)
171 {
172 	config->reg_configr = params->etmv4.reg_configr;
173 	config->reg_traceidr = params->etmv4.reg_traceidr;
174 	config->reg_idr0 = params->etmv4.reg_idr0;
175 	config->reg_idr1 = params->etmv4.reg_idr1;
176 	config->reg_idr2 = params->etmv4.reg_idr2;
177 	config->reg_idr8 = params->etmv4.reg_idr8;
178 	config->reg_idr9 = 0;
179 	config->reg_idr10 = 0;
180 	config->reg_idr11 = 0;
181 	config->reg_idr12 = 0;
182 	config->reg_idr13 = 0;
183 	config->arch_ver = cs_etm_decoder__get_etmv4_arch_ver(params->etmv4.reg_idr1);
184 	config->core_prof = profile_CortexA;
185 }
186 
187 static void cs_etm_decoder__gen_ete_config(struct cs_etm_trace_params *params,
188 					   ocsd_ete_cfg *config)
189 {
190 	config->reg_configr = params->ete.reg_configr;
191 	config->reg_traceidr = params->ete.reg_traceidr;
192 	config->reg_idr0 = params->ete.reg_idr0;
193 	config->reg_idr1 = params->ete.reg_idr1;
194 	config->reg_idr2 = params->ete.reg_idr2;
195 	config->reg_idr8 = params->ete.reg_idr8;
196 	config->reg_devarch = params->ete.reg_devarch;
197 	config->arch_ver = ARCH_AA64;
198 	config->core_prof = profile_CortexA;
199 }
200 
201 static void cs_etm_decoder__print_str_cb(const void *p_context,
202 					 const char *msg,
203 					 const int str_len)
204 {
205 	const struct cs_etm_decoder *decoder = p_context;
206 
207 	if (p_context && str_len && !decoder->suppress_printing)
208 		decoder->packet_printer(msg, decoder->data);
209 }
210 
211 static int
212 cs_etm_decoder__init_def_logger_printing(struct cs_etm_decoder_params *d_params,
213 					 struct cs_etm_decoder *decoder)
214 {
215 	int ret = 0;
216 
217 	if (d_params->packet_printer == NULL)
218 		return -1;
219 
220 	decoder->packet_printer = d_params->packet_printer;
221 
222 	/*
223 	 * Set up a library default logger to process any printers
224 	 * (packet/raw frame) we add later.
225 	 */
226 	ret = ocsd_def_errlog_init(OCSD_ERR_SEV_ERROR, 1);
227 	if (ret != 0)
228 		return -1;
229 
230 	/* no stdout / err / file output */
231 	ret = ocsd_def_errlog_config_output(C_API_MSGLOGOUT_FLG_NONE, NULL);
232 	if (ret != 0)
233 		return -1;
234 
235 	/*
236 	 * Set the string CB for the default logger, passes strings to
237 	 * perf print logger.
238 	 */
239 	ret = ocsd_def_errlog_set_strprint_cb(decoder->dcd_tree,
240 					      (void *)decoder,
241 					      cs_etm_decoder__print_str_cb);
242 	if (ret != 0)
243 		return -1;
244 
245 #ifdef CS_LOG_RAW_FRAMES
246 	/*
247 	 * Only log raw frames if --dump operation and hardware is actually
248 	 * generating formatted CoreSight trace frames
249 	 */
250 	if ((d_params->operation == CS_ETM_OPERATION_PRINT) &&
251 	    (d_params->formatted == true)) {
252 		/* use the built in library printer for the raw frames */
253 		ret = ocsd_dt_set_raw_frame_printer(decoder->dcd_tree,
254 						    CS_RAW_DEBUG_FLAGS);
255 		if (ret != 0)
256 			return -1;
257 	}
258 #endif
259 	return 0;
260 }
261 
262 static ocsd_datapath_resp_t
263 cs_etm_decoder__do_soft_timestamp(struct cs_etm_queue *etmq,
264 				  struct cs_etm_packet_queue *packet_queue,
265 				  const uint8_t trace_chan_id)
266 {
267 	u64 estimated_ts;
268 
269 	/* No timestamp packet has been received, nothing to do */
270 	if (!packet_queue->next_cs_timestamp)
271 		return OCSD_RESP_CONT;
272 
273 	estimated_ts = packet_queue->cs_timestamp +
274 			cs_etm_decoder__dec_instr_count_to_ns(&packet_queue->instr_count);
275 
276 	/* Estimated TS can never be higher than the next real one in the trace */
277 	packet_queue->cs_timestamp = min(packet_queue->next_cs_timestamp, estimated_ts);
278 
279 	/* Tell the front end which traceid_queue needs attention */
280 	cs_etm__etmq_set_traceid_queue_timestamp(etmq, trace_chan_id);
281 
282 	return OCSD_RESP_WAIT;
283 }
284 
/*
 * Handle a TIMESTAMP trace element for one trace channel.
 *
 * Converts the raw CoreSight timer value to ns, stores it as the
 * channel's next timestamp and (except when simply recording a
 * follow-up timestamp) tells the front end to process the packets
 * queued so far.
 *
 * Returns OCSD_RESP_CONT to keep decoding, OCSD_RESP_WAIT to pause the
 * decoder until the front end has drained the queue, or
 * OCSD_RESP_FATAL_SYS_ERR if the channel has no packet queue.
 */
static ocsd_datapath_resp_t
cs_etm_decoder__do_hard_timestamp(struct cs_etm_queue *etmq,
				  const ocsd_generic_trace_elem *elem,
				  const uint8_t trace_chan_id,
				  const ocsd_trc_index_t indx)
{
	struct cs_etm_packet_queue *packet_queue;
	u64 converted_timestamp;
	u64 estimated_first_ts;

	/* First get the packet queue for this traceID */
	packet_queue = cs_etm__etmq_get_packet_queue(etmq, trace_chan_id);
	if (!packet_queue)
		return OCSD_RESP_FATAL_SYS_ERR;

	/*
	 * Coresight timestamps are raw timer values which need to be scaled to ns. Assume
	 * 0 is a bad value so don't try to convert it.
	 */
	converted_timestamp = elem->timestamp ?
				cs_etm__convert_sample_time(etmq, elem->timestamp) : 0;

	/*
	 * We've seen a timestamp packet before - simply record the new value.
	 * Function do_soft_timestamp() will report the value to the front end,
	 * hence asking the decoder to keep decoding rather than stopping.
	 */
	if (packet_queue->next_cs_timestamp) {
		/*
		 * What was next is now where new ranges start from, overwriting
		 * any previous estimate in cs_timestamp
		 */
		packet_queue->cs_timestamp = packet_queue->next_cs_timestamp;
		packet_queue->next_cs_timestamp = converted_timestamp;
		return OCSD_RESP_CONT;
	}

	/* First timestamp for this channel since start or discontinuity. */
	if (!converted_timestamp) {
		/*
		 * Zero timestamps can be seen due to misconfiguration or hardware bugs.
		 * Warn once, and don't try to subtract instr_count as it would result in an
		 * underflow.
		 */
		packet_queue->cs_timestamp = 0;
		if (!cs_etm__etmq_is_timeless(etmq))
			pr_warning_once("Zero Coresight timestamp found at Idx:%" OCSD_TRC_IDX_STR
					". Decoding may be improved by prepending 'Z' to your current --itrace arguments.\n",
					indx);

	} else if (packet_queue->instr_count / INSTR_PER_NS > converted_timestamp) {
		/*
		 * Sanity check that the elem->timestamp - packet_queue->instr_count would not
		 * result in an underflow. Warn and clamp at 0 if it would.
		 */
		packet_queue->cs_timestamp = 0;
		pr_err("Timestamp calculation underflow at Idx:%" OCSD_TRC_IDX_STR "\n", indx);
	} else {
		/*
		 * This is the first timestamp we've seen since the beginning of traces
		 * or a discontinuity.  Since timestamps packets are generated *after*
		 * range packets have been generated, we need to estimate the time at
		 * which instructions started by subtracting the number of instructions
		 * executed to the timestamp. Don't estimate earlier than the last used
		 * timestamp though.
		 */
		estimated_first_ts = converted_timestamp -
					(packet_queue->instr_count / INSTR_PER_NS);
		packet_queue->cs_timestamp = max(packet_queue->cs_timestamp, estimated_first_ts);
	}
	packet_queue->next_cs_timestamp = converted_timestamp;
	packet_queue->instr_count = 0;

	/* Tell the front end which traceid_queue needs attention */
	cs_etm__etmq_set_traceid_queue_timestamp(etmq, trace_chan_id);

	/* Halt processing until we are being told to proceed */
	return OCSD_RESP_WAIT;
}
363 
364 static void
365 cs_etm_decoder__reset_timestamp(struct cs_etm_packet_queue *packet_queue)
366 {
367 	packet_queue->next_cs_timestamp = 0;
368 	packet_queue->instr_count = 0;
369 }
370 
371 static ocsd_datapath_resp_t
372 cs_etm_decoder__buffer_packet(struct cs_etm_queue *etmq,
373 			      struct cs_etm_packet_queue *packet_queue,
374 			      const u8 trace_chan_id,
375 			      enum cs_etm_sample_type sample_type)
376 {
377 	u32 et = 0;
378 	int cpu;
379 
380 	if (packet_queue->packet_count >= CS_ETM_PACKET_MAX_BUFFER - 1)
381 		return OCSD_RESP_FATAL_SYS_ERR;
382 
383 	if (cs_etm__get_cpu(etmq, trace_chan_id, &cpu) < 0)
384 		return OCSD_RESP_FATAL_SYS_ERR;
385 
386 	et = packet_queue->tail;
387 	et = (et + 1) & (CS_ETM_PACKET_MAX_BUFFER - 1);
388 	packet_queue->tail = et;
389 	packet_queue->packet_count++;
390 
391 	packet_queue->packet_buffer[et].sample_type = sample_type;
392 	packet_queue->packet_buffer[et].isa = CS_ETM_ISA_UNKNOWN;
393 	packet_queue->packet_buffer[et].cpu = cpu;
394 	packet_queue->packet_buffer[et].start_addr = CS_ETM_INVAL_ADDR;
395 	packet_queue->packet_buffer[et].end_addr = CS_ETM_INVAL_ADDR;
396 	packet_queue->packet_buffer[et].instr_count = 0;
397 	packet_queue->packet_buffer[et].last_instr_taken_branch = false;
398 	packet_queue->packet_buffer[et].last_instr_size = 0;
399 	packet_queue->packet_buffer[et].last_instr_type = 0;
400 	packet_queue->packet_buffer[et].last_instr_subtype = 0;
401 	packet_queue->packet_buffer[et].last_instr_cond = 0;
402 	packet_queue->packet_buffer[et].flags = 0;
403 	packet_queue->packet_buffer[et].exception_number = UINT32_MAX;
404 	packet_queue->packet_buffer[et].trace_chan_id = trace_chan_id;
405 
406 	if (packet_queue->packet_count == CS_ETM_PACKET_MAX_BUFFER - 1)
407 		return OCSD_RESP_WAIT;
408 
409 	return OCSD_RESP_CONT;
410 }
411 
412 static ocsd_datapath_resp_t
413 cs_etm_decoder__buffer_range(struct cs_etm_queue *etmq,
414 			     struct cs_etm_packet_queue *packet_queue,
415 			     const ocsd_generic_trace_elem *elem,
416 			     const uint8_t trace_chan_id)
417 {
418 	int ret = 0;
419 	struct cs_etm_packet *packet;
420 
421 	ret = cs_etm_decoder__buffer_packet(etmq, packet_queue, trace_chan_id,
422 					    CS_ETM_RANGE);
423 	if (ret != OCSD_RESP_CONT && ret != OCSD_RESP_WAIT)
424 		return ret;
425 
426 	packet = &packet_queue->packet_buffer[packet_queue->tail];
427 
428 	switch (elem->isa) {
429 	case ocsd_isa_aarch64:
430 		packet->isa = CS_ETM_ISA_A64;
431 		break;
432 	case ocsd_isa_arm:
433 		packet->isa = CS_ETM_ISA_A32;
434 		break;
435 	case ocsd_isa_thumb2:
436 		packet->isa = CS_ETM_ISA_T32;
437 		break;
438 	case ocsd_isa_tee:
439 	case ocsd_isa_jazelle:
440 	case ocsd_isa_custom:
441 	case ocsd_isa_unknown:
442 	default:
443 		packet->isa = CS_ETM_ISA_UNKNOWN;
444 	}
445 
446 	packet->start_addr = elem->st_addr;
447 	packet->end_addr = elem->en_addr;
448 	packet->instr_count = elem->num_instr_range;
449 	packet->last_instr_type = elem->last_i_type;
450 	packet->last_instr_subtype = elem->last_i_subtype;
451 	packet->last_instr_cond = elem->last_instr_cond;
452 
453 	if (elem->last_i_type == OCSD_INSTR_BR || elem->last_i_type == OCSD_INSTR_BR_INDIRECT)
454 		packet->last_instr_taken_branch = elem->last_instr_exec;
455 	else
456 		packet->last_instr_taken_branch = false;
457 
458 	packet->last_instr_size = elem->last_instr_sz;
459 
460 	/* per-thread scenario, no need to generate a timestamp */
461 	if (cs_etm__etmq_is_timeless(etmq))
462 		goto out;
463 
464 	/*
465 	 * The packet queue is full and we haven't seen a timestamp (had we
466 	 * seen one the packet queue wouldn't be full).  Let the front end
467 	 * deal with it.
468 	 */
469 	if (ret == OCSD_RESP_WAIT)
470 		goto out;
471 
472 	packet_queue->instr_count += elem->num_instr_range;
473 	/* Tell the front end we have a new timestamp to process */
474 	ret = cs_etm_decoder__do_soft_timestamp(etmq, packet_queue,
475 						trace_chan_id);
476 out:
477 	return ret;
478 }
479 
480 static ocsd_datapath_resp_t
481 cs_etm_decoder__buffer_discontinuity(struct cs_etm_queue *etmq,
482 				     struct cs_etm_packet_queue *queue,
483 				     const uint8_t trace_chan_id)
484 {
485 	/*
486 	 * Something happened and who knows when we'll get new traces so
487 	 * reset time statistics.
488 	 */
489 	cs_etm_decoder__reset_timestamp(queue);
490 	return cs_etm_decoder__buffer_packet(etmq, queue, trace_chan_id,
491 					     CS_ETM_DISCONTINUITY);
492 }
493 
494 static ocsd_datapath_resp_t
495 cs_etm_decoder__buffer_exception(struct cs_etm_queue *etmq,
496 				 struct cs_etm_packet_queue *queue,
497 				 const ocsd_generic_trace_elem *elem,
498 				 const uint8_t trace_chan_id)
499 {	int ret = 0;
500 	struct cs_etm_packet *packet;
501 
502 	ret = cs_etm_decoder__buffer_packet(etmq, queue, trace_chan_id,
503 					    CS_ETM_EXCEPTION);
504 	if (ret != OCSD_RESP_CONT && ret != OCSD_RESP_WAIT)
505 		return ret;
506 
507 	packet = &queue->packet_buffer[queue->tail];
508 	packet->exception_number = elem->exception_number;
509 
510 	return ret;
511 }
512 
/*
 * Queue a CS_ETM_EXCEPTION_RET packet; no payload beyond the sample
 * type is needed.  Returns the datapath response from
 * cs_etm_decoder__buffer_packet().
 */
static ocsd_datapath_resp_t
cs_etm_decoder__buffer_exception_ret(struct cs_etm_queue *etmq,
				     struct cs_etm_packet_queue *queue,
				     const uint8_t trace_chan_id)
{
	return cs_etm_decoder__buffer_packet(etmq, queue, trace_chan_id,
					     CS_ETM_EXCEPTION_RET);
}
521 
522 static ocsd_datapath_resp_t
523 cs_etm_decoder__set_tid(struct cs_etm_queue *etmq,
524 			struct cs_etm_packet_queue *packet_queue,
525 			const ocsd_generic_trace_elem *elem,
526 			const uint8_t trace_chan_id)
527 {
528 	pid_t tid = -1;
529 
530 	/*
531 	 * Process the PE_CONTEXT packets if we have a valid contextID or VMID.
532 	 * If the kernel is running at EL2, the PID is traced in CONTEXTIDR_EL2
533 	 * as VMID, Format attribute 'contextid2' is set in this case.
534 	 */
535 	switch (cs_etm__get_pid_fmt(etmq)) {
536 	case CS_ETM_PIDFMT_CTXTID:
537 		if (elem->context.ctxt_id_valid)
538 			tid = elem->context.context_id;
539 		break;
540 	case CS_ETM_PIDFMT_CTXTID2:
541 		if (elem->context.vmid_valid)
542 			tid = elem->context.vmid;
543 		break;
544 	case CS_ETM_PIDFMT_NONE:
545 	default:
546 		break;
547 	}
548 
549 	if (cs_etm__etmq_set_tid_el(etmq, tid, trace_chan_id,
550 				    elem->context.exception_level))
551 		return OCSD_RESP_FATAL_SYS_ERR;
552 
553 	if (tid == -1)
554 		return OCSD_RESP_CONT;
555 
556 	/*
557 	 * A timestamp is generated after a PE_CONTEXT element so make sure
558 	 * to rely on that coming one.
559 	 */
560 	cs_etm_decoder__reset_timestamp(packet_queue);
561 
562 	return OCSD_RESP_CONT;
563 }
564 
565 static ocsd_datapath_resp_t cs_etm_decoder__gen_trace_elem_printer(
566 				const void *context,
567 				const ocsd_trc_index_t indx,
568 				const u8 trace_chan_id __maybe_unused,
569 				const ocsd_generic_trace_elem *elem)
570 {
571 	ocsd_datapath_resp_t resp = OCSD_RESP_CONT;
572 	ocsd_gen_trc_elem_t type;
573 	struct cs_etm_decoder *decoder = (struct cs_etm_decoder *) context;
574 	struct cs_etm_queue *etmq = decoder->data;
575 	struct cs_etm_packet_queue *packet_queue;
576 
577 	/* First get the packet queue for this traceID */
578 	packet_queue = cs_etm__etmq_get_packet_queue(etmq, trace_chan_id);
579 	if (!packet_queue)
580 		return OCSD_RESP_FATAL_SYS_ERR;
581 
582 	type = elem->elem_type;
583 
584 	if (type == OCSD_GEN_TRC_ELEM_EO_TRACE ||
585 	    type == OCSD_GEN_TRC_ELEM_NO_SYNC ||
586 	    type == OCSD_GEN_TRC_ELEM_TRACE_ON)
587 		resp = cs_etm_decoder__buffer_discontinuity(etmq, packet_queue,
588 							    trace_chan_id);
589 	else if (type == OCSD_GEN_TRC_ELEM_INSTR_RANGE)
590 		resp = cs_etm_decoder__buffer_range(etmq, packet_queue, elem,
591 						    trace_chan_id);
592 	else if (type == OCSD_GEN_TRC_ELEM_EXCEPTION)
593 		resp = cs_etm_decoder__buffer_exception(etmq, packet_queue, elem,
594 							trace_chan_id);
595 	else if (type == OCSD_GEN_TRC_ELEM_EXCEPTION_RET)
596 		resp = cs_etm_decoder__buffer_exception_ret(etmq, packet_queue,
597 							    trace_chan_id);
598 	else if (type == OCSD_GEN_TRC_ELEM_TIMESTAMP)
599 		resp = cs_etm_decoder__do_hard_timestamp(etmq, elem,
600 							 trace_chan_id,
601 							 indx);
602 	else if (type == OCSD_GEN_TRC_ELEM_PE_CONTEXT)
603 		resp = cs_etm_decoder__set_tid(etmq, packet_queue,
604 					       elem, trace_chan_id);
605 
606 	return resp;
607 }
608 
/*
 * Create one OpenCSD decoder inside decoder->dcd_tree for the trace
 * source described by t_params.
 *
 * d_params->operation selects either a full decoder whose generic trace
 * elements go to cs_etm_decoder__gen_trace_elem_printer()
 * (CS_ETM_OPERATION_DECODE) or a packet processor hooked to the library
 * packet printer (CS_ETM_OPERATION_PRINT).
 *
 * Returns 0 on success, -1 on unknown protocol/operation or any OpenCSD
 * failure.
 */
static int
cs_etm_decoder__create_etm_decoder(struct cs_etm_decoder_params *d_params,
				   struct cs_etm_trace_params *t_params,
				   struct cs_etm_decoder *decoder)
{
	ocsd_etmv3_cfg config_etmv3;
	ocsd_etmv4_cfg trace_config_etmv4;
	ocsd_ete_cfg trace_config_ete;
	void *trace_config;
	u8 csid;

	/*
	 * Build the protocol specific register configuration, pick the
	 * matching builtin decoder name and extract the CoreSight trace
	 * ID the stream was recorded with.
	 */
	switch (t_params->protocol) {
	case CS_ETM_PROTO_ETMV3:
	case CS_ETM_PROTO_PTM:
		csid = (t_params->etmv3.reg_idr & CORESIGHT_TRACE_ID_VAL_MASK);
		cs_etm_decoder__gen_etmv3_config(t_params, &config_etmv3);
		decoder->decoder_name = (t_params->protocol == CS_ETM_PROTO_ETMV3) ?
							OCSD_BUILTIN_DCD_ETMV3 :
							OCSD_BUILTIN_DCD_PTM;
		trace_config = &config_etmv3;
		break;
	case CS_ETM_PROTO_ETMV4i:
		csid = (t_params->etmv4.reg_traceidr & CORESIGHT_TRACE_ID_VAL_MASK);
		cs_etm_decoder__gen_etmv4_config(t_params, &trace_config_etmv4);
		decoder->decoder_name = OCSD_BUILTIN_DCD_ETMV4I;
		trace_config = &trace_config_etmv4;
		break;
	case CS_ETM_PROTO_ETE:
		csid = (t_params->ete.reg_traceidr & CORESIGHT_TRACE_ID_VAL_MASK);
		cs_etm_decoder__gen_ete_config(t_params, &trace_config_ete);
		decoder->decoder_name = OCSD_BUILTIN_DCD_ETE;
		trace_config = &trace_config_ete;
		break;
	default:
		return -1;
	}

	if (d_params->operation == CS_ETM_OPERATION_DECODE) {
		int decode_flags = OCSD_CREATE_FLG_FULL_DECODER;
#ifdef OCSD_OPFLG_N_UNCOND_DIR_BR_CHK
		/* Extra decode consistency checks when the library provides them. */
		decode_flags |= OCSD_OPFLG_N_UNCOND_DIR_BR_CHK | OCSD_OPFLG_CHK_RANGE_CONTINUE |
				ETM4_OPFLG_PKTDEC_AA64_OPCODE_CHK;
#endif
		if (ocsd_dt_create_decoder(decoder->dcd_tree,
					   decoder->decoder_name,
					   decode_flags,
					   trace_config, &csid))
			return -1;

		/* Route generic trace elements to our element handler. */
		if (ocsd_dt_set_gen_elem_outfn(decoder->dcd_tree,
					       cs_etm_decoder__gen_trace_elem_printer,
					       decoder))
			return -1;

		return 0;
	} else if (d_params->operation == CS_ETM_OPERATION_PRINT) {
		if (ocsd_dt_create_decoder(decoder->dcd_tree, decoder->decoder_name,
					   OCSD_CREATE_FLG_PACKET_PROC,
					   trace_config, &csid))
			return -1;

		/* Library packet printer; CS_PKT_MON selects monitor mode. */
		if (ocsd_dt_set_pkt_protocol_printer(decoder->dcd_tree, csid, CS_PKT_MON))
			return -1;

		return 0;
	}

	return -1;
}
678 
/*
 * Allocate a decoder container and create one OpenCSD decoder per entry
 * of t_params[] inside a single decode tree.
 *
 * Returns the new decoder, or NULL on bad arguments or any setup
 * failure (everything allocated so far is released on the error path).
 * The caller frees the result with cs_etm_decoder__free().
 */
struct cs_etm_decoder *
cs_etm_decoder__new(int decoders, struct cs_etm_decoder_params *d_params,
		    struct cs_etm_trace_params t_params[])
{
	struct cs_etm_decoder *decoder;
	ocsd_dcd_tree_src_t format;
	u32 flags;
	int i, ret;

	if ((!t_params) || (!d_params))
		return NULL;

	decoder = zalloc(sizeof(*decoder));

	if (!decoder)
		return NULL;

	decoder->data = d_params->data;
	decoder->prev_return = OCSD_RESP_CONT;
	/* Frame-formatted data interleaves sources; otherwise one raw stream. */
	format = (d_params->formatted ? OCSD_TRC_SRC_FRAME_FORMATTED :
					 OCSD_TRC_SRC_SINGLE);
	flags = 0;
	flags |= (d_params->fsyncs ? OCSD_DFRMTR_HAS_FSYNCS : 0);
	flags |= (d_params->hsyncs ? OCSD_DFRMTR_HAS_HSYNCS : 0);
	flags |= (d_params->frame_aligned ? OCSD_DFRMTR_FRAME_MEM_ALIGN : 0);

	/*
	 * Drivers may add barrier frames when used with perf, set up to
	 * handle this. Barriers consist of an FSYNC packet repeated 4 times.
	 */
	flags |= OCSD_DFRMTR_RESET_ON_4X_FSYNC;

	/* Create decode tree for the data source */
	decoder->dcd_tree = ocsd_create_dcd_tree(format, flags);

	if (decoder->dcd_tree == 0)
		goto err_free_decoder;

	/* init library print logging support */
	ret = cs_etm_decoder__init_def_logger_printing(d_params, decoder);
	if (ret != 0)
		goto err_free_decoder;

	for (i = 0; i < decoders; i++) {
		ret = cs_etm_decoder__create_etm_decoder(d_params,
							 &t_params[i],
							 decoder);
		if (ret != 0)
			goto err_free_decoder;
	}

	return decoder;

err_free_decoder:
	cs_etm_decoder__free(decoder);
	return NULL;
}
736 
/*
 * Push a block of raw trace data through the decode tree.
 *
 * Processing resumes according to the response the datapath gave last
 * time (decoder->prev_return): a pending WAIT triggers a flush first,
 * CONT feeds more data.  The loop stops early when the library asks to
 * wait again (packet buffers full) so the caller can drain the packet
 * queues before calling back in with the remaining bytes.
 *
 * *consumed is set to the number of bytes actually processed.  Returns
 * 0 on success, -EINVAL when the previous response was neither WAIT nor
 * CONT (i.e. an error response).
 */
int cs_etm_decoder__process_data_block(struct cs_etm_decoder *decoder,
				       u64 indx, const u8 *buf,
				       size_t len, size_t *consumed)
{
	int ret = 0;
	ocsd_datapath_resp_t cur = OCSD_RESP_CONT;
	ocsd_datapath_resp_t prev_return = decoder->prev_return;
	size_t processed = 0;
	u32 count;

	while (processed < len) {
		if (OCSD_DATA_RESP_IS_WAIT(prev_return)) {
			/* Complete the work paused on a previous WAIT. */
			cur = ocsd_dt_process_data(decoder->dcd_tree,
						   OCSD_OP_FLUSH,
						   0,
						   0,
						   NULL,
						   NULL);
		} else if (OCSD_DATA_RESP_IS_CONT(prev_return)) {
			cur = ocsd_dt_process_data(decoder->dcd_tree,
						   OCSD_OP_DATA,
						   indx + processed,
						   len - processed,
						   &buf[processed],
						   &count);
			/* count = bytes the library consumed this call. */
			processed += count;
		} else {
			ret = -EINVAL;
			break;
		}

		/*
		 * Return to the input code if the packet buffer is full.
		 * Flushing will get done once the packet buffer has been
		 * processed.
		 */
		if (OCSD_DATA_RESP_IS_WAIT(cur))
			break;

		prev_return = cur;
	}

	decoder->prev_return = cur;
	*consumed = processed;

	return ret;
}
784 
/*
 * Free a decoder created with cs_etm_decoder__new(), releasing its
 * decode tree first.  A NULL decoder is a no-op.
 */
void cs_etm_decoder__free(struct cs_etm_decoder *decoder)
{
	if (!decoder)
		return;

	ocsd_destroy_dcd_tree(decoder->dcd_tree);
	decoder->dcd_tree = NULL;
	free(decoder);
}
794 
/* Name of the builtin OpenCSD decoder selected at creation time. */
const char *cs_etm_decoder__get_name(struct cs_etm_decoder *decoder)
{
	return decoder->decoder_name;
}
799