/*
 * Copyright (c) 2013-2019, Intel Corporation
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *  * Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *  * Neither the name of Intel Corporation nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "pt_insn_decoder.h"
#include "pt_insn.h"
#include "pt_config.h"
#include "pt_asid.h"
#include "pt_compiler.h"

#include "intel-pt.h"

#include <string.h>
#include <stdlib.h>


static int pt_insn_check_ip_event(struct pt_insn_decoder *,
				  const struct pt_insn *,
				  const struct pt_insn_ext *);


static void pt_insn_reset(struct pt_insn_decoder *decoder)
{
	if (!decoder)
		return;

	decoder->mode = ptem_unknown;
	decoder->ip = 0ull;
	decoder->status = 0;
	decoder->enabled = 0;
	decoder->process_event = 0;
	decoder->speculative = 0;
	decoder->process_insn = 0;
	decoder->bound_paging = 0;
	decoder->bound_vmcs = 0;
	decoder->bound_ptwrite = 0;

	pt_retstack_init(&decoder->retstack);
	pt_asid_init(&decoder->asid);
}

static int pt_insn_status(const struct pt_insn_decoder *decoder, int flags)
{
	int status;

	if (!decoder)
		return -pte_internal;

	status = decoder->status;

	/* Indicate whether tracing is disabled or enabled.
	 *
	 * This duplicates the indication in struct pt_insn and covers the case
	 * where we indicate the status after synchronizing.
	 */
	if (!decoder->enabled)
		flags |= pts_ip_suppressed;

	/* Forward end-of-trace indications.
	 *
	 * Postpone it as long as we're still processing events, though.
	 */
	if ((status & pts_eos) && !decoder->process_event)
		flags |= pts_eos;

	return flags;
}

/* Initialize the query decoder flags based on our flags. */

static int pt_insn_init_qry_flags(struct pt_conf_flags *qflags,
				  const struct pt_conf_flags *flags)
{
	if (!qflags || !flags)
		return -pte_internal;

	memset(qflags, 0, sizeof(*qflags));
	qflags->variant.query.keep_tcal_on_ovf =
		flags->variant.insn.keep_tcal_on_ovf;

	return 0;
}

int pt_insn_decoder_init(struct pt_insn_decoder *decoder,
			 const struct pt_config *uconfig)
{
	struct pt_config config;
	int errcode;

	if (!decoder)
		return -pte_internal;

	errcode = pt_config_from_user(&config, uconfig);
	if (errcode < 0)
		return errcode;

	/* The user supplied decoder flags. */
	decoder->flags = config.flags;

	/* Set the flags we need for the query decoder we use. */
	errcode = pt_insn_init_qry_flags(&config.flags, &decoder->flags);
	if (errcode < 0)
		return errcode;

	errcode = pt_qry_decoder_init(&decoder->query, &config);
	if (errcode < 0)
		return errcode;

	pt_image_init(&decoder->default_image, NULL);
	decoder->image = &decoder->default_image;

	errcode = pt_msec_cache_init(&decoder->scache);
	if (errcode < 0)
		return errcode;

	pt_insn_reset(decoder);

	return 0;
}

void pt_insn_decoder_fini(struct pt_insn_decoder *decoder)
{
	if (!decoder)
		return;

	pt_msec_cache_fini(&decoder->scache);
	pt_image_fini(&decoder->default_image);
	pt_qry_decoder_fini(&decoder->query);
}

struct pt_insn_decoder *pt_insn_alloc_decoder(const struct pt_config *config)
{
	struct pt_insn_decoder *decoder;
	int errcode;

	decoder = malloc(sizeof(*decoder));
	if (!decoder)
		return NULL;

	errcode = pt_insn_decoder_init(decoder, config);
	if (errcode < 0) {
		free(decoder);
		return NULL;
	}

	return decoder;
}

void pt_insn_free_decoder(struct pt_insn_decoder *decoder)
{
	if (!decoder)
		return;

	pt_insn_decoder_fini(decoder);
	free(decoder);
}
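
/* A minimal usage sketch of the allocation and synchronization API above.
 * Illustrative only and compiled out; it assumes a pt_config that the
 * caller has already set up, e.g. via pt_config_init(), with a trace
 * buffer attached.
 */
#if 0
static int example_lifecycle(const struct pt_config *config)
{
	struct pt_insn_decoder *decoder;
	struct pt_insn insn;
	int status;

	decoder = pt_insn_alloc_decoder(config);
	if (!decoder)
		return -pte_nomem;

	/* Find the next synchronization point and decode from there.
	 *
	 * A real caller must also drain pending events; see the decode
	 * loop sketch at the end of this file.
	 */
	status = pt_insn_sync_forward(decoder);
	while (status >= 0)
		status = pt_insn_next(decoder, &insn, sizeof(insn));

	pt_insn_free_decoder(decoder);

	return (status == -pte_eos) ? 0 : status;
}
#endif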

/* Maybe synthesize a tick event.
 *
 * If we're not already processing events, check the current time against the
 * last event's time.  If it changed, synthesize a tick event with the new time.
 *
 * Returns zero if no tick event has been created.
 * Returns a positive integer if a tick event has been created.
 * Returns a negative error code otherwise.
 */
static int pt_insn_tick(struct pt_insn_decoder *decoder, uint64_t ip)
{
	struct pt_event *ev;
	uint64_t tsc;
	uint32_t lost_mtc, lost_cyc;
	int errcode;

	if (!decoder)
		return -pte_internal;

	/* We're not generating tick events if tracing is disabled. */
	if (!decoder->enabled)
		return -pte_internal;

	/* Events already provide a timestamp so there is no need to synthesize
	 * an artificial tick event.  There's no room, either, since this would
	 * overwrite the in-progress event.
	 *
	 * In rare cases where we need to proceed to an event location using
	 * trace this may cause us to miss a timing update if the event is not
	 * forwarded to the user.
	 *
	 * The only case I can come up with at the moment is a MODE.EXEC binding
	 * to the TIP IP of a far branch.
	 */
	if (decoder->process_event)
		return 0;

	errcode = pt_qry_time(&decoder->query, &tsc, &lost_mtc, &lost_cyc);
	if (errcode < 0) {
		/* If we don't have wall-clock time, we use relative time. */
		if (errcode != -pte_no_time)
			return errcode;
	}

	ev = &decoder->event;

	/* We're done if time has not changed since the last event. */
	if (tsc == ev->tsc)
		return 0;

	/* Time has changed so we create a new tick event. */
	memset(ev, 0, sizeof(*ev));
	ev->type = ptev_tick;
	ev->variant.tick.ip = ip;

	/* Indicate if we have wall-clock time or only relative time. */
	if (errcode != -pte_no_time)
		ev->has_tsc = 1;
	ev->tsc = tsc;
	ev->lost_mtc = lost_mtc;
	ev->lost_cyc = lost_cyc;

	/* We now have an event to process. */
	decoder->process_event = 1;

	return 1;
}

/* Query an indirect branch.
 *
 * Returns zero on success, a negative error code otherwise.
 */
static int pt_insn_indirect_branch(struct pt_insn_decoder *decoder,
				   uint64_t *ip)
{
	uint64_t evip;
	int status, errcode;

	if (!decoder)
		return -pte_internal;

	evip = decoder->ip;

	status = pt_qry_indirect_branch(&decoder->query, ip);
	if (status < 0)
		return status;

	if (decoder->flags.variant.insn.enable_tick_events) {
		errcode = pt_insn_tick(decoder, evip);
		if (errcode < 0)
			return errcode;
	}

	return status;
}

/* Query a conditional branch.
 *
 * Returns zero on success, a negative error code otherwise.
 */
static int pt_insn_cond_branch(struct pt_insn_decoder *decoder, int *taken)
{
	int status, errcode;

	if (!decoder)
		return -pte_internal;

	status = pt_qry_cond_branch(&decoder->query, taken);
	if (status < 0)
		return status;

	if (decoder->flags.variant.insn.enable_tick_events) {
		errcode = pt_insn_tick(decoder, decoder->ip);
		if (errcode < 0)
			return errcode;
	}

	return status;
}

static int pt_insn_start(struct pt_insn_decoder *decoder, int status)
{
	if (!decoder)
		return -pte_internal;

	if (status < 0)
		return status;

	decoder->status = status;

	if (!(status & pts_ip_suppressed))
		decoder->enabled = 1;

	/* Process any initial events.
	 *
	 * Some events are processed after proceeding to the next IP in order to
	 * indicate things like tracing disable or trace stop in the preceding
	 * instruction.  Those events will be processed without such an
	 * indication before decoding the current instruction.
	 *
	 * We already do this here so that we can indicate user events that
	 * precede the first instruction.
	 */
	return pt_insn_check_ip_event(decoder, NULL, NULL);
}

int pt_insn_sync_forward(struct pt_insn_decoder *decoder)
{
	int status;

	if (!decoder)
		return -pte_invalid;

	pt_insn_reset(decoder);

	status = pt_qry_sync_forward(&decoder->query, &decoder->ip);

	return pt_insn_start(decoder, status);
}

int pt_insn_sync_backward(struct pt_insn_decoder *decoder)
{
	int status;

	if (!decoder)
		return -pte_invalid;

	pt_insn_reset(decoder);

	status = pt_qry_sync_backward(&decoder->query, &decoder->ip);

	return pt_insn_start(decoder, status);
}

int pt_insn_sync_set(struct pt_insn_decoder *decoder, uint64_t offset)
{
	int status;

	if (!decoder)
		return -pte_invalid;

	pt_insn_reset(decoder);

	status = pt_qry_sync_set(&decoder->query, &decoder->ip, offset);

	return pt_insn_start(decoder, status);
}

int pt_insn_get_offset(const struct pt_insn_decoder *decoder, uint64_t *offset)
{
	if (!decoder)
		return -pte_invalid;

	return pt_qry_get_offset(&decoder->query, offset);
}

int pt_insn_get_sync_offset(const struct pt_insn_decoder *decoder,
			    uint64_t *offset)
{
	if (!decoder)
		return -pte_invalid;

	return pt_qry_get_sync_offset(&decoder->query, offset);
}

struct pt_image *pt_insn_get_image(struct pt_insn_decoder *decoder)
{
	if (!decoder)
		return NULL;

	return decoder->image;
}

int pt_insn_set_image(struct pt_insn_decoder *decoder,
		      struct pt_image *image)
{
	if (!decoder)
		return -pte_invalid;

	if (!image)
		image = &decoder->default_image;

	decoder->image = image;
	return 0;
}
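
/* A sketch of supplying a custom image to the decoder.  Illustrative only
 * and compiled out; the file name, offset, size, and load address are
 * made-up placeholders.  Passing a NULL asid adds the section for all
 * address spaces.
 */
#if 0
static int example_set_image(struct pt_insn_decoder *decoder)
{
	struct pt_image *image;
	int errcode;

	image = pt_image_alloc("example");
	if (!image)
		return -pte_nomem;

	/* Map 0x1000 bytes from file offset 0 of "a.out" at virtual
	 * address 0x400000.
	 */
	errcode = pt_image_add_file(image, "a.out", 0ull, 0x1000ull,
				    NULL, 0x400000ull);
	if (errcode < 0) {
		pt_image_free(image);
		return errcode;
	}

	/* The decoder does not take ownership; @image must outlive it. */
	return pt_insn_set_image(decoder, image);
}
#endif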

const struct pt_config *
pt_insn_get_config(const struct pt_insn_decoder *decoder)
{
	if (!decoder)
		return NULL;

	return pt_qry_get_config(&decoder->query);
}

int pt_insn_time(struct pt_insn_decoder *decoder, uint64_t *time,
		 uint32_t *lost_mtc, uint32_t *lost_cyc)
{
	if (!decoder || !time)
		return -pte_invalid;

	return pt_qry_time(&decoder->query, time, lost_mtc, lost_cyc);
}

int pt_insn_core_bus_ratio(struct pt_insn_decoder *decoder, uint32_t *cbr)
{
	if (!decoder || !cbr)
		return -pte_invalid;

	return pt_qry_core_bus_ratio(&decoder->query, cbr);
}

int pt_insn_asid(const struct pt_insn_decoder *decoder, struct pt_asid *asid,
		 size_t size)
{
	if (!decoder || !asid)
		return -pte_invalid;

	return pt_asid_to_user(asid, &decoder->asid, size);
}

static inline int event_pending(struct pt_insn_decoder *decoder)
{
	int status;

	if (!decoder)
		return -pte_invalid;

	if (decoder->process_event)
		return 1;

	status = decoder->status;
	if (!(status & pts_event_pending))
		return 0;

	status = pt_qry_event(&decoder->query, &decoder->event,
			      sizeof(decoder->event));
	if (status < 0)
		return status;

	decoder->process_event = 1;
	decoder->status = status;
	return 1;
}

static int check_erratum_skd022(struct pt_insn_decoder *decoder)
{
	struct pt_insn_ext iext;
	struct pt_insn insn;
	int errcode;

	if (!decoder)
		return -pte_internal;

	insn.mode = decoder->mode;
	insn.ip = decoder->ip;

	errcode = pt_insn_decode(&insn, &iext, decoder->image, &decoder->asid);
	if (errcode < 0)
		return 0;

	switch (iext.iclass) {
	default:
		return 0;

	case PTI_INST_VMLAUNCH:
	case PTI_INST_VMRESUME:
		return 1;
	}
}

static inline int handle_erratum_skd022(struct pt_insn_decoder *decoder)
{
	struct pt_event *ev;
	uint64_t ip;
	int errcode;

	if (!decoder)
		return -pte_internal;

	errcode = check_erratum_skd022(decoder);
	if (errcode <= 0)
		return errcode;

	/* We turn the async disable into a sync disable.  It will be processed
	 * after decoding the instruction.
	 */
	ev = &decoder->event;

	ip = ev->variant.async_disabled.ip;

	ev->type = ptev_disabled;
	ev->variant.disabled.ip = ip;

	return 1;
}

static int pt_insn_proceed(struct pt_insn_decoder *decoder,
			   const struct pt_insn *insn,
			   const struct pt_insn_ext *iext)
{
	if (!decoder || !insn || !iext)
		return -pte_internal;

	/* Branch displacements apply to the next instruction. */
	decoder->ip += insn->size;

	/* We handle non-branches, non-taken conditional branches, and
	 * compressed returns directly in the switch and do some pre-work for
	 * calls.
	 *
	 * All kinds of branches are handled below the switch.
	 */
	switch (insn->iclass) {
	case ptic_ptwrite:
	case ptic_other:
		return 0;

	case ptic_cond_jump: {
		int status, taken;

		status = pt_insn_cond_branch(decoder, &taken);
		if (status < 0)
			return status;

		decoder->status = status;
		if (!taken)
			return 0;

		break;
	}

	case ptic_call:
		/* Log the call for return compression.
		 *
		 * Unless this is a call to the next instruction, as used
		 * for position-independent code.
		 */
		if (iext->variant.branch.displacement ||
		    !iext->variant.branch.is_direct)
			pt_retstack_push(&decoder->retstack, decoder->ip);

		break;

	case ptic_return: {
		int taken, status;

		/* Check for a compressed return. */
		status = pt_insn_cond_branch(decoder, &taken);
		if (status >= 0) {
			decoder->status = status;

			/* A compressed return is indicated by a taken
			 * conditional branch.
			 */
			if (!taken)
				return -pte_bad_retcomp;

			return pt_retstack_pop(&decoder->retstack,
					       &decoder->ip);
		}

		break;
	}

	case ptic_jump:
	case ptic_far_call:
	case ptic_far_return:
	case ptic_far_jump:
		break;

	case ptic_error:
		return -pte_bad_insn;
	}

	/* Process a direct or indirect branch.
	 *
	 * This combines calls, uncompressed returns, taken conditional jumps,
	 * and all flavors of far transfers.
	 */
	if (iext->variant.branch.is_direct)
		decoder->ip += (uint64_t) (int64_t)
			iext->variant.branch.displacement;
	else {
		int status;

		status = pt_insn_indirect_branch(decoder, &decoder->ip);

		if (status < 0)
			return status;

		decoder->status = status;

		/* We do need an IP to proceed. */
		if (status & pts_ip_suppressed)
			return -pte_noip;
	}

	return 0;
}

static int pt_insn_at_skl014(const struct pt_event *ev,
			     const struct pt_insn *insn,
			     const struct pt_insn_ext *iext,
			     const struct pt_config *config)
{
	uint64_t ip;
	int status;

	if (!ev || !insn || !iext || !config)
		return -pte_internal;

	if (!ev->ip_suppressed)
		return 0;

	switch (insn->iclass) {
	case ptic_call:
	case ptic_jump:
		/* The erratum only applies to unconditional direct branches. */
		if (!iext->variant.branch.is_direct)
			break;

		/* Check the filter against the branch target. */
		ip = insn->ip;
		ip += insn->size;
		ip += (uint64_t) (int64_t) iext->variant.branch.displacement;

		status = pt_filter_addr_check(&config->addr_filter, ip);
		if (status <= 0) {
			if (status < 0)
				return status;

			return 1;
		}
		break;

	default:
		break;
	}

	return 0;
}

static int pt_insn_at_disabled_event(const struct pt_event *ev,
				     const struct pt_insn *insn,
				     const struct pt_insn_ext *iext,
				     const struct pt_config *config)
{
	if (!ev || !insn || !iext || !config)
		return -pte_internal;

	if (ev->ip_suppressed) {
		if (pt_insn_is_far_branch(insn, iext) ||
		    pt_insn_changes_cpl(insn, iext) ||
		    pt_insn_changes_cr3(insn, iext))
			return 1;

		/* If we don't have a filter configuration we assume that no
		 * address filters were used and the erratum does not apply.
		 *
		 * We might otherwise disable tracing too early.
		 */
		if (config->addr_filter.config.addr_cfg &&
		    config->errata.skl014 &&
		    pt_insn_at_skl014(ev, insn, iext, config))
			return 1;
	} else {
		switch (insn->iclass) {
		case ptic_ptwrite:
		case ptic_other:
			break;

		case ptic_call:
		case ptic_jump:
			/* If we got an IP with the disabled event, we may
			 * ignore direct branches that go to a different IP.
			 */
			if (iext->variant.branch.is_direct) {
				uint64_t ip;

				ip = insn->ip;
				ip += insn->size;
				ip += (uint64_t) (int64_t)
					iext->variant.branch.displacement;

				if (ip != ev->variant.disabled.ip)
					break;
			}

			fallthrough;
		case ptic_return:
		case ptic_far_call:
		case ptic_far_return:
		case ptic_far_jump:
		case ptic_cond_jump:
			return 1;

		case ptic_error:
			return -pte_bad_insn;
		}
	}

	return 0;
}

/* Postpone proceeding past @insn/@iext and indicate a pending event.
 *
 * There may be further events pending on @insn/@iext.  Postpone proceeding past
 * @insn/@iext until we processed all events that bind to it.
 *
 * Returns a non-negative pt_status_flag bit-vector indicating a pending event
 * on success, a negative pt_error_code otherwise.
 */
static int pt_insn_postpone(struct pt_insn_decoder *decoder,
			    const struct pt_insn *insn,
			    const struct pt_insn_ext *iext)
{
	if (!decoder || !insn || !iext)
		return -pte_internal;

	if (!decoder->process_insn) {
		decoder->process_insn = 1;
		decoder->insn = *insn;
		decoder->iext = *iext;
	}

	return pt_insn_status(decoder, pts_event_pending);
}

/* Remove any postponed instruction from @decoder.
 *
 * Returns zero on success, a negative pt_error_code otherwise.
 */
static int pt_insn_clear_postponed(struct pt_insn_decoder *decoder)
{
	if (!decoder)
		return -pte_internal;

	decoder->process_insn = 0;
	decoder->bound_paging = 0;
	decoder->bound_vmcs = 0;
	decoder->bound_ptwrite = 0;

	return 0;
}

/* Proceed past a postponed instruction.
 *
 * Returns zero on success, a negative pt_error_code otherwise.
 */
static int pt_insn_proceed_postponed(struct pt_insn_decoder *decoder)
{
	int status;

	if (!decoder)
		return -pte_internal;

	if (!decoder->process_insn)
		return -pte_internal;

	/* There's nothing to do if tracing got disabled. */
	if (!decoder->enabled)
		return pt_insn_clear_postponed(decoder);

	status = pt_insn_proceed(decoder, &decoder->insn, &decoder->iext);
	if (status < 0)
		return status;

	return pt_insn_clear_postponed(decoder);
}

/* Check for events that bind to an instruction.
 *
 * Check whether an event is pending that binds to @insn/@iext, and, if that is
 * the case, proceed past @insn/@iext and indicate the event by setting
 * pts_event_pending.
 *
 * If that is not the case, we return zero.  This is what pt_insn_status() would
 * return since:
 *
 *   - we suppress pts_eos as long as we're still processing events
 *   - we do not set pts_ip_suppressed since tracing must be enabled
 *
 * Returns a non-negative pt_status_flag bit-vector on success, a negative error
 * code otherwise.
 */
static int pt_insn_check_insn_event(struct pt_insn_decoder *decoder,
				    const struct pt_insn *insn,
				    const struct pt_insn_ext *iext)
{
	struct pt_event *ev;
	int status;

	if (!decoder)
		return -pte_internal;

	status = event_pending(decoder);
	if (status <= 0)
		return status;

	ev = &decoder->event;
	switch (ev->type) {
	case ptev_enabled:
	case ptev_overflow:
	case ptev_async_paging:
	case ptev_async_vmcs:
	case ptev_async_disabled:
	case ptev_async_branch:
	case ptev_exec_mode:
	case ptev_tsx:
	case ptev_stop:
	case ptev_exstop:
	case ptev_mwait:
	case ptev_pwre:
	case ptev_pwrx:
	case ptev_tick:
	case ptev_cbr:
	case ptev_mnt:
		/* We're only interested in events that bind to instructions. */
		return 0;

	case ptev_disabled:
		status = pt_insn_at_disabled_event(ev, insn, iext,
						   &decoder->query.config);
		if (status <= 0)
			return status;

		/* We're at a synchronous disable event location.
		 *
		 * Let's determine the IP at which we expect tracing to resume.
		 */
		status = pt_insn_next_ip(&decoder->ip, insn, iext);
		if (status < 0) {
			/* We don't know the IP on error. */
			decoder->ip = 0ull;

			/* For indirect calls, assume that we return to the next
			 * instruction.
			 *
			 * We only check the instruction class, not the
			 * is_direct property, since direct calls would have
			 * been handled by pt_insn_next_ip() or would have
			 * provoked a different error.
			 */
			if (status != -pte_bad_query)
				return status;

			switch (insn->iclass) {
			case ptic_call:
			case ptic_far_call:
				decoder->ip = insn->ip + insn->size;
				break;

			default:
				break;
			}
		}

		break;

	case ptev_paging:
		/* We bind at most one paging event to an instruction. */
		if (decoder->bound_paging)
			return 0;

		if (!pt_insn_binds_to_pip(insn, iext))
			return 0;

		/* We bound a paging event.  Make sure we do not bind further
		 * paging events to this instruction.
		 */
		decoder->bound_paging = 1;

		return pt_insn_postpone(decoder, insn, iext);

	case ptev_vmcs:
		/* We bind at most one vmcs event to an instruction. */
		if (decoder->bound_vmcs)
			return 0;

		if (!pt_insn_binds_to_vmcs(insn, iext))
			return 0;

		/* We bound a vmcs event.  Make sure we do not bind further vmcs
		 * events to this instruction.
		 */
		decoder->bound_vmcs = 1;

		return pt_insn_postpone(decoder, insn, iext);

	case ptev_ptwrite:
		/* We bind at most one ptwrite event to an instruction. */
		if (decoder->bound_ptwrite)
			return 0;

		if (ev->ip_suppressed) {
			if (!pt_insn_is_ptwrite(insn, iext))
				return 0;

			/* Fill in the event IP.  Our users will need it to
			 * make sense of the PTWRITE payload.
			 */
			ev->variant.ptwrite.ip = decoder->ip;
			ev->ip_suppressed = 0;
		} else {
			/* The ptwrite event contains the IP of the ptwrite
			 * instruction (CLIP) unlike most events that contain
			 * the IP of the first instruction that did not complete
			 * (NLIP).
			 *
			 * It's easier to handle this case here, as well.
			 */
			if (decoder->ip != ev->variant.ptwrite.ip)
				return 0;
		}

		/* We bound a ptwrite event.  Make sure we do not bind further
		 * ptwrite events to this instruction.
		 */
		decoder->bound_ptwrite = 1;

		return pt_insn_postpone(decoder, insn, iext);
	}

	return pt_insn_status(decoder, pts_event_pending);
}

enum {
	/* The maximum number of steps to take when determining whether the
	 * event location can be reached.
	 */
	bdm64_max_steps	= 0x100
};

/* Try to work around erratum BDM64.
 *
 * If we got a transaction abort immediately following a branch that produced
 * trace, the trace for that branch might have been corrupted.
 *
 * Returns a positive integer if the erratum was handled.
 * Returns zero if the erratum does not seem to apply.
 * Returns a negative error code otherwise.
 */
static int handle_erratum_bdm64(struct pt_insn_decoder *decoder,
				const struct pt_event *ev,
				const struct pt_insn *insn,
				const struct pt_insn_ext *iext)
{
	int status;

	if (!decoder || !ev || !insn || !iext)
		return -pte_internal;

	/* This only affects aborts. */
	if (!ev->variant.tsx.aborted)
		return 0;

	/* This only affects branches. */
	if (!pt_insn_is_branch(insn, iext))
		return 0;

	/* Let's check if we can reach the event location from here.
	 *
	 * If we can, let's assume the erratum did not hit.  We might still be
	 * wrong but we're not able to tell.
	 */
	status = pt_insn_range_is_contiguous(decoder->ip, ev->variant.tsx.ip,
					     decoder->mode, decoder->image,
					     &decoder->asid, bdm64_max_steps);
	if (status > 0)
		return 0;

	/* We can't reach the event location.  This could either mean that we
	 * stopped too early (and status is zero) or that the erratum hit.
	 *
	 * We assume the latter and pretend that the previous branch brought us
	 * to the event location, instead.
	 */
	decoder->ip = ev->variant.tsx.ip;

	return 1;
}

/* Check whether a peek TSX event should be postponed.
 *
 * This involves handling erratum BDM64.
 *
 * Returns a positive integer if the event is to be postponed.
 * Returns zero if the event should be processed.
 * Returns a negative error code otherwise.
 */
static inline int pt_insn_postpone_tsx(struct pt_insn_decoder *decoder,
				       const struct pt_insn *insn,
				       const struct pt_insn_ext *iext,
				       const struct pt_event *ev)
{
	int status;

	if (!decoder || !ev)
		return -pte_internal;

	if (ev->ip_suppressed)
		return 0;

	if (insn && iext && decoder->query.config.errata.bdm64) {
		status = handle_erratum_bdm64(decoder, ev, insn, iext);
		if (status < 0)
			return status;
	}

	if (decoder->ip != ev->variant.tsx.ip)
		return 1;

	return 0;
}

/* Check for events that bind to an IP.
 *
 * Check whether an event is pending that binds to @decoder->ip, and, if that is
 * the case, indicate the event by setting pts_event_pending.
 *
 * Returns a non-negative pt_status_flag bit-vector on success, a negative error
 * code otherwise.
 */
static int pt_insn_check_ip_event(struct pt_insn_decoder *decoder,
				  const struct pt_insn *insn,
				  const struct pt_insn_ext *iext)
{
	struct pt_event *ev;
	int status;

	if (!decoder)
		return -pte_internal;

	status = event_pending(decoder);
	if (status <= 0) {
		if (status < 0)
			return status;

		return pt_insn_status(decoder, 0);
	}

	ev = &decoder->event;
	switch (ev->type) {
	case ptev_disabled:
		break;

	case ptev_enabled:
		return pt_insn_status(decoder, pts_event_pending);

	case ptev_async_disabled:
		if (ev->variant.async_disabled.at != decoder->ip)
			break;

		if (decoder->query.config.errata.skd022) {
			int errcode;

			errcode = handle_erratum_skd022(decoder);
			if (errcode != 0) {
				if (errcode < 0)
					return errcode;

				/* If the erratum applies, we postpone the
				 * modified event to the next call to
				 * pt_insn_next().
				 */
				break;
			}
		}

		return pt_insn_status(decoder, pts_event_pending);

	case ptev_tsx:
		status = pt_insn_postpone_tsx(decoder, insn, iext, ev);
		if (status != 0) {
			if (status < 0)
				return status;

			break;
		}

		return pt_insn_status(decoder, pts_event_pending);

	case ptev_async_branch:
		if (ev->variant.async_branch.from != decoder->ip)
			break;

		return pt_insn_status(decoder, pts_event_pending);

	case ptev_overflow:
		return pt_insn_status(decoder, pts_event_pending);

	case ptev_exec_mode:
		if (!ev->ip_suppressed &&
		    ev->variant.exec_mode.ip != decoder->ip)
			break;

		return pt_insn_status(decoder, pts_event_pending);

	case ptev_paging:
		if (decoder->enabled)
			break;

		return pt_insn_status(decoder, pts_event_pending);

	case ptev_async_paging:
		if (!ev->ip_suppressed &&
		    ev->variant.async_paging.ip != decoder->ip)
			break;

		return pt_insn_status(decoder, pts_event_pending);

	case ptev_vmcs:
		if (decoder->enabled)
			break;

		return pt_insn_status(decoder, pts_event_pending);

	case ptev_async_vmcs:
		if (!ev->ip_suppressed &&
		    ev->variant.async_vmcs.ip != decoder->ip)
			break;

		return pt_insn_status(decoder, pts_event_pending);

	case ptev_stop:
		return pt_insn_status(decoder, pts_event_pending);

	case ptev_exstop:
		if (!ev->ip_suppressed && decoder->enabled &&
		    decoder->ip != ev->variant.exstop.ip)
			break;

		return pt_insn_status(decoder, pts_event_pending);

	case ptev_mwait:
		if (!ev->ip_suppressed && decoder->enabled &&
		    decoder->ip != ev->variant.mwait.ip)
			break;

		return pt_insn_status(decoder, pts_event_pending);

	case ptev_pwre:
	case ptev_pwrx:
		return pt_insn_status(decoder, pts_event_pending);

	case ptev_ptwrite:
		/* Any event binding to the current PTWRITE instruction is
		 * handled in pt_insn_check_insn_event().
		 *
		 * Any subsequent ptwrite event binds to a different instruction
		 * and must wait until the next iteration - as long as tracing
		 * is enabled.
		 *
		 * When tracing is disabled, we forward all ptwrite events
		 * immediately to the user.
		 */
		if (decoder->enabled)
			break;

		return pt_insn_status(decoder, pts_event_pending);

	case ptev_tick:
	case ptev_cbr:
	case ptev_mnt:
		return pt_insn_status(decoder, pts_event_pending);
	}

	return pt_insn_status(decoder, 0);
}

static inline int insn_to_user(struct pt_insn *uinsn, size_t size,
			       const struct pt_insn *insn)
{
	if (!uinsn || !insn)
		return -pte_internal;

	if (uinsn == insn)
		return 0;

	/* Zero out any unknown bytes.
	 *
	 * The offset into the user's buffer is in bytes, so compute the
	 * fill address on a byte pointer rather than on @uinsn itself.
	 */
	if (sizeof(*insn) < size) {
		memset((uint8_t *) uinsn + sizeof(*insn), 0,
		       size - sizeof(*insn));

		size = sizeof(*insn);
	}

	memcpy(uinsn, insn, size);

	return 0;
}

static int pt_insn_decode_cached(struct pt_insn_decoder *decoder,
				 const struct pt_mapped_section *msec,
				 struct pt_insn *insn, struct pt_insn_ext *iext)
{
	int status;

	if (!decoder || !insn || !iext)
		return -pte_internal;

	/* Try reading the memory containing @insn from the cached section.  If
	 * that fails, if we don't have a cached section, or if decode fails
	 * later on, fall back to decoding @insn from @decoder->image.
	 *
	 * The latter will also handle truncated instructions that cross section
	 * boundaries.
	 */

	if (!msec)
		return pt_insn_decode(insn, iext, decoder->image,
				      &decoder->asid);

	status = pt_msec_read(msec, insn->raw, sizeof(insn->raw), insn->ip);
	if (status < 0) {
		if (status != -pte_nomap)
			return status;

		return pt_insn_decode(insn, iext, decoder->image,
				      &decoder->asid);
	}

	/* We initialize @insn->size to the maximal possible size.  It will be
	 * set to the actual size during instruction decode.
	 */
	insn->size = (uint8_t) status;

	status = pt_ild_decode(insn, iext);
	if (status < 0) {
		if (status != -pte_bad_insn)
			return status;

		return pt_insn_decode(insn, iext, decoder->image,
				      &decoder->asid);
	}

	return status;
}

static int pt_insn_msec_lookup(struct pt_insn_decoder *decoder,
			       const struct pt_mapped_section **pmsec)
{
	struct pt_msec_cache *scache;
	struct pt_image *image;
	uint64_t ip;
	int isid;

	if (!decoder || !pmsec)
		return -pte_internal;

	scache = &decoder->scache;
	image = decoder->image;
	ip = decoder->ip;

	isid = pt_msec_cache_read(scache, pmsec, image, ip);
	if (isid < 0) {
		if (isid != -pte_nomap)
			return isid;

		return pt_msec_cache_fill(scache, pmsec, image,
					  &decoder->asid, ip);
	}

	return isid;
}

int pt_insn_next(struct pt_insn_decoder *decoder, struct pt_insn *uinsn,
		 size_t size)
{
	const struct pt_mapped_section *msec;
	struct pt_insn_ext iext;
	struct pt_insn insn, *pinsn;
	int status, isid;

	if (!uinsn || !decoder)
		return -pte_invalid;

	/* Tracing must be enabled.
	 *
	 * If it isn't we should be processing events until we either run out of
	 * trace or process a tracing enabled event.
	 */
	if (!decoder->enabled) {
		if (decoder->status & pts_eos)
			return -pte_eos;

		return -pte_no_enable;
	}

	pinsn = size == sizeof(insn) ? uinsn : &insn;

	/* Zero-initialize the instruction in case of error returns. */
	memset(pinsn, 0, sizeof(*pinsn));

	/* Fill in a few things from the current decode state.
	 *
	 * This reflects the state of the last pt_insn_next(), pt_insn_event()
	 * or pt_insn_start() call.
	 */
	if (decoder->speculative)
		pinsn->speculative = 1;
	pinsn->ip = decoder->ip;
	pinsn->mode = decoder->mode;

	isid = pt_insn_msec_lookup(decoder, &msec);
	if (isid < 0) {
		if (isid != -pte_nomap)
			return isid;

		msec = NULL;
	}

	/* We set an incorrect isid if @msec is NULL.  This will be corrected
	 * when we read the memory from the image later on.
	 */
	pinsn->isid = isid;

	status = pt_insn_decode_cached(decoder, msec, pinsn, &iext);
	if (status < 0) {
		/* Provide the incomplete instruction - the IP and mode fields
		 * are valid and may help diagnose the error.
		 */
		(void) insn_to_user(uinsn, size, pinsn);
		return status;
	}

	/* Provide the decoded instruction to the user.  It won't change during
	 * event processing.
	 */
	status = insn_to_user(uinsn, size, pinsn);
	if (status < 0)
		return status;

	/* Check for events that bind to the current instruction.
	 *
	 * If an event is indicated, we're done.
	 */
	status = pt_insn_check_insn_event(decoder, pinsn, &iext);
	if (status != 0) {
		if (status < 0)
			return status;

		if (status & pts_event_pending)
			return status;
	}

	/* Determine the next instruction's IP. */
	status = pt_insn_proceed(decoder, pinsn, &iext);
	if (status < 0)
		return status;

	/* Indicate events that bind to the new IP.
	 *
	 * Although we only look at the IP for binding events, we pass the
	 * decoded instruction in order to handle errata.
	 */
	return pt_insn_check_ip_event(decoder, pinsn, &iext);
}

static int pt_insn_process_enabled(struct pt_insn_decoder *decoder)
{
	struct pt_event *ev;

	if (!decoder)
		return -pte_internal;

	ev = &decoder->event;

	/* This event can't be a status update. */
	if (ev->status_update)
		return -pte_bad_context;

	/* We must have an IP in order to start decoding. */
	if (ev->ip_suppressed)
		return -pte_noip;

	/* We must currently be disabled. */
	if (decoder->enabled)
		return -pte_bad_context;

	decoder->ip = ev->variant.enabled.ip;
	decoder->enabled = 1;

	return 0;
}

static int pt_insn_process_disabled(struct pt_insn_decoder *decoder)
{
	struct pt_event *ev;

	if (!decoder)
		return -pte_internal;

	ev = &decoder->event;

	/* This event can't be a status update. */
	if (ev->status_update)
		return -pte_bad_context;

	/* We must currently be enabled. */
	if (!decoder->enabled)
		return -pte_bad_context;

	/* We preserve @decoder->ip.  This is where we expect tracing to resume
	 * and we'll indicate that on the subsequent enabled event if tracing
	 * actually does resume from there.
	 */
	decoder->enabled = 0;

	return 0;
}

static int pt_insn_process_async_branch(struct pt_insn_decoder *decoder)
{
	struct pt_event *ev;

	if (!decoder)
		return -pte_internal;

	ev = &decoder->event;

	/* This event can't be a status update. */
	if (ev->status_update)
		return -pte_bad_context;

	/* Tracing must be enabled in order to make sense of the event. */
	if (!decoder->enabled)
		return -pte_bad_context;

	decoder->ip = ev->variant.async_branch.to;

	return 0;
}

static int pt_insn_process_paging(struct pt_insn_decoder *decoder)
{
	uint64_t cr3;
	int errcode;

	if (!decoder)
		return -pte_internal;

	cr3 = decoder->event.variant.paging.cr3;
	if (decoder->asid.cr3 != cr3) {
		errcode = pt_msec_cache_invalidate(&decoder->scache);
		if (errcode < 0)
			return errcode;

		decoder->asid.cr3 = cr3;
	}

	return 0;
}

static int pt_insn_process_overflow(struct pt_insn_decoder *decoder)
{
	struct pt_event *ev;

	if (!decoder)
		return -pte_internal;

	ev = &decoder->event;

	/* This event can't be a status update. */
	if (ev->status_update)
		return -pte_bad_context;

	/* If the IP is suppressed, the overflow resolved while tracing was
	 * disabled.  Otherwise it resolved while tracing was enabled.
	 */
	if (ev->ip_suppressed) {
		/* Tracing is disabled.
		 *
		 * It doesn't make sense to preserve the previous IP.  This will
		 * just be misleading.  Even if tracing had been disabled
		 * before, as well, we might have missed the re-enable in the
		 * overflow.
		 */
		decoder->enabled = 0;
		decoder->ip = 0ull;
	} else {
		/* Tracing is enabled and we're at the IP at which the overflow
		 * resolved.
		 */
		decoder->ip = ev->variant.overflow.ip;
		decoder->enabled = 1;
	}

	/* We don't know the TSX state.  Let's assume we execute normally.
	 *
	 * We also don't know the execution mode.  Let's keep what we have
	 * in case we don't get an update before we have to decode the next
	 * instruction.
	 */
	decoder->speculative = 0;

	return 0;
}

static int pt_insn_process_exec_mode(struct pt_insn_decoder *decoder)
{
	enum pt_exec_mode mode;
	struct pt_event *ev;

	if (!decoder)
		return -pte_internal;

	ev = &decoder->event;
	mode = ev->variant.exec_mode.mode;

	/* Use status update events to diagnose inconsistencies. */
	if (ev->status_update && decoder->enabled &&
	    decoder->mode != ptem_unknown && decoder->mode != mode)
		return -pte_bad_status_update;

	decoder->mode = mode;

	return 0;
}

static int pt_insn_process_tsx(struct pt_insn_decoder *decoder)
{
	if (!decoder)
		return -pte_internal;

	decoder->speculative = decoder->event.variant.tsx.speculative;

	return 0;
}

static int pt_insn_process_stop(struct pt_insn_decoder *decoder)
{
	struct pt_event *ev;

	if (!decoder)
		return -pte_internal;

	ev = &decoder->event;

	/* This event can't be a status update. */
	if (ev->status_update)
		return -pte_bad_context;

	/* Tracing is always disabled before it is stopped. */
	if (decoder->enabled)
		return -pte_bad_context;

	return 0;
}

static int pt_insn_process_vmcs(struct pt_insn_decoder *decoder)
{
	uint64_t vmcs;
	int errcode;

	if (!decoder)
		return -pte_internal;

	vmcs = decoder->event.variant.vmcs.base;
	if (decoder->asid.vmcs != vmcs) {
		errcode = pt_msec_cache_invalidate(&decoder->scache);
		if (errcode < 0)
			return errcode;

		decoder->asid.vmcs = vmcs;
	}

	return 0;
}

int pt_insn_event(struct pt_insn_decoder *decoder, struct pt_event *uevent,
		  size_t size)
{
	struct pt_event *ev;
	int status;

	if (!decoder || !uevent)
		return -pte_invalid;

	/* We must currently process an event. */
	if (!decoder->process_event)
		return -pte_bad_query;

	ev = &decoder->event;
	switch (ev->type) {
	default:
		/* This is not a user event.
		 *
		 * We either indicated it wrongly or the user called
		 * pt_insn_event() without a pts_event_pending indication.
		 */
		return -pte_bad_query;

	case ptev_enabled:
		/* Indicate that tracing resumes from the IP at which tracing
		 * had been disabled before (with some special treatment for
		 * calls).
		 */
		if (decoder->ip == ev->variant.enabled.ip)
			ev->variant.enabled.resumed = 1;

		status = pt_insn_process_enabled(decoder);
		if (status < 0)
			return status;

		break;

	case ptev_async_disabled:
		if (!ev->ip_suppressed &&
		    decoder->ip != ev->variant.async_disabled.at)
			return -pte_bad_query;

		fallthrough;
	case ptev_disabled:
		status = pt_insn_process_disabled(decoder);
		if (status < 0)
			return status;

		break;

	case ptev_async_branch:
		if (decoder->ip != ev->variant.async_branch.from)
			return -pte_bad_query;

		status = pt_insn_process_async_branch(decoder);
		if (status < 0)
			return status;

		break;

	case ptev_async_paging:
		if (!ev->ip_suppressed &&
		    decoder->ip != ev->variant.async_paging.ip)
			return -pte_bad_query;

		fallthrough;
	case ptev_paging:
		status = pt_insn_process_paging(decoder);
		if (status < 0)
			return status;

		break;

	case ptev_async_vmcs:
		if (!ev->ip_suppressed &&
		    decoder->ip != ev->variant.async_vmcs.ip)
			return -pte_bad_query;

		fallthrough;
	case ptev_vmcs:
		status = pt_insn_process_vmcs(decoder);
		if (status < 0)
			return status;

		break;

	case ptev_overflow:
		status = pt_insn_process_overflow(decoder);
		if (status < 0)
			return status;

		break;

	case ptev_exec_mode:
		status = pt_insn_process_exec_mode(decoder);
		if (status < 0)
			return status;

		break;

	case ptev_tsx:
		status = pt_insn_process_tsx(decoder);
		if (status < 0)
			return status;

		break;

	case ptev_stop:
		status = pt_insn_process_stop(decoder);
		if (status < 0)
			return status;

		break;

	case ptev_exstop:
		if (!ev->ip_suppressed && decoder->enabled &&
		    decoder->ip != ev->variant.exstop.ip)
			return -pte_bad_query;

		break;

	case ptev_mwait:
		if (!ev->ip_suppressed && decoder->enabled &&
		    decoder->ip != ev->variant.mwait.ip)
			return -pte_bad_query;

		break;

	case ptev_pwre:
	case ptev_pwrx:
	case ptev_ptwrite:
	case ptev_tick:
	case ptev_cbr:
	case ptev_mnt:
		break;
	}

	/* Copy the event to the user.  Make sure we're not writing beyond the
	 * memory provided by the user.
	 *
	 * We might truncate details of an event but only for those events the
	 * user can't know about, anyway.
	 */
	if (sizeof(*ev) < size)
		size = sizeof(*ev);

	memcpy(uevent, ev, size);

	/* This completes processing of the current event. */
	decoder->process_event = 0;

	/* If we just handled an instruction event, check for further events
	 * that bind to this instruction.
	 *
	 * If we don't have further events, proceed beyond the instruction so we
	 * can check for IP events, as well.
	 */
	if (decoder->process_insn) {
		status = pt_insn_check_insn_event(decoder, &decoder->insn,
						  &decoder->iext);

		if (status != 0) {
			if (status < 0)
				return status;

			if (status & pts_event_pending)
				return status;
		}

		/* Proceed to the next instruction. */
		status = pt_insn_proceed_postponed(decoder);
		if (status < 0)
			return status;
	}

	/* Indicate further events that bind to the same IP. */
	return pt_insn_check_ip_event(decoder, NULL, NULL);
}
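
/* A sketch of the full decode loop, tying pt_insn_next() and pt_insn_event()
 * together.  Illustrative only and compiled out.  Whenever either function
 * indicates pts_event_pending, the pending event must be fetched with
 * pt_insn_event() before decoding continues.  A real caller would typically
 * also resync via pt_insn_sync_forward() on decode errors.
 */
#if 0
static int example_decode_loop(struct pt_insn_decoder *decoder)
{
	int status;

	status = pt_insn_sync_forward(decoder);

	for (;;) {
		struct pt_insn insn;

		/* Drain all events that bind to the current instruction or
		 * IP before decoding the next instruction.
		 */
		while (status >= 0 && (status & pts_event_pending)) {
			struct pt_event ev;

			status = pt_insn_event(decoder, &ev, sizeof(ev));
		}

		if (status < 0)
			return (status == -pte_eos) ? 0 : status;

		status = pt_insn_next(decoder, &insn, sizeof(insn));
	}
}
#endif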