/*
 * Copyright (c) 2014-2019, Intel Corporation
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *  * Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *  * Neither the name of Intel Corporation nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "pt_time.h"
#include "pt_opcodes.h"

#include "intel-pt.h"

#include <string.h>
#include <limits.h>


void pt_time_init(struct pt_time *time)
{
	if (!time)
		return;

	memset(time, 0, sizeof(*time));
}

int pt_time_query_tsc(uint64_t *tsc, uint32_t *lost_mtc,
		      uint32_t *lost_cyc, const struct pt_time *time)
{
	if (!tsc || !time)
		return -pte_internal;

	*tsc = time->tsc;

	if (lost_mtc)
		*lost_mtc = time->lost_mtc;
	if (lost_cyc)
		*lost_cyc = time->lost_cyc;

	if (!time->have_tsc)
		return -pte_no_time;

	return 0;
}

int pt_time_query_cbr(uint32_t *cbr, const struct pt_time *time)
{
	if (!cbr || !time)
		return -pte_internal;

	if (!time->have_cbr)
		return -pte_no_cbr;

	*cbr = time->cbr;

	return 0;
}

/* Compute the distance between two CTC sources.
 *
 * We adjust a single wrap-around but fail if the distance is bigger than that.
 *
 * Returns zero on success, a negative error code otherwise.
 */
static int pt_time_ctc_delta(uint32_t *ctc_delta, uint32_t ctc,
			     uint32_t last_ctc, const struct pt_config *config)
{
	if (!config || !ctc_delta)
		return -pte_internal;

	/* Correct a single wrap-around.  If we lost enough MTCs to wrap
	 * around twice, timing will be wrong until the next TSC.
	 */
	if (ctc < last_ctc) {
		ctc += 1u << (config->mtc_freq + pt_pl_mtc_bit_size);

		/* Since we only store the CTC between TMA/MTC or MTC/MTC, a
		 * single correction should suffice.
		 */
		if (ctc < last_ctc)
			return -pte_bad_packet;
	}

	*ctc_delta = ctc - last_ctc;
	return 0;
}
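
/* For illustration, assuming mtc_freq == 3 and an 8-bit MTC payload
 * (pt_pl_mtc_bit_size == 8): the CTC wraps at 1u << 11 == 2048, so a
 * last_ctc of 2040 followed by a ctc of 8 is corrected to 2056, giving a
 * delta of 16, i.e. two MTC periods.
 */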

/* Translate CTC into the same unit as the FastCounter by multiplying with P.
 *
 * Returns zero on success, a negative error code otherwise.
 */
static int pt_time_ctc_fc(uint64_t *fc, uint64_t ctc,
			  const struct pt_config *config)
{
	uint32_t eax, ebx;

	if (!fc || !config)
		return -pte_internal;

	eax = config->cpuid_0x15_eax;
	ebx = config->cpuid_0x15_ebx;

	/* Neither multiply nor divide by zero. */
	if (!eax || !ebx)
		return -pte_bad_config;

	*fc = (ctc * ebx) / eax;
	return 0;
}
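
/* For illustration, with hypothetical CPUID values: cpuid[0x15].eax == 2 and
 * cpuid[0x15].ebx == 175 describe a TSC running at 175/2 == 87.5 times the
 * crystal clock, so a CTC delta of 16 translates into (16 * 175) / 2 == 1400
 * FastCounter ticks.
 */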

int pt_time_update_tsc(struct pt_time *time,
		       const struct pt_packet_tsc *packet,
		       const struct pt_config *config)
{
	(void) config;

	if (!time || !packet)
		return -pte_internal;

	time->have_tsc = 1;
	time->have_tma = 0;
	time->have_mtc = 0;
	time->tsc = time->base = packet->tsc;
	time->ctc = 0;
	time->fc = 0ull;

	/* We got the full time; we recover from previous losses. */
	time->lost_mtc = 0;
	time->lost_cyc = 0;

	return 0;
}

int pt_time_update_cbr(struct pt_time *time,
		       const struct pt_packet_cbr *packet,
		       const struct pt_config *config)
{
	uint8_t cbr;

	(void) config;

	if (!time || !packet)
		return -pte_internal;

	cbr = packet->ratio;
	if (!cbr)
		return -pte_bad_packet;

	time->have_cbr = 1;
	time->cbr = cbr;

	return 0;
}

int pt_time_update_tma(struct pt_time *time,
		       const struct pt_packet_tma *packet,
		       const struct pt_config *config)
{
	uint32_t ctc, mtc_freq, mtc_hi, ctc_mask;
	uint64_t fc;

	if (!time || !packet || !config)
		return -pte_internal;

	/* Without a TSC something is seriously wrong. */
	if (!time->have_tsc)
		return -pte_bad_context;

	/* We shouldn't have more than one TMA per TSC. */
	if (time->have_tma)
		return -pte_bad_context;

	/* We're ignoring MTC between TSC and TMA. */
	if (time->have_mtc)
		return -pte_internal;

	ctc = packet->ctc;
	fc = packet->fc;

	mtc_freq = config->mtc_freq;
	mtc_hi = mtc_freq + pt_pl_mtc_bit_size;

	/* A mask for the relevant CTC bits ignoring high-order bits that are
	 * not provided by MTC.
	 */
	ctc_mask = (1u << mtc_hi) - 1u;

	time->have_tma = 1;
	time->base -= fc;
	time->fc += fc;

	/* If the MTC frequency is low enough that TMA provides the full CTC
	 * value, we can use the TMA as an MTC.
	 *
	 * If it isn't, we will estimate the preceding MTC based on the CTC
	 * bits the TMA provides at the next MTC.  We forget about the
	 * previous MTC in this case.
	 *
	 * If no MTC packets are dropped around TMA, we will estimate the
	 * forgotten value again at the next MTC.
	 *
	 * If MTC packets are dropped, we can't really tell where in this
	 * extended MTC period the TSC occurred.  The estimation will place
	 * it right before the next MTC.
	 */
	if (mtc_hi <= pt_pl_tma_ctc_bit_size)
		time->have_mtc = 1;

	/* In both cases, we store the TMA's CTC bits until the next MTC. */
	time->ctc = time->ctc_cyc = ctc & ctc_mask;

	return 0;
}
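
/* For illustration, assuming an 8-bit MTC payload and a 16-bit TMA CTC field
 * (pt_pl_mtc_bit_size == 8, pt_pl_tma_ctc_bit_size == 16): with
 * mtc_freq == 3, mtc_hi == 11 and ctc_mask == 0x7ff, so the TMA provides all
 * CTC bits that MTC uses and doubles as an MTC.  With mtc_freq == 9,
 * mtc_hi == 17 exceeds the 16 TMA bits and the missing high-order bit is
 * estimated at the next MTC.
 */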

int pt_time_update_mtc(struct pt_time *time,
		       const struct pt_packet_mtc *packet,
		       const struct pt_config *config)
{
	uint32_t last_ctc, ctc, ctc_delta;
	uint64_t tsc, base;
	uint8_t mtc_freq;
	int errcode, have_tsc, have_tma, have_mtc;

	if (!time || !packet || !config)
		return -pte_internal;

	have_tsc = time->have_tsc;
	have_tma = time->have_tma;
	have_mtc = time->have_mtc;

	/* We ignore MTCs between TSC and TMA to avoid apparent CTC overflows.
	 *
	 * Later MTCs will ensure that no time is lost - provided TMA provides
	 * enough bits.  If TMA doesn't provide any of the MTC bits we may
	 * place the TSC into the wrong MTC period.
	 */
	if (have_tsc && !have_tma)
		return 0;

	base = time->base;
	last_ctc = time->ctc;
	mtc_freq = config->mtc_freq;

	ctc = (uint32_t) packet->ctc << mtc_freq;

	/* Store our CTC value if we have or would have reset FC. */
	if (time->fc || time->lost_cyc || !have_mtc)
		time->ctc_cyc = ctc;

	/* Prepare for the next packet in case we error out below. */
	time->have_mtc = 1;
	time->fc = 0ull;
	time->ctc = ctc;

	/* We recover from previous CYC losses. */
	time->lost_cyc = 0;

	/* Avoid a big jump when we see the first MTC with an arbitrary CTC
	 * payload.
	 */
	if (!have_mtc) {
		uint32_t ctc_lo, ctc_hi;

		/* If we have not seen a TMA, we ignore this first MTC.
		 *
		 * We have no idea where in this MTC period tracing started.
		 * We could lose an entire MTC period or just a tiny fraction.
		 *
		 * On the other hand, if we assumed a previous MTC value, we
		 * might make just the same error.
		 */
		if (!have_tma)
			return 0;

		/* The TMA's CTC value didn't provide enough bits - otherwise,
		 * we would have treated the TMA as an MTC.
		 */
		if (last_ctc & ~(uint32_t) pt_pl_tma_ctc_mask)
			return -pte_internal;

		/* Split this MTC's CTC value into low and high parts with
		 * respect to the bits provided by TMA.
		 */
		ctc_lo = ctc & (uint32_t) pt_pl_tma_ctc_mask;
		ctc_hi = ctc & ~(uint32_t) pt_pl_tma_ctc_mask;

		/* We estimate the high-order CTC bits that are not provided
		 * by TMA based on the CTC bits provided by this MTC.
		 *
		 * We assume that no MTC packets were dropped around TMA.  If
		 * there are, we might place the TSC into the wrong MTC period
		 * depending on how many CTC bits TMA provides and how many
		 * MTC packets were dropped.
		 *
		 * Note that the subtraction may underflow, which would set
		 * more bits than MTC packets can provide.  Drop those extra
		 * bits.
		 */
		if (ctc_lo < last_ctc) {
			ctc_hi -= 1u << pt_pl_tma_ctc_bit_size;
			ctc_hi &= (uint32_t) pt_pl_mtc_mask << mtc_freq;
		}

		last_ctc |= ctc_hi;
	}

	errcode = pt_time_ctc_delta(&ctc_delta, ctc, last_ctc, config);
	if (errcode < 0) {
		time->lost_mtc += 1;
		return errcode;
	}

	errcode = pt_time_ctc_fc(&tsc, ctc_delta, config);
	if (errcode < 0)
		return errcode;

	base += tsc;
	time->tsc = time->base = base;

	return 0;
}
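
/* For illustration, assuming mtc_freq == 9 and a 16-bit TMA CTC field: a TMA
 * leaves last_ctc == 0xfff0 and a first MTC payload of 0x80 yields
 * ctc == 0x80 << 9 == 0x10000.  Then ctc_lo == 0 is below last_ctc, so the
 * estimated high part 0x10000 is reduced by 1u << 16 to zero and the delta
 * works out to 0x10 CTC ticks.
 */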

/* Adjust a CYC packet's payload spanning multiple MTC periods.
 *
 * CYC packets measure the Fast Counter since the last CYC(-eligible) packet.
 * Depending on the CYC threshold, we may not get a CYC for each MTC, so a CYC
 * period may overlap with or even span multiple MTC periods.
 *
 * We can't do much about the overlap case without examining all packets in
 * the respective periods.  We leave this as expected imprecision.
 *
 * If we find a CYC packet to span multiple MTC periods, though, we try to
 * approximate the portion for the current MTC period by subtracting the
 * estimated portion for previous MTC periods using calibration information.
 *
 * We only consider MTC.  For the first CYC after TSC, the corresponding TMA
 * will contain the Fast Counter at TSC.
 *
 * Returns zero on success, a negative error code otherwise.
 */
static int pt_time_adjust_cyc(uint64_t *cyc, const struct pt_time *time,
			      const struct pt_config *config, uint64_t fcr)
{
	uint32_t last_ctc, ctc, ctc_delta;
	uint64_t fc, total_cyc, old_cyc;
	int errcode;

	if (!time || !config || !fcr)
		return -pte_internal;

	last_ctc = time->ctc_cyc;
	ctc = time->ctc;

	/* There is nothing to do if this is the current MTC period. */
	if (ctc == last_ctc)
		return 0;

	/* Calibration computes
	 *
	 *   fc = (ctc_delta * cpuid[0x15].ebx) / cpuid[0x15].eax.
	 *   fcr = (fc << pt_tcal_fcr_shr) / cyc
	 *
	 * So cyc = (fc << pt_tcal_fcr_shr) / fcr.
	 */

	errcode = pt_time_ctc_delta(&ctc_delta, ctc, last_ctc, config);
	if (errcode < 0)
		return errcode;

	errcode = pt_time_ctc_fc(&fc, ctc_delta, config);
	if (errcode < 0)
		return errcode;

	old_cyc = (fc << pt_tcal_fcr_shr) / fcr;
	total_cyc = *cyc;

	/* Make sure we don't wrap around.  If we would, attribute the entire
	 * CYC payload to any previous MTC period.
	 *
	 * We lost an unknown portion of the CYC payload for the current MTC
	 * period, but it's usually better to run too slow than too fast.
	 */
	if (total_cyc < old_cyc)
		total_cyc = old_cyc;

	*cyc = total_cyc - old_cyc;
	return 0;
}
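
/* For illustration, assuming pt_tcal_fcr_shr == 8: an fcr of 0x280 encodes
 * 2.5 FastCounter ticks per cycle.  If the previous MTC periods account for
 * fc == 1000, then old_cyc == (1000 << 8) / 0x280 == 400 cycles are
 * attributed to those periods and subtracted from the CYC payload.
 */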

int pt_time_update_cyc(struct pt_time *time,
		       const struct pt_packet_cyc *packet,
		       const struct pt_config *config, uint64_t fcr)
{
	uint64_t cyc, fc;

	if (!time || !packet || !config)
		return -pte_internal;

	if (!fcr) {
		time->lost_cyc += 1;
		return 0;
	}

	cyc = packet->value;
	fc = time->fc;
	if (!fc) {
		int errcode;

		errcode = pt_time_adjust_cyc(&cyc, time, config, fcr);
		if (errcode < 0)
			return errcode;
	}

	fc += (cyc * fcr) >> pt_tcal_fcr_shr;

	time->fc = fc;
	time->tsc = time->base + fc;

	return 0;
}
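
/* For illustration, assuming pt_tcal_fcr_shr == 8: a CYC payload of 100 at
 * fcr == 0x180 (1.5 FastCounter ticks per cycle) advances fc by
 * (100 * 0x180) >> 8 == 150 ticks, and the estimated TSC becomes base + fc.
 */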

void pt_tcal_init(struct pt_time_cal *tcal)
{
	if (!tcal)
		return;

	memset(tcal, 0, sizeof(*tcal));

	tcal->min_fcr = UINT64_MAX;
}
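
/* After initialization, min_fcr (UINT64_MAX) exceeds max_fcr (zero), so
 * pt_tcal_have_fcr() below reports no calibration until the first
 * pt_tcal_set_fcr() narrows the range.
 */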

static int pt_tcal_have_fcr(const struct pt_time_cal *tcal)
{
	if (!tcal)
		return 0;

	return (tcal->min_fcr <= tcal->max_fcr);
}

int pt_tcal_fcr(uint64_t *fcr, const struct pt_time_cal *tcal)
{
	if (!fcr || !tcal)
		return -pte_internal;

	if (!pt_tcal_have_fcr(tcal))
		return -pte_no_time;

	*fcr = tcal->fcr;

	return 0;
}

int pt_tcal_set_fcr(struct pt_time_cal *tcal, uint64_t fcr)
{
	if (!tcal)
		return -pte_internal;

	tcal->fcr = fcr;

	if (fcr < tcal->min_fcr)
		tcal->min_fcr = fcr;

	if (fcr > tcal->max_fcr)
		tcal->max_fcr = fcr;

	return 0;
}

int pt_tcal_update_psb(struct pt_time_cal *tcal,
		       const struct pt_config *config)
{
	if (!tcal || !config)
		return -pte_internal;

	if (config->errata.skl168)
		tcal->check_skl168 = 1;

	return 0;
}

int pt_tcal_update_tsc(struct pt_time_cal *tcal,
		       const struct pt_packet_tsc *packet,
		       const struct pt_config *config)
{
	(void) config;

	if (!tcal || !packet)
		return -pte_internal;

	/* A TSC outside of PSB+ may indicate loss of time.  We do not use it
	 * for calibration.  We store the TSC value for calibration at the
	 * next TSC in PSB+, though.
	 */
	tcal->tsc = packet->tsc;
	tcal->cyc_tsc = 0ull;

	return 0;
}

int pt_tcal_header_tsc(struct pt_time_cal *tcal,
		       const struct pt_packet_tsc *packet,
		       const struct pt_config *config)
{
	uint64_t tsc, last_tsc, tsc_delta, cyc, fcr;

	(void) config;

	if (!tcal || !packet)
		return -pte_internal;

	last_tsc = tcal->tsc;
	cyc = tcal->cyc_tsc;

	tsc = packet->tsc;

	tcal->tsc = tsc;
	tcal->cyc_tsc = 0ull;

	if (!last_tsc || !cyc)
		return 0;

	/* Prefer MTC over TSC for calibration. */
	if (tcal->have_mtc)
		return 0;

	/* Correct a single wrap-around. */
	if (tsc < last_tsc) {
		tsc += 1ull << pt_pl_tsc_bit_size;

		if (tsc < last_tsc)
			return -pte_bad_packet;
	}

	tsc_delta = tsc - last_tsc;

	/* We shift the numerator to improve rounding precision.
	 *
	 * Since we're only collecting the CYCs between two TSC, we shouldn't
	 * overflow.  Let's rather fail than overflow.
	 */
	if (tsc_delta & ~(~0ull >> pt_tcal_fcr_shr))
		return -pte_internal;

	fcr = (tsc_delta << pt_tcal_fcr_shr) / cyc;

	return pt_tcal_set_fcr(tcal, fcr);
}
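
/* For illustration, assuming pt_tcal_fcr_shr == 8: two PSB+ TSCs that are
 * 3000 ticks apart with 1200 cycles in between calibrate
 * fcr == (3000 << 8) / 1200 == 640, i.e. 2.5 FastCounter ticks per cycle.
 */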

int pt_tcal_update_cbr(struct pt_time_cal *tcal,
		       const struct pt_packet_cbr *packet,
		       const struct pt_config *config)
{
	/* A CBR outside of PSB+ indicates a frequency change.  Reset our
	 * calibration state.
	 */
	pt_tcal_init(tcal);

	return pt_tcal_header_cbr(tcal, packet, config);
}

int pt_tcal_header_cbr(struct pt_time_cal *tcal,
		       const struct pt_packet_cbr *packet,
		       const struct pt_config *config)
{
	uint64_t cbr, p1, fcr;

	if (!tcal || !packet || !config)
		return -pte_internal;

	p1 = config->nom_freq;
	if (!p1)
		return 0;

	/* If we know the nominal frequency, we can use it for calibration. */
	cbr = packet->ratio;
	if (!cbr)
		return -pte_bad_packet;

	fcr = (p1 << pt_tcal_fcr_shr) / cbr;

	return pt_tcal_set_fcr(tcal, fcr);
}
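
/* For illustration, assuming pt_tcal_fcr_shr == 8 and a nominal ratio of 24
 * in config->nom_freq: a CBR payload of 12 means the core runs at half its
 * nominal frequency, so fcr == (24 << 8) / 12 == 512, i.e. two FastCounter
 * ticks per core cycle.
 */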

int pt_tcal_update_tma(struct pt_time_cal *tcal,
		       const struct pt_packet_tma *packet,
		       const struct pt_config *config)
{
	(void) tcal;
	(void) packet;
	(void) config;

	/* Nothing to do. */
	return 0;
}

int pt_tcal_update_mtc(struct pt_time_cal *tcal,
		       const struct pt_packet_mtc *packet,
		       const struct pt_config *config)
{
	uint32_t last_ctc, ctc, ctc_delta, have_mtc, check_skl168;
	uint64_t cyc, fc, fcr;
	int errcode;

	if (!tcal || !packet || !config)
		return -pte_internal;

	last_ctc = tcal->ctc;
	have_mtc = tcal->have_mtc;
	cyc = tcal->cyc_mtc;
	check_skl168 = tcal->check_skl168;

	/* This only affects the first MTC after PSB. */
	tcal->check_skl168 = 0;

	ctc = (uint32_t) packet->ctc << config->mtc_freq;

	/* We need at least two MTC (including this). */
	if (!have_mtc) {
		tcal->cyc_mtc = 0ull;
		tcal->ctc = ctc;
		tcal->have_mtc = 1;

		return 0;
	}

	/* Without any cycles, we can't calibrate.  Try again at the next
	 * MTC and distribute the cycles over the combined MTC period.
	 */
	if (!cyc)
		return 0;

	/* Prepare for the next packet in case we error out below. */
	tcal->have_mtc = 1;
	tcal->cyc_mtc = 0ull;
	tcal->ctc = ctc;

	/* Let's pretend we will fail.  We'll correct it at the end. */
	tcal->lost_mtc += 1;

	errcode = pt_time_ctc_delta(&ctc_delta, ctc, last_ctc, config);
	if (errcode < 0)
		return errcode;

	errcode = pt_time_ctc_fc(&fc, ctc_delta, config);
	if (errcode < 0)
		return errcode;

	/* We shift the numerator to improve rounding precision.
	 *
	 * Since we're only collecting the CYCs between two MTC, we shouldn't
	 * overflow.  Let's rather fail than overflow.
	 */
	if (fc & ~(~0ull >> pt_tcal_fcr_shr))
		return -pte_internal;

	fcr = (fc << pt_tcal_fcr_shr) / cyc;

	/* SKL168: Intel(R) PT CYC Packets Can be Dropped When Immediately
	 * Preceding PSB.
	 *
	 * We skip this MTC if we lost one or more MTC since the last PSB or
	 * if it looks like we lost a wrap CYC packet.
	 *
	 * This is not an error but we count that MTC as lost.
	 */
	if (check_skl168) {
		/* If we lost one or more MTC, the case is clear. */
		if ((1u << config->mtc_freq) < ctc_delta)
			return 0;

		/* The case is less clear for a lost wrap CYC packet since we
		 * do have some variation in the number of cycles.
		 *
		 * The CYC counter wraps on the affected processors every 4096
		 * cycles.  For low MTC frequencies (high values), losing one
		 * may not be noticeable.
		 *
		 * We restrict the workaround to higher MTC frequencies (lower
		 * values).
		 *
		 * We also need a previous FCR so we know how many cycles to
		 * expect.
		 */
		if ((config->mtc_freq < 10) && pt_tcal_have_fcr(tcal)) {
			uint64_t dfc;

			/* We choose a slightly lower adjustment to account
			 * for some normal variation.
			 */
			dfc = (tcal->fcr * (cyc + 0xf00)) >> pt_tcal_fcr_shr;

			/* If we didn't drop a wrap CYC, @dfc should be way
			 * bigger than @fc.  If it isn't, we assume that the
			 * erratum applied.
			 */
			if (dfc < fc)
				return 0;
		}
	}

	errcode = pt_tcal_set_fcr(tcal, fcr);
	if (errcode < 0)
		return errcode;

	/* We updated the FCR.  This recovers from previous MTC losses. */
	tcal->lost_mtc = 0;

	return 0;
}
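
/* For illustration, assuming mtc_freq == 3: a single MTC period spans
 * 1u << 3 == 8 CTC ticks, so a ctc_delta above 8 on the first MTC after PSB
 * means at least one MTC was lost and calibration skips it.  With a prior
 * fcr of 2.0 (0x200 at pt_tcal_fcr_shr == 8) and cyc == 1000, dfc computes
 * to 2 * (1000 + 0xf00) == 9680; a dropped 4096-cycle wrap CYC would push
 * fc to roughly 2 * 5096 == 10192 > dfc, triggering the workaround.
 */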

int pt_tcal_update_cyc(struct pt_time_cal *tcal,
		       const struct pt_packet_cyc *packet,
		       const struct pt_config *config)
{
	uint64_t cyc;

	(void) config;

	if (!tcal || !packet)
		return -pte_internal;

	cyc = packet->value;
	tcal->cyc_mtc += cyc;
	tcal->cyc_tsc += cyc;

	return 0;
}

int pt_tcal_update_ovf(struct pt_time_cal *tcal,
		       const struct pt_config *config)
{
	if (!tcal || !config)
		return -pte_internal;

	tcal->tsc = 0ull;
	tcal->cyc_tsc = 0ull;
	tcal->cyc_mtc = 0ull;
	tcal->ctc = 0;
	tcal->have_mtc = 0;

	return 0;
}