xref: /linux/arch/x86/events/intel/pt.c (revision 3a39d672e7f48b8d6b91a09afa4b55352773b4b5)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel(R) Processor Trace PMU driver for perf
 * Copyright (c) 2013-2014, Intel Corporation.
 *
 * Intel PT is specified in the Intel Architecture Instruction Set Extensions
 * Programming Reference:
 * http://software.intel.com/en-us/intel-isa-extensions
 */

#undef DEBUG

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/bits.h>
#include <linux/limits.h>
#include <linux/slab.h>
#include <linux/device.h>

#include <asm/perf_event.h>
#include <asm/insn.h>
#include <asm/io.h>
#include <asm/intel_pt.h>
#include <asm/cpu_device_id.h>

#include "../perf_event.h"
#include "pt.h"

static DEFINE_PER_CPU(struct pt, pt_ctx);

static struct pt_pmu pt_pmu;

/*
 * Capabilities of Intel PT hardware, such as number of address bits or
 * supported output schemes, are cached and exported to userspace as "caps"
 * attribute group of pt pmu device
 * (/sys/bus/event_source/devices/intel_pt/caps/) so that userspace can store
 * relevant bits together with intel_pt traces.
 *
 * These are necessary for both trace decoding (payloads_lip, contains address
 * width encoded in IP-related packets), and event configuration (bitmasks with
 * permitted values for certain bit fields).
 */
#define PT_CAP(_n, _l, _r, _m)						\
	[PT_CAP_ ## _n] = { .name = __stringify(_n), .leaf = _l,	\
			    .reg = _r, .mask = _m }

static struct pt_cap_desc {
	const char	*name;
	u32		leaf;
	u8		reg;
	u32		mask;
} pt_caps[] = {
	PT_CAP(max_subleaf,		0, CPUID_EAX, 0xffffffff),
	PT_CAP(cr3_filtering,		0, CPUID_EBX, BIT(0)),
	PT_CAP(psb_cyc,			0, CPUID_EBX, BIT(1)),
	PT_CAP(ip_filtering,		0, CPUID_EBX, BIT(2)),
	PT_CAP(mtc,			0, CPUID_EBX, BIT(3)),
	PT_CAP(ptwrite,			0, CPUID_EBX, BIT(4)),
	PT_CAP(power_event_trace,	0, CPUID_EBX, BIT(5)),
	PT_CAP(event_trace,		0, CPUID_EBX, BIT(7)),
	PT_CAP(tnt_disable,		0, CPUID_EBX, BIT(8)),
	PT_CAP(topa_output,		0, CPUID_ECX, BIT(0)),
	PT_CAP(topa_multiple_entries,	0, CPUID_ECX, BIT(1)),
	PT_CAP(single_range_output,	0, CPUID_ECX, BIT(2)),
	PT_CAP(output_subsys,		0, CPUID_ECX, BIT(3)),
	PT_CAP(payloads_lip,		0, CPUID_ECX, BIT(31)),
	PT_CAP(num_address_ranges,	1, CPUID_EAX, 0x7),
	PT_CAP(mtc_periods,		1, CPUID_EAX, 0xffff0000),
	PT_CAP(cycle_thresholds,	1, CPUID_EBX, 0xffff),
	PT_CAP(psb_periods,		1, CPUID_EBX, 0xffff0000),
};

u32 intel_pt_validate_cap(u32 *caps, enum pt_capabilities capability)
{
	struct pt_cap_desc *cd = &pt_caps[capability];
	u32 c = caps[cd->leaf * PT_CPUID_REGS_NUM + cd->reg];
	unsigned int shift = __ffs(cd->mask);

	return (c & cd->mask) >> shift;
}
EXPORT_SYMBOL_GPL(intel_pt_validate_cap);
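
/*
 * Worked example (illustrative): with the pt_caps[] table above,
 * PT_CAP_mtc_periods selects leaf 1, CPUID_EAX, mask 0xffff0000, so
 * intel_pt_validate_cap(caps, PT_CAP_mtc_periods) evaluates to
 * (caps[1 * PT_CPUID_REGS_NUM + CPUID_EAX] & 0xffff0000) >> 16, i.e.
 * the bitmask of supported MTC period encodings from CPUID 0x14.1.
 */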

u32 intel_pt_validate_hw_cap(enum pt_capabilities cap)
{
	return intel_pt_validate_cap(pt_pmu.caps, cap);
}
EXPORT_SYMBOL_GPL(intel_pt_validate_hw_cap);

static ssize_t pt_cap_show(struct device *cdev,
			   struct device_attribute *attr,
			   char *buf)
{
	struct dev_ext_attribute *ea =
		container_of(attr, struct dev_ext_attribute, attr);
	enum pt_capabilities cap = (long)ea->var;

	return snprintf(buf, PAGE_SIZE, "%x\n", intel_pt_validate_hw_cap(cap));
}

static struct attribute_group pt_cap_group __ro_after_init = {
	.name	= "caps",
};

PMU_FORMAT_ATTR(pt,		"config:0"	);
PMU_FORMAT_ATTR(cyc,		"config:1"	);
PMU_FORMAT_ATTR(pwr_evt,	"config:4"	);
PMU_FORMAT_ATTR(fup_on_ptw,	"config:5"	);
PMU_FORMAT_ATTR(mtc,		"config:9"	);
PMU_FORMAT_ATTR(tsc,		"config:10"	);
PMU_FORMAT_ATTR(noretcomp,	"config:11"	);
PMU_FORMAT_ATTR(ptw,		"config:12"	);
PMU_FORMAT_ATTR(branch,		"config:13"	);
PMU_FORMAT_ATTR(event,		"config:31"	);
PMU_FORMAT_ATTR(notnt,		"config:55"	);
PMU_FORMAT_ATTR(mtc_period,	"config:14-17"	);
PMU_FORMAT_ATTR(cyc_thresh,	"config:19-22"	);
PMU_FORMAT_ATTR(psb_period,	"config:24-27"	);
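
/*
 * Illustrative usage (not from this file): these format attributes are
 * what lets an invocation such as
 *
 *   perf record -e intel_pt/cyc=1,cyc_thresh=1,mtc=0/ -- workload
 *
 * resolve to the corresponding bits of perf_event_attr::config;
 * pt_event_valid() below then checks the requested values against the
 * hardware capabilities.
 */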

static struct attribute *pt_formats_attr[] = {
	&format_attr_pt.attr,
	&format_attr_cyc.attr,
	&format_attr_pwr_evt.attr,
	&format_attr_event.attr,
	&format_attr_notnt.attr,
	&format_attr_fup_on_ptw.attr,
	&format_attr_mtc.attr,
	&format_attr_tsc.attr,
	&format_attr_noretcomp.attr,
	&format_attr_ptw.attr,
	&format_attr_branch.attr,
	&format_attr_mtc_period.attr,
	&format_attr_cyc_thresh.attr,
	&format_attr_psb_period.attr,
	NULL,
};

static struct attribute_group pt_format_group = {
	.name	= "format",
	.attrs	= pt_formats_attr,
};

static ssize_t
pt_timing_attr_show(struct device *dev, struct device_attribute *attr,
		    char *page)
{
	struct perf_pmu_events_attr *pmu_attr =
		container_of(attr, struct perf_pmu_events_attr, attr);

	switch (pmu_attr->id) {
	case 0:
		return sprintf(page, "%lu\n", pt_pmu.max_nonturbo_ratio);
	case 1:
		return sprintf(page, "%u:%u\n",
			       pt_pmu.tsc_art_num,
			       pt_pmu.tsc_art_den);
	default:
		break;
	}

	return -EINVAL;
}

PMU_EVENT_ATTR(max_nonturbo_ratio, timing_attr_max_nonturbo_ratio, 0,
	       pt_timing_attr_show);
PMU_EVENT_ATTR(tsc_art_ratio, timing_attr_tsc_art_ratio, 1,
	       pt_timing_attr_show);

static struct attribute *pt_timing_attr[] = {
	&timing_attr_max_nonturbo_ratio.attr.attr,
	&timing_attr_tsc_art_ratio.attr.attr,
	NULL,
};

static struct attribute_group pt_timing_group = {
	.attrs	= pt_timing_attr,
};

static const struct attribute_group *pt_attr_groups[] = {
	&pt_cap_group,
	&pt_format_group,
	&pt_timing_group,
	NULL,
};

static int __init pt_pmu_hw_init(void)
{
	struct dev_ext_attribute *de_attrs;
	struct attribute **attrs;
	size_t size;
	u64 reg;
	int ret;
	long i;

	rdmsrl(MSR_PLATFORM_INFO, reg);
	pt_pmu.max_nonturbo_ratio = (reg & 0xff00) >> 8;

	/*
	 * If available, read in the TSC to core crystal clock ratio;
	 * otherwise, a zero numerator stands for "not enumerated",
	 * as per the SDM.
	 */
	if (boot_cpu_data.cpuid_level >= CPUID_TSC_LEAF) {
		u32 eax, ebx, ecx, edx;

		cpuid(CPUID_TSC_LEAF, &eax, &ebx, &ecx, &edx);

		pt_pmu.tsc_art_num = ebx;
		pt_pmu.tsc_art_den = eax;
	}
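
	/*
	 * Illustrative arithmetic (per the SDM's CPUID 0x15 definition):
	 * TSC frequency = ART (crystal) frequency * tsc_art_num /
	 * tsc_art_den; e.g. a 24 MHz crystal with a 116:2 ratio gives a
	 * 1392 MHz TSC. Decoders use this ratio to relate TSC and MTC
	 * timing packets.
	 */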

	/* model-specific quirks */
	switch (boot_cpu_data.x86_vfm) {
	case INTEL_BROADWELL:
	case INTEL_BROADWELL_D:
	case INTEL_BROADWELL_G:
	case INTEL_BROADWELL_X:
		/* not setting BRANCH_EN will #GP, erratum BDM106 */
		pt_pmu.branch_en_always_on = true;
		break;
	default:
		break;
	}

	if (boot_cpu_has(X86_FEATURE_VMX)) {
		/*
		 * Intel SDM, 36.5 "Tracing post-VMXON" says that
		 * "IA32_VMX_MISC[bit 14]" being 1 means PT can trace
		 * post-VMXON.
		 */
		rdmsrl(MSR_IA32_VMX_MISC, reg);
		if (reg & BIT(14))
			pt_pmu.vmx = true;
	}

	for (i = 0; i < PT_CPUID_LEAVES; i++) {
		cpuid_count(20, i,
			    &pt_pmu.caps[CPUID_EAX + i*PT_CPUID_REGS_NUM],
			    &pt_pmu.caps[CPUID_EBX + i*PT_CPUID_REGS_NUM],
			    &pt_pmu.caps[CPUID_ECX + i*PT_CPUID_REGS_NUM],
			    &pt_pmu.caps[CPUID_EDX + i*PT_CPUID_REGS_NUM]);
	}

	ret = -ENOMEM;
	size = sizeof(struct attribute *) * (ARRAY_SIZE(pt_caps)+1);
	attrs = kzalloc(size, GFP_KERNEL);
	if (!attrs)
		goto fail;

	size = sizeof(struct dev_ext_attribute) * (ARRAY_SIZE(pt_caps)+1);
	de_attrs = kzalloc(size, GFP_KERNEL);
	if (!de_attrs)
		goto fail;

	for (i = 0; i < ARRAY_SIZE(pt_caps); i++) {
		struct dev_ext_attribute *de_attr = de_attrs + i;

		de_attr->attr.attr.name = pt_caps[i].name;

		sysfs_attr_init(&de_attr->attr.attr);

		de_attr->attr.attr.mode		= S_IRUGO;
		de_attr->attr.show		= pt_cap_show;
		de_attr->var			= (void *)i;

		attrs[i] = &de_attr->attr.attr;
	}

	pt_cap_group.attrs = attrs;

	return 0;

fail:
	kfree(attrs);

	return ret;
}

#define RTIT_CTL_CYC_PSB (RTIT_CTL_CYCLEACC	| \
			  RTIT_CTL_CYC_THRESH	| \
			  RTIT_CTL_PSB_FREQ)

#define RTIT_CTL_MTC	(RTIT_CTL_MTC_EN	| \
			 RTIT_CTL_MTC_RANGE)

#define RTIT_CTL_PTW	(RTIT_CTL_PTW_EN	| \
			 RTIT_CTL_FUP_ON_PTW)

/*
 * Bit 0 (TraceEn) in the attr.config is meaningless as the
 * corresponding bit in the RTIT_CTL can only be controlled
 * by the driver; therefore, repurpose it to mean: pass
 * through the bit that was previously assumed to be always
 * on for PT, thereby allowing the user to *not* set it if
 * they so wish. See also pt_event_valid() and pt_config().
 */
#define RTIT_CTL_PASSTHROUGH RTIT_CTL_TRACEEN
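
/*
 * Illustrative example: with this repurposing, an invocation such as
 *
 *   perf record -e intel_pt/pt=1,branch=0,ptw=1/ ...
 *
 * (pt being config:0, i.e. RTIT_CTL_PASSTHROUGH) lets BRANCH_EN pass
 * through as 0 on hardware with the ptwrite capability, so only
 * PTWRITE-related packets are generated, without branch packets;
 * pt_event_valid() rejects this on parts where branch_en_always_on
 * is set.
 */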

#define PT_CONFIG_MASK (RTIT_CTL_TRACEEN	| \
			RTIT_CTL_TSC_EN		| \
			RTIT_CTL_DISRETC	| \
			RTIT_CTL_BRANCH_EN	| \
			RTIT_CTL_CYC_PSB	| \
			RTIT_CTL_MTC		| \
			RTIT_CTL_PWR_EVT_EN	| \
			RTIT_CTL_EVENT_EN	| \
			RTIT_CTL_NOTNT		| \
			RTIT_CTL_FUP_ON_PTW	| \
			RTIT_CTL_PTW_EN)

static bool pt_event_valid(struct perf_event *event)
{
	u64 config = event->attr.config;
	u64 allowed, requested;

	if ((config & PT_CONFIG_MASK) != config)
		return false;

	if (config & RTIT_CTL_CYC_PSB) {
		if (!intel_pt_validate_hw_cap(PT_CAP_psb_cyc))
			return false;

		allowed = intel_pt_validate_hw_cap(PT_CAP_psb_periods);
		requested = (config & RTIT_CTL_PSB_FREQ) >>
			RTIT_CTL_PSB_FREQ_OFFSET;
		if (requested && (!(allowed & BIT(requested))))
			return false;

		allowed = intel_pt_validate_hw_cap(PT_CAP_cycle_thresholds);
		requested = (config & RTIT_CTL_CYC_THRESH) >>
			RTIT_CTL_CYC_THRESH_OFFSET;
		if (requested && (!(allowed & BIT(requested))))
			return false;
	}

	if (config & RTIT_CTL_MTC) {
		/*
		 * In the unlikely case that CPUID lists valid mtc periods,
		 * but not the mtc capability, drop out here.
		 *
		 * Spec says that setting mtc period bits while mtc bit in
		 * CPUID is 0 will #GP, so better safe than sorry.
		 */
		if (!intel_pt_validate_hw_cap(PT_CAP_mtc))
			return false;

		allowed = intel_pt_validate_hw_cap(PT_CAP_mtc_periods);
		if (!allowed)
			return false;

		requested = (config & RTIT_CTL_MTC_RANGE) >>
			RTIT_CTL_MTC_RANGE_OFFSET;

		if (!(allowed & BIT(requested)))
			return false;
	}

	if (config & RTIT_CTL_PWR_EVT_EN &&
	    !intel_pt_validate_hw_cap(PT_CAP_power_event_trace))
		return false;

	if (config & RTIT_CTL_EVENT_EN &&
	    !intel_pt_validate_hw_cap(PT_CAP_event_trace))
		return false;

	if (config & RTIT_CTL_NOTNT &&
	    !intel_pt_validate_hw_cap(PT_CAP_tnt_disable))
		return false;

	if (config & RTIT_CTL_PTW) {
		if (!intel_pt_validate_hw_cap(PT_CAP_ptwrite))
			return false;

		/* FUPonPTW without PTW doesn't make sense */
		if ((config & RTIT_CTL_FUP_ON_PTW) &&
		    !(config & RTIT_CTL_PTW_EN))
			return false;
	}

	/*
	 * Setting bit 0 (TraceEn in RTIT_CTL MSR) in the attr.config
	 * clears the assumption that BranchEn must always be enabled,
	 * as was the case with the first implementation of PT.
	 * If this bit is not set, the legacy behavior is preserved
	 * for compatibility with the older userspace.
	 *
	 * Re-using bit 0 for this purpose is fine because it is never
	 * directly set by the user; previous attempts at setting it in
	 * the attr.config resulted in -EINVAL.
	 */
	if (config & RTIT_CTL_PASSTHROUGH) {
		/*
		 * Disallow not setting BRANCH_EN where BRANCH_EN is
		 * always required.
		 */
		if (pt_pmu.branch_en_always_on &&
		    !(config & RTIT_CTL_BRANCH_EN))
			return false;
	} else {
		/*
		 * Disallow BRANCH_EN without the PASSTHROUGH.
		 */
		if (config & RTIT_CTL_BRANCH_EN)
			return false;
	}

	return true;
}

/*
 * PT configuration helpers
 * These all are cpu affine and operate on a local PT
 */

static void pt_config_start(struct perf_event *event)
{
	struct pt *pt = this_cpu_ptr(&pt_ctx);
	u64 ctl = event->hw.aux_config;

	ctl |= RTIT_CTL_TRACEEN;
	if (READ_ONCE(pt->vmx_on))
		perf_aux_output_flag(&pt->handle, PERF_AUX_FLAG_PARTIAL);
	else
		wrmsrl(MSR_IA32_RTIT_CTL, ctl);

	WRITE_ONCE(event->hw.aux_config, ctl);
}

/* Address ranges and their corresponding msr configuration registers */
static const struct pt_address_range {
	unsigned long	msr_a;
	unsigned long	msr_b;
	unsigned int	reg_off;
} pt_address_ranges[] = {
	{
		.msr_a	 = MSR_IA32_RTIT_ADDR0_A,
		.msr_b	 = MSR_IA32_RTIT_ADDR0_B,
		.reg_off = RTIT_CTL_ADDR0_OFFSET,
	},
	{
		.msr_a	 = MSR_IA32_RTIT_ADDR1_A,
		.msr_b	 = MSR_IA32_RTIT_ADDR1_B,
		.reg_off = RTIT_CTL_ADDR1_OFFSET,
	},
	{
		.msr_a	 = MSR_IA32_RTIT_ADDR2_A,
		.msr_b	 = MSR_IA32_RTIT_ADDR2_B,
		.reg_off = RTIT_CTL_ADDR2_OFFSET,
	},
	{
		.msr_a	 = MSR_IA32_RTIT_ADDR3_A,
		.msr_b	 = MSR_IA32_RTIT_ADDR3_B,
		.reg_off = RTIT_CTL_ADDR3_OFFSET,
	}
};
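
/*
 * Note (illustrative): how many of these four ranges are usable on a
 * given part is advertised by PT_CAP_num_address_ranges (CPUID 0x14,
 * subleaf 1, EAX[2:0], see pt_caps[] above); the driver caps the
 * number of address filters accordingly.
 */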

static u64 pt_config_filters(struct perf_event *event)
{
	struct pt_filters *filters = event->hw.addr_filters;
	struct pt *pt = this_cpu_ptr(&pt_ctx);
	unsigned int range = 0;
	u64 rtit_ctl = 0;

	if (!filters)
		return 0;

	perf_event_addr_filters_sync(event);

	for (range = 0; range < filters->nr_filters; range++) {
		struct pt_filter *filter = &filters->filter[range];

		/*
		 * Note, if the range has zero start/end addresses due
		 * to its dynamic object not being loaded yet, we just
		 * go ahead and program a zeroed range, which will simply
		 * produce no data. Note^2: if executable code at 0x0
		 * is a concern, we can set up an "invalid" configuration
		 * such as msr_b < msr_a.
		 */

		/* avoid redundant msr writes */
		if (pt->filters.filter[range].msr_a != filter->msr_a) {
			wrmsrl(pt_address_ranges[range].msr_a, filter->msr_a);
			pt->filters.filter[range].msr_a = filter->msr_a;
		}

		if (pt->filters.filter[range].msr_b != filter->msr_b) {
			wrmsrl(pt_address_ranges[range].msr_b, filter->msr_b);
			pt->filters.filter[range].msr_b = filter->msr_b;
		}

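		/*
		 * Shift the per-range config value into its ADDRn_CFG
		 * slot in RTIT_CTL. Per the SDM's ADDRn_CFG encoding,
		 * 0 disables the range, 1 uses it for filtering
		 * (FilterEn) and 2 uses it for TraceStop.
		 */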
		rtit_ctl |= (u64)filter->config << pt_address_ranges[range].reg_off;
	}

	return rtit_ctl;
}

static void pt_config(struct perf_event *event)
{
	struct pt *pt = this_cpu_ptr(&pt_ctx);
	struct pt_buffer *buf = perf_get_aux(&pt->handle);
	u64 reg;

	/* First round: clear STATUS, in particular the PSB byte counter. */
	if (!event->hw.aux_config) {
		perf_event_itrace_started(event);
		wrmsrl(MSR_IA32_RTIT_STATUS, 0);
	}

	reg = pt_config_filters(event);
	reg |= RTIT_CTL_TRACEEN;
	if (!buf->single)
		reg |= RTIT_CTL_TOPA;

	/*
	 * Previously, we had BRANCH_EN on by default, but now that PT has
	 * grown features outside of branch tracing, it is useful to allow
	 * the user to disable it. Setting bit 0 in the event's attr.config
	 * allows BRANCH_EN to pass through instead of being always on. See
	 * also the comment in pt_event_valid().
	 */
	if (event->attr.config & BIT(0)) {
		reg |= event->attr.config & RTIT_CTL_BRANCH_EN;
	} else {
		reg |= RTIT_CTL_BRANCH_EN;
	}

	if (!event->attr.exclude_kernel)
		reg |= RTIT_CTL_OS;
	if (!event->attr.exclude_user)
		reg |= RTIT_CTL_USR;

	reg |= (event->attr.config & PT_CONFIG_MASK);

	event->hw.aux_config = reg;
	pt_config_start(event);
}

static void pt_config_stop(struct perf_event *event)
{
	struct pt *pt = this_cpu_ptr(&pt_ctx);
	u64 ctl = READ_ONCE(event->hw.aux_config);

	/* may be already stopped by a PMI */
	if (!(ctl & RTIT_CTL_TRACEEN))
		return;

	ctl &= ~RTIT_CTL_TRACEEN;
	if (!READ_ONCE(pt->vmx_on))
		wrmsrl(MSR_IA32_RTIT_CTL, ctl);

	WRITE_ONCE(event->hw.aux_config, ctl);

	/*
	 * A wrmsr that disables trace generation serializes other PT
	 * registers and causes all data packets to be written to memory,
	 * but a fence is required for the data to become globally visible.
	 *
	 * The below WMB, separating data store and aux_head store matches
	 * the consumer's RMB that separates aux_head load and data load.
	 */
	wmb();
}

/**
 * struct topa - ToPA metadata
 * @list:	linkage to struct pt_buffer's list of tables
 * @offset:	offset of the first entry in this table in the buffer
 * @size:	total size of all entries in this table
 * @last:	index of the last initialized entry in this table
 * @z_count:	how many times the first entry repeats
 */
struct topa {
	struct list_head	list;
	u64			offset;
	size_t			size;
	int			last;
	unsigned int		z_count;
};

/*
 * Keep ToPA table-related metadata on the same page as the actual table,
 * taking up a few words from the top
 */

#define TENTS_PER_PAGE	\
	((PAGE_SIZE - sizeof(struct topa)) / sizeof(struct topa_entry))
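
/*
 * Illustrative arithmetic: assuming 4 KiB pages, 8-byte ToPA entries
 * and a 40-byte struct topa, TENTS_PER_PAGE works out to
 * (4096 - 40) / 8 = 507 entries per table; the exact value depends on
 * sizeof(struct topa).
 */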

/**
 * struct topa_page - page-sized ToPA table with metadata at the top
 * @table:	actual ToPA table entries, as understood by PT hardware
 * @topa:	metadata
 */
struct topa_page {
	struct topa_entry	table[TENTS_PER_PAGE];
	struct topa		topa;
};

static inline struct topa_page *topa_to_page(struct topa *topa)
{
	return container_of(topa, struct topa_page, topa);
}

static inline struct topa_page *topa_entry_to_page(struct topa_entry *te)
{
	return (struct topa_page *)((unsigned long)te & PAGE_MASK);
}

static inline phys_addr_t topa_pfn(struct topa *topa)
{
	return PFN_DOWN(virt_to_phys(topa_to_page(topa)));
}

/* make -1 stand for the last table entry */
#define TOPA_ENTRY(t, i)				\
	((i) == -1					\
		? &topa_to_page(t)->table[(t)->last]	\
		: &topa_to_page(t)->table[(i)])
#define TOPA_ENTRY_SIZE(t, i) (sizes(TOPA_ENTRY((t), (i))->size))
#define TOPA_ENTRY_PAGES(t, i) (1 << TOPA_ENTRY((t), (i))->size)
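
/*
 * Illustrative reading of the macros above: a ToPA entry's size field
 * is a page-order exponent, so an entry with size == 2 covers
 * TOPA_ENTRY_PAGES == 4 pages, i.e. TOPA_ENTRY_SIZE == 16 KiB
 * (assuming sizes(n) expands to PAGE_SIZE << (n), as defined for this
 * driver in pt.h).
 */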

static void pt_config_buffer(struct pt_buffer *buf)
{
	struct pt *pt = this_cpu_ptr(&pt_ctx);
	u64 reg, mask;
	void *base;

	if (buf->single) {
		base = buf->data_pages[0];
		mask = (buf->nr_pages * PAGE_SIZE - 1) >> 7;
	} else {
		base = topa_to_page(buf->cur)->table;
		mask = (u64)buf->cur_idx;
	}

	reg = virt_to_phys(base);
	if (pt->output_base != reg) {
		pt->output_base = reg;
		wrmsrl(MSR_IA32_RTIT_OUTPUT_BASE, reg);
	}

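	/*
	 * RTIT_OUTPUT_MASK_PTRS layout, per the SDM: bits 6:0 are forced
	 * to 0x7f, bits 31:7 hold the single-range size mask or the
	 * current ToPA table offset, and bits 63:32 hold the offset
	 * within the current output region.
	 */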
	reg = 0x7f | (mask << 7) | ((u64)buf->output_off << 32);
	if (pt->output_mask != reg) {
		pt->output_mask = reg;
		wrmsrl(MSR_IA32_RTIT_OUTPUT_MASK, reg);
	}
}

/**
 * topa_alloc() - allocate page-sized ToPA table
 * @cpu:	CPU on which to allocate.
 * @gfp:	Allocation flags.
 *
 * Return:	On success, return the pointer to ToPA table page.
 */
static struct topa *topa_alloc(int cpu, gfp_t gfp)
{
	int node = cpu_to_node(cpu);
	struct topa_page *tp;
	struct page *p;

	p = alloc_pages_node(node, gfp | __GFP_ZERO, 0);
	if (!p)
		return NULL;

	tp = page_address(p);
	tp->topa.last = 0;

	/*
	 * In case of single-entry ToPA, always put the self-referencing END
	 * link as the 2nd entry in the table
	 */
	if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries)) {
		TOPA_ENTRY(&tp->topa, 1)->base = page_to_phys(p) >> TOPA_SHIFT;
		TOPA_ENTRY(&tp->topa, 1)->end = 1;
	}

	return &tp->topa;
}

/**
 * topa_free() - free a page-sized ToPA table
 * @topa:	Table to deallocate.
 */
static void topa_free(struct topa *topa)
{
	free_page((unsigned long)topa);
}

/**
 * topa_insert_table() - insert a ToPA table into a buffer
 * @buf:	 PT buffer that's being extended.
 * @topa:	 New topa table to be inserted.
 *
 * If it's the first table in this buffer, set up buffer's pointers
 * accordingly; otherwise, add an END=1 link entry pointing to @topa
 * in the current "last" table and adjust the last table pointer to @topa.
 */
static void topa_insert_table(struct pt_buffer *buf, struct topa *topa)
{
	struct topa *last = buf->last;

	list_add_tail(&topa->list, &buf->tables);

	if (!buf->first) {
		buf->first = buf->last = buf->cur = topa;
		return;
	}

	topa->offset = last->offset + last->size;
	buf->last = topa;

	if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries))
		return;

	BUG_ON(last->last != TENTS_PER_PAGE - 1);

	TOPA_ENTRY(last, -1)->base = topa_pfn(topa);
	TOPA_ENTRY(last, -1)->end = 1;
}

/**
 * topa_table_full() - check if a ToPA table is filled up
 * @topa:	ToPA table.
 */
static bool topa_table_full(struct topa *topa)
{
	/* single-entry ToPA is a special case */
	if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries))
		return !!topa->last;

	return topa->last == TENTS_PER_PAGE - 1;
}

/**
 * topa_insert_pages() - create a list of ToPA tables
 * @buf:	PT buffer being initialized.
 * @cpu:	CPU on which to allocate.
 * @gfp:	Allocation flags.
 *
 * This initializes a list of ToPA tables with entries from
 * the data_pages provided by rb_alloc_aux().
 *
 * Return:	0 on success or error code.
 */
static int topa_insert_pages(struct pt_buffer *buf, int cpu, gfp_t gfp)
{
	struct topa *topa = buf->last;
	int order = 0;
	struct page *p;

	p = virt_to_page(buf->data_pages[buf->nr_pages]);
	if (PagePrivate(p))
		order = page_private(p);

	if (topa_table_full(topa)) {
		topa = topa_alloc(cpu, gfp);
		if (!topa)
			return -ENOMEM;

		topa_insert_table(buf, topa);
	}

	if (topa->z_count == topa->last - 1) {
		if (order == TOPA_ENTRY(topa, topa->last - 1)->size)
			topa->z_count++;
	}

	TOPA_ENTRY(topa, -1)->base = page_to_phys(p) >> TOPA_SHIFT;
	TOPA_ENTRY(topa, -1)->size = order;
	if (!buf->snapshot &&
	    !intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries)) {
		TOPA_ENTRY(topa, -1)->intr = 1;
		TOPA_ENTRY(topa, -1)->stop = 1;
	}

	topa->last++;
	topa->size += sizes(order);

	buf->nr_pages += 1ul << order;

	return 0;
}

/**
 * pt_topa_dump() - print ToPA tables and their entries
 * @buf:	PT buffer.
 */
static void pt_topa_dump(struct pt_buffer *buf)
{
	struct topa *topa;

	list_for_each_entry(topa, &buf->tables, list) {
		struct topa_page *tp = topa_to_page(topa);
		int i;

		pr_debug("# table @%p, off %llx size %zx\n", tp->table,
			 topa->offset, topa->size);
		for (i = 0; i < TENTS_PER_PAGE; i++) {
			pr_debug("# entry @%p (%lx sz %u %c%c%c) raw=%16llx\n",
				 &tp->table[i],
				 (unsigned long)tp->table[i].base << TOPA_SHIFT,
				 sizes(tp->table[i].size),
				 tp->table[i].end ?  'E' : ' ',
				 tp->table[i].intr ? 'I' : ' ',
				 tp->table[i].stop ? 'S' : ' ',
				 *(u64 *)&tp->table[i]);
			if ((intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries) &&
			     tp->table[i].stop) ||
			    tp->table[i].end)
				break;
			if (!i && topa->z_count)
				i += topa->z_count;
		}
	}
}

/**
 * pt_buffer_advance() - advance to the next output region
 * @buf:	PT buffer.
 *
 * Advance the current pointers in the buffer to the next ToPA entry.
 */
static void pt_buffer_advance(struct pt_buffer *buf)
{
	buf->output_off = 0;
	buf->cur_idx++;

	if (buf->cur_idx == buf->cur->last) {
		if (buf->cur == buf->last)
			buf->cur = buf->first;
		else
			buf->cur = list_entry(buf->cur->list.next, struct topa,
					      list);
		buf->cur_idx = 0;
	}
}

/**
 * pt_update_head() - calculate current offsets and sizes
 * @pt:		Per-cpu pt context.
 *
 * Update buffer's current write pointer position and data size.
 */
static void pt_update_head(struct pt *pt)
{
	struct pt_buffer *buf = perf_get_aux(&pt->handle);
	u64 topa_idx, base, old;

	if (buf->single) {
		local_set(&buf->data_size, buf->output_off);
		return;
	}

	/* offset of the first region in this table from the beginning of buf */
	base = buf->cur->offset + buf->output_off;

	/* offset of the current output region within this table */
	for (topa_idx = 0; topa_idx < buf->cur_idx; topa_idx++)
		base += TOPA_ENTRY_SIZE(buf->cur, topa_idx);

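	/*
	 * Worked example (illustrative): with a 16-page (0x10000-byte)
	 * AUX buffer, if the old head was 0xf000 and the new linear
	 * position computes to 0x800, then base < old, so base becomes
	 * 0x10800 and data_size below grows by 0x1800 bytes.
	 */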
	if (buf->snapshot) {
		local_set(&buf->data_size, base);
	} else {
		old = (local64_xchg(&buf->head, base) &
		       ((buf->nr_pages << PAGE_SHIFT) - 1));
		if (base < old)
			base += buf->nr_pages << PAGE_SHIFT;

		local_add(base - old, &buf->data_size);
	}
}

/**
 * pt_buffer_region() - obtain current output region's address
 * @buf:	PT buffer.
 */
static void *pt_buffer_region(struct pt_buffer *buf)
{
	return phys_to_virt((phys_addr_t)TOPA_ENTRY(buf->cur, buf->cur_idx)->base << TOPA_SHIFT);
}

/**
 * pt_buffer_region_size() - obtain current output region's size
 * @buf:	PT buffer.
 */
static size_t pt_buffer_region_size(struct pt_buffer *buf)
{
	return TOPA_ENTRY_SIZE(buf->cur, buf->cur_idx);
}

/**
 * pt_handle_status() - take care of possible status conditions
 * @pt:		Per-cpu pt context.
 */
static void pt_handle_status(struct pt *pt)
{
	struct pt_buffer *buf = perf_get_aux(&pt->handle);
	int advance = 0;
	u64 status;

	rdmsrl(MSR_IA32_RTIT_STATUS, status);

	if (status & RTIT_STATUS_ERROR) {
		pr_err_ratelimited("ToPA ERROR encountered, trying to recover\n");
		pt_topa_dump(buf);
		status &= ~RTIT_STATUS_ERROR;
	}

	if (status & RTIT_STATUS_STOPPED) {
		status &= ~RTIT_STATUS_STOPPED;

		/*
		 * On systems that only do single-entry ToPA, hitting STOP
		 * means we are already losing data; need to let the decoder
		 * know.
		 */
		if (!buf->single &&
		    (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries) ||
		     buf->output_off == pt_buffer_region_size(buf))) {
			perf_aux_output_flag(&pt->handle,
					     PERF_AUX_FLAG_TRUNCATED);
			advance++;
		}
	}

	/*
	 * Also, on single-entry ToPA implementations, the interrupt will
	 * come before the output reaches its output region's boundary.
	 */
	if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries) &&
	    !buf->snapshot &&
	    pt_buffer_region_size(buf) - buf->output_off <= TOPA_PMI_MARGIN) {
		void *head = pt_buffer_region(buf);

		/* everything within this margin needs to be zeroed out */
		memset(head + buf->output_off, 0,
		       pt_buffer_region_size(buf) -
		       buf->output_off);
		advance++;
	}

	if (advance)
		pt_buffer_advance(buf);

	wrmsrl(MSR_IA32_RTIT_STATUS, status);
}

/**
 * pt_read_offset() - translate registers into buffer pointers
 * @buf:	PT buffer.
 *
 * Set buffer's output pointers from MSR values.
 */
static void pt_read_offset(struct pt_buffer *buf)
{
	struct pt *pt = this_cpu_ptr(&pt_ctx);
	struct topa_page *tp;

	if (!buf->single) {
		rdmsrl(MSR_IA32_RTIT_OUTPUT_BASE, pt->output_base);
		tp = phys_to_virt(pt->output_base);
		buf->cur = &tp->topa;
	}

	rdmsrl(MSR_IA32_RTIT_OUTPUT_MASK, pt->output_mask);
	/* offset within current output region */
	buf->output_off = pt->output_mask >> 32;
	/* index of current output region within this table */
	if (!buf->single)
		buf->cur_idx = (pt->output_mask & 0xffffff80) >> 7;
}

static struct topa_entry *
pt_topa_entry_for_page(struct pt_buffer *buf, unsigned int pg)
{
	struct topa_page *tp;
	struct topa *topa;
	unsigned int idx, cur_pg = 0, z_pg = 0, start_idx = 0;

	/*
	 * Indicates a bug in the caller.
	 */
	if (WARN_ON_ONCE(pg >= buf->nr_pages))
		return NULL;

	/*
	 * First, find the ToPA table where @pg fits. With high
	 * order allocations, there shouldn't be many of these.
	 */
	list_for_each_entry(topa, &buf->tables, list) {
		if (topa->offset + topa->size > (unsigned long)pg << PAGE_SHIFT)
			goto found;
	}

	/*
	 * Hitting this means we have a problem in the ToPA
	 * allocation code.
	 */
	WARN_ON_ONCE(1);

	return NULL;

found:
	/*
	 * Indicates a problem in the ToPA allocation code.
	 */
	if (WARN_ON_ONCE(topa->last == -1))
		return NULL;

	tp = topa_to_page(topa);
	cur_pg = PFN_DOWN(topa->offset);
	if (topa->z_count) {
		z_pg = TOPA_ENTRY_PAGES(topa, 0) * (topa->z_count + 1);
		start_idx = topa->z_count + 1;
	}

	/*
	 * Multiple entries at the beginning of the table have the same size,
	 * ideally all of them; if @pg falls there, the search is done.
	 */
	if (pg >= cur_pg && pg < cur_pg + z_pg) {
		idx = (pg - cur_pg) / TOPA_ENTRY_PAGES(topa, 0);
		return &tp->table[idx];
	}

	/*
	 * Otherwise, slow path: iterate through the remaining entries.
	 */
	for (idx = start_idx, cur_pg += z_pg; idx < topa->last; idx++) {
		if (cur_pg + TOPA_ENTRY_PAGES(topa, idx) > pg)
			return &tp->table[idx];

		cur_pg += TOPA_ENTRY_PAGES(topa, idx);
	}

103839152ee5SAlexander Shishkin 	/*
103939152ee5SAlexander Shishkin 	 * Means we couldn't find a ToPA entry in the table that does match.
104039152ee5SAlexander Shishkin 	 */
104139152ee5SAlexander Shishkin 	WARN_ON_ONCE(1);
104239152ee5SAlexander Shishkin 
104339152ee5SAlexander Shishkin 	return NULL;
104439152ee5SAlexander Shishkin }
104539152ee5SAlexander Shishkin 
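
/*
 * Return the ToPA entry preceding @te, stepping back to the previous
 * table when @te is the first entry of its table and wrapping from the
 * first table of the buffer to the last.
 */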
static struct topa_entry *
pt_topa_prev_entry(struct pt_buffer *buf, struct topa_entry *te)
{
        unsigned long table = (unsigned long)te & ~(PAGE_SIZE - 1);
        struct topa_page *tp;
        struct topa *topa;

        tp = (struct topa_page *)table;
        if (tp->table != te)
                return --te;

        topa = &tp->topa;
        if (topa == buf->first)
                topa = buf->last;
        else
                topa = list_prev_entry(topa, list);

        tp = topa_to_page(topa);

        return &tp->table[topa->last - 1];
}

/**
 * pt_buffer_reset_markers() - place interrupt and stop bits in the buffer
 * @buf:	PT buffer.
 * @handle:	Current output handle.
 *
 * Place INT and STOP marks to prevent overwriting old data that the consumer
 * hasn't yet collected, and to wake up the consumer after a certain fraction
 * of the buffer has filled up. Only needed and sensible for non-snapshot
 * counters.
 *
 * This obviously relies on buf::head to figure out buffer markers, so it has
 * to be called after pt_buffer_reset_offsets() and before the hardware tracing
 * is enabled.
 */
static int pt_buffer_reset_markers(struct pt_buffer *buf,
                                   struct perf_output_handle *handle)
{
        unsigned long head = local64_read(&buf->head);
        unsigned long idx, npages, wakeup;

        if (buf->single)
                return 0;

        /* can't stop in the middle of an output region */
        if (buf->output_off + handle->size + 1 < pt_buffer_region_size(buf)) {
                perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
                return -EINVAL;
        }

        /* single entry ToPA is handled by marking all regions STOP=1 INT=1 */
        if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries))
                return 0;

        /* clear STOP and INT from current entry */
        if (buf->stop_te) {
                buf->stop_te->stop = 0;
                buf->stop_te->intr = 0;
        }

        if (buf->intr_te)
                buf->intr_te->intr = 0;

        /* how many pages till the STOP marker */
        npages = handle->size >> PAGE_SHIFT;

        /* if it's on a page boundary, fill up one more page */
        if (!offset_in_page(head + handle->size + 1))
                npages++;
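
        /*
         * Page index of the entry that gets the STOP bit; AUX buffers
         * span a power-of-2 number of pages, so the mask below wraps
         * the index around the buffer.
         */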
        idx = (head >> PAGE_SHIFT) + npages;
        idx &= buf->nr_pages - 1;

        if (idx != buf->stop_pos) {
                buf->stop_pos = idx;
                buf->stop_te = pt_topa_entry_for_page(buf, idx);
                buf->stop_te = pt_topa_prev_entry(buf, buf->stop_te);
        }

        wakeup = handle->wakeup >> PAGE_SHIFT;

        /* in the worst case, wake up the consumer one page before hard stop */
        idx = (head >> PAGE_SHIFT) + npages - 1;
        if (idx > wakeup)
                idx = wakeup;

        idx &= buf->nr_pages - 1;
        if (idx != buf->intr_pos) {
                buf->intr_pos = idx;
                buf->intr_te = pt_topa_entry_for_page(buf, idx);
                buf->intr_te = pt_topa_prev_entry(buf, buf->intr_te);
        }

        buf->stop_te->stop = 1;
        buf->stop_te->intr = 1;
        buf->intr_te->intr = 1;

        return 0;
}

/**
 * pt_buffer_reset_offsets() - adjust buffer's write pointers from aux_head
 * @buf:	PT buffer.
 * @head:	Write pointer (aux_head) from AUX buffer.
 *
 * Find the ToPA table and entry corresponding to the given @head and set the
 * buffer's "current" pointers accordingly. This is done after we have obtained
 * the current aux_head position from a successful call to
 * perf_aux_output_begin() to make sure the hardware is writing to the right
 * place.
 *
 * This function modifies buf::{cur,cur_idx,output_off}, which will be
 * programmed into the PT MSRs when the tracing is enabled, and buf::head and
 * buf::data_size, which are used to determine the INT and STOP markers'
 * locations by a subsequent call to pt_buffer_reset_markers().
 */
static void pt_buffer_reset_offsets(struct pt_buffer *buf, unsigned long head)
{
        struct topa_page *cur_tp;
        struct topa_entry *te;
        int pg;

        if (buf->snapshot)
                head &= (buf->nr_pages << PAGE_SHIFT) - 1;

        if (!buf->single) {
                pg = (head >> PAGE_SHIFT) & (buf->nr_pages - 1);
                te = pt_topa_entry_for_page(buf, pg);

                cur_tp = topa_entry_to_page(te);
                buf->cur = &cur_tp->topa;
                buf->cur_idx = te - TOPA_ENTRY(buf->cur, 0);
                buf->output_off = head & (pt_buffer_region_size(buf) - 1);
        } else {
                buf->output_off = head;
        }

        local64_set(&buf->head, head);
        local_set(&buf->data_size, 0);
}

/**
 * pt_buffer_fini_topa() - deallocate ToPA structure of a buffer
 * @buf:	PT buffer.
 */
static void pt_buffer_fini_topa(struct pt_buffer *buf)
{
        struct topa *topa, *iter;

        if (buf->single)
                return;

        list_for_each_entry_safe(topa, iter, &buf->tables, list) {
                /*
                 * right now, this is in free_aux() path only, so
                 * no need to unlink this table from the list
                 */
                topa_free(topa);
        }
}

/**
 * pt_buffer_init_topa() - initialize ToPA table for pt buffer
 * @buf:	PT buffer.
 * @cpu:	CPU on which to allocate.
 * @nr_pages:	No. of pages to allocate.
 * @gfp:	Allocation flags.
 *
 * Return:	0 on success or error code.
 */
static int pt_buffer_init_topa(struct pt_buffer *buf, int cpu,
                               unsigned long nr_pages, gfp_t gfp)
{
        struct topa *topa;
        int err;

        topa = topa_alloc(cpu, gfp);
        if (!topa)
                return -ENOMEM;

        topa_insert_table(buf, topa);

        while (buf->nr_pages < nr_pages) {
                err = topa_insert_pages(buf, cpu, gfp);
                if (err) {
                        pt_buffer_fini_topa(buf);
                        return -ENOMEM;
                }
        }

        /* link last table to the first one, unless we're double buffering */
        if (intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries)) {
                TOPA_ENTRY(buf->last, -1)->base = topa_pfn(buf->first);
                TOPA_ENTRY(buf->last, -1)->end = 1;
        }

        pt_topa_dump(buf);
        return 0;
}

static int pt_buffer_try_single(struct pt_buffer *buf, int nr_pages)
{
        struct page *p = virt_to_page(buf->data_pages[0]);
        int ret = -ENOTSUPP, order = 0;

        /*
         * We can use single range output mode
         * + in snapshot mode, where we don't need interrupts;
         * + if the hardware supports it;
         * + if the entire buffer is one contiguous allocation.
         */
        if (!buf->snapshot)
                goto out;

        if (!intel_pt_validate_hw_cap(PT_CAP_single_range_output))
                goto out;
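
        /*
         * For high-order AUX allocations, the perf core stashes the
         * allocation order in page_private() of the first page; use it
         * to verify the buffer is a single contiguous chunk.
         */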
        if (PagePrivate(p))
                order = page_private(p);

        if (1 << order != nr_pages)
                goto out;

        /*
         * Some processors cannot always support single range for more than
         * 4KB - see errata TGL052, ADL037 and RPL017. Future processors might
         * also be affected, so for now rather than trying to keep track of
         * which ones, just disable it for all.
         */
        if (nr_pages > 1)
                goto out;

        buf->single = true;
        buf->nr_pages = nr_pages;
        ret = 0;
out:
        return ret;
}

/**
 * pt_buffer_setup_aux() - set up topa tables for a PT buffer
 * @event:	Performance event
 * @pages:	Array of pointers to buffer pages passed from perf core.
 * @nr_pages:	Number of pages in the buffer.
 * @snapshot:	If this is a snapshot/overwrite counter.
 *
 * This is a pmu::setup_aux callback that sets up ToPA tables and all the
 * bookkeeping for an AUX buffer.
 *
 * Return:	Our private PT buffer structure.
 */
static void *
pt_buffer_setup_aux(struct perf_event *event, void **pages,
                    int nr_pages, bool snapshot)
{
        struct pt_buffer *buf;
        int node, ret, cpu = event->cpu;

        if (!nr_pages)
                return NULL;

        /*
         * Only support AUX sampling in snapshot mode, where we don't
         * generate NMIs.
         */
        if (event->attr.aux_sample_size && !snapshot)
                return NULL;

        if (cpu == -1)
                cpu = raw_smp_processor_id();
        node = cpu_to_node(cpu);

        buf = kzalloc_node(sizeof(struct pt_buffer), GFP_KERNEL, node);
        if (!buf)
                return NULL;

        buf->snapshot = snapshot;
        buf->data_pages = pages;
        buf->stop_pos = -1;
        buf->intr_pos = -1;

        INIT_LIST_HEAD(&buf->tables);
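
        /* Try single range output first; fall back to ToPA if it doesn't apply. */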
        ret = pt_buffer_try_single(buf, nr_pages);
        if (!ret)
                return buf;

        ret = pt_buffer_init_topa(buf, cpu, nr_pages, GFP_KERNEL);
        if (ret) {
                kfree(buf);
                return NULL;
        }

        return buf;
}

/**
 * pt_buffer_free_aux() - perf AUX deallocation path callback
 * @data:	PT buffer.
 */
static void pt_buffer_free_aux(void *data)
{
        struct pt_buffer *buf = data;

        pt_buffer_fini_topa(buf);
        kfree(buf);
}

static int pt_addr_filters_init(struct perf_event *event)
{
        struct pt_filters *filters;
        int node = event->cpu == -1 ? -1 : cpu_to_node(event->cpu);

        if (!intel_pt_validate_hw_cap(PT_CAP_num_address_ranges))
                return 0;

        filters = kzalloc_node(sizeof(struct pt_filters), GFP_KERNEL, node);
        if (!filters)
                return -ENOMEM;
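
        /* Inherited events start out with a copy of the parent's filters. */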
        if (event->parent)
                memcpy(filters, event->parent->hw.addr_filters,
                       sizeof(*filters));

        event->hw.addr_filters = filters;

        return 0;
}

static void pt_addr_filters_fini(struct perf_event *event)
{
        kfree(event->hw.addr_filters);
        event->hw.addr_filters = NULL;
}

#ifdef CONFIG_X86_64
/* Clamp to a canonical address greater-than-or-equal-to the address given */
static u64 clamp_to_ge_canonical_addr(u64 vaddr, u8 vaddr_bits)
{
        return __is_canonical_address(vaddr, vaddr_bits) ?
               vaddr :
               -BIT_ULL(vaddr_bits - 1);
}

/* Clamp to a canonical address less-than-or-equal-to the address given */
static u64 clamp_to_le_canonical_addr(u64 vaddr, u8 vaddr_bits)
{
        return __is_canonical_address(vaddr, vaddr_bits) ?
               vaddr :
               BIT_ULL(vaddr_bits - 1) - 1;
}
#else
#define clamp_to_ge_canonical_addr(x, y) (x)
#define clamp_to_le_canonical_addr(x, y) (x)
#endif
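
/*
 * Illustration: with 48 bits of virtual address, the non-canonical hole is
 * 0x0000800000000000 .. 0xffff7fffffffffff. Clamping a non-canonical
 * address up yields -BIT_ULL(47) == 0xffff800000000000, and clamping it
 * down yields BIT_ULL(47) - 1 == 0x00007fffffffffff, the nearest canonical
 * addresses on either side of the hole.
 */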
static int pt_event_addr_filters_validate(struct list_head *filters)
{
        struct perf_addr_filter *filter;
        int range = 0;

        list_for_each_entry(filter, filters, entry) {
                /*
                 * PT doesn't support single address triggers and
                 * 'start' filters.
                 */
                if (!filter->size ||
                    filter->action == PERF_ADDR_FILTER_ACTION_START)
                        return -EOPNOTSUPP;

                if (++range > intel_pt_validate_hw_cap(PT_CAP_num_address_ranges))
                        return -EOPNOTSUPP;
        }

        return 0;
}

static void pt_event_addr_filters_sync(struct perf_event *event)
{
        struct perf_addr_filters_head *head = perf_event_addr_filters(event);
        unsigned long msr_a, msr_b;
        struct perf_addr_filter_range *fr = event->addr_filter_ranges;
        struct pt_filters *filters = event->hw.addr_filters;
        struct perf_addr_filter *filter;
        int range = 0;

        if (!filters)
                return;

        list_for_each_entry(filter, &head->list, entry) {
                if (filter->path.dentry && !fr[range].start) {
                        msr_a = msr_b = 0;
                } else {
                        unsigned long n = fr[range].size - 1;
                        unsigned long a = fr[range].start;
                        unsigned long b;

                        if (a > ULONG_MAX - n)
                                b = ULONG_MAX;
                        else
                                b = a + n;
                        /*
                         * Apply the offset. 64-bit addresses written to the
                         * MSRs must be canonical, but the range can encompass
                         * non-canonical addresses. Since software cannot
                         * execute at non-canonical addresses, adjusting to
                         * canonical addresses does not affect the result of
                         * the address filter.
                         */
                        msr_a = clamp_to_ge_canonical_addr(a, boot_cpu_data.x86_virt_bits);
                        msr_b = clamp_to_le_canonical_addr(b, boot_cpu_data.x86_virt_bits);
                        if (msr_b < msr_a)
                                msr_a = msr_b = 0;
                }

                filters->filter[range].msr_a  = msr_a;
                filters->filter[range].msr_b  = msr_b;
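                /*
                 * ADDRn_CFG encoding (per the SDM): 1 limits tracing to
                 * the range, 2 turns it into a TraceStop region.
                 */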
                if (filter->action == PERF_ADDR_FILTER_ACTION_FILTER)
                        filters->filter[range].config = 1;
                else
                        filters->filter[range].config = 2;
                range++;
        }

        filters->nr_filters = range;
}

/**
 * intel_pt_interrupt() - PT PMI handler
 */
void intel_pt_interrupt(void)
{
        struct pt *pt = this_cpu_ptr(&pt_ctx);
        struct pt_buffer *buf;
        struct perf_event *event = pt->handle.event;

        /*
         * There may be a dangling PT bit in the interrupt status register
         * after PT has been disabled by pt_event_stop(). Make sure we don't
         * do anything (particularly, re-enable) for this event here.
         */
        if (!READ_ONCE(pt->handle_nmi))
                return;

        if (!event)
                return;

        pt_config_stop(event);

        buf = perf_get_aux(&pt->handle);
        if (!buf)
                return;

        pt_read_offset(buf);

        pt_handle_status(pt);

        pt_update_head(pt);

        perf_aux_output_end(&pt->handle, local_xchg(&buf->data_size, 0));

        if (!event->hw.state) {
                int ret;

                buf = perf_aux_output_begin(&pt->handle, event);
                if (!buf) {
                        event->hw.state = PERF_HES_STOPPED;
                        return;
                }

                pt_buffer_reset_offsets(buf, pt->handle.head);
                /* snapshot counters don't use PMI, so it's safe */
                ret = pt_buffer_reset_markers(buf, &pt->handle);
                if (ret) {
                        perf_aux_output_end(&pt->handle, 0);
                        return;
                }

                pt_config_buffer(buf);
                pt_config_start(event);
        }
}

void intel_pt_handle_vmx(int on)
{
        struct pt *pt = this_cpu_ptr(&pt_ctx);
        struct perf_event *event;
        unsigned long flags;

        /* PT plays nice with VMX, do nothing */
        if (pt_pmu.vmx)
                return;

        /*
         * VMXON will clear RTIT_CTL.TraceEn; we need to make
         * sure to not try to set it while VMX is on. Disable
         * interrupts to avoid racing with pmu callbacks;
         * concurrent PMI should be handled fine.
         */
        local_irq_save(flags);
        WRITE_ONCE(pt->vmx_on, on);

        /*
         * If an AUX transaction is in progress, it will contain
         * gap(s), so flag it PARTIAL to inform the user.
         */
        event = pt->handle.event;
        if (event)
                perf_aux_output_flag(&pt->handle,
                                     PERF_AUX_FLAG_PARTIAL);

        /* Turn PTs back on */
        if (!on && event)
                wrmsrl(MSR_IA32_RTIT_CTL, event->hw.aux_config);

        local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(intel_pt_handle_vmx);

/*
 * PMU callbacks
 */

static void pt_event_start(struct perf_event *event, int mode)
{
        struct hw_perf_event *hwc = &event->hw;
        struct pt *pt = this_cpu_ptr(&pt_ctx);
        struct pt_buffer *buf;

        buf = perf_aux_output_begin(&pt->handle, event);
        if (!buf)
                goto fail_stop;

        pt_buffer_reset_offsets(buf, pt->handle.head);
        if (!buf->snapshot) {
                if (pt_buffer_reset_markers(buf, &pt->handle))
                        goto fail_end_stop;
        }

        WRITE_ONCE(pt->handle_nmi, 1);
        hwc->state = 0;

        pt_config_buffer(buf);
        pt_config(event);

        return;

fail_end_stop:
        perf_aux_output_end(&pt->handle, 0);
fail_stop:
        hwc->state = PERF_HES_STOPPED;
}

static void pt_event_stop(struct perf_event *event, int mode)
{
        struct pt *pt = this_cpu_ptr(&pt_ctx);

        /*
         * Protect against the PMI racing with disabling wrmsr,
         * see comment in intel_pt_interrupt().
         */
        WRITE_ONCE(pt->handle_nmi, 0);
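        /* Compiler barrier: order the store above before stopping the trace. */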
        barrier();

        pt_config_stop(event);

        if (event->hw.state == PERF_HES_STOPPED)
                return;

        event->hw.state = PERF_HES_STOPPED;

        if (mode & PERF_EF_UPDATE) {
                struct pt_buffer *buf = perf_get_aux(&pt->handle);

                if (!buf)
                        return;

                if (WARN_ON_ONCE(pt->handle.event != event))
                        return;

                pt_read_offset(buf);

                pt_handle_status(pt);

                pt_update_head(pt);

                if (buf->snapshot)
                        pt->handle.head =
                                local_xchg(&buf->data_size,
                                           buf->nr_pages << PAGE_SHIFT);
                perf_aux_output_end(&pt->handle, local_xchg(&buf->data_size, 0));
        }
}

static long pt_event_snapshot_aux(struct perf_event *event,
                                  struct perf_output_handle *handle,
                                  unsigned long size)
{
        struct pt *pt = this_cpu_ptr(&pt_ctx);
        struct pt_buffer *buf = perf_get_aux(&pt->handle);
        unsigned long from = 0, to;
        long ret;

        if (WARN_ON_ONCE(!buf))
                return 0;

        /*
         * Sampling is only allowed on snapshot events;
         * see pt_buffer_setup_aux().
         */
        if (WARN_ON_ONCE(!buf->snapshot))
                return 0;

        /*
         * There is no PT interrupt in this mode, so stop the trace and it will
         * remain stopped while the buffer is copied.
         */
        pt_config_stop(event);
        pt_read_offset(buf);
        pt_update_head(pt);
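
        /*
         * Copy the last @size bytes that end at @to; if the window would
         * begin before the start of the buffer, wrap around from the end
         * (the AUX area is a ring of nr_pages pages).
         */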
        to = local_read(&buf->data_size);
        if (to < size)
                from = buf->nr_pages << PAGE_SHIFT;
        from += to - size;

        ret = perf_output_copy_aux(&pt->handle, handle, from, to);

        /*
         * Here, handle_nmi tells us if the tracing was on; if it was,
         * restart it.
         */
        if (READ_ONCE(pt->handle_nmi))
                pt_config_start(event);

        return ret;
}

static void pt_event_del(struct perf_event *event, int mode)
{
        pt_event_stop(event, PERF_EF_UPDATE);
}

static int pt_event_add(struct perf_event *event, int mode)
{
        struct pt *pt = this_cpu_ptr(&pt_ctx);
        struct hw_perf_event *hwc = &event->hw;
        int ret = -EBUSY;

        if (pt->handle.event)
                goto fail;

        if (mode & PERF_EF_START) {
                pt_event_start(event, 0);
                ret = -EINVAL;
                if (hwc->state == PERF_HES_STOPPED)
                        goto fail;
        } else {
                hwc->state = PERF_HES_STOPPED;
        }

        ret = 0;
fail:
        return ret;
}

static void pt_event_read(struct perf_event *event)
{
}

static void pt_event_destroy(struct perf_event *event)
{
        pt_addr_filters_fini(event);
        x86_del_exclusive(x86_lbr_exclusive_pt);
}

static int pt_event_init(struct perf_event *event)
{
        if (event->attr.type != pt_pmu.pmu.type)
                return -ENOENT;

        if (!pt_event_valid(event))
                return -EINVAL;

        if (x86_add_exclusive(x86_lbr_exclusive_pt))
                return -EBUSY;

        if (pt_addr_filters_init(event)) {
                x86_del_exclusive(x86_lbr_exclusive_pt);
                return -ENOMEM;
        }

        event->destroy = pt_event_destroy;

        return 0;
}

void cpu_emergency_stop_pt(void)
{
        struct pt *pt = this_cpu_ptr(&pt_ctx);

        if (pt->handle.event)
                pt_event_stop(pt->handle.event, PERF_EF_UPDATE);
}

int is_intel_pt_event(struct perf_event *event)
{
        return event->pmu == &pt_pmu.pmu;
}

static __init int pt_init(void)
{
        int ret, cpu, prior_warn = 0;

        BUILD_BUG_ON(sizeof(struct topa) > PAGE_SIZE);

        if (!boot_cpu_has(X86_FEATURE_INTEL_PT))
                return -ENODEV;

        cpus_read_lock();
        for_each_online_cpu(cpu) {
                u64 ctl;

                ret = rdmsrl_safe_on_cpu(cpu, MSR_IA32_RTIT_CTL, &ctl);
                if (!ret && (ctl & RTIT_CTL_TRACEEN))
                        prior_warn++;
        }
        cpus_read_unlock();

        if (prior_warn) {
                x86_add_exclusive(x86_lbr_exclusive_pt);
                pr_warn("PT is enabled at boot time, doing nothing\n");

                return -EBUSY;
        }

        ret = pt_pmu_hw_init();
        if (ret)
                return ret;

        if (!intel_pt_validate_hw_cap(PT_CAP_topa_output)) {
                pr_warn("ToPA output is not supported on this CPU\n");
                return -ENODEV;
        }

        if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries))
                pt_pmu.pmu.capabilities = PERF_PMU_CAP_AUX_NO_SG;

        pt_pmu.pmu.capabilities         |= PERF_PMU_CAP_EXCLUSIVE | PERF_PMU_CAP_ITRACE;
        pt_pmu.pmu.attr_groups           = pt_attr_groups;
        pt_pmu.pmu.task_ctx_nr           = perf_sw_context;
        pt_pmu.pmu.event_init            = pt_event_init;
        pt_pmu.pmu.add                   = pt_event_add;
        pt_pmu.pmu.del                   = pt_event_del;
        pt_pmu.pmu.start                 = pt_event_start;
        pt_pmu.pmu.stop                  = pt_event_stop;
        pt_pmu.pmu.snapshot_aux          = pt_event_snapshot_aux;
        pt_pmu.pmu.read                  = pt_event_read;
        pt_pmu.pmu.setup_aux             = pt_buffer_setup_aux;
        pt_pmu.pmu.free_aux              = pt_buffer_free_aux;
        pt_pmu.pmu.addr_filters_sync     = pt_event_addr_filters_sync;
        pt_pmu.pmu.addr_filters_validate = pt_event_addr_filters_validate;
        pt_pmu.pmu.nr_addr_filters       =
                intel_pt_validate_hw_cap(PT_CAP_num_address_ranges);

        ret = perf_pmu_register(&pt_pmu.pmu, "intel_pt", -1);

        return ret;
}
arch_initcall(pt_init);
1818