xref: /linux/arch/x86/kernel/cpu/resctrl/intel_aet.c (revision 7e6df9614546ae7eb1f1b2074d7b6039bb01540d)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Resource Director Technology (RDT)
 * - Intel Application Energy Telemetry
 *
 * Copyright (C) 2025 Intel Corporation
 *
 * Author:
 *    Tony Luck <tony.luck@intel.com>
 */

#define pr_fmt(fmt)   "resctrl: " fmt

#include <linux/compiler_types.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/intel_pmt_features.h>
#include <linux/intel_vsec.h>
#include <linux/printk.h>
#include <linux/resctrl.h>
#include <linux/resctrl_types.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/topology.h>
#include <linux/types.h>

#include "internal.h"

/**
 * struct pmt_event - Telemetry event.
 * @id:		Resctrl event id.
 * @idx:	Counter index within each per-RMID block of counters.
 * @bin_bits:	Zero for integer-valued events, else the number of bits in the
 *		fraction part of the fixed-point value.
 */
struct pmt_event {
	enum resctrl_event_id	id;
	unsigned int		idx;
	unsigned int		bin_bits;
};

#define EVT(_id, _idx, _bits) { .id = _id, .idx = _idx, .bin_bits = _bits }
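
/*
 * Illustrative note, not taken from the XML descriptions: a non-zero
 * bin_bits means the counter is a fixed-point value with that many
 * fraction bits, i.e. value = raw / (1 << bin_bits). With bin_bits == 18
 * a raw reading of 0x40000 (1 << 18) therefore corresponds to 1.0 in the
 * event's natural unit.
 */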

/**
 * struct event_group - Events with the same feature type ("energy" or "perf") and GUID.
 * @pfname:		PMT feature name ("energy" or "perf") of this event group.
 * @pfg:		Points to the aggregated telemetry space information
 *			returned by the intel_pmt_get_regions_by_feature()
 *			call to the INTEL_PMT_TELEMETRY driver that contains
 *			data for all telemetry regions of type @pfname.
 *			Valid if the system supports the event group,
 *			NULL otherwise.
 * @guid:		Unique number per XML description file.
 * @mmio_size:		Number of bytes of MMIO registers for this group.
 * @num_events:		Number of events in this group.
 * @evts:		Array of event descriptors.
 */
struct event_group {
	/* Data fields for additional structures to manage this group. */
	const char			*pfname;
	struct pmt_feature_group	*pfg;

	/* Remaining fields initialized from XML file. */
	u32				guid;
	size_t				mmio_size;
	unsigned int			num_events;
	struct pmt_event		evts[] __counted_by(num_events);
};

#define XML_MMIO_SIZE(num_rmids, num_events, num_extra_status) \
		      (((num_rmids) * (num_events) + (num_extra_status)) * sizeof(u64))
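
/*
 * Worked example (arithmetic only, nothing beyond what the macro encodes):
 * XML_MMIO_SIZE(576, 2, 3), as used by the energy group below, is
 * 576 RMIDs * 2 events + 3 extra status registers = 1155 64-bit registers,
 * i.e. an expected MMIO region size of 9240 bytes.
 */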

/*
 * Link: https://github.com/intel/Intel-PMT/blob/main/xml/CWF/OOBMSM/RMID-ENERGY/cwf_aggregator.xml
 */
static struct event_group energy_0x26696143 = {
	.pfname		= "energy",
	.guid		= 0x26696143,
	.mmio_size	= XML_MMIO_SIZE(576, 2, 3),
	.num_events	= 2,
	.evts		= {
		EVT(PMT_EVENT_ENERGY, 0, 18),
		EVT(PMT_EVENT_ACTIVITY, 1, 18),
	}
};

/*
 * Link: https://github.com/intel/Intel-PMT/blob/main/xml/CWF/OOBMSM/RMID-PERF/cwf_aggregator.xml
 */
static struct event_group perf_0x26557651 = {
	.pfname		= "perf",
	.guid		= 0x26557651,
	.mmio_size	= XML_MMIO_SIZE(576, 7, 3),
	.num_events	= 7,
	.evts		= {
		EVT(PMT_EVENT_STALLS_LLC_HIT, 0, 0),
		EVT(PMT_EVENT_C1_RES, 1, 0),
		EVT(PMT_EVENT_UNHALTED_CORE_CYCLES, 2, 0),
		EVT(PMT_EVENT_STALLS_LLC_MISS, 3, 0),
		EVT(PMT_EVENT_AUTO_C6_RES, 4, 0),
		EVT(PMT_EVENT_UNHALTED_REF_CYCLES, 5, 0),
		EVT(PMT_EVENT_UOPS_RETIRED, 6, 0),
	}
};

static struct event_group *known_event_groups[] = {
	&energy_0x26696143,
	&perf_0x26557651,
};

#define for_each_event_group(_peg)						\
	for (_peg = known_event_groups;						\
	     _peg < &known_event_groups[ARRAY_SIZE(known_event_groups)];	\
	     _peg++)
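
/*
 * Usage sketch, mirroring the loops in intel_aet_get_events() and
 * intel_aet_exit() below:
 *
 *	struct event_group **peg;
 *
 *	for_each_event_group(peg)
 *		pr_debug("group %s guid 0x%x\n", (*peg)->pfname, (*peg)->guid);
 */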

static bool skip_telem_region(struct telemetry_region *tr, struct event_group *e)
{
	if (tr->guid != e->guid)
		return true;
	if (tr->plat_info.package_id >= topology_max_packages()) {
		pr_warn("Bad package %u in guid 0x%x\n", tr->plat_info.package_id,
			tr->guid);
		return true;
	}
	if (tr->size != e->mmio_size) {
		pr_warn("MMIO space wrong size (%zu bytes) for guid 0x%x. Expected %zu bytes.\n",
			tr->size, e->guid, e->mmio_size);
		return true;
	}

	return false;
}

static bool group_has_usable_regions(struct event_group *e, struct pmt_feature_group *p)
{
	bool usable_regions = false;

	for (int i = 0; i < p->count; i++) {
		if (skip_telem_region(&p->regions[i], e)) {
			/*
			 * Clear the address field of regions that did not pass the checks in
			 * skip_telem_region() so they will not be used by intel_aet_read_event().
			 * This is safe to do because intel_pmt_get_regions_by_feature() allocates
			 * a new pmt_feature_group structure to return to each caller and only makes
			 * use of the pmt_feature_group::kref field when the structure is handed
			 * back via intel_pmt_put_feature_group().
			 */
			p->regions[i].addr = NULL;

			continue;
		}
		usable_regions = true;
	}

	return usable_regions;
}

static bool enable_events(struct event_group *e, struct pmt_feature_group *p)
{
	struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_PERF_PKG].r_resctrl;
	int skipped_events = 0;

	if (!group_has_usable_regions(e, p))
		return false;

	for (int j = 0; j < e->num_events; j++) {
		if (!resctrl_enable_mon_event(e->evts[j].id, true,
					      e->evts[j].bin_bits, &e->evts[j]))
			skipped_events++;
	}
	if (e->num_events == skipped_events) {
		pr_info("No events enabled in %s %s:0x%x\n", r->name, e->pfname, e->guid);
		return false;
	}

	return true;
}

static enum pmt_feature_id lookup_pfid(const char *pfname)
{
	if (!strcmp(pfname, "energy"))
		return FEATURE_PER_RMID_ENERGY_TELEM;
	else if (!strcmp(pfname, "perf"))
		return FEATURE_PER_RMID_PERF_TELEM;

	pr_warn("Unknown PMT feature name '%s'\n", pfname);

	return FEATURE_INVALID;
}

/*
 * Request a copy of struct pmt_feature_group for each event group. If there is
 * one, the returned structure contains an array of telemetry_region structures;
 * each element of the array describes one telemetry aggregator. The telemetry
 * aggregators may have different GUIDs, so duplicate struct pmt_feature_group
 * copies are obtained for event groups with the same feature type but different
 * GUIDs. Post-processing ensures an event group can only use the telemetry
 * aggregators that match its GUID. An event group keeps a pointer to its
 * struct pmt_feature_group to indicate that its events have been successfully
 * enabled.
 */
bool intel_aet_get_events(void)
{
	struct pmt_feature_group *p;
	enum pmt_feature_id pfid;
	struct event_group **peg;
	bool ret = false;

	for_each_event_group(peg) {
		pfid = lookup_pfid((*peg)->pfname);
		p = intel_pmt_get_regions_by_feature(pfid);
		if (IS_ERR_OR_NULL(p))
			continue;
		if (enable_events(*peg, p)) {
			(*peg)->pfg = p;
			ret = true;
		} else {
			intel_pmt_put_feature_group(p);
		}
	}

	return ret;
}

void __exit intel_aet_exit(void)
{
	struct event_group **peg;

	for_each_event_group(peg) {
		if ((*peg)->pfg) {
			intel_pmt_put_feature_group((*peg)->pfg);
			(*peg)->pfg = NULL;
		}
	}
}
235