xref: /linux/drivers/platform/x86/intel/pmt/telemetry.c (revision 6f47c7ae8c7afaf9ad291d39f0d3974f191a7946)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Intel Platform Monitory Technology Telemetry driver
4  *
5  * Copyright (c) 2020, Intel Corporation.
6  * All Rights Reserved.
7  *
8  * Author: "David E. Box" <david.e.box@linux.intel.com>
9  */
10 
11 #include <linux/auxiliary_bus.h>
12 #include <linux/kernel.h>
13 #include <linux/module.h>
14 #include <linux/pci.h>
15 #include <linux/slab.h>
16 #include <linux/uaccess.h>
17 #include <linux/overflow.h>
18 
19 #include "../vsec.h"
20 #include "class.h"
21 
22 #define TELEM_SIZE_OFFSET	0x0
23 #define TELEM_GUID_OFFSET	0x4
24 #define TELEM_BASE_OFFSET	0x8
25 #define TELEM_ACCESS(v)		((v) & GENMASK(3, 0))
26 #define TELEM_TYPE(v)		(((v) & GENMASK(7, 4)) >> 4)
27 /* size is in bytes */
28 #define TELEM_SIZE(v)		(((v) & GENMASK(27, 12)) >> 10)
29 
/* Used by client hardware to identify a fixed telemetry entry */
31 #define TELEM_CLIENT_FIXED_BLOCK_GUID	0x10000000
32 
33 #define NUM_BYTES_QWORD(v)	((v) << 3)
34 #define SAMPLE_ID_OFFSET(v)	((v) << 3)
35 
36 #define NUM_BYTES_DWORD(v)	((v) << 2)
37 #define SAMPLE_ID_OFFSET32(v)	((v) << 2)
38 
39 /* Protects access to the xarray of telemetry endpoint handles */
40 static DEFINE_MUTEX(ep_lock);
41 
42 enum telem_type {
43 	TELEM_TYPE_PUNIT = 0,
44 	TELEM_TYPE_CRASHLOG,
45 	TELEM_TYPE_PUNIT_FIXED,
46 };
47 
48 struct pmt_telem_priv {
49 	int				num_entries;
50 	struct intel_pmt_entry		entry[];
51 };
52 
53 static bool pmt_telem_region_overlaps(struct intel_pmt_entry *entry,
54 				      struct device *dev)
55 {
56 	u32 guid = readl(entry->disc_table + TELEM_GUID_OFFSET);
57 
58 	if (intel_pmt_is_early_client_hw(dev)) {
59 		u32 type = TELEM_TYPE(readl(entry->disc_table));
60 
61 		if ((type == TELEM_TYPE_PUNIT_FIXED) ||
62 		    (guid == TELEM_CLIENT_FIXED_BLOCK_GUID))
63 			return true;
64 	}
65 
66 	return false;
67 }
68 
69 static int pmt_telem_header_decode(struct intel_pmt_entry *entry,
70 				   struct device *dev)
71 {
72 	void __iomem *disc_table = entry->disc_table;
73 	struct intel_pmt_header *header = &entry->header;
74 
75 	if (pmt_telem_region_overlaps(entry, dev))
76 		return 1;
77 
78 	header->access_type = TELEM_ACCESS(readl(disc_table));
79 	header->guid = readl(disc_table + TELEM_GUID_OFFSET);
80 	header->base_offset = readl(disc_table + TELEM_BASE_OFFSET);
81 
82 	/* Size is measured in DWORDS, but accessor returns bytes */
83 	header->size = TELEM_SIZE(readl(disc_table));
84 
85 	/*
86 	 * Some devices may expose non-functioning entries that are
87 	 * reserved for future use. They have zero size. Do not fail
88 	 * probe for these. Just ignore them.
89 	 */
90 	if (header->size == 0 || header->access_type == 0xF)
91 		return 1;
92 
93 	return 0;
94 }
95 
96 static int pmt_telem_add_endpoint(struct intel_pmt_entry *entry,
97 				  struct pci_dev *pdev)
98 {
99 	struct telem_endpoint *ep;
100 
101 	/* Endpoint lifetimes are managed by kref, not devres */
102 	entry->ep = kzalloc(sizeof(*(entry->ep)), GFP_KERNEL);
103 	if (!entry->ep)
104 		return -ENOMEM;
105 
106 	ep = entry->ep;
107 	ep->pcidev = pdev;
108 	ep->header.access_type = entry->header.access_type;
109 	ep->header.guid = entry->header.guid;
110 	ep->header.base_offset = entry->header.base_offset;
111 	ep->header.size = entry->header.size;
112 	ep->base = entry->base;
113 	ep->present = true;
114 
115 	kref_init(&ep->kref);
116 
117 	return 0;
118 }
119 
/* xarray of telemetry entries, keyed by the devid handed out to clients */
static DEFINE_XARRAY_ALLOC(telem_array);
/* Callbacks registered with the common PMT class code for "telem" devices */
static struct intel_pmt_namespace pmt_telem_ns = {
	.name = "telem",
	.xa = &telem_array,
	.pmt_header_decode = pmt_telem_header_decode,
	.pmt_add_endpoint = pmt_telem_add_endpoint,
};
127 
128 /* Called when all users unregister and the device is removed */
129 static void pmt_telem_ep_release(struct kref *kref)
130 {
131 	struct telem_endpoint *ep;
132 
133 	ep = container_of(kref, struct telem_endpoint, kref);
134 	kfree(ep);
135 }
136 
137 unsigned long pmt_telem_get_next_endpoint(unsigned long start)
138 {
139 	struct intel_pmt_entry *entry;
140 	unsigned long found_idx;
141 
142 	mutex_lock(&ep_lock);
143 	xa_for_each_start(&telem_array, found_idx, entry, start) {
144 		/*
145 		 * Return first found index after start.
146 		 * 0 is not valid id.
147 		 */
148 		if (found_idx > start)
149 			break;
150 	}
151 	mutex_unlock(&ep_lock);
152 
153 	return found_idx == start ? 0 : found_idx;
154 }
155 EXPORT_SYMBOL_NS_GPL(pmt_telem_get_next_endpoint, INTEL_PMT_TELEMETRY);
156 
157 struct telem_endpoint *pmt_telem_register_endpoint(int devid)
158 {
159 	struct intel_pmt_entry *entry;
160 	unsigned long index = devid;
161 
162 	mutex_lock(&ep_lock);
163 	entry = xa_find(&telem_array, &index, index, XA_PRESENT);
164 	if (!entry) {
165 		mutex_unlock(&ep_lock);
166 		return ERR_PTR(-ENXIO);
167 	}
168 
169 	kref_get(&entry->ep->kref);
170 	mutex_unlock(&ep_lock);
171 
172 	return entry->ep;
173 }
174 EXPORT_SYMBOL_NS_GPL(pmt_telem_register_endpoint, INTEL_PMT_TELEMETRY);
175 
/* Drop the reference taken by pmt_telem_register_endpoint(); the endpoint
 * is freed once the last reference (including the driver's own) is gone.
 */
void pmt_telem_unregister_endpoint(struct telem_endpoint *ep)
{
	kref_put(&ep->kref, pmt_telem_ep_release);
}
EXPORT_SYMBOL_NS_GPL(pmt_telem_unregister_endpoint, INTEL_PMT_TELEMETRY);
181 
182 int pmt_telem_get_endpoint_info(int devid, struct telem_endpoint_info *info)
183 {
184 	struct intel_pmt_entry *entry;
185 	unsigned long index = devid;
186 	int err = 0;
187 
188 	if (!info)
189 		return -EINVAL;
190 
191 	mutex_lock(&ep_lock);
192 	entry = xa_find(&telem_array, &index, index, XA_PRESENT);
193 	if (!entry) {
194 		err = -ENXIO;
195 		goto unlock;
196 	}
197 
198 	info->pdev = entry->ep->pcidev;
199 	info->header = entry->ep->header;
200 
201 unlock:
202 	mutex_unlock(&ep_lock);
203 	return err;
204 
205 }
206 EXPORT_SYMBOL_NS_GPL(pmt_telem_get_endpoint_info, INTEL_PMT_TELEMETRY);
207 
208 int pmt_telem_read(struct telem_endpoint *ep, u32 id, u64 *data, u32 count)
209 {
210 	u32 offset, size;
211 
212 	if (!ep->present)
213 		return -ENODEV;
214 
215 	offset = SAMPLE_ID_OFFSET(id);
216 	size = ep->header.size;
217 
218 	if (offset + NUM_BYTES_QWORD(count) > size)
219 		return -EINVAL;
220 
221 	memcpy_fromio(data, ep->base + offset, NUM_BYTES_QWORD(count));
222 
223 	return ep->present ? 0 : -EPIPE;
224 }
225 EXPORT_SYMBOL_NS_GPL(pmt_telem_read, INTEL_PMT_TELEMETRY);
226 
227 int pmt_telem_read32(struct telem_endpoint *ep, u32 id, u32 *data, u32 count)
228 {
229 	u32 offset, size;
230 
231 	if (!ep->present)
232 		return -ENODEV;
233 
234 	offset = SAMPLE_ID_OFFSET32(id);
235 	size = ep->header.size;
236 
237 	if (offset + NUM_BYTES_DWORD(count) > size)
238 		return -EINVAL;
239 
240 	memcpy_fromio(data, ep->base + offset, NUM_BYTES_DWORD(count));
241 
242 	return ep->present ? 0 : -EPIPE;
243 }
244 EXPORT_SYMBOL_NS_GPL(pmt_telem_read32, INTEL_PMT_TELEMETRY);
245 
246 struct telem_endpoint *
247 pmt_telem_find_and_register_endpoint(struct pci_dev *pcidev, u32 guid, u16 pos)
248 {
249 	int devid = 0;
250 	int inst = 0;
251 	int err = 0;
252 
253 	while ((devid = pmt_telem_get_next_endpoint(devid))) {
254 		struct telem_endpoint_info ep_info;
255 
256 		err = pmt_telem_get_endpoint_info(devid, &ep_info);
257 		if (err)
258 			return ERR_PTR(err);
259 
260 		if (ep_info.header.guid == guid && ep_info.pdev == pcidev) {
261 			if (inst == pos)
262 				return pmt_telem_register_endpoint(devid);
263 			++inst;
264 		}
265 	}
266 
267 	return ERR_PTR(-ENXIO);
268 }
269 EXPORT_SYMBOL_NS_GPL(pmt_telem_find_and_register_endpoint, INTEL_PMT_TELEMETRY);
270 
271 static void pmt_telem_remove(struct auxiliary_device *auxdev)
272 {
273 	struct pmt_telem_priv *priv = auxiliary_get_drvdata(auxdev);
274 	int i;
275 
276 	mutex_lock(&ep_lock);
277 	for (i = 0; i < priv->num_entries; i++) {
278 		struct intel_pmt_entry *entry = &priv->entry[i];
279 
280 		kref_put(&entry->ep->kref, pmt_telem_ep_release);
281 		intel_pmt_dev_destroy(entry, &pmt_telem_ns);
282 	}
283 	mutex_unlock(&ep_lock);
284 };
285 
/*
 * Probe: create one PMT telemetry device per VSEC resource. Entries that
 * intel_pmt_dev_create() reports as skippable (positive return) are simply
 * not counted; a negative return unwinds everything created so far.
 */
static int pmt_telem_probe(struct auxiliary_device *auxdev, const struct auxiliary_device_id *id)
{
	struct intel_vsec_device *intel_vsec_dev = auxdev_to_ivdev(auxdev);
	struct pmt_telem_priv *priv;
	size_t size;
	int i, ret;

	/* Flexible array: one entry slot per VSEC resource */
	size = struct_size(priv, entry, intel_vsec_dev->num_resources);
	priv = devm_kzalloc(&auxdev->dev, size, GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	auxiliary_set_drvdata(auxdev, priv);

	for (i = 0; i < intel_vsec_dev->num_resources; i++) {
		/* num_entries is only bumped for successfully created entries */
		struct intel_pmt_entry *entry = &priv->entry[priv->num_entries];

		/* ep_lock serializes xarray updates against client lookups */
		mutex_lock(&ep_lock);
		ret = intel_pmt_dev_create(entry, &pmt_telem_ns, intel_vsec_dev, i);
		mutex_unlock(&ep_lock);
		if (ret < 0)
			goto abort_probe;
		/* ret > 0: entry intentionally skipped (reserved/overlapping) */
		if (ret)
			continue;

		priv->num_entries++;
	}

	return 0;
abort_probe:
	/* Unwind the entries created so far (num_entries counts them) */
	pmt_telem_remove(auxdev);
	return ret;
}
319 
/* Bind to the "telemetry" auxiliary device enumerated by intel_vsec */
static const struct auxiliary_device_id pmt_telem_id_table[] = {
	{ .name = "intel_vsec.telemetry" },
	{}
};
MODULE_DEVICE_TABLE(auxiliary, pmt_telem_id_table);
325 
/* Auxiliary bus glue; driver name is derived from the module name */
static struct auxiliary_driver pmt_telem_aux_driver = {
	.id_table	= pmt_telem_id_table,
	.remove		= pmt_telem_remove,
	.probe		= pmt_telem_probe,
};
331 
/* Module entry point: register with the auxiliary bus */
static int __init pmt_telem_init(void)
{
	return auxiliary_driver_register(&pmt_telem_aux_driver);
}
module_init(pmt_telem_init);
337 
/* Module exit: unregister the driver, then release xarray bookkeeping */
static void __exit pmt_telem_exit(void)
{
	auxiliary_driver_unregister(&pmt_telem_aux_driver);
	/* all devices have been removed by now, so the array is empty */
	xa_destroy(&telem_array);
}
module_exit(pmt_telem_exit);
344 
345 MODULE_AUTHOR("David E. Box <david.e.box@linux.intel.com>");
346 MODULE_DESCRIPTION("Intel PMT Telemetry driver");
347 MODULE_LICENSE("GPL v2");
348 MODULE_IMPORT_NS(INTEL_PMT);
349