// SPDX-License-Identifier: GPL-2.0
/*
 * Intel Vendor Specific Extended Capabilities auxiliary bus driver
 *
 * Copyright (c) 2021, Intel Corporation.
 * All Rights Reserved.
 *
 * Author: David E. Box <david.e.box@linux.intel.com>
 *
 * This driver discovers and creates auxiliary devices for Intel-defined PCIe
 * "Vendor Specific" and "Designated Vendor Specific" Extended Capabilities,
 * VSEC and DVSEC respectively. The driver supports features on specific PCIe
 * endpoints whose primary purpose is to expose those features.
 */
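
/*
 * Each discovered feature is registered on the auxiliary bus. The device
 * name is composed by the auxiliary core from this module's name and the
 * feature name returned by intel_vsec_name(), so a feature driver is
 * expected to match on e.g. "intel_vsec.telemetry" (assuming the usual
 * KBUILD_MODNAME of this driver, intel_vsec).
 */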

#include <linux/auxiliary_bus.h>
#include <linux/bits.h>
#include <linux/kernel.h>
#include <linux/idr.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/types.h>

#include "vsec.h"

/* Intel DVSEC offsets */
#define INTEL_DVSEC_ENTRIES		0xA
#define INTEL_DVSEC_SIZE		0xB
#define INTEL_DVSEC_TABLE		0xC
#define INTEL_DVSEC_TABLE_BAR(x)	((x) & GENMASK(2, 0))
#define INTEL_DVSEC_TABLE_OFFSET(x)	((x) & GENMASK(31, 3))
#define TABLE_OFFSET_SHIFT		3
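
/*
 * The dword at INTEL_DVSEC_TABLE packs the index of the BAR holding the
 * discovery tables in bits 2:0 and the byte offset of the first table in
 * bits 31:3. Devices flagged with VSEC_QUIRK_TABLE_SHIFT store the offset
 * as a field in bits 31:3 (i.e. the masked value is the real offset shifted
 * left by TABLE_OFFSET_SHIFT), so intel_vsec_add_dev() shifts it back down.
 */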

static DEFINE_IDA(intel_vsec_ida);

/**
 * struct intel_vsec_header - Common fields of Intel VSEC and DVSEC registers.
 * @rev:         Revision ID of the VSEC/DVSEC register space
 * @length:      Length of the VSEC/DVSEC register space
 * @id:          ID of the feature
 * @num_entries: Number of instances of the feature
 * @entry_size:  Size, in dwords, of the discovery table for each feature
 * @tbir:        BAR containing the discovery tables
 * @offset:      BAR offset of start of the first discovery table
 */
struct intel_vsec_header {
	u8	rev;
	u16	length;
	u16	id;
	u8	num_entries;
	u8	entry_size;
	u8	tbir;
	u32	offset;
};

/* Platform specific data */
struct intel_vsec_platform_info {
	struct intel_vsec_header **capabilities;
	unsigned long quirks;
};

enum intel_vsec_id {
	VSEC_ID_TELEMETRY	= 2,
	VSEC_ID_WATCHER		= 3,
	VSEC_ID_CRASHLOG	= 4,
};

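/*
 * Only the feature IDs below are instantiated as auxiliary devices; any
 * other ID found while walking the capabilities is skipped.
 */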
static enum intel_vsec_id intel_vsec_allow_list[] = {
	VSEC_ID_TELEMETRY,
	VSEC_ID_WATCHER,
	VSEC_ID_CRASHLOG,
};

static const char *intel_vsec_name(enum intel_vsec_id id)
{
	switch (id) {
	case VSEC_ID_TELEMETRY:
		return "telemetry";

	case VSEC_ID_WATCHER:
		return "watcher";

	case VSEC_ID_CRASHLOG:
		return "crashlog";

	default:
		return NULL;
	}
}

static bool intel_vsec_allowed(u16 id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(intel_vsec_allow_list); i++)
		if (intel_vsec_allow_list[i] == id)
			return true;

	return false;
}

static bool intel_vsec_disabled(u16 id, unsigned long quirks)
{
	switch (id) {
	case VSEC_ID_WATCHER:
		return !!(quirks & VSEC_QUIRK_NO_WATCHER);

	case VSEC_ID_CRASHLOG:
		return !!(quirks & VSEC_QUIRK_NO_CRASHLOG);

	default:
		return false;
	}
}

static void intel_vsec_remove_aux(void *data)
{
	auxiliary_device_delete(data);
	auxiliary_device_uninit(data);
}

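/*
 * Everything reachable from the embedded auxiliary_device, including the
 * resource array and the intel_vsec_device itself, is freed from the
 * device release callback once the last reference is dropped.
 */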
static void intel_vsec_dev_release(struct device *dev)
{
	struct intel_vsec_device *intel_vsec_dev = dev_to_ivdev(dev);

	ida_free(intel_vsec_dev->ida, intel_vsec_dev->auxdev.id);
	kfree(intel_vsec_dev->resource);
	kfree(intel_vsec_dev);
}

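/*
 * Takes ownership of intel_vsec_dev: on any failure the structure and its
 * resource array are released, either directly or via the device release
 * callback once auxiliary_device_init() has run.
 */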
static int intel_vsec_add_aux(struct pci_dev *pdev, struct intel_vsec_device *intel_vsec_dev,
			      const char *name)
{
	struct auxiliary_device *auxdev = &intel_vsec_dev->auxdev;
	int ret;

	ret = ida_alloc(intel_vsec_dev->ida, GFP_KERNEL);
	if (ret < 0) {
		kfree(intel_vsec_dev->resource);
		kfree(intel_vsec_dev);
		return ret;
	}

	auxdev->id = ret;
	auxdev->name = name;
	auxdev->dev.parent = &pdev->dev;
	auxdev->dev.release = intel_vsec_dev_release;

	ret = auxiliary_device_init(auxdev);
	if (ret < 0) {
		ida_free(intel_vsec_dev->ida, auxdev->id);
		kfree(intel_vsec_dev->resource);
		kfree(intel_vsec_dev);
		return ret;
	}

	ret = auxiliary_device_add(auxdev);
	if (ret < 0) {
		auxiliary_device_uninit(auxdev);
		return ret;
	}

	return devm_add_action_or_reset(&pdev->dev, intel_vsec_remove_aux, auxdev);
}

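/*
 * Validate a parsed capability header, build a resource array describing
 * its discovery tables and register an auxiliary device named after the
 * feature.
 */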
static int intel_vsec_add_dev(struct pci_dev *pdev, struct intel_vsec_header *header,
			      unsigned long quirks)
{
	struct intel_vsec_device *intel_vsec_dev;
	struct resource *res, *tmp;
	int i;

	if (!intel_vsec_allowed(header->id) || intel_vsec_disabled(header->id, quirks))
		return -EINVAL;

	if (!header->num_entries) {
		dev_dbg(&pdev->dev, "Invalid 0 entry count for header id %d\n", header->id);
		return -EINVAL;
	}

	if (!header->entry_size) {
		dev_dbg(&pdev->dev, "Invalid 0 entry size for header id %d\n", header->id);
		return -EINVAL;
	}

	intel_vsec_dev = kzalloc(sizeof(*intel_vsec_dev), GFP_KERNEL);
	if (!intel_vsec_dev)
		return -ENOMEM;

	res = kcalloc(header->num_entries, sizeof(*res), GFP_KERNEL);
	if (!res) {
		kfree(intel_vsec_dev);
		return -ENOMEM;
	}

	if (quirks & VSEC_QUIRK_TABLE_SHIFT)
		header->offset >>= TABLE_OFFSET_SHIFT;

	/*
	 * The DVSEC/VSEC contains the starting offset and count for a block of
	 * discovery tables. Create a resource array of these tables to pass to
	 * the auxiliary device driver.
	 */
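	/*
	 * For example, entry_size = 3 dwords and num_entries = 2 produce two
	 * 12-byte resources: one at BAR start + offset and one immediately
	 * following it at BAR start + offset + 12.
	 */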
	for (i = 0, tmp = res; i < header->num_entries; i++, tmp++) {
		tmp->start = pdev->resource[header->tbir].start +
			     header->offset + i * (header->entry_size * sizeof(u32));
		tmp->end = tmp->start + (header->entry_size * sizeof(u32)) - 1;
		tmp->flags = IORESOURCE_MEM;
	}

	intel_vsec_dev->pcidev = pdev;
	intel_vsec_dev->resource = res;
	intel_vsec_dev->num_resources = header->num_entries;
	intel_vsec_dev->quirks = quirks;
	intel_vsec_dev->ida = &intel_vsec_ida;

	return intel_vsec_add_aux(pdev, intel_vsec_dev, intel_vsec_name(header->id));
}

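/*
 * Instantiate devices from a static capability list provided through
 * driver_data, used when a device cannot describe its features via
 * DVSEC/VSEC (see VSEC_QUIRK_NO_DVSEC and the DG1 entry below).
 */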
static bool intel_vsec_walk_header(struct pci_dev *pdev, unsigned long quirks,
				   struct intel_vsec_header **header)
{
	bool have_devices = false;
	int ret;

	for ( ; *header; header++) {
		ret = intel_vsec_add_dev(pdev, *header, quirks);
		if (ret)
			dev_info(&pdev->dev, "Could not add device for DVSEC id %d\n",
				 (*header)->id);
		else
			have_devices = true;
	}

	return have_devices;
}

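/*
 * Walk every DVSEC capability on the device, keep only Intel-defined,
 * revision 1 instances and create an auxiliary device for each supported
 * feature found.
 */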
static bool intel_vsec_walk_dvsec(struct pci_dev *pdev, unsigned long quirks)
{
	bool have_devices = false;
	int pos = 0;

	do {
		struct intel_vsec_header header;
		u32 table, hdr;
		u16 vid;
		int ret;

		pos = pci_find_next_ext_capability(pdev, pos, PCI_EXT_CAP_ID_DVSEC);
		if (!pos)
			break;

		pci_read_config_dword(pdev, pos + PCI_DVSEC_HEADER1, &hdr);
		vid = PCI_DVSEC_HEADER1_VID(hdr);
		if (vid != PCI_VENDOR_ID_INTEL)
			continue;

		/* Support only revision 1 */
		header.rev = PCI_DVSEC_HEADER1_REV(hdr);
		if (header.rev != 1) {
			dev_info(&pdev->dev, "Unsupported DVSEC revision %d\n", header.rev);
			continue;
		}

		header.length = PCI_DVSEC_HEADER1_LEN(hdr);

		pci_read_config_byte(pdev, pos + INTEL_DVSEC_ENTRIES, &header.num_entries);
		pci_read_config_byte(pdev, pos + INTEL_DVSEC_SIZE, &header.entry_size);
		pci_read_config_dword(pdev, pos + INTEL_DVSEC_TABLE, &table);

		header.tbir = INTEL_DVSEC_TABLE_BAR(table);
		header.offset = INTEL_DVSEC_TABLE_OFFSET(table);

		pci_read_config_dword(pdev, pos + PCI_DVSEC_HEADER2, &hdr);
		header.id = PCI_DVSEC_HEADER2_ID(hdr);

		ret = intel_vsec_add_dev(pdev, &header, quirks);
		if (ret)
			continue;

		have_devices = true;
	} while (true);

	return have_devices;
}

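/*
 * Same walk for plain VSEC capabilities. No vendor ID check is needed
 * because a VSEC is defined by the device vendor and this driver only
 * binds to Intel devices; the Intel-specific fields use the DVSEC layout.
 */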
static bool intel_vsec_walk_vsec(struct pci_dev *pdev, unsigned long quirks)
{
	bool have_devices = false;
	int pos = 0;

	do {
		struct intel_vsec_header header;
		u32 table, hdr;
		int ret;

		pos = pci_find_next_ext_capability(pdev, pos, PCI_EXT_CAP_ID_VNDR);
		if (!pos)
			break;

		pci_read_config_dword(pdev, pos + PCI_VNDR_HEADER, &hdr);

		/* Support only revision 1 */
		header.rev = PCI_VNDR_HEADER_REV(hdr);
		if (header.rev != 1) {
			dev_info(&pdev->dev, "Unsupported VSEC revision %d\n", header.rev);
			continue;
		}

		header.id = PCI_VNDR_HEADER_ID(hdr);
		header.length = PCI_VNDR_HEADER_LEN(hdr);

		/* entry, size, and table offset are the same as DVSEC */
		pci_read_config_byte(pdev, pos + INTEL_DVSEC_ENTRIES, &header.num_entries);
		pci_read_config_byte(pdev, pos + INTEL_DVSEC_SIZE, &header.entry_size);
		pci_read_config_dword(pdev, pos + INTEL_DVSEC_TABLE, &table);

		header.tbir = INTEL_DVSEC_TABLE_BAR(table);
		header.offset = INTEL_DVSEC_TABLE_OFFSET(table);

		ret = intel_vsec_add_dev(pdev, &header, quirks);
		if (ret)
			continue;

		have_devices = true;
	} while (true);

	return have_devices;
}

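/*
 * Probe walks the DVSEC and VSEC capabilities and, for devices that cannot
 * enumerate their features this way (VSEC_QUIRK_NO_DVSEC), falls back to a
 * static capability list. Binding succeeds only if at least one auxiliary
 * device was created.
 */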
static int intel_vsec_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct intel_vsec_platform_info *info;
	bool have_devices = false;
	unsigned long quirks = 0;
	int ret;

	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	info = (struct intel_vsec_platform_info *)id->driver_data;
	if (info)
		quirks = info->quirks;

	if (intel_vsec_walk_dvsec(pdev, quirks))
		have_devices = true;

	if (intel_vsec_walk_vsec(pdev, quirks))
		have_devices = true;

	if (info && (info->quirks & VSEC_QUIRK_NO_DVSEC) &&
	    intel_vsec_walk_header(pdev, quirks, info->capabilities))
		have_devices = true;

	if (!have_devices)
		return -ENODEV;

	return 0;
}

/* TGL info */
static const struct intel_vsec_platform_info tgl_info = {
	.quirks = VSEC_QUIRK_NO_WATCHER | VSEC_QUIRK_NO_CRASHLOG | VSEC_QUIRK_TABLE_SHIFT,
};

/* DG1 info */
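/*
 * DG1 does not describe its telemetry region through a DVSEC/VSEC
 * (VSEC_QUIRK_NO_DVSEC), so a static discovery header is supplied instead.
 */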
static struct intel_vsec_header dg1_telemetry = {
	.length = 0x10,
	.id = 2,
	.num_entries = 1,
	.entry_size = 3,
	.tbir = 0,
	.offset = 0x466000,
};

static struct intel_vsec_header *dg1_capabilities[] = {
	&dg1_telemetry,
	NULL
};

static const struct intel_vsec_platform_info dg1_info = {
	.capabilities = dg1_capabilities,
	.quirks = VSEC_QUIRK_NO_DVSEC,
};

#define PCI_DEVICE_ID_INTEL_VSEC_ADL		0x467d
#define PCI_DEVICE_ID_INTEL_VSEC_DG1		0x490e
#define PCI_DEVICE_ID_INTEL_VSEC_OOBMSM		0x09a7
#define PCI_DEVICE_ID_INTEL_VSEC_TGL		0x9a0d
static const struct pci_device_id intel_vsec_pci_ids[] = {
	{ PCI_DEVICE_DATA(INTEL, VSEC_ADL, &tgl_info) },
	{ PCI_DEVICE_DATA(INTEL, VSEC_DG1, &dg1_info) },
	{ PCI_DEVICE_DATA(INTEL, VSEC_OOBMSM, NULL) },
	{ PCI_DEVICE_DATA(INTEL, VSEC_TGL, &tgl_info) },
	{ }
};
MODULE_DEVICE_TABLE(pci, intel_vsec_pci_ids);

static struct pci_driver intel_vsec_pci_driver = {
	.name = "intel_vsec",
	.id_table = intel_vsec_pci_ids,
	.probe = intel_vsec_pci_probe,
};
module_pci_driver(intel_vsec_pci_driver);

MODULE_AUTHOR("David E. Box <david.e.box@linux.intel.com>");
MODULE_DESCRIPTION("Intel Extended Capabilities auxiliary bus driver");
MODULE_LICENSE("GPL v2");