xref: /linux/drivers/gpu/drm/xe/xe_vsec.c (revision 69050f8d6d075dc01af7a5f2f550a8067510366f)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright © 2024 Intel Corporation */
3 #include <linux/bitfield.h>
4 #include <linux/bits.h>
5 #include <linux/cleanup.h>
6 #include <linux/errno.h>
7 #include <linux/intel_vsec.h>
8 #include <linux/module.h>
9 #include <linux/mutex.h>
10 #include <linux/pci.h>
11 #include <linux/types.h>
12 
13 #include "xe_device.h"
14 #include "xe_device_types.h"
15 #include "xe_mmio.h"
16 #include "xe_platform_types.h"
17 #include "xe_pm.h"
18 #include "xe_vsec.h"
19 
20 #include "regs/xe_pmt.h"
21 
/*
 * PMT GUID value for BMG devices.  NOTE: this is NOT a PCI id — it is the
 * value carried in the GUID_DEVICE_ID bitfield of a PMT GUID (see below).
 */
#define BMG_DEVICE_ID 0xE2F8
24 
/*
 * Synthetic VSEC header describing the BMG telemetry capability; handed to
 * the intel_vsec core via xe_vsec_info/bmg_capabilities since BMG does not
 * expose a discoverable PCI VSEC for it.
 */
static struct intel_vsec_header bmg_telemetry = {
	.rev = 1,
	.length = 0x10,
	.id = VSEC_ID_TELEMETRY,
	.num_entries = 2,
	.entry_size = 4,	/* NOTE(review): presumably in DWORDs per intel_vsec convention — confirm */
	.tbir = 0,		/* BAR index the offset is relative to */
	.offset = BMG_DISCOVERY_OFFSET,
};
34 
/*
 * Synthetic VSEC header for the BMG crashlog capability. The crashlog
 * discovery entries sit 0x60 past the telemetry discovery region.
 */
static struct intel_vsec_header bmg_crashlog = {
	.rev = 1,
	.length = 0x10,
	.id = VSEC_ID_CRASHLOG,
	.num_entries = 2,
	.entry_size = 6,	/* NOTE(review): presumably in DWORDs per intel_vsec convention — confirm */
	.tbir = 0,		/* BAR index the offset is relative to */
	.offset = BMG_DISCOVERY_OFFSET + 0x60,
};
44 
/* NULL-terminated list of BMG capability headers passed to intel_vsec. */
static struct intel_vsec_header *bmg_capabilities[] = {
	&bmg_telemetry,
	&bmg_crashlog,
	NULL
};
50 
/*
 * Index into xe_vsec_info[]. XE_VSEC_UNKNOWN (0) doubles as the
 * "no VSEC support" sentinel returned by get_platform_info().
 */
enum xe_vsec {
	XE_VSEC_UNKNOWN = 0,
	XE_VSEC_BMG,
};
55 
/*
 * Per-platform VSEC descriptions, indexed by enum xe_vsec.
 * Not const: xe_vsec_init() fills in priv_data at runtime.
 */
static struct intel_vsec_platform_info xe_vsec_info[] = {
	[XE_VSEC_BMG] = {
		.caps = VSEC_CAP_TELEMETRY | VSEC_CAP_CRASHLOG,
		.headers = bmg_capabilities,
	},
	{ }	/* terminator */
};
63 
/*
 * The GUID will have the following bits to decode:
 *   [0:3]   - {Telemetry space iteration number (0,1,..)}
 *   [4:7]   - Segment (SEGMENT_INDEPENDENT-0, Client-1, Server-2)
 *   [8:11]  - SOC_SKU
 *   [12:27] - Device ID - changes for each down-binned SKU
 *   [28:29] - Capability Type (Crashlog-0, Telemetry Aggregator-1, Watcher-2)
 *   [30:31] - Record-ID (0-PUNIT, 1-OOBMSM_0, 2-OOBMSM_1)
 */
#define GUID_TELEM_ITERATION	GENMASK(3, 0)
#define GUID_SEGMENT		GENMASK(7, 4)
#define GUID_SOC_SKU		GENMASK(11, 8)
#define GUID_DEVICE_ID		GENMASK(27, 12)
#define GUID_CAP_TYPE		GENMASK(29, 28)
#define GUID_RECORD_ID		GENMASK(31, 30)

/* Per-region data offsets, relative to BMG_TELEMETRY_OFFSET in the BAR */
#define PUNIT_TELEMETRY_OFFSET		0x0200
#define PUNIT_WATCHER_OFFSET		0x14A0
#define OOBMSM_0_WATCHER_OFFSET		0x18D8
#define OOBMSM_1_TELEMETRY_OFFSET	0x1000
84 
/* Decoded GUID_RECORD_ID values (encoding documented in the GUID layout above). */
enum record_id {
	PUNIT,
	OOBMSM_0,
	OOBMSM_1,
};
90 
/* Decoded GUID_CAP_TYPE values (encoding documented in the GUID layout above). */
enum capability {
	CRASHLOG,
	TELEMETRY,
	WATCHER,
};
96 
97 static int xe_guid_decode(u32 guid, int *index, u32 *offset)
98 {
99 	u32 record_id = FIELD_GET(GUID_RECORD_ID, guid);
100 	u32 cap_type  = FIELD_GET(GUID_CAP_TYPE, guid);
101 	u32 device_id = FIELD_GET(GUID_DEVICE_ID, guid);
102 
103 	if (device_id != BMG_DEVICE_ID)
104 		return -ENODEV;
105 
106 	if (cap_type > WATCHER)
107 		return -EINVAL;
108 
109 	*offset = 0;
110 
111 	if (cap_type == CRASHLOG) {
112 		*index = record_id == PUNIT ? 2 : 4;
113 		return 0;
114 	}
115 
116 	switch (record_id) {
117 	case PUNIT:
118 		*index = 0;
119 		if (cap_type == TELEMETRY)
120 			*offset = PUNIT_TELEMETRY_OFFSET;
121 		else
122 			*offset = PUNIT_WATCHER_OFFSET;
123 		break;
124 
125 	case OOBMSM_0:
126 		*index = 1;
127 		if (cap_type == WATCHER)
128 			*offset = OOBMSM_0_WATCHER_OFFSET;
129 		break;
130 
131 	case OOBMSM_1:
132 		*index = 1;
133 		if (cap_type == TELEMETRY)
134 			*offset = OOBMSM_1_TELEMETRY_OFFSET;
135 		break;
136 	default:
137 		return -EINVAL;
138 	}
139 
140 	return 0;
141 }
142 
143 int xe_pmt_telem_read(struct pci_dev *pdev, u32 guid, u64 *data, loff_t user_offset,
144 		      u32 count)
145 {
146 	struct xe_device *xe = pdev_to_xe_device(pdev);
147 	void __iomem *telem_addr = xe->mmio.regs + BMG_TELEMETRY_OFFSET;
148 	u32 mem_region;
149 	u32 offset;
150 	int ret;
151 
152 	ret = xe_guid_decode(guid, &mem_region, &offset);
153 	if (ret)
154 		return ret;
155 
156 	telem_addr += offset + user_offset;
157 
158 	guard(mutex)(&xe->pmt.lock);
159 
160 	if (!xe->soc_remapper.set_telem_region)
161 		return -ENODEV;
162 
163 	/* indicate that we are not at an appropriate power level */
164 	if (!xe_pm_runtime_get_if_active(xe))
165 		return -ENODATA;
166 
167 	/* set SoC re-mapper index register based on GUID memory region */
168 	xe->soc_remapper.set_telem_region(xe, mem_region);
169 
170 	memcpy_fromio(data, telem_addr, count);
171 	xe_pm_runtime_put(xe);
172 
173 	return count;
174 }
175 
/* Callbacks handed to the intel_vsec core via priv_data (see xe_vsec_init()). */
static struct pmt_callbacks xe_pmt_cb = {
	.read_telem = xe_pmt_telem_read,
};
179 
/*
 * xe platform id -> enum xe_vsec. Platforms without an explicit entry
 * implicitly map to XE_VSEC_UNKNOWN (0).
 */
static const int vsec_platforms[] = {
	[XE_BATTLEMAGE] = XE_VSEC_BMG,
};
183 
184 static enum xe_vsec get_platform_info(struct xe_device *xe)
185 {
186 	if (xe->info.platform > XE_BATTLEMAGE)
187 		return XE_VSEC_UNKNOWN;
188 
189 	return vsec_platforms[xe->info.platform];
190 }
191 
192 /**
193  * xe_vsec_init - Initialize resources and add intel_vsec auxiliary
194  * interface
195  * @xe: valid xe instance
196  */
197 void xe_vsec_init(struct xe_device *xe)
198 {
199 	struct intel_vsec_platform_info *info;
200 	struct device *dev = xe->drm.dev;
201 	struct pci_dev *pdev = to_pci_dev(dev);
202 	enum xe_vsec platform;
203 
204 	platform = get_platform_info(xe);
205 	if (platform == XE_VSEC_UNKNOWN)
206 		return;
207 
208 	info = &xe_vsec_info[platform];
209 	if (!info->headers)
210 		return;
211 
212 	switch (platform) {
213 	case XE_VSEC_BMG:
214 		info->priv_data = &xe_pmt_cb;
215 		break;
216 	default:
217 		break;
218 	}
219 
220 	/*
221 	 * Register a VSEC. Cleanup is handled using device managed
222 	 * resources.
223 	 */
224 	intel_vsec_register(pdev, info);
225 }
226 MODULE_IMPORT_NS("INTEL_VSEC");
227