xref: /linux/drivers/gpu/drm/xe/xe_vsec.c (revision b4ada0618eed0fbd1b1630f73deb048c592b06a1)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright © 2024 Intel Corporation */
3 #include <linux/bitfield.h>
4 #include <linux/bits.h>
5 #include <linux/cleanup.h>
6 #include <linux/errno.h>
7 #include <linux/intel_vsec.h>
8 #include <linux/module.h>
9 #include <linux/mutex.h>
10 #include <linux/pci.h>
11 #include <linux/types.h>
12 
13 #include "xe_device.h"
14 #include "xe_device_types.h"
15 #include "xe_drv.h"
16 #include "xe_mmio.h"
17 #include "xe_platform_types.h"
18 #include "xe_pm.h"
19 #include "xe_vsec.h"
20 
21 #include "regs/xe_pmt.h"
22 
23 /* PMT GUID value for BMG devices.  NOTE: this is NOT a PCI id */
24 #define BMG_DEVICE_ID 0xE2F8
25 
/*
 * VSEC header describing the BMG telemetry (PMT) capability; handed to the
 * intel_vsec driver through xe_vsec_info when the device is registered.
 */
static struct intel_vsec_header bmg_telemetry = {
	.rev = 1,
	.length = 0x10,
	.id = VSEC_ID_TELEMETRY,
	.num_entries = 2,
	.entry_size = 4,
	.tbir = 0,				/* region lives in BAR 0 */
	.offset = BMG_DISCOVERY_OFFSET,		/* start of discovery table */
};
35 
/*
 * VSEC header describing the BMG crashlog capability; its discovery entries
 * follow the telemetry entries (0x60 bytes into the discovery region).
 */
static struct intel_vsec_header bmg_crashlog = {
	.rev = 1,
	.length = 0x10,
	.id = VSEC_ID_CRASHLOG,
	.num_entries = 2,
	.entry_size = 6,
	.tbir = 0,				/* region lives in BAR 0 */
	.offset = BMG_DISCOVERY_OFFSET + 0x60,
};
45 
/* NULL-terminated list of VSEC capabilities advertised on BMG devices */
static struct intel_vsec_header *bmg_capabilities[] = {
	&bmg_telemetry,
	&bmg_crashlog,
	NULL
};
51 
/* Platforms with VSEC support known to this file; index into xe_vsec_info */
enum xe_vsec {
	XE_VSEC_UNKNOWN = 0,	/* no VSEC support; xe_vsec_init() bails out */
	XE_VSEC_BMG,
};
56 
/* Per-platform VSEC registration data, indexed by enum xe_vsec */
static struct intel_vsec_platform_info xe_vsec_info[] = {
	[XE_VSEC_BMG] = {
		.caps = VSEC_CAP_TELEMETRY | VSEC_CAP_CRASHLOG,
		.headers = bmg_capabilities,
	},
	{ }	/* sentinel */
};
64 
65 /*
66  * The GUID will have the following bits to decode:
67  *   [0:3]   - {Telemetry space iteration number (0,1,..)}
68  *   [4:7]   - Segment (SEGMENT_INDEPENDENT-0, Client-1, Server-2)
69  *   [8:11]  - SOC_SKU
 *   [12:27] - Device ID - changes for each down-bin SKU
71  *   [28:29] - Capability Type (Crashlog-0, Telemetry Aggregator-1, Watcher-2)
72  *   [30:31] - Record-ID (0-PUNIT, 1-OOBMSM_0, 2-OOBMSM_1)
73  */
74 #define GUID_TELEM_ITERATION	GENMASK(3, 0)
75 #define GUID_SEGMENT		GENMASK(7, 4)
76 #define GUID_SOC_SKU		GENMASK(11, 8)
77 #define GUID_DEVICE_ID		GENMASK(27, 12)
78 #define GUID_CAP_TYPE		GENMASK(29, 28)
79 #define GUID_RECORD_ID		GENMASK(31, 30)
80 
81 #define PUNIT_TELEMETRY_OFFSET		0x0200
82 #define PUNIT_WATCHER_OFFSET		0x14A0
83 #define OOBMSM_0_WATCHER_OFFSET		0x18D8
84 #define OOBMSM_1_TELEMETRY_OFFSET	0x1000
85 
/* Record-ID values encoded in GUID bits [31:30] (see GUID_RECORD_ID) */
enum record_id {
	PUNIT,
	OOBMSM_0,
	OOBMSM_1,
};
91 
/* Capability-type values encoded in GUID bits [29:28] (see GUID_CAP_TYPE) */
enum capability {
	CRASHLOG,
	TELEMETRY,
	WATCHER,
};
97 
98 static int xe_guid_decode(u32 guid, int *index, u32 *offset)
99 {
100 	u32 record_id = FIELD_GET(GUID_RECORD_ID, guid);
101 	u32 cap_type  = FIELD_GET(GUID_CAP_TYPE, guid);
102 	u32 device_id = FIELD_GET(GUID_DEVICE_ID, guid);
103 
104 	if (device_id != BMG_DEVICE_ID)
105 		return -ENODEV;
106 
107 	if (cap_type > WATCHER)
108 		return -EINVAL;
109 
110 	*offset = 0;
111 
112 	if (cap_type == CRASHLOG) {
113 		*index = record_id == PUNIT ? 2 : 4;
114 		return 0;
115 	}
116 
117 	switch (record_id) {
118 	case PUNIT:
119 		*index = 0;
120 		if (cap_type == TELEMETRY)
121 			*offset = PUNIT_TELEMETRY_OFFSET;
122 		else
123 			*offset = PUNIT_WATCHER_OFFSET;
124 		break;
125 
126 	case OOBMSM_0:
127 		*index = 1;
128 		if (cap_type == WATCHER)
129 			*offset = OOBMSM_0_WATCHER_OFFSET;
130 		break;
131 
132 	case OOBMSM_1:
133 		*index = 1;
134 		if (cap_type == TELEMETRY)
135 			*offset = OOBMSM_1_TELEMETRY_OFFSET;
136 		break;
137 	default:
138 		return -EINVAL;
139 	}
140 
141 	return 0;
142 }
143 
/**
 * xe_pmt_telem_read - read raw telemetry bytes from PMT MMIO space
 * @pdev: PCI device of the xe GPU
 * @guid: PMT GUID selecting the telemetry region to read
 * @data: destination buffer for the telemetry bytes
 * @user_offset: caller-requested byte offset within the region
 * @count: number of bytes to copy into @data
 *
 * Decodes @guid into a re-mapper memory region and region offset, programs
 * the SoC re-mapper accordingly and copies @count bytes from the mapped
 * telemetry space.  Readers are serialized by xe->pmt.lock.
 *
 * Return: @count on success, -ENODEV/-EINVAL if @guid cannot be decoded,
 * -ENODATA if the device is not currently at an active power level.
 */
int xe_pmt_telem_read(struct pci_dev *pdev, u32 guid, u64 *data, loff_t user_offset,
		      u32 count)
{
	struct xe_device *xe = pdev_to_xe_device(pdev);
	void __iomem *telem_addr = xe->mmio.regs + BMG_TELEMETRY_OFFSET;
	u32 mem_region;
	u32 offset;
	int ret;

	/*
	 * NOTE(review): &mem_region is u32 * while the parameter is int *;
	 * accepted because the kernel builds with -Wno-pointer-sign, but
	 * matching the types would be cleaner — confirm with maintainers.
	 */
	ret = xe_guid_decode(guid, &mem_region, &offset);
	if (ret)
		return ret;

	telem_addr += offset + user_offset;

	/* scope-based lock: every return below drops xe->pmt.lock */
	guard(mutex)(&xe->pmt.lock);

	/* indicate that we are not at an appropriate power level */
	if (!xe_pm_runtime_get_if_active(xe))
		return -ENODATA;

	/* set SoC re-mapper index register based on GUID memory region */
	xe_mmio_rmw32(xe_root_tile_mmio(xe), SG_REMAP_INDEX1, SG_REMAP_BITS,
		      REG_FIELD_PREP(SG_REMAP_BITS, mem_region));

	memcpy_fromio(data, telem_addr, count);
	xe_pm_runtime_put(xe);

	return count;
}
174 
/* Callbacks passed to the intel_vsec core through info->priv_data */
static struct pmt_callbacks xe_pmt_cb = {
	.read_telem = xe_pmt_telem_read,
};
178 
/*
 * Map xe platform IDs to enum xe_vsec.  Platforms without an explicit
 * initializer read as 0, i.e. XE_VSEC_UNKNOWN.
 */
static const int vsec_platforms[] = {
	[XE_BATTLEMAGE] = XE_VSEC_BMG,
};
182 
183 static enum xe_vsec get_platform_info(struct xe_device *xe)
184 {
185 	if (xe->info.platform > XE_BATTLEMAGE)
186 		return XE_VSEC_UNKNOWN;
187 
188 	return vsec_platforms[xe->info.platform];
189 }
190 
191 /**
192  * xe_vsec_init - Initialize resources and add intel_vsec auxiliary
193  * interface
194  * @xe: valid xe instance
195  */
196 void xe_vsec_init(struct xe_device *xe)
197 {
198 	struct intel_vsec_platform_info *info;
199 	struct device *dev = xe->drm.dev;
200 	struct pci_dev *pdev = to_pci_dev(dev);
201 	enum xe_vsec platform;
202 
203 	platform = get_platform_info(xe);
204 	if (platform == XE_VSEC_UNKNOWN)
205 		return;
206 
207 	info = &xe_vsec_info[platform];
208 	if (!info->headers)
209 		return;
210 
211 	switch (platform) {
212 	case XE_VSEC_BMG:
213 		info->priv_data = &xe_pmt_cb;
214 		break;
215 	default:
216 		break;
217 	}
218 
219 	/*
220 	 * Register a VSEC. Cleanup is handled using device managed
221 	 * resources.
222 	 */
223 	intel_vsec_register(pdev, info);
224 }
225 MODULE_IMPORT_NS("INTEL_VSEC");
226