// SPDX-License-Identifier: GPL-2.0
/* Copyright © 2024 Intel Corporation */
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/intel_vsec.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/types.h>

#include "xe_device.h"
#include "xe_device_types.h"
#include "xe_drv.h"
#include "xe_mmio.h"
#include "xe_platform_types.h"
#include "xe_pm.h"
#include "xe_vsec.h"

#include "regs/xe_pmt.h"

/* PMT GUID value for BMG devices. NOTE: this is NOT a PCI id */
#define BMG_DEVICE_ID 0xE2F8

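/*
 * Static VSEC-style headers handed to intel_vsec_register() below. Each
 * header points the intel_vsec core at a discovery table in BAR 0
 * (.tbir = 0) at the given offset; BMG exposes a telemetry region plus
 * PUNIT and OOBMSM crashlog regions this way.
 */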
static struct intel_vsec_header bmg_telemetry = {
	.length = 0x10,
	.id = VSEC_ID_TELEMETRY,
	.num_entries = 2,
	.entry_size = 4,
	.tbir = 0,
	.offset = BMG_DISCOVERY_OFFSET,
};

static struct intel_vsec_header bmg_punit_crashlog = {
	.length = 0x10,
	.id = VSEC_ID_CRASHLOG,
	.num_entries = 1,
	.entry_size = 4,
	.tbir = 0,
	.offset = BMG_DISCOVERY_OFFSET + 0x60,
};

static struct intel_vsec_header bmg_oobmsm_crashlog = {
	.length = 0x10,
	.id = VSEC_ID_CRASHLOG,
	.num_entries = 1,
	.entry_size = 4,
	.tbir = 0,
	.offset = BMG_DISCOVERY_OFFSET + 0x78,
};

static struct intel_vsec_header *bmg_capabilities[] = {
	&bmg_telemetry,
	&bmg_punit_crashlog,
	&bmg_oobmsm_crashlog,
	NULL
};

enum xe_vsec {
	XE_VSEC_UNKNOWN = 0,
	XE_VSEC_BMG,
};

static struct intel_vsec_platform_info xe_vsec_info[] = {
	[XE_VSEC_BMG] = {
		.caps = VSEC_CAP_TELEMETRY | VSEC_CAP_CRASHLOG,
		.headers = bmg_capabilities,
	},
	{ }
};

/*
 * The GUID will have the following bits to decode:
 *	[0:3]   - Telemetry space iteration number (0, 1, ...)
 *	[4:7]   - Segment (SEGMENT_INDEPENDENT-0, Client-1, Server-2)
 *	[8:11]  - SOC_SKU
 *	[12:27] - Device ID - changes for each down-bin SKU
 *	[28:29] - Capability Type (Crashlog-0, Telemetry Aggregator-1, Watcher-2)
 *	[30:31] - Record-ID (0-PUNIT, 1-OOBMSM_0, 2-OOBMSM_1)
 *
 * See the comment above xe_guid_decode() for a decoded example.
 */
#define GUID_TELEM_ITERATION	GENMASK(3, 0)
#define GUID_SEGMENT		GENMASK(7, 4)
#define GUID_SOC_SKU		GENMASK(11, 8)
#define GUID_DEVICE_ID		GENMASK(27, 12)
#define GUID_CAP_TYPE		GENMASK(29, 28)
#define GUID_RECORD_ID		GENMASK(31, 30)

#define PUNIT_TELEMETRY_OFFSET		0x0200
#define PUNIT_WATCHER_OFFSET		0x14A0
#define OOBMSM_0_WATCHER_OFFSET		0x18D8
#define OOBMSM_1_TELEMETRY_OFFSET	0x1000

enum record_id {
	PUNIT,
	OOBMSM_0,
	OOBMSM_1,
};

enum capability {
	CRASHLOG,
	TELEMETRY,
	WATCHER,
};

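/*
 * xe_guid_decode - decode a PMT GUID into a re-mapper index and offset
 * @guid: GUID reported by the telemetry/crashlog discovery table
 * @index: returns the SoC re-mapper region selector that
 *	   xe_pmt_telem_read() programs into SG_REMAP_INDEX1
 * @offset: returns the record offset within the remapped telemetry window
 *
 * Illustrative (hypothetical) example: a GUID of 0x1E2F8000 carries
 * device ID 0xE2F8, capability type TELEMETRY and record ID PUNIT, so it
 * decodes to *index = 0 and *offset = PUNIT_TELEMETRY_OFFSET. Real GUIDs
 * additionally encode the iteration, segment and SOC_SKU fields in bits
 * [0:11], which are not needed for this decode.
 *
 * Return: 0 on success, -ENODEV for a non-BMG device ID, -EINVAL for an
 * unknown capability type or record ID.
 */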
static int xe_guid_decode(u32 guid, int *index, u32 *offset)
{
	u32 record_id = FIELD_GET(GUID_RECORD_ID, guid);
	u32 cap_type = FIELD_GET(GUID_CAP_TYPE, guid);
	u32 device_id = FIELD_GET(GUID_DEVICE_ID, guid);

	if (device_id != BMG_DEVICE_ID)
		return -ENODEV;

	if (cap_type > WATCHER)
		return -EINVAL;

	*offset = 0;

	if (cap_type == CRASHLOG) {
		*index = record_id == PUNIT ? 2 : 4;
		return 0;
	}

	switch (record_id) {
	case PUNIT:
		*index = 0;
		if (cap_type == TELEMETRY)
			*offset = PUNIT_TELEMETRY_OFFSET;
		else
			*offset = PUNIT_WATCHER_OFFSET;
		break;

	case OOBMSM_0:
		*index = 1;
		if (cap_type == WATCHER)
			*offset = OOBMSM_0_WATCHER_OFFSET;
		break;

	case OOBMSM_1:
		*index = 1;
		if (cap_type == TELEMETRY)
			*offset = OOBMSM_1_TELEMETRY_OFFSET;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

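/*
 * xe_pmt_telem_read - PMT telemetry read callback for BMG
 * @pdev: PCI device the VSEC was registered for
 * @guid: GUID of the telemetry region being read
 * @data: buffer to copy the telemetry data into
 * @user_offset: offset into the telemetry region requested by the caller
 * @count: number of bytes to read
 *
 * Read callback plugged into &xe_pmt_cb and handed to the intel_vsec core
 * as priv_data from xe_vsec_init(). The GUID selects the SoC re-mapper
 * region and base offset; the read is only serviced while the device is
 * already runtime-active (xe_pm_runtime_get_if_active()).
 *
 * Return: number of bytes read, -ENODATA if the device is runtime
 * suspended, or a negative error code from GUID decoding.
 */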
static int xe_pmt_telem_read(struct pci_dev *pdev, u32 guid, u64 *data, loff_t user_offset,
			     u32 count)
{
	struct xe_device *xe = pdev_to_xe_device(pdev);
	void __iomem *telem_addr = xe->mmio.regs + BMG_TELEMETRY_OFFSET;
	int mem_region; /* SoC re-mapper region selector, see xe_guid_decode() */
	u32 offset;
	int ret;

	ret = xe_guid_decode(guid, &mem_region, &offset);
	if (ret)
		return ret;

	telem_addr += offset + user_offset;

	guard(mutex)(&xe->pmt.lock);

	/* indicate that we are not at an appropriate power level */
	if (!xe_pm_runtime_get_if_active(xe))
		return -ENODATA;

	/* set SoC re-mapper index register based on GUID memory region */
	xe_mmio_rmw32(xe_root_tile_mmio(xe), SG_REMAP_INDEX1, SG_REMAP_BITS,
		      REG_FIELD_PREP(SG_REMAP_BITS, mem_region));

	memcpy_fromio(data, telem_addr, count);
	xe_pm_runtime_put(xe);

	return count;
}

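/*
 * Callbacks passed to the intel_vsec core (as priv_data in xe_vsec_init())
 * so that PMT telemetry reads are routed back through the xe driver, which
 * owns the MMIO mapping, the pmt lock and runtime PM for the device.
 */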
static struct pmt_callbacks xe_pmt_cb = {
	.read_telem = xe_pmt_telem_read,
};

static const int vsec_platforms[] = {
	[XE_BATTLEMAGE] = XE_VSEC_BMG,
};

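/*
 * Look up the VSEC platform entry for this device. Platforms that are not
 * listed in vsec_platforms[] (or are newer than XE_BATTLEMAGE) resolve to
 * XE_VSEC_UNKNOWN and are skipped by xe_vsec_init().
 */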
static enum xe_vsec get_platform_info(struct xe_device *xe)
{
	if (xe->info.platform > XE_BATTLEMAGE)
		return XE_VSEC_UNKNOWN;

	return vsec_platforms[xe->info.platform];
}

/**
 * xe_vsec_init - Initialize resources and add intel_vsec auxiliary
 * interface
 * @xe: valid xe instance
 */
void xe_vsec_init(struct xe_device *xe)
{
	struct intel_vsec_platform_info *info;
	struct device *dev = xe->drm.dev;
	struct pci_dev *pdev = to_pci_dev(dev);
	enum xe_vsec platform;

	platform = get_platform_info(xe);
	if (platform == XE_VSEC_UNKNOWN)
		return;

	info = &xe_vsec_info[platform];
	if (!info->headers)
		return;

	switch (platform) {
	case XE_VSEC_BMG:
		info->priv_data = &xe_pmt_cb;
		break;
	default:
		break;
	}

	/*
	 * Register a VSEC. Cleanup is handled using device managed
	 * resources.
	 */
	intel_vsec_register(pdev, info);
}
MODULE_IMPORT_NS("INTEL_VSEC");