1 // SPDX-License-Identifier: MIT
2 /*
3 * Copyright © 2023 Intel Corporation
4 */
5
6 #include <linux/device.h>
7 #include <linux/kobject.h>
8 #include <linux/pci.h>
9 #include <linux/sysfs.h>
10
11 #include "xe_device.h"
12 #include "xe_device_sysfs.h"
13 #include "xe_mmio.h"
14 #include "xe_pcode_api.h"
15 #include "xe_pcode.h"
16 #include "xe_pm.h"
17
18 /**
19 * DOC: Xe device sysfs
20 * Xe driver requires exposing certain tunable knobs controlled by user space for
21 * each graphics device. Considering this, we need to add sysfs attributes at device
22 * level granularity.
23 * These sysfs attributes will be available under pci device kobj directory.
24 *
25 * vram_d3cold_threshold - Report/change vram used threshold(in MB) below
26 * which vram save/restore is permissible during runtime D3cold entry/exit.
27 *
28 * lb_fan_control_version - Fan control version provisioned by late binding.
29 * Exposed only if supported by the device.
30 *
31 * lb_voltage_regulator_version - Voltage regulator version provisioned by late
32 * binding. Exposed only if supported by the device.
33 */
34
35 static ssize_t
vram_d3cold_threshold_show(struct device * dev,struct device_attribute * attr,char * buf)36 vram_d3cold_threshold_show(struct device *dev,
37 struct device_attribute *attr, char *buf)
38 {
39 struct pci_dev *pdev = to_pci_dev(dev);
40 struct xe_device *xe = pdev_to_xe_device(pdev);
41 int ret;
42
43 xe_pm_runtime_get(xe);
44 ret = sysfs_emit(buf, "%d\n", xe->d3cold.vram_threshold);
45 xe_pm_runtime_put(xe);
46
47 return ret;
48 }
49
50 static ssize_t
vram_d3cold_threshold_store(struct device * dev,struct device_attribute * attr,const char * buff,size_t count)51 vram_d3cold_threshold_store(struct device *dev, struct device_attribute *attr,
52 const char *buff, size_t count)
53 {
54 struct pci_dev *pdev = to_pci_dev(dev);
55 struct xe_device *xe = pdev_to_xe_device(pdev);
56 u32 vram_d3cold_threshold;
57 int ret;
58
59 ret = kstrtou32(buff, 0, &vram_d3cold_threshold);
60 if (ret)
61 return ret;
62
63 drm_dbg(&xe->drm, "vram_d3cold_threshold: %u\n", vram_d3cold_threshold);
64
65 xe_pm_runtime_get(xe);
66 ret = xe_pm_set_vram_threshold(xe, vram_d3cold_threshold);
67 xe_pm_runtime_put(xe);
68
69 return ret ?: count;
70 }
71
72 static DEVICE_ATTR_RW(vram_d3cold_threshold);
73
/*
 * Show the fan control table version provisioned via late binding as
 * "major.minor.hotfix.build". If the firmware reports the fan table as not
 * provisioned, all four fields read back as 0.
 */
static ssize_t
lb_fan_control_version_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct xe_device *xe = pdev_to_xe_device(to_pci_dev(dev));
	struct xe_tile *root = xe_device_get_root_tile(xe);
	/*
	 * NOTE(review): ver_low/ver_high are pre-loaded with FAN_TABLE before
	 * the pcode reads — presumably the initial value selects which table's
	 * version the GET_VERSION_* mailbox commands return. Confirm against
	 * the pcode mailbox spec before changing.
	 */
	u32 cap, ver_low = FAN_TABLE, ver_high = FAN_TABLE;
	u16 major = 0, minor = 0, hotfix = 0, build = 0;
	int ret;

	xe_pm_runtime_get(xe);

	/* First check whether a fan table has been provisioned at all. */
	ret = xe_pcode_read(root, PCODE_MBOX(PCODE_LATE_BINDING, GET_CAPABILITY_STATUS, 0),
			    &cap, NULL);
	if (ret)
		goto out;

	if (REG_FIELD_GET(V1_FAN_PROVISIONED, cap)) {
		ret = xe_pcode_read(root, PCODE_MBOX(PCODE_LATE_BINDING, GET_VERSION_LOW, 0),
				    &ver_low, NULL);
		if (ret)
			goto out;

		ret = xe_pcode_read(root, PCODE_MBOX(PCODE_LATE_BINDING, GET_VERSION_HIGH, 0),
				    &ver_high, NULL);
		if (ret)
			goto out;

		/* Version is packed across the two mailbox result words. */
		major = REG_FIELD_GET(MAJOR_VERSION_MASK, ver_low);
		minor = REG_FIELD_GET(MINOR_VERSION_MASK, ver_low);
		hotfix = REG_FIELD_GET(HOTFIX_VERSION_MASK, ver_high);
		build = REG_FIELD_GET(BUILD_VERSION_MASK, ver_high);
	}
out:
	xe_pm_runtime_put(xe);

	return ret ?: sysfs_emit(buf, "%u.%u.%u.%u\n", major, minor, hotfix, build);
}
static DEVICE_ATTR_ADMIN_RO(lb_fan_control_version);
112
/*
 * Show the voltage regulator configuration version provisioned via late
 * binding as "major.minor.hotfix.build". Reads back as 0.0.0.0 when the VR
 * parameters are not provisioned.
 */
static ssize_t
lb_voltage_regulator_version_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct xe_device *xe = pdev_to_xe_device(to_pci_dev(dev));
	struct xe_tile *root = xe_device_get_root_tile(xe);
	/*
	 * NOTE(review): as in lb_fan_control_version_show(), the VR_CONFIG
	 * pre-load in ver_low/ver_high presumably selects the table whose
	 * version the GET_VERSION_* mailbox commands report — confirm with
	 * the pcode mailbox spec before changing.
	 */
	u32 cap, ver_low = VR_CONFIG, ver_high = VR_CONFIG;
	u16 major = 0, minor = 0, hotfix = 0, build = 0;
	int ret;

	xe_pm_runtime_get(xe);

	/* Only query the version when the VR parameters are provisioned. */
	ret = xe_pcode_read(root, PCODE_MBOX(PCODE_LATE_BINDING, GET_CAPABILITY_STATUS, 0),
			    &cap, NULL);
	if (ret)
		goto out;

	if (REG_FIELD_GET(VR_PARAMS_PROVISIONED, cap)) {
		ret = xe_pcode_read(root, PCODE_MBOX(PCODE_LATE_BINDING, GET_VERSION_LOW, 0),
				    &ver_low, NULL);
		if (ret)
			goto out;

		ret = xe_pcode_read(root, PCODE_MBOX(PCODE_LATE_BINDING, GET_VERSION_HIGH, 0),
				    &ver_high, NULL);
		if (ret)
			goto out;

		/* Version is packed across the two mailbox result words. */
		major = REG_FIELD_GET(MAJOR_VERSION_MASK, ver_low);
		minor = REG_FIELD_GET(MINOR_VERSION_MASK, ver_low);
		hotfix = REG_FIELD_GET(HOTFIX_VERSION_MASK, ver_high);
		build = REG_FIELD_GET(BUILD_VERSION_MASK, ver_high);
	}
out:
	xe_pm_runtime_put(xe);

	return ret ?: sysfs_emit(buf, "%u.%u.%u.%u\n", major, minor, hotfix, build);
}
static DEVICE_ATTR_ADMIN_RO(lb_voltage_regulator_version);
151
/*
 * Create the late binding version attributes for the capabilities the
 * firmware reports as supported. A firmware without late binding support
 * (pcode read fails with -ENXIO) is not an error: no files are created and
 * 0 is returned.
 *
 * NOTE(review): if creating the VR attribute fails after the fan attribute
 * was created, the fan attribute is left in place here; it is only removed
 * later by late_bind_remove_files() — verify the caller's error path covers
 * that.
 */
static int late_bind_create_files(struct device *dev)
{
	struct xe_device *xe = pdev_to_xe_device(to_pci_dev(dev));
	struct xe_tile *root = xe_device_get_root_tile(xe);
	u32 cap;
	int ret;

	xe_pm_runtime_get(xe);

	ret = xe_pcode_read(root, PCODE_MBOX(PCODE_LATE_BINDING, GET_CAPABILITY_STATUS, 0),
			    &cap, NULL);
	if (ret) {
		/* -ENXIO means the mailbox command itself is unknown. */
		if (ret == -ENXIO) {
			drm_dbg(&xe->drm, "Late binding not supported by firmware\n");
			ret = 0;
		}
		goto out;
	}

	if (REG_FIELD_GET(V1_FAN_SUPPORTED, cap)) {
		ret = sysfs_create_file(&dev->kobj, &dev_attr_lb_fan_control_version.attr);
		if (ret)
			goto out;
	}

	if (REG_FIELD_GET(VR_PARAMS_SUPPORTED, cap))
		ret = sysfs_create_file(&dev->kobj, &dev_attr_lb_voltage_regulator_version.attr);
out:
	xe_pm_runtime_put(xe);

	return ret;
}
184
/*
 * Remove the late binding attributes created by late_bind_create_files().
 * The same capability query gates removal, so only files that could have
 * been created are removed; if the query fails nothing is removed.
 */
static void late_bind_remove_files(struct device *dev)
{
	struct xe_device *xe = pdev_to_xe_device(to_pci_dev(dev));
	struct xe_tile *root = xe_device_get_root_tile(xe);
	u32 cap;
	int ret;

	xe_pm_runtime_get(xe);

	ret = xe_pcode_read(root, PCODE_MBOX(PCODE_LATE_BINDING, GET_CAPABILITY_STATUS, 0),
			    &cap, NULL);
	if (ret)
		goto out;

	if (REG_FIELD_GET(V1_FAN_SUPPORTED, cap))
		sysfs_remove_file(&dev->kobj, &dev_attr_lb_fan_control_version.attr);

	if (REG_FIELD_GET(VR_PARAMS_SUPPORTED, cap))
		sysfs_remove_file(&dev->kobj, &dev_attr_lb_voltage_regulator_version.attr);
out:
	xe_pm_runtime_put(xe);
}
207
208 /**
209 * DOC: PCIe Gen5 Limitations
210 *
211 * Default link speed of discrete GPUs is determined by configuration parameters
212 * stored in their flash memory, which are subject to override through user
213 * initiated firmware updates. It has been observed that devices configured with
214 * PCIe Gen5 as their default link speed can come across link quality issues due
215 * to host or motherboard limitations and may have to auto-downgrade their link
216 * to PCIe Gen4 speed when faced with unstable link at Gen5, which makes
217 * firmware updates rather risky on such setups. It is required to ensure that
218 * the device is capable of auto-downgrading its link to PCIe Gen4 speed before
219 * pushing the firmware image with PCIe Gen5 as default configuration. This can
220 * be done by reading ``auto_link_downgrade_capable`` sysfs entry, which will
221 * denote if the device is capable of auto-downgrading its link to PCIe Gen4
222 * speed with boolean output value of ``0`` or ``1``, meaning `incapable` or
223 * `capable` respectively.
224 *
225 * .. code-block:: shell
226 *
227 * $ cat /sys/bus/pci/devices/<bdf>/auto_link_downgrade_capable
228 *
229 * Pushing the firmware image with PCIe Gen5 as default configuration on a auto
230 * link downgrade incapable device and facing link instability due to host or
231 * motherboard limitations can result in driver failing to bind to the device,
232 * making further firmware updates impossible with RMA being the only last
233 * resort.
234 *
235 * Link downgrade status of auto link downgrade capable devices is available
236 * through ``auto_link_downgrade_status`` sysfs entry with boolean output value
237 * of ``0`` or ``1``, where ``0`` means no auto-downgrading was required during
238 * link training (which is the optimal scenario) and ``1`` means the device has
239 * auto-downgraded its link to PCIe Gen4 speed due to unstable Gen5 link.
240 *
241 * .. code-block:: shell
242 *
243 * $ cat /sys/bus/pci/devices/<bdf>/auto_link_downgrade_status
244 */
245
246 static ssize_t
auto_link_downgrade_capable_show(struct device * dev,struct device_attribute * attr,char * buf)247 auto_link_downgrade_capable_show(struct device *dev, struct device_attribute *attr, char *buf)
248 {
249 struct pci_dev *pdev = to_pci_dev(dev);
250 struct xe_device *xe = pdev_to_xe_device(pdev);
251 u32 cap, val;
252
253 xe_pm_runtime_get(xe);
254 val = xe_mmio_read32(xe_root_tile_mmio(xe), BMG_PCIE_CAP);
255 xe_pm_runtime_put(xe);
256
257 cap = REG_FIELD_GET(LINK_DOWNGRADE, val);
258 return sysfs_emit(buf, "%u\n", cap == DOWNGRADE_CAPABLE);
259 }
260 static DEVICE_ATTR_ADMIN_RO(auto_link_downgrade_capable);
261
262 static ssize_t
auto_link_downgrade_status_show(struct device * dev,struct device_attribute * attr,char * buf)263 auto_link_downgrade_status_show(struct device *dev, struct device_attribute *attr, char *buf)
264 {
265 struct pci_dev *pdev = to_pci_dev(dev);
266 struct xe_device *xe = pdev_to_xe_device(pdev);
267 /* default the auto_link_downgrade status to 0 */
268 u32 val = 0;
269 int ret;
270
271 xe_pm_runtime_get(xe);
272 ret = xe_pcode_read(xe_device_get_root_tile(xe),
273 PCODE_MBOX(DGFX_PCODE_STATUS, DGFX_GET_INIT_STATUS, 0),
274 &val, NULL);
275 xe_pm_runtime_put(xe);
276
277 return ret ?: sysfs_emit(buf, "%u\n", REG_FIELD_GET(DGFX_LINK_DOWNGRADE_STATUS, val));
278 }
279 static DEVICE_ATTR_ADMIN_RO(auto_link_downgrade_status);
280
/* BATTLEMAGE-only PCIe link attributes, created and removed as a group. */
static const struct attribute *auto_link_downgrade_attrs[] = {
	&dev_attr_auto_link_downgrade_capable.attr,
	&dev_attr_auto_link_downgrade_status.attr,
	NULL
};
286
xe_device_sysfs_fini(void * arg)287 static void xe_device_sysfs_fini(void *arg)
288 {
289 struct xe_device *xe = arg;
290
291 if (xe->d3cold.capable)
292 sysfs_remove_file(&xe->drm.dev->kobj, &dev_attr_vram_d3cold_threshold.attr);
293
294 if (xe->info.platform == XE_BATTLEMAGE) {
295 sysfs_remove_files(&xe->drm.dev->kobj, auto_link_downgrade_attrs);
296 late_bind_remove_files(xe->drm.dev);
297 }
298 }
299
xe_device_sysfs_init(struct xe_device * xe)300 int xe_device_sysfs_init(struct xe_device *xe)
301 {
302 struct device *dev = xe->drm.dev;
303 int ret;
304
305 if (xe->d3cold.capable) {
306 ret = sysfs_create_file(&dev->kobj, &dev_attr_vram_d3cold_threshold.attr);
307 if (ret)
308 return ret;
309 }
310
311 if (xe->info.platform == XE_BATTLEMAGE) {
312 ret = sysfs_create_files(&dev->kobj, auto_link_downgrade_attrs);
313 if (ret)
314 return ret;
315
316 ret = late_bind_create_files(dev);
317 if (ret)
318 return ret;
319 }
320
321 return devm_add_action_or_reset(dev, xe_device_sysfs_fini, xe);
322 }
323