// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023 Intel Corporation
 */

#include <linux/device.h>
#include <linux/kobject.h>
#include <linux/pci.h>
#include <linux/sysfs.h>

#include "xe_device.h"
#include "xe_device_sysfs.h"
#include "xe_mmio.h"
#include "xe_pcode_api.h"
#include "xe_pcode.h"
#include "xe_pm.h"

/**
 * DOC: Xe device sysfs
 * Xe driver requires exposing certain tunable knobs controlled by user space for
 * each graphics device. Considering this, we need to add sysfs attributes at device
 * level granularity.
 * These sysfs attributes will be available under pci device kobj directory.
 *
 * vram_d3cold_threshold - Report/change vram used threshold(in MB) below
 * which vram save/restore is permissible during runtime D3cold entry/exit.
 *
 * lb_fan_control_version - Fan control version provisioned by late binding.
 * Exposed only if supported by the device.
 *
 * lb_voltage_regulator_version - Voltage regulator version provisioned by late
 * binding. Exposed only if supported by the device.
33 */ 34 35 static ssize_t 36 vram_d3cold_threshold_show(struct device *dev, 37 struct device_attribute *attr, char *buf) 38 { 39 struct pci_dev *pdev = to_pci_dev(dev); 40 struct xe_device *xe = pdev_to_xe_device(pdev); 41 int ret; 42 43 xe_pm_runtime_get(xe); 44 ret = sysfs_emit(buf, "%d\n", xe->d3cold.vram_threshold); 45 xe_pm_runtime_put(xe); 46 47 return ret; 48 } 49 50 static ssize_t 51 vram_d3cold_threshold_store(struct device *dev, struct device_attribute *attr, 52 const char *buff, size_t count) 53 { 54 struct pci_dev *pdev = to_pci_dev(dev); 55 struct xe_device *xe = pdev_to_xe_device(pdev); 56 u32 vram_d3cold_threshold; 57 int ret; 58 59 ret = kstrtou32(buff, 0, &vram_d3cold_threshold); 60 if (ret) 61 return ret; 62 63 drm_dbg(&xe->drm, "vram_d3cold_threshold: %u\n", vram_d3cold_threshold); 64 65 xe_pm_runtime_get(xe); 66 ret = xe_pm_set_vram_threshold(xe, vram_d3cold_threshold); 67 xe_pm_runtime_put(xe); 68 69 return ret ?: count; 70 } 71 72 static DEVICE_ATTR_RW(vram_d3cold_threshold); 73 74 static ssize_t 75 lb_fan_control_version_show(struct device *dev, struct device_attribute *attr, char *buf) 76 { 77 struct xe_device *xe = pdev_to_xe_device(to_pci_dev(dev)); 78 struct xe_tile *root = xe_device_get_root_tile(xe); 79 u32 cap, ver_low = FAN_TABLE, ver_high = FAN_TABLE; 80 u16 major = 0, minor = 0, hotfix = 0, build = 0; 81 int ret; 82 83 xe_pm_runtime_get(xe); 84 85 ret = xe_pcode_read(root, PCODE_MBOX(PCODE_LATE_BINDING, GET_CAPABILITY_STATUS, 0), 86 &cap, NULL); 87 if (ret) 88 goto out; 89 90 if (REG_FIELD_GET(V1_FAN_PROVISIONED, cap)) { 91 ret = xe_pcode_read(root, PCODE_MBOX(PCODE_LATE_BINDING, GET_VERSION_LOW, 0), 92 &ver_low, NULL); 93 if (ret) 94 goto out; 95 96 ret = xe_pcode_read(root, PCODE_MBOX(PCODE_LATE_BINDING, GET_VERSION_HIGH, 0), 97 &ver_high, NULL); 98 if (ret) 99 goto out; 100 101 major = REG_FIELD_GET(MAJOR_VERSION_MASK, ver_low); 102 minor = REG_FIELD_GET(MINOR_VERSION_MASK, ver_low); 103 hotfix = 
REG_FIELD_GET(HOTFIX_VERSION_MASK, ver_high); 104 build = REG_FIELD_GET(BUILD_VERSION_MASK, ver_high); 105 } 106 out: 107 xe_pm_runtime_put(xe); 108 109 return ret ?: sysfs_emit(buf, "%u.%u.%u.%u\n", major, minor, hotfix, build); 110 } 111 static DEVICE_ATTR_ADMIN_RO(lb_fan_control_version); 112 113 static ssize_t 114 lb_voltage_regulator_version_show(struct device *dev, struct device_attribute *attr, char *buf) 115 { 116 struct xe_device *xe = pdev_to_xe_device(to_pci_dev(dev)); 117 struct xe_tile *root = xe_device_get_root_tile(xe); 118 u32 cap, ver_low = VR_CONFIG, ver_high = VR_CONFIG; 119 u16 major = 0, minor = 0, hotfix = 0, build = 0; 120 int ret; 121 122 xe_pm_runtime_get(xe); 123 124 ret = xe_pcode_read(root, PCODE_MBOX(PCODE_LATE_BINDING, GET_CAPABILITY_STATUS, 0), 125 &cap, NULL); 126 if (ret) 127 goto out; 128 129 if (REG_FIELD_GET(VR_PARAMS_PROVISIONED, cap)) { 130 ret = xe_pcode_read(root, PCODE_MBOX(PCODE_LATE_BINDING, GET_VERSION_LOW, 0), 131 &ver_low, NULL); 132 if (ret) 133 goto out; 134 135 ret = xe_pcode_read(root, PCODE_MBOX(PCODE_LATE_BINDING, GET_VERSION_HIGH, 0), 136 &ver_high, NULL); 137 if (ret) 138 goto out; 139 140 major = REG_FIELD_GET(MAJOR_VERSION_MASK, ver_low); 141 minor = REG_FIELD_GET(MINOR_VERSION_MASK, ver_low); 142 hotfix = REG_FIELD_GET(HOTFIX_VERSION_MASK, ver_high); 143 build = REG_FIELD_GET(BUILD_VERSION_MASK, ver_high); 144 } 145 out: 146 xe_pm_runtime_put(xe); 147 148 return ret ?: sysfs_emit(buf, "%u.%u.%u.%u\n", major, minor, hotfix, build); 149 } 150 static DEVICE_ATTR_ADMIN_RO(lb_voltage_regulator_version); 151 152 static int late_bind_create_files(struct device *dev) 153 { 154 struct xe_device *xe = pdev_to_xe_device(to_pci_dev(dev)); 155 struct xe_tile *root = xe_device_get_root_tile(xe); 156 u32 cap; 157 int ret; 158 159 xe_pm_runtime_get(xe); 160 161 ret = xe_pcode_read(root, PCODE_MBOX(PCODE_LATE_BINDING, GET_CAPABILITY_STATUS, 0), 162 &cap, NULL); 163 if (ret) 164 goto out; 165 166 if 
(REG_FIELD_GET(V1_FAN_SUPPORTED, cap)) { 167 ret = sysfs_create_file(&dev->kobj, &dev_attr_lb_fan_control_version.attr); 168 if (ret) 169 goto out; 170 } 171 172 if (REG_FIELD_GET(VR_PARAMS_SUPPORTED, cap)) 173 ret = sysfs_create_file(&dev->kobj, &dev_attr_lb_voltage_regulator_version.attr); 174 out: 175 xe_pm_runtime_put(xe); 176 177 return ret; 178 } 179 180 static void late_bind_remove_files(struct device *dev) 181 { 182 struct xe_device *xe = pdev_to_xe_device(to_pci_dev(dev)); 183 struct xe_tile *root = xe_device_get_root_tile(xe); 184 u32 cap; 185 int ret; 186 187 xe_pm_runtime_get(xe); 188 189 ret = xe_pcode_read(root, PCODE_MBOX(PCODE_LATE_BINDING, GET_CAPABILITY_STATUS, 0), 190 &cap, NULL); 191 if (ret) 192 goto out; 193 194 if (REG_FIELD_GET(V1_FAN_SUPPORTED, cap)) 195 sysfs_remove_file(&dev->kobj, &dev_attr_lb_fan_control_version.attr); 196 197 if (REG_FIELD_GET(VR_PARAMS_SUPPORTED, cap)) 198 sysfs_remove_file(&dev->kobj, &dev_attr_lb_voltage_regulator_version.attr); 199 out: 200 xe_pm_runtime_put(xe); 201 } 202 203 /** 204 * DOC: PCIe Gen5 Limitations 205 * 206 * Default link speed of discrete GPUs is determined by configuration parameters 207 * stored in their flash memory, which are subject to override through user 208 * initiated firmware updates. It has been observed that devices configured with 209 * PCIe Gen5 as their default link speed can come across link quality issues due 210 * to host or motherboard limitations and may have to auto-downgrade their link 211 * to PCIe Gen4 speed when faced with unstable link at Gen5, which makes 212 * firmware updates rather risky on such setups. It is required to ensure that 213 * the device is capable of auto-downgrading its link to PCIe Gen4 speed before 214 * pushing the firmware image with PCIe Gen5 as default configuration. 
This can 215 * be done by reading ``auto_link_downgrade_capable`` sysfs entry, which will 216 * denote if the device is capable of auto-downgrading its link to PCIe Gen4 217 * speed with boolean output value of ``0`` or ``1``, meaning `incapable` or 218 * `capable` respectively. 219 * 220 * .. code-block:: shell 221 * 222 * $ cat /sys/bus/pci/devices/<bdf>/auto_link_downgrade_capable 223 * 224 * Pushing the firmware image with PCIe Gen5 as default configuration on a auto 225 * link downgrade incapable device and facing link instability due to host or 226 * motherboard limitations can result in driver failing to bind to the device, 227 * making further firmware updates impossible with RMA being the only last 228 * resort. 229 * 230 * Link downgrade status of auto link downgrade capable devices is available 231 * through ``auto_link_downgrade_status`` sysfs entry with boolean output value 232 * of ``0`` or ``1``, where ``0`` means no auto-downgrading was required during 233 * link training (which is the optimal scenario) and ``1`` means the device has 234 * auto-downgraded its link to PCIe Gen4 speed due to unstable Gen5 link. 235 * 236 * .. 
code-block:: shell 237 * 238 * $ cat /sys/bus/pci/devices/<bdf>/auto_link_downgrade_status 239 */ 240 241 static ssize_t 242 auto_link_downgrade_capable_show(struct device *dev, struct device_attribute *attr, char *buf) 243 { 244 struct pci_dev *pdev = to_pci_dev(dev); 245 struct xe_device *xe = pdev_to_xe_device(pdev); 246 u32 cap, val; 247 248 xe_pm_runtime_get(xe); 249 val = xe_mmio_read32(xe_root_tile_mmio(xe), BMG_PCIE_CAP); 250 xe_pm_runtime_put(xe); 251 252 cap = REG_FIELD_GET(LINK_DOWNGRADE, val); 253 return sysfs_emit(buf, "%u\n", cap == DOWNGRADE_CAPABLE); 254 } 255 static DEVICE_ATTR_ADMIN_RO(auto_link_downgrade_capable); 256 257 static ssize_t 258 auto_link_downgrade_status_show(struct device *dev, struct device_attribute *attr, char *buf) 259 { 260 struct pci_dev *pdev = to_pci_dev(dev); 261 struct xe_device *xe = pdev_to_xe_device(pdev); 262 /* default the auto_link_downgrade status to 0 */ 263 u32 val = 0; 264 int ret; 265 266 xe_pm_runtime_get(xe); 267 ret = xe_pcode_read(xe_device_get_root_tile(xe), 268 PCODE_MBOX(DGFX_PCODE_STATUS, DGFX_GET_INIT_STATUS, 0), 269 &val, NULL); 270 xe_pm_runtime_put(xe); 271 272 return ret ?: sysfs_emit(buf, "%u\n", REG_FIELD_GET(DGFX_LINK_DOWNGRADE_STATUS, val)); 273 } 274 static DEVICE_ATTR_ADMIN_RO(auto_link_downgrade_status); 275 276 static const struct attribute *auto_link_downgrade_attrs[] = { 277 &dev_attr_auto_link_downgrade_capable.attr, 278 &dev_attr_auto_link_downgrade_status.attr, 279 NULL 280 }; 281 282 static void xe_device_sysfs_fini(void *arg) 283 { 284 struct xe_device *xe = arg; 285 286 if (xe->d3cold.capable) 287 sysfs_remove_file(&xe->drm.dev->kobj, &dev_attr_vram_d3cold_threshold.attr); 288 289 if (xe->info.platform == XE_BATTLEMAGE) { 290 sysfs_remove_files(&xe->drm.dev->kobj, auto_link_downgrade_attrs); 291 late_bind_remove_files(xe->drm.dev); 292 } 293 } 294 295 int xe_device_sysfs_init(struct xe_device *xe) 296 { 297 struct device *dev = xe->drm.dev; 298 int ret; 299 300 if 
(xe->d3cold.capable) { 301 ret = sysfs_create_file(&dev->kobj, &dev_attr_vram_d3cold_threshold.attr); 302 if (ret) 303 return ret; 304 } 305 306 if (xe->info.platform == XE_BATTLEMAGE) { 307 ret = sysfs_create_files(&dev->kobj, auto_link_downgrade_attrs); 308 if (ret) 309 return ret; 310 311 ret = late_bind_create_files(dev); 312 if (ret) 313 return ret; 314 } 315 316 return devm_add_action_or_reset(dev, xe_device_sysfs_fini, xe); 317 } 318