// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023 Intel Corporation
 */

#include <linux/device.h>
#include <linux/kobject.h>
#include <linux/pci.h>
#include <linux/sysfs.h>

#include "xe_device.h"
#include "xe_device_sysfs.h"
#include "xe_mmio.h"
#include "xe_pcode_api.h"
#include "xe_pcode.h"
#include "xe_pm.h"

/**
 * DOC: Xe device sysfs
 * The Xe driver needs to expose certain tunable knobs, controlled by user
 * space, for each graphics device. To that end, sysfs attributes are added at
 * device-level granularity.
 * These sysfs attributes are available under the PCI device kobject directory.
 *
 * vram_d3cold_threshold - Report/change the used-VRAM threshold (in MB) below
 * which VRAM save/restore is permissible during runtime D3cold entry/exit.
 *
 * lb_fan_control_version - Fan control version provisioned by late binding.
 * Exposed only if supported by the device.
 *
 * lb_voltage_regulator_version - Voltage regulator version provisioned by late
 * binding. Exposed only if supported by the device.
 */

static ssize_t
vram_d3cold_threshold_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct xe_device *xe = pdev_to_xe_device(pdev);
	int ret;

	xe_pm_runtime_get(xe);
	ret = sysfs_emit(buf, "%u\n", xe->d3cold.vram_threshold);
	xe_pm_runtime_put(xe);

	return ret;
}

static ssize_t
vram_d3cold_threshold_store(struct device *dev, struct device_attribute *attr,
			    const char *buff, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct xe_device *xe = pdev_to_xe_device(pdev);
	u32 vram_d3cold_threshold;
	int ret;

	ret = kstrtou32(buff, 0, &vram_d3cold_threshold);
	if (ret)
		return ret;

	drm_dbg(&xe->drm, "vram_d3cold_threshold: %u\n", vram_d3cold_threshold);

	xe_pm_runtime_get(xe);
	ret = xe_pm_set_vram_threshold(xe, vram_d3cold_threshold);
	xe_pm_runtime_put(xe);

	return ret ?: count;
}

static DEVICE_ATTR_RW(vram_d3cold_threshold);

static struct attribute *vram_attrs[] = {
	&dev_attr_vram_d3cold_threshold.attr,
	NULL
};

static const struct attribute_group vram_attr_group = {
	.attrs = vram_attrs,
};
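
/*
 * Example use of vram_d3cold_threshold from user space (illustrative values;
 * <bdf> stands for the device's PCI address). The store path parses with
 * kstrtou32() base 0, so both decimal and hex input are accepted:
 *
 *   $ cat /sys/bus/pci/devices/<bdf>/vram_d3cold_threshold
 *   300
 *   $ echo 512 > /sys/bus/pci/devices/<bdf>/vram_d3cold_threshold
 */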

static ssize_t
lb_fan_control_version_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct xe_device *xe = pdev_to_xe_device(to_pci_dev(dev));
	struct xe_tile *root = xe_device_get_root_tile(xe);
	u32 cap = 0, ver_low = FAN_TABLE, ver_high = FAN_TABLE;
	u16 major = 0, minor = 0, hotfix = 0, build = 0;
	int ret;

	xe_pm_runtime_get(xe);

	ret = xe_pcode_read(root, PCODE_MBOX(PCODE_LATE_BINDING, GET_CAPABILITY_STATUS, 0),
			    &cap, NULL);
	if (ret)
		goto out;

	if (REG_FIELD_GET(V1_FAN_PROVISIONED, cap)) {
		ret = xe_pcode_read(root, PCODE_MBOX(PCODE_LATE_BINDING, GET_VERSION_LOW, 0),
				    &ver_low, NULL);
		if (ret)
			goto out;

		ret = xe_pcode_read(root, PCODE_MBOX(PCODE_LATE_BINDING, GET_VERSION_HIGH, 0),
				    &ver_high, NULL);
		if (ret)
			goto out;

		major = REG_FIELD_GET(MAJOR_VERSION_MASK, ver_low);
		minor = REG_FIELD_GET(MINOR_VERSION_MASK, ver_low);
		hotfix = REG_FIELD_GET(HOTFIX_VERSION_MASK, ver_high);
		build = REG_FIELD_GET(BUILD_VERSION_MASK, ver_high);
	}
out:
	xe_pm_runtime_put(xe);

	return ret ?: sysfs_emit(buf, "%u.%u.%u.%u\n", major, minor, hotfix, build);
}
static DEVICE_ATTR_ADMIN_RO(lb_fan_control_version);

static ssize_t
lb_voltage_regulator_version_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct xe_device *xe = pdev_to_xe_device(to_pci_dev(dev));
	struct xe_tile *root = xe_device_get_root_tile(xe);
	u32 cap = 0, ver_low = VR_CONFIG, ver_high = VR_CONFIG;
	u16 major = 0, minor = 0, hotfix = 0, build = 0;
	int ret;

	xe_pm_runtime_get(xe);

	ret = xe_pcode_read(root, PCODE_MBOX(PCODE_LATE_BINDING, GET_CAPABILITY_STATUS, 0),
			    &cap, NULL);
	if (ret)
		goto out;

	if (REG_FIELD_GET(VR_PARAMS_PROVISIONED, cap)) {
		ret = xe_pcode_read(root, PCODE_MBOX(PCODE_LATE_BINDING, GET_VERSION_LOW, 0),
				    &ver_low, NULL);
		if (ret)
			goto out;

		ret = xe_pcode_read(root, PCODE_MBOX(PCODE_LATE_BINDING, GET_VERSION_HIGH, 0),
				    &ver_high, NULL);
		if (ret)
			goto out;

		major = REG_FIELD_GET(MAJOR_VERSION_MASK, ver_low);
		minor = REG_FIELD_GET(MINOR_VERSION_MASK, ver_low);
		hotfix = REG_FIELD_GET(HOTFIX_VERSION_MASK, ver_high);
		build = REG_FIELD_GET(BUILD_VERSION_MASK, ver_high);
	}
out:
	xe_pm_runtime_put(xe);

	return ret ?: sysfs_emit(buf, "%u.%u.%u.%u\n", major, minor, hotfix, build);
}
static DEVICE_ATTR_ADMIN_RO(lb_voltage_regulator_version);

static struct attribute *late_bind_attrs[] = {
	&dev_attr_lb_fan_control_version.attr,
	&dev_attr_lb_voltage_regulator_version.attr,
	NULL
};

/* Expose each lb_* attribute only if PCODE reports the matching capability. */
static umode_t late_bind_attr_is_visible(struct kobject *kobj,
					 struct attribute *attr, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct xe_device *xe = pdev_to_xe_device(to_pci_dev(dev));
	struct xe_tile *root = xe_device_get_root_tile(xe);
	u32 cap = 0;
	int ret;

	xe_pm_runtime_get(xe);

	ret = xe_pcode_read(root, PCODE_MBOX(PCODE_LATE_BINDING, GET_CAPABILITY_STATUS, 0),
			    &cap, NULL);
	xe_pm_runtime_put(xe);
	if (ret)
		return 0;

	if (attr == &dev_attr_lb_fan_control_version.attr &&
	    REG_FIELD_GET(V1_FAN_SUPPORTED, cap))
		return attr->mode;
	if (attr == &dev_attr_lb_voltage_regulator_version.attr &&
	    REG_FIELD_GET(VR_PARAMS_SUPPORTED, cap))
		return attr->mode;

	return 0;
}

static const struct attribute_group late_bind_attr_group = {
	.attrs = late_bind_attrs,
	.is_visible = late_bind_attr_is_visible,
};
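
/*
 * Because late_bind_attr_is_visible() returns 0 for unsupported attributes,
 * sysfs never creates those files at all. Illustrative output on a device
 * whose capability word reports fan-table support only (values made up):
 *
 *   $ ls /sys/bus/pci/devices/<bdf>/ | grep '^lb_'
 *   lb_fan_control_version
 *   $ cat /sys/bus/pci/devices/<bdf>/lb_fan_control_version
 *   0.0.0.0
 *
 * A supported but not yet provisioned table reads back as 0.0.0.0, since the
 * version fields are only filled in once the *_PROVISIONED bit is set.
 */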

/**
 * DOC: PCIe Gen5 Limitations
 *
 * Default link speed of discrete GPUs is determined by configuration parameters
 * stored in their flash memory, which are subject to override through
 * user-initiated firmware updates. It has been observed that devices configured
 * with PCIe Gen5 as their default link speed can encounter link quality issues
 * due to host or motherboard limitations and may have to auto-downgrade their
 * link to PCIe Gen4 speed when faced with an unstable link at Gen5, which makes
 * firmware updates rather risky on such setups. It is required to ensure that
 * the device is capable of auto-downgrading its link to PCIe Gen4 speed before
 * pushing a firmware image with PCIe Gen5 as the default configuration. This
 * can be done by reading the ``auto_link_downgrade_capable`` sysfs entry, which
 * denotes whether the device is capable of auto-downgrading its link to PCIe
 * Gen4 speed with a boolean output value of ``0`` or ``1``, meaning `incapable`
 * or `capable` respectively.
 *
 * .. code-block:: shell
 *
 *    $ cat /sys/bus/pci/devices/<bdf>/auto_link_downgrade_capable
 *
 * Pushing a firmware image with PCIe Gen5 as the default configuration onto an
 * auto-link-downgrade-incapable device and then facing link instability due to
 * host or motherboard limitations can result in the driver failing to bind to
 * the device, making further firmware updates impossible and leaving RMA as the
 * only resort.
 *
 * Link downgrade status of auto-link-downgrade-capable devices is available
 * through the ``auto_link_downgrade_status`` sysfs entry with a boolean output
 * value of ``0`` or ``1``, where ``0`` means no auto-downgrading was required
 * during link training (which is the optimal scenario) and ``1`` means the
 * device has auto-downgraded its link to PCIe Gen4 speed due to an unstable
 * Gen5 link.
 *
 * .. code-block:: shell
 *
 *    $ cat /sys/bus/pci/devices/<bdf>/auto_link_downgrade_status
 */

static ssize_t
auto_link_downgrade_capable_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct xe_device *xe = pdev_to_xe_device(pdev);
	u32 cap, val;

	xe_pm_runtime_get(xe);
	val = xe_mmio_read32(xe_root_tile_mmio(xe), BMG_PCIE_CAP);
	xe_pm_runtime_put(xe);

	cap = REG_FIELD_GET(LINK_DOWNGRADE, val);
	return sysfs_emit(buf, "%u\n", cap == DOWNGRADE_CAPABLE);
}
static DEVICE_ATTR_ADMIN_RO(auto_link_downgrade_capable);

static ssize_t
auto_link_downgrade_status_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct xe_device *xe = pdev_to_xe_device(pdev);
	/* default the auto_link_downgrade status to 0 */
	u32 val = 0;
	int ret;

	xe_pm_runtime_get(xe);
	ret = xe_pcode_read(xe_device_get_root_tile(xe),
			    PCODE_MBOX(DGFX_PCODE_STATUS, DGFX_GET_INIT_STATUS, 0),
			    &val, NULL);
	xe_pm_runtime_put(xe);

	return ret ?: sysfs_emit(buf, "%u\n", REG_FIELD_GET(DGFX_LINK_DOWNGRADE_STATUS, val));
}
static DEVICE_ATTR_ADMIN_RO(auto_link_downgrade_status);

static struct attribute *auto_link_downgrade_attrs[] = {
	&dev_attr_auto_link_downgrade_capable.attr,
	&dev_attr_auto_link_downgrade_status.attr,
	NULL
};

static const struct attribute_group auto_link_downgrade_attr_group = {
	.attrs = auto_link_downgrade_attrs,
};

/**
 * xe_device_sysfs_init - Add device-level sysfs attributes
 * @xe: the &xe_device
 *
 * Register the attribute groups that apply to this device. All groups are
 * added with devm_device_add_group(), so they are removed automatically when
 * the device is unbound and no explicit teardown is needed.
 *
 * Return: 0 on success, negative error code on failure.
 */
int xe_device_sysfs_init(struct xe_device *xe)
{
	struct device *dev = xe->drm.dev;
	int ret;

	if (xe->d3cold.capable) {
		ret = devm_device_add_group(dev, &vram_attr_group);
		if (ret)
			return ret;
	}

	if (xe->info.platform == XE_BATTLEMAGE && !IS_SRIOV_VF(xe)) {
		ret = devm_device_add_group(dev, &auto_link_downgrade_attr_group);
		if (ret)
			return ret;

		ret = devm_device_add_group(dev, &late_bind_attr_group);
		if (ret)
			return ret;
	}

	return 0;
}
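
/*
 * Illustrative resulting sysfs layout on a d3cold-capable BATTLEMAGE device
 * (assuming PCODE reports both late-binding capabilities; <bdf> stands for
 * the device's PCI address):
 *
 *   /sys/bus/pci/devices/<bdf>/vram_d3cold_threshold
 *   /sys/bus/pci/devices/<bdf>/auto_link_downgrade_capable
 *   /sys/bus/pci/devices/<bdf>/auto_link_downgrade_status
 *   /sys/bus/pci/devices/<bdf>/lb_fan_control_version
 *   /sys/bus/pci/devices/<bdf>/lb_voltage_regulator_version
 */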