// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023 Intel Corporation
 */

#include <linux/hwmon-sysfs.h>
#include <linux/hwmon.h>
#include <linux/jiffies.h>
#include <linux/types.h>
#include <linux/units.h>

#include <drm/drm_managed.h>
#include "regs/xe_gt_regs.h"
#include "regs/xe_mchbar_regs.h"
#include "regs/xe_pcode_regs.h"
#include "xe_device.h"
#include "xe_hwmon.h"
#include "xe_mmio.h"
#include "xe_pcode.h"
#include "xe_pcode_api.h"
#include "xe_sriov.h"
#include "xe_pm.h"
#include "xe_vsec.h"
#include "regs/xe_pmt.h"

/* Abstract register ids; mapped to platform-specific MMIO regs in xe_hwmon_get_reg(). */
enum xe_hwmon_reg {
	REG_TEMP,
	REG_PKG_RAPL_LIMIT,
	REG_PKG_POWER_SKU,
	REG_PKG_POWER_SKU_UNIT,
	REG_GT_PERF_STATUS,
	REG_PKG_ENERGY_STATUS,
	REG_FAN_SPEED,
};

enum xe_hwmon_reg_operation {
	REG_READ32,
	REG_RMW32,
	REG_READ64,
};

/* hwmon sensor channels; CARD/PKG/VRAM indices double as hwmon channel numbers. */
enum xe_hwmon_channel {
	CHANNEL_CARD,
	CHANNEL_PKG,
	CHANNEL_VRAM,
	CHANNEL_MAX,
};

enum xe_fan_channel {
	FAN_1,
	FAN_2,
	FAN_3,
	FAN_MAX,
};

/* Attribute index for powerX_xxx_interval sysfs entries */
enum sensor_attr_power {
	SENSOR_INDEX_PSYS_PL1,
	SENSOR_INDEX_PKG_PL1,
	SENSOR_INDEX_PSYS_PL2,
	SENSOR_INDEX_PKG_PL2,
};

/*
 * For platforms that support mailbox commands for power limits, REG_PKG_POWER_SKU_UNIT is
 * not supported and below are SKU units to be used.
 */
#define PWR_UNIT	0x3
#define ENERGY_UNIT	0xe
#define TIME_UNIT	0xa

/*
 * SF_* - scale factors for particular quantities according to hwmon spec.
 */
#define SF_POWER	1000000		/* microwatts */
#define SF_CURR		1000		/* milliamperes */
#define SF_VOLTAGE	1000		/* millivolts */
#define SF_ENERGY	1000000		/* microjoules */
#define SF_TIME		1000		/* milliseconds */

/*
 * PL*_HWMON_ATTR - mapping of hardware power limits to corresponding hwmon power attribute.
 */
#define PL1_HWMON_ATTR	hwmon_power_max
#define PL2_HWMON_ATTR	hwmon_power_cap

#define PWR_ATTR_TO_STR(attr)	(((attr) == hwmon_power_max) ? "PL1" : "PL2")

/*
 * Timeout for power limit write mailbox command.
 */
#define PL_WRITE_MBX_TIMEOUT_MS	(1)

/**
 * struct xe_hwmon_energy_info - to accumulate energy
 */
struct xe_hwmon_energy_info {
	/** @reg_val_prev: previous energy reg val */
	u32 reg_val_prev;
	/** @accum_energy: accumulated energy */
	long accum_energy;
};

/**
 * struct xe_hwmon_fan_info - to cache previous fan reading
 */
struct xe_hwmon_fan_info {
	/** @reg_val_prev: previous fan reg val */
	u32 reg_val_prev;
	/** @time_prev: previous timestamp */
	u64 time_prev;
};

/**
 * struct xe_hwmon - xe hwmon data structure
 */
struct xe_hwmon {
	/** @hwmon_dev: hwmon device for xe */
	struct device *hwmon_dev;
	/** @xe: Xe device */
	struct xe_device *xe;
	/** @hwmon_lock: lock for rw attributes*/
	struct mutex hwmon_lock;
	/** @scl_shift_power: pkg power unit */
	int scl_shift_power;
	/** @scl_shift_energy: pkg energy unit */
	int scl_shift_energy;
	/** @scl_shift_time: pkg time unit */
	int scl_shift_time;
	/** @ei: Energy info for energyN_input */
	struct xe_hwmon_energy_info ei[CHANNEL_MAX];
	/** @fi: Fan info for fanN_input */
	struct xe_hwmon_fan_info fi[FAN_MAX];
	/** @boot_power_limit_read: is boot power limits read */
	bool boot_power_limit_read;
	/** @pl1_on_boot: power limit PL1 on boot */
	u32 pl1_on_boot[CHANNEL_MAX];
	/** @pl2_on_boot: power limit PL2 on boot */
	u32 pl2_on_boot[CHANNEL_MAX];

};

/*
 * Read the PL1/PL2 power limit for @channel through the PCODE_POWER_SETUP
 * mailbox. *uval receives the raw limit dword only when that limit's enable
 * bit (PWR_LIM_EN) is set, otherwise 0; on mailbox failure *uval is zeroed
 * and the error is returned. For hwmon_power_label, *uval is a 0/1 flag
 * telling whether any of PL1/PL2 is enabled.
 */
static int xe_hwmon_pcode_read_power_limit(const struct xe_hwmon *hwmon, u32 attr, int channel,
					   u32 *uval)
{
	struct xe_tile *root_tile = xe_device_get_root_tile(hwmon->xe);
	u32 val0 = 0, val1 = 0;
	int ret = 0;

	ret = xe_pcode_read(root_tile, PCODE_MBOX(PCODE_POWER_SETUP,
			    (channel == CHANNEL_CARD) ?
			    READ_PSYSGPU_POWER_LIMIT :
			    READ_PACKAGE_POWER_LIMIT,
			    hwmon->boot_power_limit_read ?
			    READ_PL_FROM_PCODE : READ_PL_FROM_FW),
			    &val0, &val1);

	if (ret) {
		drm_dbg(&hwmon->xe->drm, "read failed ch %d val0 0x%08x, val1 0x%08x, ret %d\n",
			channel, val0, val1, ret);
		*uval = 0;
		return ret;
	}

	/* return the value only if limit is enabled */
	if (attr == PL1_HWMON_ATTR)
		*uval = (val0 & PWR_LIM_EN) ? val0 : 0;
	else if (attr == PL2_HWMON_ATTR)
		*uval = (val1 & PWR_LIM_EN) ? val1 : 0;
	else if (attr == hwmon_power_label)
		*uval = (val0 & PWR_LIM_EN) ? 1 : (val1 & PWR_LIM_EN) ? 1 : 0;
	else
		*uval = 0;

	return ret;
}

/*
 * Read-modify-write a power limit via the pcode mailbox: read the current
 * PL1 (val0) / PL2 (val1) pair, apply @clr/@set to the limit selected by
 * @attr, and write the pair back. A read failure is only logged (debug);
 * the write is attempted regardless with the values the read returned.
 */
static int xe_hwmon_pcode_rmw_power_limit(const struct xe_hwmon *hwmon, u32 attr, u8 channel,
					  u32 clr, u32 set)
{
	struct xe_tile *root_tile = xe_device_get_root_tile(hwmon->xe);
	u32 val0, val1;
	int ret = 0;

	ret = xe_pcode_read(root_tile, PCODE_MBOX(PCODE_POWER_SETUP,
			    (channel == CHANNEL_CARD) ?
			    READ_PSYSGPU_POWER_LIMIT :
			    READ_PACKAGE_POWER_LIMIT,
			    hwmon->boot_power_limit_read ?
			    READ_PL_FROM_PCODE : READ_PL_FROM_FW),
			    &val0, &val1);

	if (ret)
		drm_dbg(&hwmon->xe->drm, "read failed ch %d val0 0x%08x, val1 0x%08x, ret %d\n",
			channel, val0, val1, ret);

	if (attr == PL1_HWMON_ATTR)
		val0 = (val0 & ~clr) | set;
	else if (attr == PL2_HWMON_ATTR)
		val1 = (val1 & ~clr) | set;
	else
		return -EIO;

	ret = xe_pcode_write64_timeout(root_tile, PCODE_MBOX(PCODE_POWER_SETUP,
				       (channel == CHANNEL_CARD) ?
206 WRITE_PSYSGPU_POWER_LIMIT : 207 WRITE_PACKAGE_POWER_LIMIT, 0), 208 val0, val1, PL_WRITE_MBX_TIMEOUT_MS); 209 if (ret) 210 drm_dbg(&hwmon->xe->drm, "write failed ch %d val0 0x%08x, val1 0x%08x, ret %d\n", 211 channel, val0, val1, ret); 212 return ret; 213 } 214 215 static struct xe_reg xe_hwmon_get_reg(struct xe_hwmon *hwmon, enum xe_hwmon_reg hwmon_reg, 216 int channel) 217 { 218 struct xe_device *xe = hwmon->xe; 219 220 switch (hwmon_reg) { 221 case REG_TEMP: 222 if (xe->info.platform == XE_BATTLEMAGE) { 223 if (channel == CHANNEL_PKG) 224 return BMG_PACKAGE_TEMPERATURE; 225 else if (channel == CHANNEL_VRAM) 226 return BMG_VRAM_TEMPERATURE; 227 } else if (xe->info.platform == XE_DG2) { 228 if (channel == CHANNEL_PKG) 229 return PCU_CR_PACKAGE_TEMPERATURE; 230 else if (channel == CHANNEL_VRAM) 231 return BMG_VRAM_TEMPERATURE; 232 } 233 break; 234 case REG_PKG_RAPL_LIMIT: 235 if (xe->info.platform == XE_PVC && channel == CHANNEL_PKG) 236 return PVC_GT0_PACKAGE_RAPL_LIMIT; 237 else if ((xe->info.platform == XE_DG2) && (channel == CHANNEL_PKG)) 238 return PCU_CR_PACKAGE_RAPL_LIMIT; 239 break; 240 case REG_PKG_POWER_SKU: 241 if (xe->info.platform == XE_PVC && channel == CHANNEL_PKG) 242 return PVC_GT0_PACKAGE_POWER_SKU; 243 else if ((xe->info.platform == XE_DG2) && (channel == CHANNEL_PKG)) 244 return PCU_CR_PACKAGE_POWER_SKU; 245 break; 246 case REG_PKG_POWER_SKU_UNIT: 247 if (xe->info.platform == XE_PVC) 248 return PVC_GT0_PACKAGE_POWER_SKU_UNIT; 249 else if (xe->info.platform == XE_DG2) 250 return PCU_CR_PACKAGE_POWER_SKU_UNIT; 251 break; 252 case REG_GT_PERF_STATUS: 253 if (xe->info.platform == XE_DG2 && channel == CHANNEL_PKG) 254 return GT_PERF_STATUS; 255 break; 256 case REG_PKG_ENERGY_STATUS: 257 if (xe->info.platform == XE_PVC && channel == CHANNEL_PKG) { 258 return PVC_GT0_PLATFORM_ENERGY_STATUS; 259 } else if ((xe->info.platform == XE_DG2) && (channel == CHANNEL_PKG)) { 260 return PCU_CR_PACKAGE_ENERGY_STATUS; 261 } 262 break; 263 case REG_FAN_SPEED: 264 
if (channel == FAN_1) 265 return BMG_FAN_1_SPEED; 266 else if (channel == FAN_2) 267 return BMG_FAN_2_SPEED; 268 else if (channel == FAN_3) 269 return BMG_FAN_3_SPEED; 270 break; 271 default: 272 drm_warn(&xe->drm, "Unknown xe hwmon reg id: %d\n", hwmon_reg); 273 break; 274 } 275 276 return XE_REG(0); 277 } 278 279 #define PL_DISABLE 0 280 281 /* 282 * HW allows arbitrary PL1 limits to be set but silently clamps these values to 283 * "typical but not guaranteed" min/max values in REG_PKG_POWER_SKU. Follow the 284 * same pattern for sysfs, allow arbitrary PL1 limits to be set but display 285 * clamped values when read. 286 */ 287 static void xe_hwmon_power_max_read(struct xe_hwmon *hwmon, u32 attr, int channel, long *value) 288 { 289 u64 reg_val = 0, min, max; 290 struct xe_device *xe = hwmon->xe; 291 struct xe_reg rapl_limit, pkg_power_sku; 292 struct xe_mmio *mmio = xe_root_tile_mmio(xe); 293 294 mutex_lock(&hwmon->hwmon_lock); 295 296 if (hwmon->xe->info.has_mbx_power_limits) { 297 xe_hwmon_pcode_read_power_limit(hwmon, attr, channel, (u32 *)®_val); 298 } else { 299 rapl_limit = xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, channel); 300 pkg_power_sku = xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU, channel); 301 reg_val = xe_mmio_read32(mmio, rapl_limit); 302 } 303 304 /* Check if PL limits are disabled. */ 305 if (!(reg_val & PWR_LIM_EN)) { 306 *value = PL_DISABLE; 307 drm_info(&hwmon->xe->drm, "%s disabled for channel %d, val 0x%016llx\n", 308 PWR_ATTR_TO_STR(attr), channel, reg_val); 309 goto unlock; 310 } 311 312 reg_val = REG_FIELD_GET(PWR_LIM_VAL, reg_val); 313 *value = mul_u64_u32_shr(reg_val, SF_POWER, hwmon->scl_shift_power); 314 315 /* For platforms with mailbox power limit support clamping would be done by pcode. 
*/ 316 if (!hwmon->xe->info.has_mbx_power_limits) { 317 reg_val = xe_mmio_read64_2x32(mmio, pkg_power_sku); 318 min = REG_FIELD_GET(PKG_MIN_PWR, reg_val); 319 max = REG_FIELD_GET(PKG_MAX_PWR, reg_val); 320 min = mul_u64_u32_shr(min, SF_POWER, hwmon->scl_shift_power); 321 max = mul_u64_u32_shr(max, SF_POWER, hwmon->scl_shift_power); 322 if (min && max) 323 *value = clamp_t(u64, *value, min, max); 324 } 325 unlock: 326 mutex_unlock(&hwmon->hwmon_lock); 327 } 328 329 static int xe_hwmon_power_max_write(struct xe_hwmon *hwmon, u32 attr, int channel, long value) 330 { 331 struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe); 332 int ret = 0; 333 u32 reg_val, max; 334 struct xe_reg rapl_limit; 335 u64 max_supp_power_limit = 0; 336 337 mutex_lock(&hwmon->hwmon_lock); 338 339 rapl_limit = xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, channel); 340 341 /* Disable Power Limit and verify, as limit cannot be disabled on all platforms. */ 342 if (value == PL_DISABLE) { 343 if (hwmon->xe->info.has_mbx_power_limits) { 344 drm_dbg(&hwmon->xe->drm, "disabling %s on channel %d\n", 345 PWR_ATTR_TO_STR(attr), channel); 346 xe_hwmon_pcode_rmw_power_limit(hwmon, attr, channel, PWR_LIM_EN, 0); 347 xe_hwmon_pcode_read_power_limit(hwmon, attr, channel, ®_val); 348 } else { 349 reg_val = xe_mmio_rmw32(mmio, rapl_limit, PWR_LIM_EN, 0); 350 reg_val = xe_mmio_read32(mmio, rapl_limit); 351 } 352 353 if (reg_val & PWR_LIM_EN) { 354 drm_warn(&hwmon->xe->drm, "Power limit disable is not supported!\n"); 355 ret = -EOPNOTSUPP; 356 } 357 goto unlock; 358 } 359 360 /* 361 * If the sysfs value exceeds the maximum pcode supported power limit value, clamp it to 362 * the supported maximum (U12.3 format). 363 * This is to avoid truncation during reg_val calculation below and ensure the valid 364 * power limit is sent for pcode which would clamp it to card-supported value. 
	 */
	max_supp_power_limit = ((PWR_LIM_VAL) >> hwmon->scl_shift_power) * SF_POWER;
	if (value > max_supp_power_limit) {
		value = max_supp_power_limit;
		drm_info(&hwmon->xe->drm,
			 "Power limit clamped as selected %s exceeds channel %d limit\n",
			 PWR_ATTR_TO_STR(attr), channel);
	}

	/* Computation in 64-bits to avoid overflow. Round to nearest. */
	reg_val = DIV_ROUND_CLOSEST_ULL((u64)value << hwmon->scl_shift_power, SF_POWER);

	/*
	 * Clamp power limit to GPU firmware default as maximum, as an additional protection to
	 * pcode clamp.
	 */
	if (hwmon->xe->info.has_mbx_power_limits) {
		max = (attr == PL1_HWMON_ATTR) ?
		       hwmon->pl1_on_boot[channel] : hwmon->pl2_on_boot[channel];
		max = REG_FIELD_PREP(PWR_LIM_VAL, max);
		if (reg_val > max) {
			reg_val = max;
			drm_dbg(&hwmon->xe->drm,
				"Clamping power limit to GPU firmware default 0x%x\n",
				reg_val);
		}
	}

	reg_val = PWR_LIM_EN | REG_FIELD_PREP(PWR_LIM_VAL, reg_val);

	if (hwmon->xe->info.has_mbx_power_limits)
		ret = xe_hwmon_pcode_rmw_power_limit(hwmon, attr, channel, PWR_LIM, reg_val);
	else
		reg_val = xe_mmio_rmw32(mmio, rapl_limit, PWR_LIM, reg_val);
unlock:
	mutex_unlock(&hwmon->hwmon_lock);
	return ret;
}

/*
 * Report the "rated max" power (powerN_rated_max) in microwatts: the mailbox
 * PL1 limit when mailbox limits are supported, otherwise the PKG_TDP field of
 * REG_PKG_POWER_SKU.
 */
static void xe_hwmon_power_rated_max_read(struct xe_hwmon *hwmon, u32 attr, int channel,
					  long *value)
{
	struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe);
	u32 reg_val;

	if (hwmon->xe->info.has_mbx_power_limits) {
		/* PL1 is rated max if supported. */
		xe_hwmon_pcode_read_power_limit(hwmon, PL1_HWMON_ATTR, channel, &reg_val);
	} else {
		/*
		 * This sysfs file won't be visible if REG_PKG_POWER_SKU is invalid, so valid check
		 * for this register can be skipped.
		 * See xe_hwmon_power_is_visible.
		 */
		struct xe_reg reg = xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU, channel);

		reg_val = xe_mmio_read32(mmio, reg);
	}

	reg_val = REG_FIELD_GET(PKG_TDP, reg_val);
	*value = mul_u64_u32_shr(reg_val, SF_POWER, hwmon->scl_shift_power);
}

/*
 * xe_hwmon_energy_get - Obtain energy value
 *
 * The underlying energy hardware register is 32-bits and is subject to
 * overflow. How long before overflow? For example, with an example
 * scaling bit shift of 14 bits (see register *PACKAGE_POWER_SKU_UNIT) and
 * a power draw of 1000 watts, the 32-bit counter will overflow in
 * approximately 4.36 minutes.
 *
 * Examples:
 *    1 watt:  (2^32 >> 14) /    1 W / (60 * 60 * 24) secs/day -> 3 days
 * 1000 watts: (2^32 >> 14) / 1000 W / 60             secs/min -> 4.36 minutes
 *
 * The function significantly increases overflow duration (from 4.36
 * minutes) by accumulating the energy register into a 'long' as allowed by
 * the hwmon API. Using x86_64 128 bit arithmetic (see mul_u64_u32_shr()),
 * a 'long' of 63 bits, SF_ENERGY of 1e6 (~20 bits) and
 * hwmon->scl_shift_energy of 14 bits we have 57 (63 - 20 + 14) bits before
 * energyN_input overflows. This at 1000 W is an overflow duration of 278 years.
 */
static void
xe_hwmon_energy_get(struct xe_hwmon *hwmon, int channel, long *energy)
{
	struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe);
	struct xe_hwmon_energy_info *ei = &hwmon->ei[channel];
	u32 reg_val;
	int ret = 0;

	/* Energy is supported only for card and pkg */
	if (channel > CHANNEL_PKG) {
		*energy = 0;
		return;
	}

	if (hwmon->xe->info.platform == XE_BATTLEMAGE) {
		u64 pmt_val;

		/* On BMG the energy counter comes from PMT telemetry, not MMIO. */
		ret = xe_pmt_telem_read(to_pci_dev(hwmon->xe->drm.dev),
					xe_mmio_read32(mmio, PUNIT_TELEMETRY_GUID),
					&pmt_val, BMG_ENERGY_STATUS_PMT_OFFSET, sizeof(pmt_val));
		/* xe_pmt_telem_read returns the number of bytes read on success. */
		if (ret != sizeof(pmt_val)) {
			drm_warn(&hwmon->xe->drm, "energy read from pmt failed, ret %d\n", ret);
			*energy = 0;
			return;
		}

		if (channel == CHANNEL_PKG)
			reg_val = REG_FIELD_GET64(ENERGY_PKG, pmt_val);
		else
			reg_val = REG_FIELD_GET64(ENERGY_CARD, pmt_val);
	} else {
		reg_val = xe_mmio_read32(mmio, xe_hwmon_get_reg(hwmon, REG_PKG_ENERGY_STATUS,
								channel));
	}

	/* u32 subtraction handles counter wrap-around between samples. */
	ei->accum_energy += reg_val - ei->reg_val_prev;
	ei->reg_val_prev = reg_val;

	*energy = mul_u64_u32_shr(ei->accum_energy, SF_ENERGY,
				  hwmon->scl_shift_energy);
}

/*
 * Show the PL1/PL2 time window (powerX_{max,cap}_interval) in milliseconds,
 * decoded from the 1.x * 2^y fixed-point representation in the limit register.
 */
static ssize_t
xe_hwmon_power_max_interval_show(struct device *dev, struct device_attribute *attr,
				 char *buf)
{
	struct xe_hwmon *hwmon = dev_get_drvdata(dev);
	struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe);
	u32 x, y, x_w = 2; /* 2 bits */
	u64 r, tau4, out;
	/* Even sensor indices are PSYS/card, odd are PKG; indices > 1 are PL2. */
	int channel = (to_sensor_dev_attr(attr)->index % 2) ? CHANNEL_PKG : CHANNEL_CARD;
	u32 power_attr = (to_sensor_dev_attr(attr)->index > 1) ? PL2_HWMON_ATTR : PL1_HWMON_ATTR;

	int ret = 0;

	xe_pm_runtime_get(hwmon->xe);

	mutex_lock(&hwmon->hwmon_lock);

	if (hwmon->xe->info.has_mbx_power_limits) {
		ret = xe_hwmon_pcode_read_power_limit(hwmon, power_attr, channel, (u32 *)&r);
		if (ret) {
			drm_err(&hwmon->xe->drm,
				"power interval read fail, ch %d, attr %d, r 0%llx, ret %d\n",
				channel, power_attr, r, ret);
			r = 0;
		}
	} else {
		r = xe_mmio_read32(mmio, xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, channel));
	}

	mutex_unlock(&hwmon->hwmon_lock);

	xe_pm_runtime_put(hwmon->xe);

	x = REG_FIELD_GET(PWR_LIM_TIME_X, r);
	y = REG_FIELD_GET(PWR_LIM_TIME_Y, r);

	/*
	 * tau = (1 + (x / 4)) * power(2,y), x = bits(23:22), y = bits(21:17)
	 *     = (4 | x) << (y - 2)
	 *
	 * Here (y - 2) ensures a 1.x fixed point representation of 1.x
	 * As x is 2 bits so 1.x can be 1.0, 1.25, 1.50, 1.75
	 *
	 * As y can be < 2, we compute tau4 = (4 | x) << y
	 * and then add 2 when doing the final right shift to account for units
	 */
	tau4 = (u64)((1 << x_w) | x) << y;

	/* val in hwmon interface units (millisec) */
	out = mul_u64_u32_shr(tau4, SF_TIME, hwmon->scl_shift_time + x_w);

	return sysfs_emit(buf, "%llu\n", out);
}

/*
 * Store the PL1/PL2 time window in milliseconds: validate against the maximum
 * window, convert to the hardware's 1.x * 2^y encoding and RMW only the
 * PWR_LIM_TIME field of the limit.
 */
static ssize_t
xe_hwmon_power_max_interval_store(struct device *dev, struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct xe_hwmon *hwmon = dev_get_drvdata(dev);
	struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe);
	u32 x, y, rxy, x_w = 2; /* 2 bits */
	u64 tau4, r, max_win;
	unsigned long val;
	int channel = (to_sensor_dev_attr(attr)->index % 2) ? CHANNEL_PKG : CHANNEL_CARD;
	u32 power_attr = (to_sensor_dev_attr(attr)->index > 1) ? PL2_HWMON_ATTR : PL1_HWMON_ATTR;
	int ret;

	ret = kstrtoul(buf, 0, &val);
	if (ret)
		return ret;

	/*
	 * Max HW supported tau in '(1 + (x / 4)) * power(2,y)' format, x = 0, y = 0x12.
	 * The hwmon->scl_shift_time default of 0xa results in a max tau of 256 seconds.
	 *
	 * The ideal scenario is for PKG_MAX_WIN to be read from the PKG_PWR_SKU register.
	 * However, it is observed that existing discrete GPUs does not provide correct
	 * PKG_MAX_WIN value, therefore a using default constant value. For future discrete GPUs
	 * this may get resolved, in which case PKG_MAX_WIN should be obtained from PKG_PWR_SKU.
	 */
#define PKG_MAX_WIN_DEFAULT 0x12ull

	/*
	 * val must be < max in hwmon interface units. The steps below are
	 * explained in xe_hwmon_power_max_interval_show()
	 */
	r = FIELD_PREP(PKG_MAX_WIN, PKG_MAX_WIN_DEFAULT);
	x = REG_FIELD_GET(PKG_MAX_WIN_X, r);
	y = REG_FIELD_GET(PKG_MAX_WIN_Y, r);
	tau4 = (u64)((1 << x_w) | x) << y;
	max_win = mul_u64_u32_shr(tau4, SF_TIME, hwmon->scl_shift_time + x_w);

	if (val > max_win)
		return -EINVAL;

	/* val in hw units */
	val = DIV_ROUND_CLOSEST_ULL((u64)val << hwmon->scl_shift_time, SF_TIME) + 1;

	/*
	 * Convert val to 1.x * power(2,y)
	 * y = ilog2(val)
	 * x = (val - (1 << y)) >> (y - 2)
	 */
	if (!val) {
		y = 0;
		x = 0;
	} else {
		y = ilog2(val);
		x = (val - (1ul << y)) << x_w >> y;
	}

	rxy = REG_FIELD_PREP(PWR_LIM_TIME_X, x) |
	      REG_FIELD_PREP(PWR_LIM_TIME_Y, y);

	xe_pm_runtime_get(hwmon->xe);

	mutex_lock(&hwmon->hwmon_lock);

	if (hwmon->xe->info.has_mbx_power_limits)
		xe_hwmon_pcode_rmw_power_limit(hwmon, power_attr, channel, PWR_LIM_TIME, rxy);
	else
		r = xe_mmio_rmw32(mmio, xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, channel),
				  PWR_LIM_TIME, rxy);

	mutex_unlock(&hwmon->hwmon_lock);

	xe_pm_runtime_put(hwmon->xe);

	return count;
}

/* PSYS PL1 */
static SENSOR_DEVICE_ATTR(power1_max_interval, 0664,
			  xe_hwmon_power_max_interval_show,
			  xe_hwmon_power_max_interval_store, SENSOR_INDEX_PSYS_PL1);
/* PKG PL1 */
static SENSOR_DEVICE_ATTR(power2_max_interval, 0664,
			  xe_hwmon_power_max_interval_show,
			  xe_hwmon_power_max_interval_store, SENSOR_INDEX_PKG_PL1);
/* PSYS PL2 */
static SENSOR_DEVICE_ATTR(power1_cap_interval, 0664,
			  xe_hwmon_power_max_interval_show,
			  xe_hwmon_power_max_interval_store, SENSOR_INDEX_PSYS_PL2);
/* PKG PL2 */
static SENSOR_DEVICE_ATTR(power2_cap_interval, 0664,
			  xe_hwmon_power_max_interval_show,
			  xe_hwmon_power_max_interval_store, SENSOR_INDEX_PKG_PL2);

static struct attribute *hwmon_attributes[] = {
	&sensor_dev_attr_power1_max_interval.dev_attr.attr,
	&sensor_dev_attr_power2_max_interval.dev_attr.attr,
	&sensor_dev_attr_power1_cap_interval.dev_attr.attr,
	&sensor_dev_attr_power2_cap_interval.dev_attr.attr,
	NULL
};

/*
 * Show a powerX_*_interval attribute only when the corresponding power limit
 * is currently enabled. @index follows enum sensor_attr_power.
 */
static umode_t xe_hwmon_attributes_visible(struct kobject *kobj,
					   struct attribute *attr, int index)
{
	struct device *dev = kobj_to_dev(kobj);
	struct xe_hwmon *hwmon = dev_get_drvdata(dev);
	int ret = 0;
	int channel = (index % 2) ? CHANNEL_PKG : CHANNEL_CARD;
	u32 power_attr = (index > 1) ? PL2_HWMON_ATTR : PL1_HWMON_ATTR;
	u32 uval = 0;
	struct xe_reg rapl_limit;
	struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe);

	xe_pm_runtime_get(hwmon->xe);

	if (hwmon->xe->info.has_mbx_power_limits) {
		xe_hwmon_pcode_read_power_limit(hwmon, power_attr, channel, &uval);
	} else if (power_attr != PL2_HWMON_ATTR) {
		/* PL2 has no MMIO fallback; only PL1 is readable from RAPL_LIMIT. */
		rapl_limit = xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, channel);
		if (xe_reg_is_valid(rapl_limit))
			uval = xe_mmio_read32(mmio, rapl_limit);
	}
	ret = (uval & PWR_LIM_EN) ? attr->mode : 0;

	xe_pm_runtime_put(hwmon->xe);

	return ret;
}

static const struct attribute_group hwmon_attrgroup = {
	.attrs = hwmon_attributes,
	.is_visible = xe_hwmon_attributes_visible,
};

static const struct attribute_group *hwmon_groups[] = {
	&hwmon_attrgroup,
	NULL
};

static const struct hwmon_channel_info * const hwmon_info[] = {
	HWMON_CHANNEL_INFO(temp, HWMON_T_LABEL, HWMON_T_INPUT | HWMON_T_LABEL,
			   HWMON_T_INPUT | HWMON_T_LABEL),
	HWMON_CHANNEL_INFO(power, HWMON_P_MAX | HWMON_P_RATED_MAX | HWMON_P_LABEL | HWMON_P_CRIT |
			   HWMON_P_CAP,
			   HWMON_P_MAX | HWMON_P_RATED_MAX | HWMON_P_LABEL | HWMON_P_CAP),
	HWMON_CHANNEL_INFO(curr, HWMON_C_LABEL, HWMON_C_CRIT | HWMON_C_LABEL),
	HWMON_CHANNEL_INFO(in, HWMON_I_INPUT | HWMON_I_LABEL, HWMON_I_INPUT | HWMON_I_LABEL),
	HWMON_CHANNEL_INFO(energy, HWMON_E_INPUT | HWMON_E_LABEL, HWMON_E_INPUT | HWMON_E_LABEL),
	HWMON_CHANNEL_INFO(fan, HWMON_F_INPUT, HWMON_F_INPUT, HWMON_F_INPUT),
	NULL
};

/* I1 is exposed as power_crit or as curr_crit depending on bit 31 */
static int xe_hwmon_pcode_read_i1(const struct xe_hwmon *hwmon, u32 *uval)
{
	struct xe_tile *root_tile = xe_device_get_root_tile(hwmon->xe);

	/* Avoid Illegal Subcommand error */
	if (hwmon->xe->info.platform == XE_DG2)
		return -ENXIO;

	return xe_pcode_read(root_tile, PCODE_MBOX(PCODE_POWER_SETUP,
			     POWER_SETUP_SUBCOMMAND_READ_I1, 0),
			     uval, NULL);
}

/* Write the I1 critical limit; only the data field bits are forwarded. */
static int xe_hwmon_pcode_write_i1(const struct xe_hwmon *hwmon, u32 uval)
{
	struct xe_tile *root_tile = xe_device_get_root_tile(hwmon->xe);

	return xe_pcode_write(root_tile, PCODE_MBOX(PCODE_POWER_SETUP,
			      POWER_SETUP_SUBCOMMAND_WRITE_I1, 0),
			      (uval & POWER_SETUP_I1_DATA_MASK));
}

/* Query the fan-speed-control mailbox; fakes the fan count on DG2. */
static int xe_hwmon_pcode_read_fan_control(const struct xe_hwmon *hwmon, u32 subcmd, u32 *uval)
{
	struct xe_tile *root_tile = xe_device_get_root_tile(hwmon->xe);

	/* Platforms that don't return correct value */
	if (hwmon->xe->info.platform == XE_DG2 && subcmd == FSC_READ_NUM_FANS) {
		*uval = 2;
		return 0;
	}

	return xe_pcode_read(root_tile, PCODE_MBOX(FAN_SPEED_CONTROL, subcmd, 0), uval, NULL);
}

/*
 * Read the I1 critical limit scaled to hwmon units; @scale_factor is SF_POWER
 * for power1_crit and SF_CURR for curr2_crit.
 */
static int xe_hwmon_power_curr_crit_read(struct xe_hwmon *hwmon, int channel,
					 long *value, u32 scale_factor)
{
	int ret;
	u32 uval;

	mutex_lock(&hwmon->hwmon_lock);

	ret = xe_hwmon_pcode_read_i1(hwmon, &uval);
	if (ret)
		goto unlock;

	*value = mul_u64_u32_shr(REG_FIELD_GET(POWER_SETUP_I1_DATA_MASK, uval),
				 scale_factor, POWER_SETUP_I1_SHIFT);
unlock:
	mutex_unlock(&hwmon->hwmon_lock);
	return ret;
}

/* Write the I1 critical limit from hwmon units (see read counterpart above). */
static int xe_hwmon_power_curr_crit_write(struct xe_hwmon *hwmon, int channel,
					  long value, u32 scale_factor)
{
	int ret;
	u32 uval;
	u64 max_crit_power_curr = 0;

	mutex_lock(&hwmon->hwmon_lock);

	/*
	 * If the sysfs value exceeds the pcode mailbox cmd POWER_SETUP_SUBCOMMAND_WRITE_I1
	 * max supported value, clamp it to the command's max (U10.6 format).
	 * This is to avoid truncation during uval calculation below and ensure the valid power
	 * limit is sent for pcode which would clamp it to card-supported value.
	 */
	max_crit_power_curr = (POWER_SETUP_I1_DATA_MASK >> POWER_SETUP_I1_SHIFT) * scale_factor;
	if (value > max_crit_power_curr) {
		value = max_crit_power_curr;
		drm_info(&hwmon->xe->drm,
			 "Power limit clamped as selected exceeds channel %d limit\n",
			 channel);
	}
	uval = DIV_ROUND_CLOSEST_ULL(value << POWER_SETUP_I1_SHIFT, scale_factor);
	ret = xe_hwmon_pcode_write_i1(hwmon, uval);

	mutex_unlock(&hwmon->hwmon_lock);
	return ret;
}

/* Report the GT voltage (inN_input) in millivolts from GT_PERF_STATUS. */
static void xe_hwmon_get_voltage(struct xe_hwmon *hwmon, int channel, long *value)
{
	struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe);
	u64 reg_val;

	reg_val = xe_mmio_read32(mmio, xe_hwmon_get_reg(hwmon, REG_GT_PERF_STATUS, channel));
	/* HW register value in units of 2.5 millivolt */
	*value = DIV_ROUND_CLOSEST(REG_FIELD_GET(VOLTAGE_MASK, reg_val) * 2500, SF_VOLTAGE);
}

static umode_t
xe_hwmon_temp_is_visible(struct xe_hwmon *hwmon, u32 attr, int channel)
{
	switch (attr) {
	case hwmon_temp_input:
	case hwmon_temp_label:
		return xe_reg_is_valid(xe_hwmon_get_reg(hwmon, REG_TEMP, channel)) ? 0444 : 0;
	default:
		return 0;
	}
}

static int
xe_hwmon_temp_read(struct xe_hwmon *hwmon, u32 attr, int channel, long *val)
{
	struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe);
	u64 reg_val;

	switch (attr) {
	case hwmon_temp_input:
		reg_val = xe_mmio_read32(mmio, xe_hwmon_get_reg(hwmon, REG_TEMP, channel));

		/* HW register value is in degrees Celsius, convert to millidegrees. */
		*val = REG_FIELD_GET(TEMP_MASK, reg_val) * MILLIDEGREE_PER_DEGREE;
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

/*
 * Visibility of power attributes: max/cap only when the corresponding limit
 * is enabled, rated_max only via MMIO SKU register, crit only when I1 is in
 * watts mode, label when any source of a power value exists.
 */
static umode_t
xe_hwmon_power_is_visible(struct xe_hwmon *hwmon, u32 attr, int channel)
{
	u32 uval = 0;
	struct xe_reg reg;
	struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe);

	switch (attr) {
	case hwmon_power_max:
	case hwmon_power_cap:
		if (hwmon->xe->info.has_mbx_power_limits) {
			xe_hwmon_pcode_read_power_limit(hwmon, attr, channel, &uval);
		} else if (attr != PL2_HWMON_ATTR) {
			/* PL2 is mailbox-only; MMIO fallback covers PL1 only. */
			reg = xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, channel);
			if (xe_reg_is_valid(reg))
				uval = xe_mmio_read32(mmio, reg);
		}
		if (uval & PWR_LIM_EN) {
			drm_info(&hwmon->xe->drm, "%s is supported on channel %d\n",
				 PWR_ATTR_TO_STR(attr), channel);
			return 0664;
		}
		drm_dbg(&hwmon->xe->drm, "%s is unsupported on channel %d\n",
			PWR_ATTR_TO_STR(attr), channel);
		return 0;
	case hwmon_power_rated_max:
		if (hwmon->xe->info.has_mbx_power_limits) {
			return 0;
		} else {
			reg = xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU, channel);
			if (xe_reg_is_valid(reg))
				uval = xe_mmio_read32(mmio, reg);
			return uval ? 0444 : 0;
		}
	case hwmon_power_crit:
		if (channel == CHANNEL_CARD) {
			xe_hwmon_pcode_read_i1(hwmon, &uval);
			return (uval & POWER_SETUP_I1_WATTS) ? 0644 : 0;
		}
		break;
	case hwmon_power_label:
		if (hwmon->xe->info.has_mbx_power_limits) {
			xe_hwmon_pcode_read_power_limit(hwmon, attr, channel, &uval);
		} else {
			reg = xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU, channel);
			if (xe_reg_is_valid(reg))
				uval = xe_mmio_read32(mmio, reg);

			if (!uval) {
				reg = xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, channel);
				if (xe_reg_is_valid(reg))
					uval = xe_mmio_read32(mmio, reg);
			}
		}
		/* No enabled limit: label still shown if I1 provides a power value. */
		if ((!(uval & PWR_LIM_EN)) && channel == CHANNEL_CARD) {
			xe_hwmon_pcode_read_i1(hwmon, &uval);
			return (uval & POWER_SETUP_I1_WATTS) ? 0444 : 0;
		}
		return (uval) ? 0444 : 0;
	default:
		return 0;
	}
	return 0;
}

static int
xe_hwmon_power_read(struct xe_hwmon *hwmon, u32 attr, int channel, long *val)
{
	switch (attr) {
	case hwmon_power_max:
	case hwmon_power_cap:
		xe_hwmon_power_max_read(hwmon, attr, channel, val);
		return 0;
	case hwmon_power_rated_max:
		xe_hwmon_power_rated_max_read(hwmon, attr, channel, val);
		return 0;
	case hwmon_power_crit:
		return xe_hwmon_power_curr_crit_read(hwmon, channel, val, SF_POWER);
	default:
		return -EOPNOTSUPP;
	}
}

static int
xe_hwmon_power_write(struct xe_hwmon *hwmon, u32 attr, int channel, long val)
{
	switch (attr) {
	case hwmon_power_cap:
	case hwmon_power_max:
		return xe_hwmon_power_max_write(hwmon, attr, channel, val);
	case hwmon_power_crit:
		return xe_hwmon_power_curr_crit_write(hwmon, channel, val, SF_POWER);
	default:
		return -EOPNOTSUPP;
	}
}

/* curr attributes are only visible when I1 is in current (not watts) mode. */
static umode_t
xe_hwmon_curr_is_visible(const struct xe_hwmon *hwmon, u32 attr, int channel)
{
	u32 uval;

	/* hwmon sysfs attribute of current available only for package */
	if (channel != CHANNEL_PKG)
		return 0;

	switch (attr) {
	case hwmon_curr_crit:
		return (xe_hwmon_pcode_read_i1(hwmon, &uval) ||
			(uval & POWER_SETUP_I1_WATTS)) ? 0 : 0644;
	case hwmon_curr_label:
		return (xe_hwmon_pcode_read_i1(hwmon, &uval) ||
			(uval & POWER_SETUP_I1_WATTS)) ? 0 : 0444;
		break;
	default:
		return 0;
	}
	return 0;
}

static int
xe_hwmon_curr_read(struct xe_hwmon *hwmon, u32 attr, int channel, long *val)
{
	switch (attr) {
	case hwmon_curr_crit:
		return xe_hwmon_power_curr_crit_read(hwmon, channel, val, SF_CURR);
	default:
		return -EOPNOTSUPP;
	}
}

static int
xe_hwmon_curr_write(struct xe_hwmon *hwmon, u32 attr, int channel, long val)
{
	switch (attr) {
	case hwmon_curr_crit:
		return xe_hwmon_power_curr_crit_write(hwmon, channel, val, SF_CURR);
	default:
		return -EOPNOTSUPP;
	}
}

static umode_t
xe_hwmon_in_is_visible(struct xe_hwmon *hwmon, u32 attr, int channel)
{
	switch (attr) {
	case hwmon_in_input:
	case hwmon_in_label:
		return xe_reg_is_valid(xe_hwmon_get_reg(hwmon, REG_GT_PERF_STATUS,
							channel)) ? 0444 : 0;
	default:
		return 0;
	}
}

static int
xe_hwmon_in_read(struct xe_hwmon *hwmon, u32 attr, int channel, long *val)
{
	switch (attr) {
	case hwmon_in_input:
		xe_hwmon_get_voltage(hwmon, channel, val);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static umode_t
xe_hwmon_energy_is_visible(struct xe_hwmon *hwmon, u32 attr, int channel)
{
	long energy = 0;

	switch (attr) {
	case hwmon_energy_input:
	case hwmon_energy_label:
		if (hwmon->xe->info.platform == XE_BATTLEMAGE) {
			/* BMG: probe the PMT counter; visible only if it reads non-zero. */
			xe_hwmon_energy_get(hwmon, channel, &energy);
			return energy ? 0444 : 0;
		} else {
			return xe_reg_is_valid(xe_hwmon_get_reg(hwmon, REG_PKG_ENERGY_STATUS,
								channel)) ? 0444 : 0;
		}
	default:
		return 0;
	}
}

static int
xe_hwmon_energy_read(struct xe_hwmon *hwmon, u32 attr, int channel, long *val)
{
	switch (attr) {
	case hwmon_energy_input:
		xe_hwmon_energy_get(hwmon, channel, val);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static umode_t
xe_hwmon_fan_is_visible(struct xe_hwmon *hwmon, u32 attr, int channel)
{
	u32 uval;

	if (!hwmon->xe->info.has_fan_control)
		return 0;

	switch (attr) {
	case hwmon_fan_input:
		if (xe_hwmon_pcode_read_fan_control(hwmon, FSC_READ_NUM_FANS, &uval))
			return 0;

		return channel < uval ? 0444 : 0;
	default:
		return 0;
	}
}

/*
 * Compute fanN_input (RPM) from the delta of the pulse counter against the
 * previous reading cached in hwmon->fi. Returns -EAGAIN if read back-to-back
 * within the same jiffy.
 */
static int
xe_hwmon_fan_input_read(struct xe_hwmon *hwmon, int channel, long *val)
{
	struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe);
	struct xe_hwmon_fan_info *fi = &hwmon->fi[channel];
	u64 rotations, time_now, time;
	u32 reg_val;
	int ret = 0;

	mutex_lock(&hwmon->hwmon_lock);

	reg_val = xe_mmio_read32(mmio, xe_hwmon_get_reg(hwmon, REG_FAN_SPEED, channel));
	time_now = get_jiffies_64();

	/*
	 * HW register value is accumulated count of pulses from PWM fan with the scale
	 * of 2 pulses per rotation.
	 */
	rotations = (reg_val - fi->reg_val_prev) / 2;

	time = jiffies_delta_to_msecs(time_now - fi->time_prev);
	if (unlikely(!time)) {
		ret = -EAGAIN;
		goto unlock;
	}

	/*
	 * Calculate fan speed in RPM by time averaging two subsequent readings in minutes.
	 * RPM = number of rotations * msecs per minute / time in msecs
	 */
	*val = DIV_ROUND_UP_ULL(rotations * (MSEC_PER_SEC * 60), time);

	/* Cache this sample as the baseline for the next read. */
	fi->reg_val_prev = reg_val;
	fi->time_prev = time_now;
unlock:
	mutex_unlock(&hwmon->hwmon_lock);
	return ret;
}

/* fanN_input: report fan speed in RPM for @channel. */
static int
xe_hwmon_fan_read(struct xe_hwmon *hwmon, u32 attr, int channel, long *val)
{
	switch (attr) {
	case hwmon_fan_input:
		return xe_hwmon_fan_input_read(hwmon, channel, val);
	default:
		return -EOPNOTSUPP;
	}
}

/*
 * hwmon core .is_visible callback: dispatch to the per-sensor-type visibility
 * helper. Holds a runtime PM reference for the duration, since some helpers
 * touch hardware (MMIO/pcode reads) to decide visibility.
 */
static umode_t
xe_hwmon_is_visible(const void *drvdata, enum hwmon_sensor_types type,
		    u32 attr, int channel)
{
	/* Drop const: the per-type helpers take a non-const xe_hwmon. */
	struct xe_hwmon *hwmon = (struct xe_hwmon *)drvdata;
	int ret;

	xe_pm_runtime_get(hwmon->xe);

	switch (type) {
	case hwmon_temp:
		ret = xe_hwmon_temp_is_visible(hwmon, attr, channel);
		break;
	case hwmon_power:
		ret = xe_hwmon_power_is_visible(hwmon, attr, channel);
		break;
	case hwmon_curr:
		ret = xe_hwmon_curr_is_visible(hwmon, attr, channel);
		break;
	case hwmon_in:
		ret = xe_hwmon_in_is_visible(hwmon, attr, channel);
		break;
	case hwmon_energy:
		ret = xe_hwmon_energy_is_visible(hwmon, attr, channel);
		break;
	case hwmon_fan:
		ret = xe_hwmon_fan_is_visible(hwmon, attr, channel);
		break;
	default:
		ret = 0;
		break;
	}

	xe_pm_runtime_put(hwmon->xe);

	return ret;
}

/*
 * hwmon core .read callback: dispatch to the per-sensor-type read helper,
 * holding a runtime PM reference across the hardware access.
 */
static int
xe_hwmon_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
	      int channel, long *val)
{
	struct xe_hwmon *hwmon = dev_get_drvdata(dev);
	int ret;

	xe_pm_runtime_get(hwmon->xe);

	switch (type) {
	case hwmon_temp:
		ret = xe_hwmon_temp_read(hwmon, attr, channel, val);
		break;
	case hwmon_power:
		ret = xe_hwmon_power_read(hwmon, attr, channel, val);
		break;
	case hwmon_curr:
		ret =
xe_hwmon_curr_read(hwmon, attr, channel, val);
		break;
	case hwmon_in:
		ret = xe_hwmon_in_read(hwmon, attr, channel, val);
		break;
	case hwmon_energy:
		ret = xe_hwmon_energy_read(hwmon, attr, channel, val);
		break;
	case hwmon_fan:
		ret = xe_hwmon_fan_read(hwmon, attr, channel, val);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	xe_pm_runtime_put(hwmon->xe);

	return ret;
}

/*
 * hwmon core .write callback: only power and curr attributes are writable;
 * everything else is -EOPNOTSUPP. Holds a runtime PM reference across the
 * hardware access.
 */
static int
xe_hwmon_write(struct device *dev, enum hwmon_sensor_types type, u32 attr,
	       int channel, long val)
{
	struct xe_hwmon *hwmon = dev_get_drvdata(dev);
	int ret;

	xe_pm_runtime_get(hwmon->xe);

	switch (type) {
	case hwmon_power:
		ret = xe_hwmon_power_write(hwmon, attr, channel, val);
		break;
	case hwmon_curr:
		ret = xe_hwmon_curr_write(hwmon, attr, channel, val);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	xe_pm_runtime_put(hwmon->xe);

	return ret;
}

/*
 * hwmon core .read_string callback: map channel index to its sysfs label.
 * NOTE(review): *str is left untouched when the channel matches neither
 * branch of a case (e.g. hwmon_temp with CHANNEL_CARD) — presumably the core
 * only calls this for channels whose label attribute was made visible;
 * confirm against the hwmon_info channel declarations.
 */
static int xe_hwmon_read_label(struct device *dev,
			       enum hwmon_sensor_types type,
			       u32 attr, int channel, const char **str)
{
	switch (type) {
	case hwmon_temp:
		if (channel == CHANNEL_PKG)
			*str = "pkg";
		else if (channel == CHANNEL_VRAM)
			*str = "vram";
		return 0;
	case hwmon_power:
	case hwmon_energy:
	case hwmon_curr:
	case hwmon_in:
		if (channel == CHANNEL_CARD)
			*str = "card";
		else if (channel == CHANNEL_PKG)
			*str = "pkg";
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static const struct hwmon_ops hwmon_ops = {
	.is_visible = xe_hwmon_is_visible,
	.read = xe_hwmon_read,
	.write = xe_hwmon_write,
	.read_string = xe_hwmon_read_label,
};

static const struct hwmon_chip_info hwmon_chip_info = {
	.ops = &hwmon_ops,
	.info = hwmon_info,
};

static void
xe_hwmon_get_preregistration_info(struct xe_hwmon *hwmon)
{
	struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe);
	long energy, fan_speed;
	u64 val_sku_unit = 0;
	int channel;
	struct xe_reg pkg_power_sku_unit;

	if (hwmon->xe->info.has_mbx_power_limits) {
		/* Check if GPU firmware support mailbox power limits commands. */
		/*
		 * Bitwise OR (not ||) is deliberate: all four reads are always
		 * executed so every pl*_on_boot slot gets populated, and the
		 * branch is taken if any one of them failed.
		 */
		if (xe_hwmon_pcode_read_power_limit(hwmon, PL1_HWMON_ATTR, CHANNEL_CARD,
						    &hwmon->pl1_on_boot[CHANNEL_CARD]) |
		    xe_hwmon_pcode_read_power_limit(hwmon, PL1_HWMON_ATTR, CHANNEL_PKG,
						    &hwmon->pl1_on_boot[CHANNEL_PKG]) |
		    xe_hwmon_pcode_read_power_limit(hwmon, PL2_HWMON_ATTR, CHANNEL_CARD,
						    &hwmon->pl2_on_boot[CHANNEL_CARD]) |
		    xe_hwmon_pcode_read_power_limit(hwmon, PL2_HWMON_ATTR, CHANNEL_PKG,
						    &hwmon->pl2_on_boot[CHANNEL_PKG])) {
			drm_warn(&hwmon->xe->drm,
				 "Failed to read power limits, check GPU firmware !\n");
		} else {
			drm_info(&hwmon->xe->drm, "Using mailbox commands for power limits\n");
			/* Write default limits to read from pcode from now on.
			 */
			xe_hwmon_pcode_rmw_power_limit(hwmon, PL1_HWMON_ATTR,
						       CHANNEL_CARD, PWR_LIM | PWR_LIM_TIME,
						       hwmon->pl1_on_boot[CHANNEL_CARD]);
			xe_hwmon_pcode_rmw_power_limit(hwmon, PL1_HWMON_ATTR,
						       CHANNEL_PKG, PWR_LIM | PWR_LIM_TIME,
						       hwmon->pl1_on_boot[CHANNEL_PKG]);
			xe_hwmon_pcode_rmw_power_limit(hwmon, PL2_HWMON_ATTR,
						       CHANNEL_CARD, PWR_LIM | PWR_LIM_TIME,
						       hwmon->pl2_on_boot[CHANNEL_CARD]);
			xe_hwmon_pcode_rmw_power_limit(hwmon, PL2_HWMON_ATTR,
						       CHANNEL_PKG, PWR_LIM | PWR_LIM_TIME,
						       hwmon->pl2_on_boot[CHANNEL_PKG]);
			/*
			 * Mailbox platforms have no PKG_POWER_SKU_UNIT register;
			 * use the fixed SKU unit shifts defined at the top of
			 * this file.
			 */
			hwmon->scl_shift_power = PWR_UNIT;
			hwmon->scl_shift_energy = ENERGY_UNIT;
			hwmon->scl_shift_time = TIME_UNIT;
			hwmon->boot_power_limit_read = true;
		}
	} else {
		drm_info(&hwmon->xe->drm, "Using register for power limits\n");
		/*
		 * The contents of register PKG_POWER_SKU_UNIT do not change,
		 * so read it once and store the shift values.
		 */
		pkg_power_sku_unit = xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU_UNIT, 0);
		if (xe_reg_is_valid(pkg_power_sku_unit)) {
			val_sku_unit = xe_mmio_read32(mmio, pkg_power_sku_unit);
			hwmon->scl_shift_power = REG_FIELD_GET(PKG_PWR_UNIT, val_sku_unit);
			hwmon->scl_shift_energy = REG_FIELD_GET(PKG_ENERGY_UNIT, val_sku_unit);
			hwmon->scl_shift_time = REG_FIELD_GET(PKG_TIME_UNIT, val_sku_unit);
		}
	}
	/*
	 * Initialize 'struct xe_hwmon_energy_info', i.e. set fields to the
	 * first value of the energy register read
	 */
	for (channel = 0; channel < CHANNEL_MAX; channel++)
		if (xe_hwmon_is_visible(hwmon, hwmon_energy, hwmon_energy_input, channel))
			xe_hwmon_energy_get(hwmon, channel, &energy);

	/* Initialize 'struct xe_hwmon_fan_info' with initial fan register reading.
	 */
	for (channel = 0; channel < FAN_MAX; channel++)
		if (xe_hwmon_is_visible(hwmon, hwmon_fan, hwmon_fan_input, channel))
			xe_hwmon_fan_input_read(hwmon, channel, &fan_speed);
}

/* devm cleanup action paired with the mutex_init() in xe_hwmon_register(). */
static void xe_hwmon_mutex_destroy(void *arg)
{
	struct xe_hwmon *hwmon = arg;

	mutex_destroy(&hwmon->hwmon_lock);
}

/**
 * xe_hwmon_register - register the xe hwmon interface for a device
 * @xe: the xe device
 *
 * Allocates the driver's hwmon state (device-managed), seeds it with boot
 * power limits / unit shifts and baseline sensor readings, then registers
 * with the hwmon core. A no-op (returning 0) on integrated graphics and on
 * SR-IOV VFs, where no hwmon is exposed.
 *
 * Return: 0 on success (or when hwmon is not applicable), negative errno on
 * allocation or registration failure.
 */
int xe_hwmon_register(struct xe_device *xe)
{
	struct device *dev = xe->drm.dev;
	struct xe_hwmon *hwmon;
	int ret;

	/* hwmon is available only for dGfx */
	if (!IS_DGFX(xe))
		return 0;

	/* hwmon is not available on VFs */
	if (IS_SRIOV_VF(xe))
		return 0;

	hwmon = devm_kzalloc(dev, sizeof(*hwmon), GFP_KERNEL);
	if (!hwmon)
		return -ENOMEM;

	mutex_init(&hwmon->hwmon_lock);
	ret = devm_add_action_or_reset(dev, xe_hwmon_mutex_destroy, hwmon);
	if (ret)
		return ret;

	/* There's only one instance of hwmon per device */
	hwmon->xe = xe;
	xe->hwmon = hwmon;

	/* Must run before registration: is_visible callbacks rely on it. */
	xe_hwmon_get_preregistration_info(hwmon);

	drm_dbg(&xe->drm, "Register xe hwmon interface\n");

	/* hwmon_dev points to device hwmon<i> */
	hwmon->hwmon_dev = devm_hwmon_device_register_with_info(dev, "xe", hwmon,
								&hwmon_chip_info,
								hwmon_groups);
	if (IS_ERR(hwmon->hwmon_dev)) {
		drm_err(&xe->drm, "Failed to register xe hwmon (%pe)\n", hwmon->hwmon_dev);
		xe->hwmon = NULL;
		return PTR_ERR(hwmon->hwmon_dev);
	}

	return 0;
}
MODULE_IMPORT_NS("INTEL_PMT_TELEMETRY");