1 // SPDX-License-Identifier: MIT 2 /* 3 * Copyright © 2023 Intel Corporation 4 */ 5 6 #include <linux/hwmon-sysfs.h> 7 #include <linux/hwmon.h> 8 #include <linux/jiffies.h> 9 #include <linux/types.h> 10 #include <linux/units.h> 11 12 #include <drm/drm_managed.h> 13 #include "regs/xe_gt_regs.h" 14 #include "regs/xe_mchbar_regs.h" 15 #include "regs/xe_pcode_regs.h" 16 #include "xe_device.h" 17 #include "xe_hwmon.h" 18 #include "xe_mmio.h" 19 #include "xe_pcode.h" 20 #include "xe_pcode_api.h" 21 #include "xe_sriov.h" 22 #include "xe_pm.h" 23 #include "xe_vsec.h" 24 #include "regs/xe_pmt.h" 25 26 enum xe_hwmon_reg { 27 REG_TEMP, 28 REG_PKG_RAPL_LIMIT, 29 REG_PKG_POWER_SKU, 30 REG_PKG_POWER_SKU_UNIT, 31 REG_GT_PERF_STATUS, 32 REG_PKG_ENERGY_STATUS, 33 REG_FAN_SPEED, 34 }; 35 36 enum xe_hwmon_reg_operation { 37 REG_READ32, 38 REG_RMW32, 39 REG_READ64, 40 }; 41 42 enum xe_hwmon_channel { 43 CHANNEL_CARD, 44 CHANNEL_PKG, 45 CHANNEL_VRAM, 46 CHANNEL_MAX, 47 }; 48 49 enum xe_fan_channel { 50 FAN_1, 51 FAN_2, 52 FAN_3, 53 FAN_MAX, 54 }; 55 56 /* Attribute index for powerX_xxx_interval sysfs entries */ 57 enum sensor_attr_power { 58 SENSOR_INDEX_PSYS_PL1, 59 SENSOR_INDEX_PKG_PL1, 60 SENSOR_INDEX_PSYS_PL2, 61 SENSOR_INDEX_PKG_PL2, 62 }; 63 64 /* 65 * For platforms that support mailbox commands for power limits, REG_PKG_POWER_SKU_UNIT is 66 * not supported and below are SKU units to be used. 67 */ 68 #define PWR_UNIT 0x3 69 #define ENERGY_UNIT 0xe 70 #define TIME_UNIT 0xa 71 72 /* 73 * SF_* - scale factors for particular quantities according to hwmon spec. 74 */ 75 #define SF_POWER 1000000 /* microwatts */ 76 #define SF_CURR 1000 /* milliamperes */ 77 #define SF_VOLTAGE 1000 /* millivolts */ 78 #define SF_ENERGY 1000000 /* microjoules */ 79 #define SF_TIME 1000 /* milliseconds */ 80 81 /* 82 * PL*_HWMON_ATTR - mapping of hardware power limits to corresponding hwmon power attribute. 
 */
#define PL1_HWMON_ATTR	hwmon_power_max
#define PL2_HWMON_ATTR	hwmon_power_cap

#define PWR_ATTR_TO_STR(attr)	(((attr) == hwmon_power_max) ? "PL1" : "PL2")

/*
 * Timeout for power limit write mailbox command.
 */
#define PL_WRITE_MBX_TIMEOUT_MS	(1)

/**
 * struct xe_hwmon_energy_info - to accumulate energy
 */
struct xe_hwmon_energy_info {
	/** @reg_val_prev: previous energy reg val */
	u32 reg_val_prev;
	/** @accum_energy: accumulated energy */
	long accum_energy;
};

/**
 * struct xe_hwmon_fan_info - to cache previous fan reading
 */
struct xe_hwmon_fan_info {
	/** @reg_val_prev: previous fan reg val */
	u32 reg_val_prev;
	/** @time_prev: previous timestamp */
	u64 time_prev;
};

/**
 * struct xe_hwmon - xe hwmon data structure
 */
struct xe_hwmon {
	/** @hwmon_dev: hwmon device for xe */
	struct device *hwmon_dev;
	/** @xe: Xe device */
	struct xe_device *xe;
	/** @hwmon_lock: lock for rw attributes*/
	struct mutex hwmon_lock;
	/** @scl_shift_power: pkg power unit */
	int scl_shift_power;
	/** @scl_shift_energy: pkg energy unit */
	int scl_shift_energy;
	/** @scl_shift_time: pkg time unit */
	int scl_shift_time;
	/** @ei: Energy info for energyN_input */
	struct xe_hwmon_energy_info ei[CHANNEL_MAX];
	/** @fi: Fan info for fanN_input */
	struct xe_hwmon_fan_info fi[FAN_MAX];
	/** @boot_power_limit_read: is boot power limits read */
	bool boot_power_limit_read;
	/** @pl1_on_boot: power limit PL1 on boot */
	u32 pl1_on_boot[CHANNEL_MAX];
	/** @pl2_on_boot: power limit PL2 on boot */
	u32 pl2_on_boot[CHANNEL_MAX];

};

/*
 * Read a power limit via the pcode POWER_SETUP mailbox. val0 carries PL1 and
 * val1 carries PL2 for the selected channel (card/psys vs package). Until the
 * boot limits have been captured, limits are read from FW defaults instead of
 * pcode. *uval is 0 whenever the requested limit is disabled or the read fails.
 */
static int xe_hwmon_pcode_read_power_limit(const struct xe_hwmon *hwmon, u32 attr, int channel,
					   u32 *uval)
{
	struct xe_tile *root_tile = xe_device_get_root_tile(hwmon->xe);
	u32 val0 = 0, val1 = 0;
	int ret = 0;

	ret = xe_pcode_read(root_tile, PCODE_MBOX(PCODE_POWER_SETUP,
						  (channel == CHANNEL_CARD) ?
						  READ_PSYSGPU_POWER_LIMIT :
						  READ_PACKAGE_POWER_LIMIT,
						  hwmon->boot_power_limit_read ?
						  READ_PL_FROM_PCODE : READ_PL_FROM_FW),
			    &val0, &val1);

	if (ret) {
		drm_dbg(&hwmon->xe->drm, "read failed ch %d val0 0x%08x, val1 0x%08x, ret %d\n",
			channel, val0, val1, ret);
		*uval = 0;
		return ret;
	}

	/* return the value only if limit is enabled */
	if (attr == PL1_HWMON_ATTR)
		*uval = (val0 & PWR_LIM_EN) ? val0 : 0;
	else if (attr == PL2_HWMON_ATTR)
		*uval = (val1 & PWR_LIM_EN) ? val1 : 0;
	else if (attr == hwmon_power_label)
		/* label is visible if either PL1 or PL2 is enabled */
		*uval = (val0 & PWR_LIM_EN) ? 1 : (val1 & PWR_LIM_EN) ? 1 : 0;
	else
		*uval = 0;

	return ret;
}

/*
 * Read-modify-write one power limit (PL1 in val0, PL2 in val1) through the
 * pcode mailbox: read both limits, patch the requested one, write both back.
 * A failed read is only logged (drm_dbg) before the write is attempted.
 */
static int xe_hwmon_pcode_rmw_power_limit(const struct xe_hwmon *hwmon, u32 attr, u8 channel,
					  u32 clr, u32 set)
{
	struct xe_tile *root_tile = xe_device_get_root_tile(hwmon->xe);
	u32 val0 = 0, val1 = 0;
	int ret = 0;

	ret = xe_pcode_read(root_tile, PCODE_MBOX(PCODE_POWER_SETUP,
						  (channel == CHANNEL_CARD) ?
						  READ_PSYSGPU_POWER_LIMIT :
						  READ_PACKAGE_POWER_LIMIT,
						  hwmon->boot_power_limit_read ?
						  READ_PL_FROM_PCODE : READ_PL_FROM_FW),
			    &val0, &val1);

	if (ret)
		drm_dbg(&hwmon->xe->drm, "read failed ch %d val0 0x%08x, val1 0x%08x, ret %d\n",
			channel, val0, val1, ret);

	if (attr == PL1_HWMON_ATTR)
		val0 = (val0 & ~clr) | set;
	else if (attr == PL2_HWMON_ATTR)
		val1 = (val1 & ~clr) | set;
	else
		return -EIO;

	ret = xe_pcode_write64_timeout(root_tile, PCODE_MBOX(PCODE_POWER_SETUP,
							     (channel == CHANNEL_CARD) ?
							     WRITE_PSYSGPU_POWER_LIMIT :
							     WRITE_PACKAGE_POWER_LIMIT, 0),
				       val0, val1, PL_WRITE_MBX_TIMEOUT_MS);
	if (ret)
		drm_dbg(&hwmon->xe->drm, "write failed ch %d val0 0x%08x, val1 0x%08x, ret %d\n",
			channel, val0, val1, ret);
	return ret;
}

/*
 * Map an abstract hwmon register id + channel to the platform-specific MMIO
 * register. Returns XE_REG(0) (invalid) when the platform/channel combination
 * has no such register; callers gate reads on xe_reg_is_valid().
 */
static struct xe_reg xe_hwmon_get_reg(struct xe_hwmon *hwmon, enum xe_hwmon_reg hwmon_reg,
				      int channel)
{
	struct xe_device *xe = hwmon->xe;

	switch (hwmon_reg) {
	case REG_TEMP:
		if (xe->info.platform == XE_BATTLEMAGE) {
			if (channel == CHANNEL_PKG)
				return BMG_PACKAGE_TEMPERATURE;
			else if (channel == CHANNEL_VRAM)
				return BMG_VRAM_TEMPERATURE;
		} else if (xe->info.platform == XE_DG2) {
			if (channel == CHANNEL_PKG)
				return PCU_CR_PACKAGE_TEMPERATURE;
			/* NOTE(review): DG2 VRAM path returns a BMG_* register — confirm intended */
			else if (channel == CHANNEL_VRAM)
				return BMG_VRAM_TEMPERATURE;
		}
		break;
	case REG_PKG_RAPL_LIMIT:
		if (xe->info.platform == XE_PVC && channel == CHANNEL_PKG)
			return PVC_GT0_PACKAGE_RAPL_LIMIT;
		else if ((xe->info.platform == XE_DG2) && (channel == CHANNEL_PKG))
			return PCU_CR_PACKAGE_RAPL_LIMIT;
		break;
	case REG_PKG_POWER_SKU:
		if (xe->info.platform == XE_PVC && channel == CHANNEL_PKG)
			return PVC_GT0_PACKAGE_POWER_SKU;
		else if ((xe->info.platform == XE_DG2) && (channel == CHANNEL_PKG))
			return PCU_CR_PACKAGE_POWER_SKU;
		break;
	case REG_PKG_POWER_SKU_UNIT:
		if (xe->info.platform == XE_PVC)
			return PVC_GT0_PACKAGE_POWER_SKU_UNIT;
		else if (xe->info.platform == XE_DG2)
			return PCU_CR_PACKAGE_POWER_SKU_UNIT;
		break;
	case REG_GT_PERF_STATUS:
		if (xe->info.platform == XE_DG2 && channel == CHANNEL_PKG)
			return GT_PERF_STATUS;
		break;
	case REG_PKG_ENERGY_STATUS:
		if (xe->info.platform == XE_PVC && channel == CHANNEL_PKG) {
			return PVC_GT0_PLATFORM_ENERGY_STATUS;
		} else if ((xe->info.platform == XE_DG2) && (channel == CHANNEL_PKG)) {
			return PCU_CR_PACKAGE_ENERGY_STATUS;
		}
		break;
	case REG_FAN_SPEED:
		/* channel here is a fan index (enum xe_fan_channel), not a hwmon channel */
		if (channel == FAN_1)
			return BMG_FAN_1_SPEED;
		else if (channel == FAN_2)
			return BMG_FAN_2_SPEED;
		else if (channel == FAN_3)
			return BMG_FAN_3_SPEED;
		break;
	default:
		drm_warn(&xe->drm, "Unknown xe hwmon reg id: %d\n", hwmon_reg);
		break;
	}

	/* Invalid register — callers must check with xe_reg_is_valid() */
	return XE_REG(0);
}

#define PL_DISABLE	0

/*
 * HW allows arbitrary PL1 limits to be set but silently clamps these values to
 * "typical but not guaranteed" min/max values in REG_PKG_POWER_SKU. Follow the
 * same pattern for sysfs, allow arbitrary PL1 limits to be set but display
 * clamped values when read.
 */
static void xe_hwmon_power_max_read(struct xe_hwmon *hwmon, u32 attr, int channel, long *value)
{
	u32 reg_val = 0;
	struct xe_device *xe = hwmon->xe;
	struct xe_reg rapl_limit, pkg_power_sku;
	struct xe_mmio *mmio = xe_root_tile_mmio(xe);

	mutex_lock(&hwmon->hwmon_lock);

	if (hwmon->xe->info.has_mbx_power_limits) {
		xe_hwmon_pcode_read_power_limit(hwmon, attr, channel, &reg_val);
	} else {
		rapl_limit = xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, channel);
		pkg_power_sku = xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU, channel);
		reg_val = xe_mmio_read32(mmio, rapl_limit);
	}

	/* Check if PL limits are disabled. */
	if (!(reg_val & PWR_LIM_EN)) {
		*value = PL_DISABLE;
		drm_info(&hwmon->xe->drm, "%s disabled for channel %d, val 0x%08x\n",
			 PWR_ATTR_TO_STR(attr), channel, reg_val);
		goto unlock;
	}

	/* Convert from HW units to microwatts for the hwmon interface */
	reg_val = REG_FIELD_GET(PWR_LIM_VAL, reg_val);
	*value = mul_u32_u32(reg_val, SF_POWER) >> hwmon->scl_shift_power;

	/* For platforms with mailbox power limit support clamping would be done by pcode.
	 */
	if (!hwmon->xe->info.has_mbx_power_limits) {
		u64 pkg_pwr, min, max;

		pkg_pwr = xe_mmio_read64_2x32(mmio, pkg_power_sku);
		min = REG_FIELD_GET(PKG_MIN_PWR, pkg_pwr);
		max = REG_FIELD_GET(PKG_MAX_PWR, pkg_pwr);
		min = mul_u64_u32_shr(min, SF_POWER, hwmon->scl_shift_power);
		max = mul_u64_u32_shr(max, SF_POWER, hwmon->scl_shift_power);
		/* A zero min/max means the SKU register did not provide bounds */
		if (min && max)
			*value = clamp_t(u64, *value, min, max);
	}
unlock:
	mutex_unlock(&hwmon->hwmon_lock);
}

/*
 * Write a PL1/PL2 power limit in microwatts; value == PL_DISABLE requests
 * disabling the limit, which not all platforms support (verified by re-read).
 * Returns 0 on success or a negative error code.
 */
static int xe_hwmon_power_max_write(struct xe_hwmon *hwmon, u32 attr, int channel, long value)
{
	struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe);
	int ret = 0;
	u32 reg_val, max;
	struct xe_reg rapl_limit;
	u64 max_supp_power_limit = 0;

	mutex_lock(&hwmon->hwmon_lock);

	rapl_limit = xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, channel);

	/* Disable Power Limit and verify, as limit cannot be disabled on all platforms. */
	if (value == PL_DISABLE) {
		if (hwmon->xe->info.has_mbx_power_limits) {
			drm_dbg(&hwmon->xe->drm, "disabling %s on channel %d\n",
				PWR_ATTR_TO_STR(attr), channel);
			xe_hwmon_pcode_rmw_power_limit(hwmon, attr, channel, PWR_LIM_EN, 0);
			xe_hwmon_pcode_read_power_limit(hwmon, attr, channel, &reg_val);
		} else {
			reg_val = xe_mmio_rmw32(mmio, rapl_limit, PWR_LIM_EN, 0);
			reg_val = xe_mmio_read32(mmio, rapl_limit);
		}

		/* If the enable bit survived the clear, disabling is unsupported */
		if (reg_val & PWR_LIM_EN) {
			drm_warn(&hwmon->xe->drm, "Power limit disable is not supported!\n");
			ret = -EOPNOTSUPP;
		}
		goto unlock;
	}

	/*
	 * If the sysfs value exceeds the maximum pcode supported power limit value, clamp it to
	 * the supported maximum (U12.3 format).
	 * This is to avoid truncation during reg_val calculation below and ensure the valid
	 * power limit is sent for pcode which would clamp it to card-supported value.
	 */
	max_supp_power_limit = ((PWR_LIM_VAL) >> hwmon->scl_shift_power) * SF_POWER;
	if (value > max_supp_power_limit) {
		value = max_supp_power_limit;
		drm_info(&hwmon->xe->drm,
			 "Power limit clamped as selected %s exceeds channel %d limit\n",
			 PWR_ATTR_TO_STR(attr), channel);
	}

	/* Computation in 64-bits to avoid overflow. Round to nearest. */
	reg_val = DIV_ROUND_CLOSEST_ULL((u64)value << hwmon->scl_shift_power, SF_POWER);

	/*
	 * Clamp power limit to GPU firmware default as maximum, as an additional protection to
	 * pcode clamp.
	 */
	if (hwmon->xe->info.has_mbx_power_limits) {
		max = (attr == PL1_HWMON_ATTR) ?
			hwmon->pl1_on_boot[channel] : hwmon->pl2_on_boot[channel];
		max = REG_FIELD_PREP(PWR_LIM_VAL, max);
		if (reg_val > max) {
			reg_val = max;
			drm_dbg(&hwmon->xe->drm,
				"Clamping power limit to GPU firmware default 0x%x\n",
				reg_val);
		}
	}

	reg_val = PWR_LIM_EN | REG_FIELD_PREP(PWR_LIM_VAL, reg_val);

	if (hwmon->xe->info.has_mbx_power_limits)
		ret = xe_hwmon_pcode_rmw_power_limit(hwmon, attr, channel, PWR_LIM, reg_val);
	else
		reg_val = xe_mmio_rmw32(mmio, rapl_limit, PWR_LIM, reg_val);
unlock:
	mutex_unlock(&hwmon->hwmon_lock);
	return ret;
}

/*
 * Report the rated maximum (TDP) power in microwatts. With mailbox power
 * limits, PL1 serves as the rated max; otherwise PKG_TDP is read from the
 * power SKU register.
 */
static void xe_hwmon_power_rated_max_read(struct xe_hwmon *hwmon, u32 attr, int channel,
					  long *value)
{
	struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe);
	u32 reg_val;

	if (hwmon->xe->info.has_mbx_power_limits) {
		/* PL1 is rated max if supported. */
		xe_hwmon_pcode_read_power_limit(hwmon, PL1_HWMON_ATTR, channel, &reg_val);
	} else {
		/*
		 * This sysfs file won't be visible if REG_PKG_POWER_SKU is invalid, so valid check
		 * for this register can be skipped.
		 * See xe_hwmon_power_is_visible.
		 */
		struct xe_reg reg = xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU, channel);

		reg_val = xe_mmio_read32(mmio, reg);
	}

	reg_val = REG_FIELD_GET(PKG_TDP, reg_val);
	*value = mul_u64_u32_shr(reg_val, SF_POWER, hwmon->scl_shift_power);
}

/*
 * xe_hwmon_energy_get - Obtain energy value
 *
 * The underlying energy hardware register is 32-bits and is subject to
 * overflow. How long before overflow? For example, with an example
 * scaling bit shift of 14 bits (see register *PACKAGE_POWER_SKU_UNIT) and
 * a power draw of 1000 watts, the 32-bit counter will overflow in
 * approximately 4.36 minutes.
 *
 * Examples:
 * 1 watt: (2^32 >> 14) / 1 W / (60 * 60 * 24) secs/day -> 3 days
 * 1000 watts: (2^32 >> 14) / 1000 W / 60 secs/min -> 4.36 minutes
 *
 * The function significantly increases overflow duration (from 4.36
 * minutes) by accumulating the energy register into a 'long' as allowed by
 * the hwmon API. Using x86_64 128 bit arithmetic (see mul_u64_u32_shr()),
 * a 'long' of 63 bits, SF_ENERGY of 1e6 (~20 bits) and
 * hwmon->scl_shift_energy of 14 bits we have 57 (63 - 20 + 14) bits before
 * energyN_input overflows. This at 1000 W is an overflow duration of 278 years.
 */
static void
xe_hwmon_energy_get(struct xe_hwmon *hwmon, int channel, long *energy)
{
	struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe);
	struct xe_hwmon_energy_info *ei = &hwmon->ei[channel];
	u32 reg_val;
	int ret = 0;

	/* Energy is supported only for card and pkg */
	if (channel > CHANNEL_PKG) {
		*energy = 0;
		return;
	}

	if (hwmon->xe->info.platform == XE_BATTLEMAGE) {
		u64 pmt_val;

		/* On BMG the energy counter comes from PMT telemetry, not MMIO */
		ret = xe_pmt_telem_read(to_pci_dev(hwmon->xe->drm.dev),
					xe_mmio_read32(mmio, PUNIT_TELEMETRY_GUID),
					&pmt_val, BMG_ENERGY_STATUS_PMT_OFFSET, sizeof(pmt_val));
		if (ret != sizeof(pmt_val)) {
			drm_warn(&hwmon->xe->drm, "energy read from pmt failed, ret %d\n", ret);
			*energy = 0;
			return;
		}

		if (channel == CHANNEL_PKG)
			reg_val = REG_FIELD_GET64(ENERGY_PKG, pmt_val);
		else
			reg_val = REG_FIELD_GET64(ENERGY_CARD, pmt_val);
	} else {
		reg_val = xe_mmio_read32(mmio, xe_hwmon_get_reg(hwmon, REG_PKG_ENERGY_STATUS,
								channel));
	}

	/* u32 subtraction handles counter wrap-around between samples */
	ei->accum_energy += reg_val - ei->reg_val_prev;
	ei->reg_val_prev = reg_val;

	*energy = mul_u64_u32_shr(ei->accum_energy, SF_ENERGY,
				  hwmon->scl_shift_energy);
}

/*
 * Show the PL1/PL2 time window (tau) in milliseconds. The attribute index
 * encodes both channel (even = card/psys, odd = pkg) and limit (0-1 = PL1,
 * 2-3 = PL2); see enum sensor_attr_power.
 */
static ssize_t
xe_hwmon_power_max_interval_show(struct device *dev, struct device_attribute *attr,
				 char *buf)
{
	struct xe_hwmon *hwmon = dev_get_drvdata(dev);
	struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe);
	u32 reg_val, x, y, x_w = 2; /* 2 bits */
	u64 tau4, out;
	int channel = (to_sensor_dev_attr(attr)->index % 2) ? CHANNEL_PKG : CHANNEL_CARD;
	u32 power_attr = (to_sensor_dev_attr(attr)->index > 1) ? PL2_HWMON_ATTR : PL1_HWMON_ATTR;

	int ret = 0;

	xe_pm_runtime_get(hwmon->xe);

	mutex_lock(&hwmon->hwmon_lock);

	if (hwmon->xe->info.has_mbx_power_limits) {
		ret = xe_hwmon_pcode_read_power_limit(hwmon, power_attr, channel, &reg_val);
		if (ret) {
			drm_err(&hwmon->xe->drm,
				"power interval read fail, ch %d, attr %d, val 0x%08x, ret %d\n",
				channel, power_attr, reg_val, ret);
			reg_val = 0;
		}
	} else {
		reg_val = xe_mmio_read32(mmio, xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT,
								channel));
	}

	mutex_unlock(&hwmon->hwmon_lock);

	xe_pm_runtime_put(hwmon->xe);

	x = REG_FIELD_GET(PWR_LIM_TIME_X, reg_val);
	y = REG_FIELD_GET(PWR_LIM_TIME_Y, reg_val);

	/*
	 * tau = (1 + (x / 4)) * power(2,y), x = bits(23:22), y = bits(21:17)
	 *     = (4 | x) << (y - 2)
	 *
	 * Here (y - 2) ensures a 1.x fixed point representation of 1.x
	 * As x is 2 bits so 1.x can be 1.0, 1.25, 1.50, 1.75
	 *
	 * As y can be < 2, we compute tau4 = (4 | x) << y
	 * and then add 2 when doing the final right shift to account for units
	 */
	tau4 = (u64)((1 << x_w) | x) << y;

	/* val in hwmon interface units (millisec) */
	out = mul_u64_u32_shr(tau4, SF_TIME, hwmon->scl_shift_time + x_w);

	return sysfs_emit(buf, "%llu\n", out);
}

/*
 * Store a new PL1/PL2 time window (milliseconds), converting to the HW's
 * 1.x * 2^y fixed-point encoding. Rejects values above the max supported
 * window with -EINVAL.
 */
static ssize_t
xe_hwmon_power_max_interval_store(struct device *dev, struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct xe_hwmon *hwmon = dev_get_drvdata(dev);
	struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe);
	u32 x, y, rxy, x_w = 2; /* 2 bits */
	u64 tau4, r, max_win;
	unsigned long val;
	int channel = (to_sensor_dev_attr(attr)->index % 2) ? CHANNEL_PKG : CHANNEL_CARD;
	u32 power_attr = (to_sensor_dev_attr(attr)->index > 1) ? PL2_HWMON_ATTR : PL1_HWMON_ATTR;
	int ret;

	ret = kstrtoul(buf, 0, &val);
	if (ret)
		return ret;

	/*
	 * Max HW supported tau in '(1 + (x / 4)) * power(2,y)' format, x = 0, y = 0x12.
	 * The hwmon->scl_shift_time default of 0xa results in a max tau of 256 seconds.
	 *
	 * The ideal scenario is for PKG_MAX_WIN to be read from the PKG_PWR_SKU register.
	 * However, it is observed that existing discrete GPUs does not provide correct
	 * PKG_MAX_WIN value, therefore a using default constant value. For future discrete GPUs
	 * this may get resolved, in which case PKG_MAX_WIN should be obtained from PKG_PWR_SKU.
	 */
#define PKG_MAX_WIN_DEFAULT 0x12ull

	/*
	 * val must be < max in hwmon interface units. The steps below are
	 * explained in xe_hwmon_power_max_interval_show()
	 */
	r = FIELD_PREP(PKG_MAX_WIN, PKG_MAX_WIN_DEFAULT);
	x = REG_FIELD_GET(PKG_MAX_WIN_X, r);
	y = REG_FIELD_GET(PKG_MAX_WIN_Y, r);
	tau4 = (u64)((1 << x_w) | x) << y;
	max_win = mul_u64_u32_shr(tau4, SF_TIME, hwmon->scl_shift_time + x_w);

	if (val > max_win)
		return -EINVAL;

	/* val in hw units */
	val = DIV_ROUND_CLOSEST_ULL((u64)val << hwmon->scl_shift_time, SF_TIME) + 1;

	/*
	 * Convert val to 1.x * power(2,y)
	 * y = ilog2(val)
	 * x = (val - (1 << y)) >> (y - 2)
	 */
	if (!val) {
		y = 0;
		x = 0;
	} else {
		y = ilog2(val);
		x = (val - (1ul << y)) << x_w >> y;
	}

	rxy = REG_FIELD_PREP(PWR_LIM_TIME_X, x) |
	      REG_FIELD_PREP(PWR_LIM_TIME_Y, y);

	xe_pm_runtime_get(hwmon->xe);

	mutex_lock(&hwmon->hwmon_lock);

	if (hwmon->xe->info.has_mbx_power_limits)
		xe_hwmon_pcode_rmw_power_limit(hwmon, power_attr, channel, PWR_LIM_TIME, rxy);
	else
		r = xe_mmio_rmw32(mmio, xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, channel),
				  PWR_LIM_TIME, rxy);

	mutex_unlock(&hwmon->hwmon_lock);

	xe_pm_runtime_put(hwmon->xe);

	return count;
}

/* PSYS PL1 */
static SENSOR_DEVICE_ATTR(power1_max_interval, 0664,
			  xe_hwmon_power_max_interval_show,
			  xe_hwmon_power_max_interval_store, SENSOR_INDEX_PSYS_PL1);
/* PKG PL1 */
static SENSOR_DEVICE_ATTR(power2_max_interval, 0664,
			  xe_hwmon_power_max_interval_show,
			  xe_hwmon_power_max_interval_store, SENSOR_INDEX_PKG_PL1);
/* PSYS PL2 */
static SENSOR_DEVICE_ATTR(power1_cap_interval, 0664,
			  xe_hwmon_power_max_interval_show,
			  xe_hwmon_power_max_interval_store, SENSOR_INDEX_PSYS_PL2);
/* PKG PL2 */
static SENSOR_DEVICE_ATTR(power2_cap_interval, 0664,
			  xe_hwmon_power_max_interval_show,
			  xe_hwmon_power_max_interval_store, SENSOR_INDEX_PKG_PL2);

static struct attribute *hwmon_attributes[] = {
	&sensor_dev_attr_power1_max_interval.dev_attr.attr,
	&sensor_dev_attr_power2_max_interval.dev_attr.attr,
	&sensor_dev_attr_power1_cap_interval.dev_attr.attr,
	&sensor_dev_attr_power2_cap_interval.dev_attr.attr,
	NULL
};

/*
 * Expose an interval attribute only when the corresponding power limit is
 * enabled. index maps to hwmon_attributes[] order and is decoded the same
 * way as the sensor attribute index (even = card, odd = pkg; >1 = PL2).
 */
static umode_t xe_hwmon_attributes_visible(struct kobject *kobj,
					   struct attribute *attr, int index)
{
	struct device *dev = kobj_to_dev(kobj);
	struct xe_hwmon *hwmon = dev_get_drvdata(dev);
	int ret = 0;
	int channel = (index % 2) ? CHANNEL_PKG : CHANNEL_CARD;
	u32 power_attr = (index > 1) ? PL2_HWMON_ATTR : PL1_HWMON_ATTR;
	u32 uval = 0;
	struct xe_reg rapl_limit;
	struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe);

	xe_pm_runtime_get(hwmon->xe);

	if (hwmon->xe->info.has_mbx_power_limits) {
		xe_hwmon_pcode_read_power_limit(hwmon, power_attr, channel, &uval);
	} else if (power_attr != PL2_HWMON_ATTR) {
		/* Non-mailbox platforms have no PL2; only PL1 lives in RAPL_LIMIT */
		rapl_limit = xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, channel);
		if (xe_reg_is_valid(rapl_limit))
			uval = xe_mmio_read32(mmio, rapl_limit);
	}
	ret = (uval & PWR_LIM_EN) ?
	      attr->mode : 0;

	xe_pm_runtime_put(hwmon->xe);

	return ret;
}

static const struct attribute_group hwmon_attrgroup = {
	.attrs = hwmon_attributes,
	.is_visible = xe_hwmon_attributes_visible,
};

static const struct attribute_group *hwmon_groups[] = {
	&hwmon_attrgroup,
	NULL
};

/* Channel layout per sensor type; channel 0 = card/psys, 1 = pkg, 2 = vram/fan3 */
static const struct hwmon_channel_info * const hwmon_info[] = {
	HWMON_CHANNEL_INFO(temp, HWMON_T_LABEL, HWMON_T_INPUT | HWMON_T_LABEL,
			   HWMON_T_INPUT | HWMON_T_LABEL),
	HWMON_CHANNEL_INFO(power, HWMON_P_MAX | HWMON_P_RATED_MAX | HWMON_P_LABEL | HWMON_P_CRIT |
			   HWMON_P_CAP,
			   HWMON_P_MAX | HWMON_P_RATED_MAX | HWMON_P_LABEL | HWMON_P_CAP),
	HWMON_CHANNEL_INFO(curr, HWMON_C_LABEL, HWMON_C_CRIT | HWMON_C_LABEL),
	HWMON_CHANNEL_INFO(in, HWMON_I_INPUT | HWMON_I_LABEL, HWMON_I_INPUT | HWMON_I_LABEL),
	HWMON_CHANNEL_INFO(energy, HWMON_E_INPUT | HWMON_E_LABEL, HWMON_E_INPUT | HWMON_E_LABEL),
	HWMON_CHANNEL_INFO(fan, HWMON_F_INPUT, HWMON_F_INPUT, HWMON_F_INPUT),
	NULL
};

/* I1 is exposed as power_crit or as curr_crit depending on bit 31 */
static int xe_hwmon_pcode_read_i1(const struct xe_hwmon *hwmon, u32 *uval)
{
	struct xe_tile *root_tile = xe_device_get_root_tile(hwmon->xe);

	/* Avoid Illegal Subcommand error */
	if (hwmon->xe->info.platform == XE_DG2)
		return -ENXIO;

	return xe_pcode_read(root_tile, PCODE_MBOX(PCODE_POWER_SETUP,
						   POWER_SETUP_SUBCOMMAND_READ_I1, 0),
			     uval, NULL);
}

/* Write the I1 critical power/current threshold via the pcode mailbox */
static int xe_hwmon_pcode_write_i1(const struct xe_hwmon *hwmon, u32 uval)
{
	struct xe_tile *root_tile = xe_device_get_root_tile(hwmon->xe);

	return xe_pcode_write(root_tile, PCODE_MBOX(PCODE_POWER_SETUP,
						    POWER_SETUP_SUBCOMMAND_WRITE_I1, 0),
			      (uval & POWER_SETUP_I1_DATA_MASK));
}

/* Query fan controller state (e.g. number of fans) via the pcode mailbox */
static int xe_hwmon_pcode_read_fan_control(const struct xe_hwmon *hwmon, u32 subcmd, u32 *uval)
{
	struct xe_tile *root_tile = xe_device_get_root_tile(hwmon->xe);

	/* Platforms that don't return correct value */
	if (hwmon->xe->info.platform == XE_DG2 && subcmd == FSC_READ_NUM_FANS) {
		*uval = 2;
		return 0;
	}

	return xe_pcode_read(root_tile, PCODE_MBOX(FAN_SPEED_CONTROL, subcmd, 0), uval, NULL);
}

/*
 * Read the I1 critical threshold and scale it from the U10.6 fixed-point HW
 * format; scale_factor selects the unit (SF_POWER for watts, SF_CURR for amps).
 */
static int xe_hwmon_power_curr_crit_read(struct xe_hwmon *hwmon, int channel,
					 long *value, u32 scale_factor)
{
	int ret;
	u32 uval = 0;

	mutex_lock(&hwmon->hwmon_lock);

	ret = xe_hwmon_pcode_read_i1(hwmon, &uval);
	if (ret)
		goto unlock;

	*value = mul_u64_u32_shr(REG_FIELD_GET(POWER_SETUP_I1_DATA_MASK, uval),
				 scale_factor, POWER_SETUP_I1_SHIFT);
unlock:
	mutex_unlock(&hwmon->hwmon_lock);
	return ret;
}

/*
 * Write the I1 critical threshold, converting the sysfs value into the U10.6
 * fixed-point HW format; over-range values are clamped with a notice.
 */
static int xe_hwmon_power_curr_crit_write(struct xe_hwmon *hwmon, int channel,
					  long value, u32 scale_factor)
{
	int ret;
	u32 uval;
	u64 max_crit_power_curr = 0;

	mutex_lock(&hwmon->hwmon_lock);

	/*
	 * If the sysfs value exceeds the pcode mailbox cmd POWER_SETUP_SUBCOMMAND_WRITE_I1
	 * max supported value, clamp it to the command's max (U10.6 format).
	 * This is to avoid truncation during uval calculation below and ensure the valid power
	 * limit is sent for pcode which would clamp it to card-supported value.
	 */
	max_crit_power_curr = (POWER_SETUP_I1_DATA_MASK >> POWER_SETUP_I1_SHIFT) * scale_factor;
	if (value > max_crit_power_curr) {
		value = max_crit_power_curr;
		drm_info(&hwmon->xe->drm,
			 "Power limit clamped as selected exceeds channel %d limit\n",
			 channel);
	}
	uval = DIV_ROUND_CLOSEST_ULL(value << POWER_SETUP_I1_SHIFT, scale_factor);
	ret = xe_hwmon_pcode_write_i1(hwmon, uval);

	mutex_unlock(&hwmon->hwmon_lock);
	return ret;
}

/* Report the GT voltage in millivolts from GT_PERF_STATUS */
static void xe_hwmon_get_voltage(struct xe_hwmon *hwmon, int channel, long *value)
{
	struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe);
	u64 reg_val;

	reg_val = xe_mmio_read32(mmio, xe_hwmon_get_reg(hwmon, REG_GT_PERF_STATUS, channel));
	/* HW register value in units of 2.5 millivolt */
	*value = DIV_ROUND_CLOSEST(REG_FIELD_GET(VOLTAGE_MASK, reg_val) * 2500, SF_VOLTAGE);
}

static umode_t
xe_hwmon_temp_is_visible(struct xe_hwmon *hwmon, u32 attr, int channel)
{
	switch (attr) {
	case hwmon_temp_input:
	case hwmon_temp_label:
		return xe_reg_is_valid(xe_hwmon_get_reg(hwmon, REG_TEMP, channel)) ? 0444 : 0;
	default:
		return 0;
	}
}

static int
xe_hwmon_temp_read(struct xe_hwmon *hwmon, u32 attr, int channel, long *val)
{
	struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe);
	u64 reg_val;

	switch (attr) {
	case hwmon_temp_input:
		reg_val = xe_mmio_read32(mmio, xe_hwmon_get_reg(hwmon, REG_TEMP, channel));

		/* HW register value is in degrees Celsius, convert to millidegrees.
		 */
		*val = REG_FIELD_GET(TEMP_MASK, reg_val) * MILLIDEGREE_PER_DEGREE;
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

/*
 * Decide visibility of power attributes. max/cap are writable (0664) only
 * when the corresponding limit is enabled; crit depends on I1 being in watts
 * mode (bit 31 clear in POWER_SETUP_I1_WATTS sense).
 */
static umode_t
xe_hwmon_power_is_visible(struct xe_hwmon *hwmon, u32 attr, int channel)
{
	u32 uval = 0;
	struct xe_reg reg;
	struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe);

	switch (attr) {
	case hwmon_power_max:
	case hwmon_power_cap:
		if (hwmon->xe->info.has_mbx_power_limits) {
			xe_hwmon_pcode_read_power_limit(hwmon, attr, channel, &uval);
		} else if (attr != PL2_HWMON_ATTR) {
			reg = xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, channel);
			if (xe_reg_is_valid(reg))
				uval = xe_mmio_read32(mmio, reg);
		}
		if (uval & PWR_LIM_EN) {
			drm_info(&hwmon->xe->drm, "%s is supported on channel %d\n",
				 PWR_ATTR_TO_STR(attr), channel);
			return 0664;
		}
		drm_dbg(&hwmon->xe->drm, "%s is unsupported on channel %d\n",
			PWR_ATTR_TO_STR(attr), channel);
		return 0;
	case hwmon_power_rated_max:
		if (hwmon->xe->info.has_mbx_power_limits) {
			return 0;
		} else {
			reg = xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU, channel);
			if (xe_reg_is_valid(reg))
				uval = xe_mmio_read32(mmio, reg);
			return uval ? 0444 : 0;
		}
	case hwmon_power_crit:
		if (channel == CHANNEL_CARD) {
			xe_hwmon_pcode_read_i1(hwmon, &uval);
			return (uval & POWER_SETUP_I1_WATTS) ?
			       0644 : 0;
		}
		break;
	case hwmon_power_label:
		if (hwmon->xe->info.has_mbx_power_limits) {
			xe_hwmon_pcode_read_power_limit(hwmon, attr, channel, &uval);
		} else {
			reg = xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU, channel);
			if (xe_reg_is_valid(reg))
				uval = xe_mmio_read32(mmio, reg);

			/* Fall back to the RAPL limit register when SKU reads zero */
			if (!uval) {
				reg = xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, channel);
				if (xe_reg_is_valid(reg))
					uval = xe_mmio_read32(mmio, reg);
			}
		}
		/* No enabled limit: the card channel may still expose crit via I1 */
		if ((!(uval & PWR_LIM_EN)) && channel == CHANNEL_CARD) {
			xe_hwmon_pcode_read_i1(hwmon, &uval);
			return (uval & POWER_SETUP_I1_WATTS) ? 0444 : 0;
		}
		return (uval) ? 0444 : 0;
	default:
		return 0;
	}
	return 0;
}

static int
xe_hwmon_power_read(struct xe_hwmon *hwmon, u32 attr, int channel, long *val)
{
	switch (attr) {
	case hwmon_power_max:
	case hwmon_power_cap:
		xe_hwmon_power_max_read(hwmon, attr, channel, val);
		return 0;
	case hwmon_power_rated_max:
		xe_hwmon_power_rated_max_read(hwmon, attr, channel, val);
		return 0;
	case hwmon_power_crit:
		return xe_hwmon_power_curr_crit_read(hwmon, channel, val, SF_POWER);
	default:
		return -EOPNOTSUPP;
	}
}

static int
xe_hwmon_power_write(struct xe_hwmon *hwmon, u32 attr, int channel, long val)
{
	switch (attr) {
	case hwmon_power_cap:
	case hwmon_power_max:
		return xe_hwmon_power_max_write(hwmon, attr, channel, val);
	case hwmon_power_crit:
		return xe_hwmon_power_curr_crit_write(hwmon, channel, val, SF_POWER);
	default:
		return -EOPNOTSUPP;
	}
}

/* curr_crit/curr_label are visible only when I1 is in current (amps) mode */
static umode_t
xe_hwmon_curr_is_visible(const struct xe_hwmon *hwmon, u32 attr, int channel)
{
	u32 uval = 0;

	/* hwmon sysfs attribute of current available only for package */
	if (channel != CHANNEL_PKG)
		return 0;

	switch (attr) {
	case hwmon_curr_crit:
		return (xe_hwmon_pcode_read_i1(hwmon, &uval) ||
			(uval & POWER_SETUP_I1_WATTS)) ? 0 : 0644;
	case hwmon_curr_label:
		return (xe_hwmon_pcode_read_i1(hwmon, &uval) ||
			(uval & POWER_SETUP_I1_WATTS)) ? 0 : 0444;
		break; /* NOTE(review): unreachable after return; kept as-is */
	default:
		return 0;
	}
	return 0;
}

static int
xe_hwmon_curr_read(struct xe_hwmon *hwmon, u32 attr, int channel, long *val)
{
	switch (attr) {
	case hwmon_curr_crit:
		return xe_hwmon_power_curr_crit_read(hwmon, channel, val, SF_CURR);
	default:
		return -EOPNOTSUPP;
	}
}

static int
xe_hwmon_curr_write(struct xe_hwmon *hwmon, u32 attr, int channel, long val)
{
	switch (attr) {
	case hwmon_curr_crit:
		return xe_hwmon_power_curr_crit_write(hwmon, channel, val, SF_CURR);
	default:
		return -EOPNOTSUPP;
	}
}

static umode_t
xe_hwmon_in_is_visible(struct xe_hwmon *hwmon, u32 attr, int channel)
{
	switch (attr) {
	case hwmon_in_input:
	case hwmon_in_label:
		return xe_reg_is_valid(xe_hwmon_get_reg(hwmon, REG_GT_PERF_STATUS,
							channel)) ? 0444 : 0;
	default:
		return 0;
	}
}

static int
xe_hwmon_in_read(struct xe_hwmon *hwmon, u32 attr, int channel, long *val)
{
	switch (attr) {
	case hwmon_in_input:
		xe_hwmon_get_voltage(hwmon, channel, val);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static umode_t
xe_hwmon_energy_is_visible(struct xe_hwmon *hwmon, u32 attr, int channel)
{
	long energy = 0;

	switch (attr) {
	case hwmon_energy_input:
	case hwmon_energy_label:
		if (hwmon->xe->info.platform == XE_BATTLEMAGE) {
			/* BMG: probe the PMT telemetry path; visible only if it yields data */
			xe_hwmon_energy_get(hwmon, channel, &energy);
			return energy ? 0444 : 0;
		} else {
			return xe_reg_is_valid(xe_hwmon_get_reg(hwmon, REG_PKG_ENERGY_STATUS,
								channel)) ? 0444 : 0;
		}
	default:
		return 0;
	}
}

static int
xe_hwmon_energy_read(struct xe_hwmon *hwmon, u32 attr, int channel, long *val)
{
	switch (attr) {
	case hwmon_energy_input:
		xe_hwmon_energy_get(hwmon, channel, val);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static umode_t
xe_hwmon_fan_is_visible(struct xe_hwmon *hwmon, u32 attr, int channel)
{
	u32 uval = 0;

	if (!hwmon->xe->info.has_fan_control)
		return 0;

	switch (attr) {
	case hwmon_fan_input:
		if (xe_hwmon_pcode_read_fan_control(hwmon, FSC_READ_NUM_FANS, &uval))
			return 0;

		/* Expose only channels backed by a physical fan */
		return channel < uval ? 0444 : 0;
	default:
		return 0;
	}
}

/*
 * Compute fan RPM from the delta of the HW pulse counter between two reads,
 * time-averaged over jiffies. Returns -EAGAIN if called again within the same
 * millisecond (no elapsed time to average over).
 */
static int
xe_hwmon_fan_input_read(struct xe_hwmon *hwmon, int channel, long *val)
{
	struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe);
	struct xe_hwmon_fan_info *fi = &hwmon->fi[channel];
	u64 rotations, time_now, time;
	u32 reg_val;
	int ret = 0;

	mutex_lock(&hwmon->hwmon_lock);

	reg_val = xe_mmio_read32(mmio, xe_hwmon_get_reg(hwmon, REG_FAN_SPEED, channel));
	time_now = get_jiffies_64();

	/*
	 * HW register value is accumulated count of pulses from PWM fan with the scale
	 * of 2 pulses per rotation.
	 */
	rotations = (reg_val - fi->reg_val_prev) / 2;

	time = jiffies_delta_to_msecs(time_now - fi->time_prev);
	if (unlikely(!time)) {
		ret = -EAGAIN;
		goto unlock;
	}

	/*
	 * Calculate fan speed in RPM by time averaging two subsequent readings in minutes.
	 * RPM = number of rotations * msecs per minute / time in msecs
	 */
	*val = DIV_ROUND_UP_ULL(rotations * (MSEC_PER_SEC * 60), time);

	fi->reg_val_prev = reg_val;
	fi->time_prev = time_now;
unlock:
	mutex_unlock(&hwmon->hwmon_lock);
	return ret;
}

static int
xe_hwmon_fan_read(struct xe_hwmon *hwmon, u32 attr, int channel, long *val)
{
	switch (attr) {
	case hwmon_fan_input:
		return xe_hwmon_fan_input_read(hwmon, channel, val);
	default:
		return -EOPNOTSUPP;
	}
}

/* Top-level hwmon .is_visible callback; dispatches per sensor type */
static umode_t
xe_hwmon_is_visible(const void *drvdata, enum hwmon_sensor_types type,
		    u32 attr, int channel)
{
	struct xe_hwmon *hwmon = (struct xe_hwmon *)drvdata;
	int ret;

	xe_pm_runtime_get(hwmon->xe);

	switch (type) {
	case hwmon_temp:
		ret = xe_hwmon_temp_is_visible(hwmon, attr, channel);
		break;
	case hwmon_power:
		ret = xe_hwmon_power_is_visible(hwmon, attr, channel);
		break;
	case hwmon_curr:
		ret = xe_hwmon_curr_is_visible(hwmon, attr, channel);
		break;
	case hwmon_in:
		ret = xe_hwmon_in_is_visible(hwmon, attr, channel);
		break;
	case hwmon_energy:
		ret = xe_hwmon_energy_is_visible(hwmon, attr, channel);
		break;
	case hwmon_fan:
		ret = xe_hwmon_fan_is_visible(hwmon, attr, channel);
		break;
	default:
		ret = 0;
		break;
	}

	xe_pm_runtime_put(hwmon->xe);

	return ret;
}

/* Top-level hwmon .read callback; holds a runtime PM reference for the read */
static int
xe_hwmon_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
	      int channel, long *val)
{
	struct xe_hwmon *hwmon = dev_get_drvdata(dev);
	int ret;

	xe_pm_runtime_get(hwmon->xe);

	switch (type) {
	case hwmon_temp:
		ret = xe_hwmon_temp_read(hwmon, attr, channel, val);
		break;
	case hwmon_power:
		ret = xe_hwmon_power_read(hwmon, attr, channel, val);
		break;
	case hwmon_curr:
		ret = xe_hwmon_curr_read(hwmon, attr, channel, val);
		break;
	case hwmon_in:
		ret = xe_hwmon_in_read(hwmon, attr, channel, val);
		break;
	case hwmon_energy:
		ret = xe_hwmon_energy_read(hwmon, attr, channel, val);
		break;
	case hwmon_fan:
		ret = xe_hwmon_fan_read(hwmon, attr, channel, val);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	xe_pm_runtime_put(hwmon->xe);

	return ret;
}

/* Top-level hwmon .write callback; only power and curr are writable */
static int
xe_hwmon_write(struct device *dev, enum hwmon_sensor_types type, u32 attr,
	       int channel, long val)
{
	struct xe_hwmon *hwmon = dev_get_drvdata(dev);
	int ret;

	xe_pm_runtime_get(hwmon->xe);

	switch (type) {
	case hwmon_power:
		ret = xe_hwmon_power_write(hwmon, attr, channel, val);
		break;
	case hwmon_curr:
		ret = xe_hwmon_curr_write(hwmon, attr, channel, val);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	xe_pm_runtime_put(hwmon->xe);

	return ret;
}

/* hwmon .read_string callback: channel labels ("card"/"pkg"/"vram") */
static int xe_hwmon_read_label(struct device *dev,
			       enum hwmon_sensor_types type,
			       u32 attr, int channel, const char **str)
{
	switch (type) {
	case hwmon_temp:
		if (channel == CHANNEL_PKG)
			*str = "pkg";
		else if (channel == CHANNEL_VRAM)
			*str = "vram";
		return 0;
	case hwmon_power:
	case hwmon_energy:
	case hwmon_curr:
	case hwmon_in:
		if (channel == CHANNEL_CARD)
			*str = "card";
		else if (channel == CHANNEL_PKG)
			*str = "pkg";
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static const struct hwmon_ops hwmon_ops = {
	.is_visible = xe_hwmon_is_visible,
	.read = xe_hwmon_read,
	.write = xe_hwmon_write,
	.read_string = xe_hwmon_read_label,
};

static const struct hwmon_chip_info hwmon_chip_info = {
	.ops = &hwmon_ops,
	.info = hwmon_info,
};

static void
xe_hwmon_get_preregistration_info(struct xe_hwmon *hwmon) 1233 { 1234 struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe); 1235 long energy, fan_speed; 1236 u64 val_sku_unit = 0; 1237 int channel; 1238 struct xe_reg pkg_power_sku_unit; 1239 1240 if (hwmon->xe->info.has_mbx_power_limits) { 1241 /* Check if GPU firmware support mailbox power limits commands. */ 1242 if (xe_hwmon_pcode_read_power_limit(hwmon, PL1_HWMON_ATTR, CHANNEL_CARD, 1243 &hwmon->pl1_on_boot[CHANNEL_CARD]) | 1244 xe_hwmon_pcode_read_power_limit(hwmon, PL1_HWMON_ATTR, CHANNEL_PKG, 1245 &hwmon->pl1_on_boot[CHANNEL_PKG]) | 1246 xe_hwmon_pcode_read_power_limit(hwmon, PL2_HWMON_ATTR, CHANNEL_CARD, 1247 &hwmon->pl2_on_boot[CHANNEL_CARD]) | 1248 xe_hwmon_pcode_read_power_limit(hwmon, PL2_HWMON_ATTR, CHANNEL_PKG, 1249 &hwmon->pl2_on_boot[CHANNEL_PKG])) { 1250 drm_warn(&hwmon->xe->drm, 1251 "Failed to read power limits, check GPU firmware !\n"); 1252 } else { 1253 drm_info(&hwmon->xe->drm, "Using mailbox commands for power limits\n"); 1254 /* Write default limits to read from pcode from now on. 
*/ 1255 xe_hwmon_pcode_rmw_power_limit(hwmon, PL1_HWMON_ATTR, 1256 CHANNEL_CARD, PWR_LIM | PWR_LIM_TIME, 1257 hwmon->pl1_on_boot[CHANNEL_CARD]); 1258 xe_hwmon_pcode_rmw_power_limit(hwmon, PL1_HWMON_ATTR, 1259 CHANNEL_PKG, PWR_LIM | PWR_LIM_TIME, 1260 hwmon->pl1_on_boot[CHANNEL_PKG]); 1261 xe_hwmon_pcode_rmw_power_limit(hwmon, PL2_HWMON_ATTR, 1262 CHANNEL_CARD, PWR_LIM | PWR_LIM_TIME, 1263 hwmon->pl2_on_boot[CHANNEL_CARD]); 1264 xe_hwmon_pcode_rmw_power_limit(hwmon, PL2_HWMON_ATTR, 1265 CHANNEL_PKG, PWR_LIM | PWR_LIM_TIME, 1266 hwmon->pl2_on_boot[CHANNEL_PKG]); 1267 hwmon->scl_shift_power = PWR_UNIT; 1268 hwmon->scl_shift_energy = ENERGY_UNIT; 1269 hwmon->scl_shift_time = TIME_UNIT; 1270 hwmon->boot_power_limit_read = true; 1271 } 1272 } else { 1273 drm_info(&hwmon->xe->drm, "Using register for power limits\n"); 1274 /* 1275 * The contents of register PKG_POWER_SKU_UNIT do not change, 1276 * so read it once and store the shift values. 1277 */ 1278 pkg_power_sku_unit = xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU_UNIT, 0); 1279 if (xe_reg_is_valid(pkg_power_sku_unit)) { 1280 val_sku_unit = xe_mmio_read32(mmio, pkg_power_sku_unit); 1281 hwmon->scl_shift_power = REG_FIELD_GET(PKG_PWR_UNIT, val_sku_unit); 1282 hwmon->scl_shift_energy = REG_FIELD_GET(PKG_ENERGY_UNIT, val_sku_unit); 1283 hwmon->scl_shift_time = REG_FIELD_GET(PKG_TIME_UNIT, val_sku_unit); 1284 } 1285 } 1286 /* 1287 * Initialize 'struct xe_hwmon_energy_info', i.e. set fields to the 1288 * first value of the energy register read 1289 */ 1290 for (channel = 0; channel < CHANNEL_MAX; channel++) 1291 if (xe_hwmon_is_visible(hwmon, hwmon_energy, hwmon_energy_input, channel)) 1292 xe_hwmon_energy_get(hwmon, channel, &energy); 1293 1294 /* Initialize 'struct xe_hwmon_fan_info' with initial fan register reading. 
*/ 1295 for (channel = 0; channel < FAN_MAX; channel++) 1296 if (xe_hwmon_is_visible(hwmon, hwmon_fan, hwmon_fan_input, channel)) 1297 xe_hwmon_fan_input_read(hwmon, channel, &fan_speed); 1298 } 1299 1300 int xe_hwmon_register(struct xe_device *xe) 1301 { 1302 struct device *dev = xe->drm.dev; 1303 struct xe_hwmon *hwmon; 1304 int ret; 1305 1306 /* hwmon is available only for dGfx */ 1307 if (!IS_DGFX(xe)) 1308 return 0; 1309 1310 /* hwmon is not available on VFs */ 1311 if (IS_SRIOV_VF(xe)) 1312 return 0; 1313 1314 hwmon = devm_kzalloc(dev, sizeof(*hwmon), GFP_KERNEL); 1315 if (!hwmon) 1316 return -ENOMEM; 1317 1318 ret = devm_mutex_init(dev, &hwmon->hwmon_lock); 1319 if (ret) 1320 return ret; 1321 1322 /* There's only one instance of hwmon per device */ 1323 hwmon->xe = xe; 1324 xe->hwmon = hwmon; 1325 1326 xe_hwmon_get_preregistration_info(hwmon); 1327 1328 drm_dbg(&xe->drm, "Register xe hwmon interface\n"); 1329 1330 /* hwmon_dev points to device hwmon<i> */ 1331 hwmon->hwmon_dev = devm_hwmon_device_register_with_info(dev, "xe", hwmon, 1332 &hwmon_chip_info, 1333 hwmon_groups); 1334 if (IS_ERR(hwmon->hwmon_dev)) { 1335 drm_err(&xe->drm, "Failed to register xe hwmon (%pe)\n", hwmon->hwmon_dev); 1336 xe->hwmon = NULL; 1337 return PTR_ERR(hwmon->hwmon_dev); 1338 } 1339 1340 return 0; 1341 } 1342 MODULE_IMPORT_NS("INTEL_PMT_TELEMETRY"); 1343