// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023 Intel Corporation
 */

#include <linux/hwmon-sysfs.h>
#include <linux/hwmon.h>
#include <linux/types.h>

#include <drm/drm_managed.h>
#include "regs/xe_gt_regs.h"
#include "regs/xe_mchbar_regs.h"
#include "regs/xe_pcode_regs.h"
#include "xe_device.h"
#include "xe_hwmon.h"
#include "xe_mmio.h"
#include "xe_pcode.h"
#include "xe_pcode_api.h"
#include "xe_sriov.h"
#include "xe_pm.h"

enum xe_hwmon_reg {
	REG_PKG_RAPL_LIMIT,
	REG_PKG_POWER_SKU,
	REG_PKG_POWER_SKU_UNIT,
	REG_GT_PERF_STATUS,
	REG_PKG_ENERGY_STATUS,
};

enum xe_hwmon_reg_operation {
	REG_READ32,
	REG_RMW32,
	REG_READ64,
};

enum xe_hwmon_channel {
	CHANNEL_CARD,
	CHANNEL_PKG,
	CHANNEL_MAX,
};

/*
 * SF_* - scale factors for particular quantities according to hwmon spec.
 */
#define SF_POWER	1000000		/* microwatts */
#define SF_CURR		1000		/* milliamperes */
#define SF_VOLTAGE	1000		/* millivolts */
#define SF_ENERGY	1000000		/* microjoules */
#define SF_TIME		1000		/* milliseconds */

/**
 * struct xe_hwmon_energy_info - to accumulate energy
 */
struct xe_hwmon_energy_info {
	/** @reg_val_prev: previous energy reg val */
	u32 reg_val_prev;
	/** @accum_energy: accumulated energy */
	long accum_energy;
};

/**
 * struct xe_hwmon - xe hwmon data structure
 */
struct xe_hwmon {
	/** @hwmon_dev: hwmon device for xe */
	struct device *hwmon_dev;
	/** @xe: Xe device */
	struct xe_device *xe;
	/** @hwmon_lock: lock for rw attributes */
	struct mutex hwmon_lock;
	/** @scl_shift_power: pkg power unit */
	int scl_shift_power;
	/** @scl_shift_energy: pkg energy unit */
	int scl_shift_energy;
	/** @scl_shift_time: pkg time unit */
	int scl_shift_time;
	/** @ei: Energy info for energyN_input */
	struct xe_hwmon_energy_info ei[CHANNEL_MAX];
};

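/*
 * Map a logical hwmon register id and channel to the platform-specific MMIO
 * register. XE_REG(0) is returned when the quantity is not supported on the
 * given platform/channel combination; callers detect this via
 * xe_reg_is_valid().
 */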
static struct xe_reg xe_hwmon_get_reg(struct xe_hwmon *hwmon, enum xe_hwmon_reg hwmon_reg,
				      int channel)
{
	struct xe_device *xe = hwmon->xe;

	switch (hwmon_reg) {
	case REG_PKG_RAPL_LIMIT:
		if (xe->info.platform == XE_BATTLEMAGE) {
			if (channel == CHANNEL_PKG)
				return BMG_PACKAGE_RAPL_LIMIT;
			else
				return BMG_PLATFORM_POWER_LIMIT;
		} else if (xe->info.platform == XE_PVC && channel == CHANNEL_PKG) {
			return PVC_GT0_PACKAGE_RAPL_LIMIT;
		} else if ((xe->info.platform == XE_DG2) && (channel == CHANNEL_PKG)) {
			return PCU_CR_PACKAGE_RAPL_LIMIT;
		}
		break;
	case REG_PKG_POWER_SKU:
		if (xe->info.platform == XE_BATTLEMAGE)
			return BMG_PACKAGE_POWER_SKU;
		else if (xe->info.platform == XE_PVC && channel == CHANNEL_PKG)
			return PVC_GT0_PACKAGE_POWER_SKU;
		else if ((xe->info.platform == XE_DG2) && (channel == CHANNEL_PKG))
			return PCU_CR_PACKAGE_POWER_SKU;
		break;
	case REG_PKG_POWER_SKU_UNIT:
		if (xe->info.platform == XE_BATTLEMAGE)
			return BMG_PACKAGE_POWER_SKU_UNIT;
		else if (xe->info.platform == XE_PVC)
			return PVC_GT0_PACKAGE_POWER_SKU_UNIT;
		else if (xe->info.platform == XE_DG2)
			return PCU_CR_PACKAGE_POWER_SKU_UNIT;
		break;
	case REG_GT_PERF_STATUS:
		if (xe->info.platform == XE_DG2 && channel == CHANNEL_PKG)
			return GT_PERF_STATUS;
		break;
	case REG_PKG_ENERGY_STATUS:
		if (xe->info.platform == XE_BATTLEMAGE) {
			if (channel == CHANNEL_PKG)
				return BMG_PACKAGE_ENERGY_STATUS;
			else
				return BMG_PLATFORM_ENERGY_STATUS;
		} else if (xe->info.platform == XE_PVC && channel == CHANNEL_PKG) {
			return PVC_GT0_PLATFORM_ENERGY_STATUS;
		} else if ((xe->info.platform == XE_DG2) && (channel == CHANNEL_PKG)) {
			return PCU_CR_PACKAGE_ENERGY_STATUS;
		}
		break;
	default:
		drm_warn(&xe->drm, "Unknown xe hwmon reg id: %d\n", hwmon_reg);
		break;
	}

	return XE_REG(0);
}

#define PL1_DISABLE 0

/*
 * HW allows arbitrary PL1 limits to be set but silently clamps these values to
 * "typical but not guaranteed" min/max values in REG_PKG_POWER_SKU. Follow the
 * same pattern for sysfs, allow arbitrary PL1 limits to be set but display
 * clamped values when read.
 */
static void xe_hwmon_power_max_read(struct xe_hwmon *hwmon, int channel, long *value)
{
	u64 reg_val, min, max;
	struct xe_device *xe = hwmon->xe;
	struct xe_reg rapl_limit, pkg_power_sku;
	struct xe_mmio *mmio = xe_root_tile_mmio(xe);

	rapl_limit = xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, channel);
	pkg_power_sku = xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU, channel);

	/*
	 * The validity of REG_PKG_RAPL_LIMIT is already checked in
	 * xe_hwmon_power_is_visible, so it is not checked again here.
	 */
	if (!xe_reg_is_valid(pkg_power_sku)) {
		drm_warn(&xe->drm, "pkg_power_sku invalid\n");
		*value = 0;
		return;
	}

	mutex_lock(&hwmon->hwmon_lock);

	reg_val = xe_mmio_read32(mmio, rapl_limit);
	/* Check if PL1 limit is disabled */
	if (!(reg_val & PKG_PWR_LIM_1_EN)) {
		*value = PL1_DISABLE;
		goto unlock;
	}

	reg_val = REG_FIELD_GET(PKG_PWR_LIM_1, reg_val);
	*value = mul_u64_u32_shr(reg_val, SF_POWER, hwmon->scl_shift_power);

	reg_val = xe_mmio_read64_2x32(mmio, pkg_power_sku);
	min = REG_FIELD_GET(PKG_MIN_PWR, reg_val);
	min = mul_u64_u32_shr(min, SF_POWER, hwmon->scl_shift_power);
	max = REG_FIELD_GET(PKG_MAX_PWR, reg_val);
	max = mul_u64_u32_shr(max, SF_POWER, hwmon->scl_shift_power);

	if (min && max)
		*value = clamp_t(u64, *value, min, max);
unlock:
	mutex_unlock(&hwmon->hwmon_lock);
}

static int xe_hwmon_power_max_write(struct xe_hwmon *hwmon, int channel, long value)
{
	struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe);
	int ret = 0;
	u64 reg_val;
	struct xe_reg rapl_limit;

	rapl_limit = xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, channel);

	mutex_lock(&hwmon->hwmon_lock);

	/* Disable PL1 limit and verify, as the limit cannot be disabled on all platforms */
	if (value == PL1_DISABLE) {
		reg_val = xe_mmio_rmw32(mmio, rapl_limit, PKG_PWR_LIM_1_EN, 0);
		reg_val = xe_mmio_read32(mmio, rapl_limit);
		if (reg_val & PKG_PWR_LIM_1_EN) {
			drm_warn(&hwmon->xe->drm, "PL1 disable is not supported!\n");
			ret = -EOPNOTSUPP;
		}
		goto unlock;
	}

	/* Computation in 64 bits to avoid overflow. Round to nearest. */
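	/*
	 * Illustrative conversion (unit value assumed): if scl_shift_power
	 * were 3, i.e. register units of 1/8 W, a request of 250000000 uW
	 * would be encoded as DIV_ROUND_CLOSEST_ULL(250000000 << 3, SF_POWER)
	 * = 2000 register units.
	 */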
	reg_val = DIV_ROUND_CLOSEST_ULL((u64)value << hwmon->scl_shift_power, SF_POWER);
	reg_val = PKG_PWR_LIM_1_EN | REG_FIELD_PREP(PKG_PWR_LIM_1, reg_val);
	reg_val = xe_mmio_rmw32(mmio, rapl_limit, PKG_PWR_LIM_1_EN | PKG_PWR_LIM_1, reg_val);

unlock:
	mutex_unlock(&hwmon->hwmon_lock);
	return ret;
}

static void xe_hwmon_power_rated_max_read(struct xe_hwmon *hwmon, int channel, long *value)
{
	struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe);
	struct xe_reg reg = xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU, channel);
	u64 reg_val;

	/*
	 * This sysfs file won't be visible if REG_PKG_POWER_SKU is invalid, so
	 * the validity check for this register can be skipped here.
	 * See xe_hwmon_power_is_visible.
	 */
	reg_val = xe_mmio_read32(mmio, reg);
	reg_val = REG_FIELD_GET(PKG_TDP, reg_val);
	*value = mul_u64_u32_shr(reg_val, SF_POWER, hwmon->scl_shift_power);
}

/*
 * xe_hwmon_energy_get - Obtain energy value
 *
 * The underlying energy hardware register is 32 bits wide and is subject to
 * overflow. How long before overflow? For example, with a scaling bit shift of
 * 14 bits (see register *PACKAGE_POWER_SKU_UNIT) and a power draw of
 * 1000 watts, the 32-bit counter will overflow in approximately 4.36 minutes.
 *
 * Examples:
 * 1 watt:      (2^32 >> 14) /    1 W / (60 * 60 * 24) secs/day -> 3 days
 * 1000 watts:  (2^32 >> 14) / 1000 W / 60 secs/min             -> 4.36 minutes
 *
 * The function significantly increases the overflow duration (from 4.36
 * minutes) by accumulating the energy register into a 'long' as allowed by
 * the hwmon API. Using x86_64 128 bit arithmetic (see mul_u64_u32_shr()),
 * a 'long' of 63 bits, SF_ENERGY of 1e6 (~20 bits) and
 * hwmon->scl_shift_energy of 14 bits we have 57 (63 - 20 + 14) bits before
 * energyN_input overflows. This at 1000 W is an overflow duration of 278 years.
 */
static void
xe_hwmon_energy_get(struct xe_hwmon *hwmon, int channel, long *energy)
{
	struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe);
	struct xe_hwmon_energy_info *ei = &hwmon->ei[channel];
	u64 reg_val;

	reg_val = xe_mmio_read32(mmio, xe_hwmon_get_reg(hwmon, REG_PKG_ENERGY_STATUS,
							channel));

	if (reg_val >= ei->reg_val_prev)
		ei->accum_energy += reg_val - ei->reg_val_prev;
	else
		ei->accum_energy += UINT_MAX - ei->reg_val_prev + reg_val;

	ei->reg_val_prev = reg_val;

	*energy = mul_u64_u32_shr(ei->accum_energy, SF_ENERGY,
				  hwmon->scl_shift_energy);
}

static ssize_t
xe_hwmon_power_max_interval_show(struct device *dev, struct device_attribute *attr,
				 char *buf)
{
	struct xe_hwmon *hwmon = dev_get_drvdata(dev);
	struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe);
	u32 x, y, x_w = 2; /* 2 bits */
	u64 r, tau4, out;
	int sensor_index = to_sensor_dev_attr(attr)->index;

	xe_pm_runtime_get(hwmon->xe);

	mutex_lock(&hwmon->hwmon_lock);

	r = xe_mmio_read32(mmio, xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, sensor_index));

	mutex_unlock(&hwmon->hwmon_lock);

	xe_pm_runtime_put(hwmon->xe);

	x = REG_FIELD_GET(PKG_PWR_LIM_1_TIME_X, r);
	y = REG_FIELD_GET(PKG_PWR_LIM_1_TIME_Y, r);

	/*
	 * tau = 1.x * power(2,y), x = bits(23:22), y = bits(21:17)
	 *     = (4 | x) << (y - 2)
	 *
	 * Here (y - 2) ensures a 1.x fixed point representation.
	 * As x is 2 bits, 1.x can be 1.0, 1.25, 1.50 or 1.75.
	 *
	 * As y can be < 2, we compute tau4 = (4 | x) << y
	 * and then add 2 when doing the final right shift to account for units.
	 */
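	/*
	 * Worked example (register values assumed): x = 1, y = 7 encodes
	 * tau = 1.25 * 2^7 = 160 hardware time units, so tau4 = (4 | 1) << 7
	 * = 640. With the default scl_shift_time of 0xa (1/1024 s units) the
	 * reported interval is 640 * SF_TIME >> (0xa + 2) = ~156 msec.
	 */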
	tau4 = (u64)((1 << x_w) | x) << y;

	/* val in hwmon interface units (millisec) */
	out = mul_u64_u32_shr(tau4, SF_TIME, hwmon->scl_shift_time + x_w);

	return sysfs_emit(buf, "%llu\n", out);
}

static ssize_t
xe_hwmon_power_max_interval_store(struct device *dev, struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct xe_hwmon *hwmon = dev_get_drvdata(dev);
	struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe);
	u32 x, y, rxy, x_w = 2; /* 2 bits */
	u64 tau4, r, max_win;
	unsigned long val;
	int ret;
	int sensor_index = to_sensor_dev_attr(attr)->index;

	ret = kstrtoul(buf, 0, &val);
	if (ret)
		return ret;

	/*
	 * Max HW supported tau in '1.x * power(2,y)' format, x = 0, y = 0x12.
	 * The hwmon->scl_shift_time default of 0xa results in a max tau of 256 seconds.
	 *
	 * The ideal scenario is for PKG_MAX_WIN to be read from the PKG_PWR_SKU register.
	 * However, the existing discrete GPUs have been observed not to provide a correct
	 * PKG_MAX_WIN value, so a default constant is used instead. Future discrete GPUs
	 * may resolve this, in which case PKG_MAX_WIN should be obtained from PKG_PWR_SKU.
	 */
#define PKG_MAX_WIN_DEFAULT 0x12ull

	/*
	 * val must be < max in hwmon interface units. The steps below are
	 * explained in xe_hwmon_power_max_interval_show().
	 */
	r = FIELD_PREP(PKG_MAX_WIN, PKG_MAX_WIN_DEFAULT);
	x = REG_FIELD_GET(PKG_MAX_WIN_X, r);
	y = REG_FIELD_GET(PKG_MAX_WIN_Y, r);
	tau4 = (u64)((1 << x_w) | x) << y;
	max_win = mul_u64_u32_shr(tau4, SF_TIME, hwmon->scl_shift_time + x_w);

	if (val > max_win)
		return -EINVAL;

	/* val in hw units */
	val = DIV_ROUND_CLOSEST_ULL((u64)val << hwmon->scl_shift_time, SF_TIME);

	/*
	 * Convert val to 1.x * power(2,y)
	 * y = ilog2(val)
	 * x = (val - (1 << y)) >> (y - 2)
	 */
	if (!val) {
		y = 0;
		x = 0;
	} else {
		y = ilog2(val);
		x = (val - (1ul << y)) << x_w >> y;
	}

	rxy = REG_FIELD_PREP(PKG_PWR_LIM_1_TIME_X, x) | REG_FIELD_PREP(PKG_PWR_LIM_1_TIME_Y, y);

	xe_pm_runtime_get(hwmon->xe);

	mutex_lock(&hwmon->hwmon_lock);

	r = xe_mmio_rmw32(mmio, xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, sensor_index),
			  PKG_PWR_LIM_1_TIME, rxy);

	mutex_unlock(&hwmon->hwmon_lock);

	xe_pm_runtime_put(hwmon->xe);

	return count;
}

static SENSOR_DEVICE_ATTR(power1_max_interval, 0664,
			  xe_hwmon_power_max_interval_show,
			  xe_hwmon_power_max_interval_store, CHANNEL_CARD);

static SENSOR_DEVICE_ATTR(power2_max_interval, 0664,
			  xe_hwmon_power_max_interval_show,
			  xe_hwmon_power_max_interval_store, CHANNEL_PKG);

static struct attribute *hwmon_attributes[] = {
	&sensor_dev_attr_power1_max_interval.dev_attr.attr,
	&sensor_dev_attr_power2_max_interval.dev_attr.attr,
	NULL
};

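/*
 * powerN_max_interval is exposed only when the platform provides a RAPL limit
 * register for the corresponding channel; otherwise the attribute is hidden.
 */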
static umode_t xe_hwmon_attributes_visible(struct kobject *kobj,
					   struct attribute *attr, int index)
{
	struct device *dev = kobj_to_dev(kobj);
	struct xe_hwmon *hwmon = dev_get_drvdata(dev);
	int ret = 0;

	xe_pm_runtime_get(hwmon->xe);

	ret = xe_reg_is_valid(xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, index)) ?
		attr->mode : 0;

	xe_pm_runtime_put(hwmon->xe);

	return ret;
}

static const struct attribute_group hwmon_attrgroup = {
	.attrs = hwmon_attributes,
	.is_visible = xe_hwmon_attributes_visible,
};

static const struct attribute_group *hwmon_groups[] = {
	&hwmon_attrgroup,
	NULL
};

static const struct hwmon_channel_info * const hwmon_info[] = {
	HWMON_CHANNEL_INFO(power, HWMON_P_MAX | HWMON_P_RATED_MAX | HWMON_P_LABEL,
			   HWMON_P_MAX | HWMON_P_RATED_MAX | HWMON_P_CRIT | HWMON_P_LABEL),
	HWMON_CHANNEL_INFO(curr, HWMON_C_LABEL, HWMON_C_CRIT | HWMON_C_LABEL),
	HWMON_CHANNEL_INFO(in, HWMON_I_INPUT | HWMON_I_LABEL, HWMON_I_INPUT | HWMON_I_LABEL),
	HWMON_CHANNEL_INFO(energy, HWMON_E_INPUT | HWMON_E_LABEL, HWMON_E_INPUT | HWMON_E_LABEL),
	NULL
};

/* I1 is exposed as power_crit or as curr_crit depending on bit 31 */
static int xe_hwmon_pcode_read_i1(const struct xe_hwmon *hwmon, u32 *uval)
{
	struct xe_tile *root_tile = xe_device_get_root_tile(hwmon->xe);

	/* Avoid Illegal Subcommand error */
	if (hwmon->xe->info.platform == XE_DG2)
		return -ENXIO;

	return xe_pcode_read(root_tile, PCODE_MBOX(PCODE_POWER_SETUP,
						   POWER_SETUP_SUBCOMMAND_READ_I1, 0),
			     uval, NULL);
}

static int xe_hwmon_pcode_write_i1(const struct xe_hwmon *hwmon, u32 uval)
{
	struct xe_tile *root_tile = xe_device_get_root_tile(hwmon->xe);

	return xe_pcode_write(root_tile, PCODE_MBOX(PCODE_POWER_SETUP,
						    POWER_SETUP_SUBCOMMAND_WRITE_I1, 0),
			      (uval & POWER_SETUP_I1_DATA_MASK));
}

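/*
 * The I1 value exchanged with pcode above is carried in
 * POWER_SETUP_I1_DATA_MASK as a fixed-point number with POWER_SETUP_I1_SHIFT
 * fractional bits. The helpers below convert it to and from the hwmon unit
 * selected by scale_factor (SF_POWER for power in uW, SF_CURR for current in mA).
 */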
static int xe_hwmon_power_curr_crit_read(struct xe_hwmon *hwmon, int channel,
					 long *value, u32 scale_factor)
{
	int ret;
	u32 uval;

	mutex_lock(&hwmon->hwmon_lock);

	ret = xe_hwmon_pcode_read_i1(hwmon, &uval);
	if (ret)
		goto unlock;

	*value = mul_u64_u32_shr(REG_FIELD_GET(POWER_SETUP_I1_DATA_MASK, uval),
				 scale_factor, POWER_SETUP_I1_SHIFT);
unlock:
	mutex_unlock(&hwmon->hwmon_lock);
	return ret;
}

static int xe_hwmon_power_curr_crit_write(struct xe_hwmon *hwmon, int channel,
					  long value, u32 scale_factor)
{
	int ret;
	u32 uval;

	mutex_lock(&hwmon->hwmon_lock);

	uval = DIV_ROUND_CLOSEST_ULL(value << POWER_SETUP_I1_SHIFT, scale_factor);
	ret = xe_hwmon_pcode_write_i1(hwmon, uval);

	mutex_unlock(&hwmon->hwmon_lock);
	return ret;
}

static void xe_hwmon_get_voltage(struct xe_hwmon *hwmon, int channel, long *value)
{
	struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe);
	u64 reg_val;

	reg_val = xe_mmio_read32(mmio, xe_hwmon_get_reg(hwmon, REG_GT_PERF_STATUS, channel));
	/* HW register value is in units of 2.5 millivolts */
	*value = DIV_ROUND_CLOSEST(REG_FIELD_GET(VOLTAGE_MASK, reg_val) * 2500, SF_VOLTAGE);
}

static umode_t
xe_hwmon_power_is_visible(struct xe_hwmon *hwmon, u32 attr, int channel)
{
	u32 uval;

	switch (attr) {
	case hwmon_power_max:
		return xe_reg_is_valid(xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT,
							channel)) ? 0664 : 0;
	case hwmon_power_rated_max:
		return xe_reg_is_valid(xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU,
							channel)) ? 0444 : 0;
	case hwmon_power_crit:
		if (channel == CHANNEL_PKG)
			return (xe_hwmon_pcode_read_i1(hwmon, &uval) ||
				!(uval & POWER_SETUP_I1_WATTS)) ? 0 : 0644;
		break;
	case hwmon_power_label:
		return xe_reg_is_valid(xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU_UNIT,
							channel)) ? 0444 : 0;
	default:
		return 0;
	}
	return 0;
}

static int
xe_hwmon_power_read(struct xe_hwmon *hwmon, u32 attr, int channel, long *val)
{
	switch (attr) {
	case hwmon_power_max:
		xe_hwmon_power_max_read(hwmon, channel, val);
		return 0;
	case hwmon_power_rated_max:
		xe_hwmon_power_rated_max_read(hwmon, channel, val);
		return 0;
	case hwmon_power_crit:
		return xe_hwmon_power_curr_crit_read(hwmon, channel, val, SF_POWER);
	default:
		return -EOPNOTSUPP;
	}
}

static int
xe_hwmon_power_write(struct xe_hwmon *hwmon, u32 attr, int channel, long val)
{
	switch (attr) {
	case hwmon_power_max:
		return xe_hwmon_power_max_write(hwmon, channel, val);
	case hwmon_power_crit:
		return xe_hwmon_power_curr_crit_write(hwmon, channel, val, SF_POWER);
	default:
		return -EOPNOTSUPP;
	}
}

static umode_t
xe_hwmon_curr_is_visible(const struct xe_hwmon *hwmon, u32 attr, int channel)
{
	u32 uval;

	/* The curr hwmon sysfs attributes are available only for the package channel */
	if (channel != CHANNEL_PKG)
		return 0;

	switch (attr) {
	case hwmon_curr_crit:
		return (xe_hwmon_pcode_read_i1(hwmon, &uval) ||
			(uval & POWER_SETUP_I1_WATTS)) ? 0 : 0644;
	case hwmon_curr_label:
		return (xe_hwmon_pcode_read_i1(hwmon, &uval) ||
			(uval & POWER_SETUP_I1_WATTS)) ? 0 : 0444;
	default:
		return 0;
	}
	return 0;
}

static int
xe_hwmon_curr_read(struct xe_hwmon *hwmon, u32 attr, int channel, long *val)
{
	switch (attr) {
	case hwmon_curr_crit:
		return xe_hwmon_power_curr_crit_read(hwmon, channel, val, SF_CURR);
	default:
		return -EOPNOTSUPP;
	}
}

static int
xe_hwmon_curr_write(struct xe_hwmon *hwmon, u32 attr, int channel, long val)
{
	switch (attr) {
	case hwmon_curr_crit:
		return xe_hwmon_power_curr_crit_write(hwmon, channel, val, SF_CURR);
	default:
		return -EOPNOTSUPP;
	}
}

static umode_t
xe_hwmon_in_is_visible(struct xe_hwmon *hwmon, u32 attr, int channel)
{
	switch (attr) {
	case hwmon_in_input:
	case hwmon_in_label:
		return xe_reg_is_valid(xe_hwmon_get_reg(hwmon, REG_GT_PERF_STATUS,
							channel)) ? 0444 : 0;
	default:
		return 0;
	}
}

static int
xe_hwmon_in_read(struct xe_hwmon *hwmon, u32 attr, int channel, long *val)
{
	switch (attr) {
	case hwmon_in_input:
		xe_hwmon_get_voltage(hwmon, channel, val);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

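/*
 * energyN_input/energyN_label are exposed only when the platform provides an
 * energy status register for the channel; readings come from the software
 * extended counter maintained by xe_hwmon_energy_get().
 */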
static umode_t
xe_hwmon_energy_is_visible(struct xe_hwmon *hwmon, u32 attr, int channel)
{
	switch (attr) {
	case hwmon_energy_input:
	case hwmon_energy_label:
		return xe_reg_is_valid(xe_hwmon_get_reg(hwmon, REG_PKG_ENERGY_STATUS,
							channel)) ? 0444 : 0;
	default:
		return 0;
	}
}

static int
xe_hwmon_energy_read(struct xe_hwmon *hwmon, u32 attr, int channel, long *val)
{
	switch (attr) {
	case hwmon_energy_input:
		xe_hwmon_energy_get(hwmon, channel, val);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static umode_t
xe_hwmon_is_visible(const void *drvdata, enum hwmon_sensor_types type,
		    u32 attr, int channel)
{
	struct xe_hwmon *hwmon = (struct xe_hwmon *)drvdata;
	int ret;

	xe_pm_runtime_get(hwmon->xe);

	switch (type) {
	case hwmon_power:
		ret = xe_hwmon_power_is_visible(hwmon, attr, channel);
		break;
	case hwmon_curr:
		ret = xe_hwmon_curr_is_visible(hwmon, attr, channel);
		break;
	case hwmon_in:
		ret = xe_hwmon_in_is_visible(hwmon, attr, channel);
		break;
	case hwmon_energy:
		ret = xe_hwmon_energy_is_visible(hwmon, attr, channel);
		break;
	default:
		ret = 0;
		break;
	}

	xe_pm_runtime_put(hwmon->xe);

	return ret;
}

static int
xe_hwmon_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
	      int channel, long *val)
{
	struct xe_hwmon *hwmon = dev_get_drvdata(dev);
	int ret;

	xe_pm_runtime_get(hwmon->xe);

	switch (type) {
	case hwmon_power:
		ret = xe_hwmon_power_read(hwmon, attr, channel, val);
		break;
	case hwmon_curr:
		ret = xe_hwmon_curr_read(hwmon, attr, channel, val);
		break;
	case hwmon_in:
		ret = xe_hwmon_in_read(hwmon, attr, channel, val);
		break;
	case hwmon_energy:
		ret = xe_hwmon_energy_read(hwmon, attr, channel, val);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	xe_pm_runtime_put(hwmon->xe);

	return ret;
}

static int
xe_hwmon_write(struct device *dev, enum hwmon_sensor_types type, u32 attr,
	       int channel, long val)
{
	struct xe_hwmon *hwmon = dev_get_drvdata(dev);
	int ret;

	xe_pm_runtime_get(hwmon->xe);

	switch (type) {
	case hwmon_power:
		ret = xe_hwmon_power_write(hwmon, attr, channel, val);
		break;
	case hwmon_curr:
		ret = xe_hwmon_curr_write(hwmon, attr, channel, val);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	xe_pm_runtime_put(hwmon->xe);

	return ret;
}

static int xe_hwmon_read_label(struct device *dev,
			       enum hwmon_sensor_types type,
			       u32 attr, int channel, const char **str)
{
	switch (type) {
	case hwmon_power:
	case hwmon_energy:
	case hwmon_curr:
	case hwmon_in:
		if (channel == CHANNEL_CARD)
			*str = "card";
		else if (channel == CHANNEL_PKG)
			*str = "pkg";
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static const struct hwmon_ops hwmon_ops = {
	.is_visible = xe_hwmon_is_visible,
	.read = xe_hwmon_read,
	.write = xe_hwmon_write,
	.read_string = xe_hwmon_read_label,
};

static const struct hwmon_chip_info hwmon_chip_info = {
	.ops = &hwmon_ops,
	.info = hwmon_info,
};

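/*
 * Cache the SKU unit shifts and prime the per-channel energy accumulators
 * before the hwmon device is registered, so the first sysfs reads use
 * consistent scaling.
 */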
static void
xe_hwmon_get_preregistration_info(struct xe_device *xe)
{
	struct xe_mmio *mmio = xe_root_tile_mmio(xe);
	struct xe_hwmon *hwmon = xe->hwmon;
	long energy;
	u64 val_sku_unit = 0;
	int channel;
	struct xe_reg pkg_power_sku_unit;

	/*
	 * The contents of register PKG_POWER_SKU_UNIT do not change,
	 * so read it once and store the shift values.
	 */
	pkg_power_sku_unit = xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU_UNIT, 0);
	if (xe_reg_is_valid(pkg_power_sku_unit)) {
		val_sku_unit = xe_mmio_read32(mmio, pkg_power_sku_unit);
		hwmon->scl_shift_power = REG_FIELD_GET(PKG_PWR_UNIT, val_sku_unit);
		hwmon->scl_shift_energy = REG_FIELD_GET(PKG_ENERGY_UNIT, val_sku_unit);
		hwmon->scl_shift_time = REG_FIELD_GET(PKG_TIME_UNIT, val_sku_unit);
	}

	/*
	 * Initialize 'struct xe_hwmon_energy_info', i.e. set fields to the
	 * first value of the energy register read.
	 */
	for (channel = 0; channel < CHANNEL_MAX; channel++)
		if (xe_hwmon_is_visible(hwmon, hwmon_energy, hwmon_energy_input, channel))
			xe_hwmon_energy_get(hwmon, channel, &energy);
}

static void xe_hwmon_mutex_destroy(void *arg)
{
	struct xe_hwmon *hwmon = arg;

	mutex_destroy(&hwmon->hwmon_lock);
}

void xe_hwmon_register(struct xe_device *xe)
{
	struct device *dev = xe->drm.dev;
	struct xe_hwmon *hwmon;

	/* hwmon is available only for dGfx */
	if (!IS_DGFX(xe))
		return;

	/* hwmon is not available on VFs */
	if (IS_SRIOV_VF(xe))
		return;

	hwmon = devm_kzalloc(dev, sizeof(*hwmon), GFP_KERNEL);
	if (!hwmon)
		return;

	xe->hwmon = hwmon;

	mutex_init(&hwmon->hwmon_lock);
	if (devm_add_action_or_reset(dev, xe_hwmon_mutex_destroy, hwmon))
		return;

	/* There's only one instance of hwmon per device */
	hwmon->xe = xe;

	xe_hwmon_get_preregistration_info(xe);

	drm_dbg(&xe->drm, "Register xe hwmon interface\n");

	/* hwmon_dev points to device hwmon<i> */
	hwmon->hwmon_dev = devm_hwmon_device_register_with_info(dev, "xe", hwmon,
								&hwmon_chip_info,
								hwmon_groups);

	if (IS_ERR(hwmon->hwmon_dev)) {
		drm_warn(&xe->drm, "Failed to register xe hwmon (%pe)\n", hwmon->hwmon_dev);
		xe->hwmon = NULL;
		return;
	}
}