xref: /linux/drivers/gpu/drm/xe/xe_hwmon.c (revision 46e6acfe3501fa938af9c5bd730f0020235b08a2)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2023 Intel Corporation
4  */
5 
6 #include <linux/hwmon-sysfs.h>
7 #include <linux/hwmon.h>
8 #include <linux/types.h>
9 
10 #include <drm/drm_managed.h>
11 #include "regs/xe_gt_regs.h"
12 #include "regs/xe_mchbar_regs.h"
13 #include "regs/xe_pcode_regs.h"
14 #include "xe_device.h"
15 #include "xe_gt.h"
16 #include "xe_hwmon.h"
17 #include "xe_mmio.h"
18 #include "xe_pcode.h"
19 #include "xe_pcode_api.h"
20 #include "xe_sriov.h"
21 #include "xe_pm.h"
22 
23 enum xe_hwmon_reg {
24 	REG_PKG_RAPL_LIMIT,
25 	REG_PKG_POWER_SKU,
26 	REG_PKG_POWER_SKU_UNIT,
27 	REG_GT_PERF_STATUS,
28 	REG_PKG_ENERGY_STATUS,
29 };
30 
31 enum xe_hwmon_reg_operation {
32 	REG_READ32,
33 	REG_RMW32,
34 	REG_READ64,
35 };
36 
37 enum xe_hwmon_channel {
38 	CHANNEL_CARD,
39 	CHANNEL_PKG,
40 	CHANNEL_MAX,
41 };
42 
43 /*
44  * SF_* - scale factors for particular quantities according to hwmon spec.
45  */
46 #define SF_POWER	1000000		/* microwatts */
47 #define SF_CURR		1000		/* milliamperes */
48 #define SF_VOLTAGE	1000		/* millivolts */
49 #define SF_ENERGY	1000000		/* microjoules */
50 #define SF_TIME		1000		/* milliseconds */
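/*
 * Illustrative note (editorial addition, not in the original source): raw
 * register fields are converted to these hwmon units with
 * mul_u64_u32_shr(raw, SF_*, shift), where the shift comes from
 * *PACKAGE_POWER_SKU_UNIT. For example, assuming a power unit shift of 3
 * (1/8 W steps, a common RAPL encoding), a raw power field of 352 reads as
 * 352 * SF_POWER >> 3 = 44000000 uW, i.e. 44 W.
 */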
51 
52 /**
53  * struct xe_hwmon_energy_info - to accumulate energy
54  */
55 struct xe_hwmon_energy_info {
56 	/** @reg_val_prev: previous energy reg val */
57 	u32 reg_val_prev;
58 	/** @accum_energy: accumulated energy */
59 	long accum_energy;
60 };
61 
62 /**
63  * struct xe_hwmon - xe hwmon data structure
64  */
65 struct xe_hwmon {
66 	/** @hwmon_dev: hwmon device for xe */
67 	struct device *hwmon_dev;
68 	/** @gt: primary gt */
69 	struct xe_gt *gt;
70 	/** @hwmon_lock: lock protecting rw attributes */
71 	struct mutex hwmon_lock;
72 	/** @scl_shift_power: pkg power unit */
73 	int scl_shift_power;
74 	/** @scl_shift_energy: pkg energy unit */
75 	int scl_shift_energy;
76 	/** @scl_shift_time: pkg time unit */
77 	int scl_shift_time;
78 	/** @ei: Energy info for energyN_input */
79 	struct xe_hwmon_energy_info ei[CHANNEL_MAX];
80 };
81 
82 static struct xe_reg xe_hwmon_get_reg(struct xe_hwmon *hwmon, enum xe_hwmon_reg hwmon_reg,
83 				      int channel)
84 {
85 	struct xe_device *xe = gt_to_xe(hwmon->gt);
86 
87 	switch (hwmon_reg) {
88 	case REG_PKG_RAPL_LIMIT:
89 		if (xe->info.platform == XE_BATTLEMAGE) {
90 			if (channel == CHANNEL_PKG)
91 				return BMG_PACKAGE_RAPL_LIMIT;
92 			else
93 				return BMG_PLATFORM_POWER_LIMIT;
94 		} else if (xe->info.platform == XE_PVC && channel == CHANNEL_PKG) {
95 			return PVC_GT0_PACKAGE_RAPL_LIMIT;
96 		} else if ((xe->info.platform == XE_DG2) && (channel == CHANNEL_PKG)) {
97 			return PCU_CR_PACKAGE_RAPL_LIMIT;
98 		}
99 		break;
100 	case REG_PKG_POWER_SKU:
101 		if (xe->info.platform == XE_BATTLEMAGE)
102 			return BMG_PACKAGE_POWER_SKU;
103 		else if (xe->info.platform == XE_PVC && channel == CHANNEL_PKG)
104 			return PVC_GT0_PACKAGE_POWER_SKU;
105 		else if ((xe->info.platform == XE_DG2) && (channel == CHANNEL_PKG))
106 			return PCU_CR_PACKAGE_POWER_SKU;
107 		break;
108 	case REG_PKG_POWER_SKU_UNIT:
109 		if (xe->info.platform == XE_BATTLEMAGE)
110 			return BMG_PACKAGE_POWER_SKU_UNIT;
111 		else if (xe->info.platform == XE_PVC)
112 			return PVC_GT0_PACKAGE_POWER_SKU_UNIT;
113 		else if (xe->info.platform == XE_DG2)
114 			return PCU_CR_PACKAGE_POWER_SKU_UNIT;
115 		break;
116 	case REG_GT_PERF_STATUS:
117 		if (xe->info.platform == XE_DG2 && channel == CHANNEL_PKG)
118 			return GT_PERF_STATUS;
119 		break;
120 	case REG_PKG_ENERGY_STATUS:
121 		if (xe->info.platform == XE_BATTLEMAGE) {
122 			if (channel == CHANNEL_PKG)
123 				return BMG_PACKAGE_ENERGY_STATUS;
124 			else
125 				return BMG_PLATFORM_ENERGY_STATUS;
126 		} else if (xe->info.platform == XE_PVC && channel == CHANNEL_PKG) {
127 			return PVC_GT0_PLATFORM_ENERGY_STATUS;
128 		} else if ((xe->info.platform == XE_DG2) && (channel == CHANNEL_PKG)) {
129 			return PCU_CR_PACKAGE_ENERGY_STATUS;
130 		}
131 		break;
132 	default:
133 		drm_warn(&xe->drm, "Unknown xe hwmon reg id: %d\n", hwmon_reg);
134 		break;
135 	}
136 
137 	return XE_REG(0);
138 }
139 
140 #define PL1_DISABLE 0
141 
142 /*
143  * HW allows arbitrary PL1 limits to be set but silently clamps these values to
144  * "typical but not guaranteed" min/max values in REG_PKG_POWER_SKU. Follow the
145  * same pattern for sysfs: allow arbitrary PL1 limits to be set, but display
146  * the clamped values when read.
147  */
148 static void xe_hwmon_power_max_read(struct xe_hwmon *hwmon, int channel, long *value)
149 {
150 	u64 reg_val, min, max;
151 	struct xe_device *xe = gt_to_xe(hwmon->gt);
152 	struct xe_reg rapl_limit, pkg_power_sku;
153 
154 	rapl_limit = xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, channel);
155 	pkg_power_sku = xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU, channel);
156 
157 	/*
158 	 * The validity of REG_PKG_RAPL_LIMIT is already checked in
159 	 * xe_hwmon_power_is_visible, so it is not checked again here.
160 	 */
161 	if (!xe_reg_is_valid(pkg_power_sku)) {
162 		drm_warn(&xe->drm, "pkg_power_sku invalid\n");
163 		*value = 0;
164 		return;
165 	}
166 
167 	mutex_lock(&hwmon->hwmon_lock);
168 
169 	reg_val = xe_mmio_read32(hwmon->gt, rapl_limit);
170 	/* Check if PL1 limit is disabled */
171 	if (!(reg_val & PKG_PWR_LIM_1_EN)) {
172 		*value = PL1_DISABLE;
173 		goto unlock;
174 	}
175 
176 	reg_val = REG_FIELD_GET(PKG_PWR_LIM_1, reg_val);
177 	*value = mul_u64_u32_shr(reg_val, SF_POWER, hwmon->scl_shift_power);
178 
179 	reg_val = xe_mmio_read64_2x32(hwmon->gt, pkg_power_sku);
180 	min = REG_FIELD_GET(PKG_MIN_PWR, reg_val);
181 	min = mul_u64_u32_shr(min, SF_POWER, hwmon->scl_shift_power);
182 	max = REG_FIELD_GET(PKG_MAX_PWR, reg_val);
183 	max = mul_u64_u32_shr(max, SF_POWER, hwmon->scl_shift_power);
184 
185 	if (min && max)
186 		*value = clamp_t(u64, *value, min, max);
187 unlock:
188 	mutex_unlock(&hwmon->hwmon_lock);
189 }
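/*
 * Worked example for the read path above (editorial sketch, register values
 * assumed): with scl_shift_power = 3 and a PKG_PWR_LIM_1 field of 1600, the
 * raw limit is 1600 * SF_POWER >> 3 = 200000000 uW (200 W). If PKG_MIN_PWR
 * decodes to 75 W and PKG_MAX_PWR to 150 W, the value reported by power1_max
 * is clamped to 150000000 uW, mirroring the silent clamping the hardware
 * itself applies.
 */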
190 
191 static int xe_hwmon_power_max_write(struct xe_hwmon *hwmon, int channel, long value)
192 {
193 	int ret = 0;
194 	u64 reg_val;
195 	struct xe_reg rapl_limit;
196 
197 	rapl_limit = xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, channel);
198 
199 	mutex_lock(&hwmon->hwmon_lock);
200 
201 	/* Disable PL1 limit and verify, as limit cannot be disabled on all platforms */
202 	if (value == PL1_DISABLE) {
203 		reg_val = xe_mmio_rmw32(hwmon->gt, rapl_limit, PKG_PWR_LIM_1_EN, 0);
204 		reg_val = xe_mmio_read32(hwmon->gt, rapl_limit);
205 		if (reg_val & PKG_PWR_LIM_1_EN)
206 			ret = -EOPNOTSUPP;
207 		/* PL1 disable handled, don't fall through to the limit write */
208 		goto unlock;
209 	}
210 
211 	/* Computation in 64-bits to avoid overflow. Round to nearest. */
212 	reg_val = DIV_ROUND_CLOSEST_ULL((u64)value << hwmon->scl_shift_power, SF_POWER);
213 	reg_val = PKG_PWR_LIM_1_EN | REG_FIELD_PREP(PKG_PWR_LIM_1, reg_val);
214 	reg_val = xe_mmio_rmw32(hwmon->gt, rapl_limit, PKG_PWR_LIM_1_EN | PKG_PWR_LIM_1, reg_val);
215 
216 unlock:
217 	mutex_unlock(&hwmon->hwmon_lock);
218 	return ret;
219 }
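/*
 * Reverse conversion for the write path above (editorial sketch, assuming
 * scl_shift_power = 3): writing 200000000 uW yields
 * DIV_ROUND_CLOSEST_ULL(200000000ull << 3, SF_POWER) = 1600, which is
 * programmed into PKG_PWR_LIM_1 with PKG_PWR_LIM_1_EN set. Writing 0
 * (PL1_DISABLE) instead clears PKG_PWR_LIM_1_EN and returns -EOPNOTSUPP if
 * the hardware does not actually allow the limit to be disabled.
 */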
220 
221 static void xe_hwmon_power_rated_max_read(struct xe_hwmon *hwmon, int channel, long *value)
222 {
223 	struct xe_reg reg = xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU, channel);
224 	u64 reg_val;
225 
226 	/*
227 	 * This sysfs file won't be visible if REG_PKG_POWER_SKU is invalid, so the
228 	 * validity check for this register can be skipped.
229 	 * See xe_hwmon_power_is_visible.
230 	 */
231 	reg_val = xe_mmio_read32(hwmon->gt, reg);
232 	reg_val = REG_FIELD_GET(PKG_TDP, reg_val);
233 	*value = mul_u64_u32_shr(reg_val, SF_POWER, hwmon->scl_shift_power);
234 }
235 
236 /*
237  * xe_hwmon_energy_get - Obtain energy value
238  *
239  * The underlying energy hardware register is 32-bits and is subject to
240  * overflow. How long before overflow? For example, with an example
241  * scaling bit shift of 14 bits (see register *PACKAGE_POWER_SKU_UNIT) and
242  * a power draw of 1000 watts, the 32-bit counter will overflow in
243  * approximately 4.36 minutes.
244  *
245  * Examples:
246  *    1 watt:  (2^32 >> 14) /    1 W / (60 * 60 * 24) secs/day -> 3 days
247  * 1000 watts: (2^32 >> 14) / 1000 W / 60             secs/min -> 4.36 minutes
248  *
249  * The function significantly increases overflow duration (from 4.36
250  * minutes) by accumulating the energy register into a 'long' as allowed by
251  * the hwmon API. Using x86_64 128 bit arithmetic (see mul_u64_u32_shr()),
252  * a 'long' of 63 bits, SF_ENERGY of 1e6 (~20 bits) and
253  * hwmon->scl_shift_energy of 14 bits we have 57 (63 - 20 + 14) bits before
254  * energyN_input overflows. At 1000 W this is an overflow duration of 278 years.
255  */
256 static void
257 xe_hwmon_energy_get(struct xe_hwmon *hwmon, int channel, long *energy)
258 {
259 	struct xe_hwmon_energy_info *ei = &hwmon->ei[channel];
260 	u64 reg_val;
261 
262 	reg_val = xe_mmio_read32(hwmon->gt, xe_hwmon_get_reg(hwmon, REG_PKG_ENERGY_STATUS,
263 							     channel));
264 
265 	if (reg_val >= ei->reg_val_prev)
266 		ei->accum_energy += reg_val - ei->reg_val_prev;
267 	else
268 		ei->accum_energy += UINT_MAX - ei->reg_val_prev + reg_val;
269 
270 	ei->reg_val_prev = reg_val;
271 
272 	*energy = mul_u64_u32_shr(ei->accum_energy, SF_ENERGY,
273 				  hwmon->scl_shift_energy);
274 }
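/*
 * Worked numbers for the overflow comment above (derived from the stated
 * 14-bit energy shift): one 32-bit counter wrap corresponds to
 * 2^32 >> 14 = 262144 J. At a constant 1 W that lasts 262144 s (~3 days);
 * at 1000 W it lasts ~262 s (~4.36 minutes), hence the accumulation of
 * deltas into ei->accum_energy between reads.
 */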
275 
276 static ssize_t
277 xe_hwmon_power_max_interval_show(struct device *dev, struct device_attribute *attr,
278 				 char *buf)
279 {
280 	struct xe_hwmon *hwmon = dev_get_drvdata(dev);
281 	u32 x, y, x_w = 2; /* 2 bits */
282 	u64 r, tau4, out;
283 	int sensor_index = to_sensor_dev_attr(attr)->index;
284 
285 	xe_pm_runtime_get(gt_to_xe(hwmon->gt));
286 
287 	mutex_lock(&hwmon->hwmon_lock);
288 
289 	r = xe_mmio_read32(hwmon->gt, xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, sensor_index));
290 
291 	mutex_unlock(&hwmon->hwmon_lock);
292 
293 	xe_pm_runtime_put(gt_to_xe(hwmon->gt));
294 
295 	x = REG_FIELD_GET(PKG_PWR_LIM_1_TIME_X, r);
296 	y = REG_FIELD_GET(PKG_PWR_LIM_1_TIME_Y, r);
297 
298 	/*
299 	 * tau = 1.x * power(2,y), x = bits(23:22), y = bits(21:17)
300 	 *     = (4 | x) << (y - 2)
301 	 *
302 	 * Here (y - 2) ensures a 1.x fixed point representation of 1.x
303 	 * Here (y - 2) ensures a 1.x fixed point representation.
304 	 * As x is 2 bits, 1.x can be 1.0, 1.25, 1.50 or 1.75
305 	 * As y can be < 2, we compute tau4 = (4 | x) << y
306 	 * and then add 2 when doing the final right shift to account for units
307 	 */
308 	tau4 = (u64)((1 << x_w) | x) << y;
309 
310 	/* val in hwmon interface units (millisec) */
311 	out = mul_u64_u32_shr(tau4, SF_TIME, hwmon->scl_shift_time + x_w);
312 
313 	return sysfs_emit(buf, "%llu\n", out);
314 }
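/*
 * Decode example for the show path above (editorial sketch, using the
 * scl_shift_time default of 0xa mentioned in the store path below): with
 * x = 1 and y = 7 the window is 1.25 * 2^7 = 160 hw time units, computed as
 * tau4 = (4 | 1) << 7 = 640 and
 * out = 640 * SF_TIME >> (0xa + 2) = 640000 >> 12 = 156, i.e. ~156 ms.
 */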
315 
316 static ssize_t
317 xe_hwmon_power_max_interval_store(struct device *dev, struct device_attribute *attr,
318 				  const char *buf, size_t count)
319 {
320 	struct xe_hwmon *hwmon = dev_get_drvdata(dev);
321 	u32 x, y, rxy, x_w = 2; /* 2 bits */
322 	u64 tau4, r, max_win;
323 	unsigned long val;
324 	int ret;
325 	int sensor_index = to_sensor_dev_attr(attr)->index;
326 
327 	ret = kstrtoul(buf, 0, &val);
328 	if (ret)
329 		return ret;
330 
331 	/*
332 	 * Max HW supported tau in '1.x * power(2,y)' format, x = 0, y = 0x12.
333 	 * The hwmon->scl_shift_time default of 0xa results in a max tau of 256 seconds.
334 	 *
335 	 * The ideal scenario is for PKG_MAX_WIN to be read from the PKG_PWR_SKU register.
336 	 * However, it is observed that existing discrete GPUs do not provide a correct
337 	 * PKG_MAX_WIN value, so a default constant value is used instead. For future discrete
338 	 * GPUs this may get resolved, in which case PKG_MAX_WIN should be obtained from PKG_PWR_SKU.
339 	 */
340 #define PKG_MAX_WIN_DEFAULT 0x12ull
341 
342 	/*
343 	 * val must be < max in hwmon interface units. The steps below are
344 	 * val must not exceed max_win in hwmon interface units. The steps below are
345 	 */
346 	r = FIELD_PREP(PKG_MAX_WIN, PKG_MAX_WIN_DEFAULT);
347 	x = REG_FIELD_GET(PKG_MAX_WIN_X, r);
348 	y = REG_FIELD_GET(PKG_MAX_WIN_Y, r);
349 	tau4 = (u64)((1 << x_w) | x) << y;
350 	max_win = mul_u64_u32_shr(tau4, SF_TIME, hwmon->scl_shift_time + x_w);
351 
352 	if (val > max_win)
353 		return -EINVAL;
354 
355 	/* val in hw units */
356 	val = DIV_ROUND_CLOSEST_ULL((u64)val << hwmon->scl_shift_time, SF_TIME);
357 
358 	/*
359 	 * Convert val to 1.x * power(2,y)
360 	 * y = ilog2(val)
361 	 * x = (val - (1 << y)) >> (y - 2)
362 	 */
363 	if (!val) {
364 		y = 0;
365 		x = 0;
366 	} else {
367 		y = ilog2(val);
368 		x = (val - (1ul << y)) << x_w >> y;
369 	}
370 
371 	rxy = REG_FIELD_PREP(PKG_PWR_LIM_1_TIME_X, x) | REG_FIELD_PREP(PKG_PWR_LIM_1_TIME_Y, y);
372 
373 	xe_pm_runtime_get(gt_to_xe(hwmon->gt));
374 
375 	mutex_lock(&hwmon->hwmon_lock);
376 
377 	r = xe_mmio_rmw32(hwmon->gt, xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, sensor_index),
378 			  PKG_PWR_LIM_1_TIME, rxy);
379 
380 	mutex_unlock(&hwmon->hwmon_lock);
381 
382 	xe_pm_runtime_put(gt_to_xe(hwmon->gt));
383 
384 	return count;
385 }
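/*
 * Encode example for the store path above (editorial sketch, again assuming
 * scl_shift_time = 0xa): writing 156 ms gives
 * val = DIV_ROUND_CLOSEST_ULL(156 << 0xa, SF_TIME) = 160 hw units, then
 * y = ilog2(160) = 7 and x = (160 - 128) << 2 >> 7 = 1, which encodes
 * 1.25 * 2^7 and round-trips with the decode example above. The
 * PKG_MAX_WIN_DEFAULT of x = 0, y = 0x12 corresponds to 1.0 * 2^18 units,
 * i.e. the 256 second maximum noted in the comment.
 */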
386 
387 static SENSOR_DEVICE_ATTR(power1_max_interval, 0664,
388 			  xe_hwmon_power_max_interval_show,
389 			  xe_hwmon_power_max_interval_store, CHANNEL_CARD);
390 
391 static SENSOR_DEVICE_ATTR(power2_max_interval, 0664,
392 			  xe_hwmon_power_max_interval_show,
393 			  xe_hwmon_power_max_interval_store, CHANNEL_PKG);
394 
395 static struct attribute *hwmon_attributes[] = {
396 	&sensor_dev_attr_power1_max_interval.dev_attr.attr,
397 	&sensor_dev_attr_power2_max_interval.dev_attr.attr,
398 	NULL
399 };
400 
401 static umode_t xe_hwmon_attributes_visible(struct kobject *kobj,
402 					   struct attribute *attr, int index)
403 {
404 	struct device *dev = kobj_to_dev(kobj);
405 	struct xe_hwmon *hwmon = dev_get_drvdata(dev);
406 	int ret = 0;
407 
408 	xe_pm_runtime_get(gt_to_xe(hwmon->gt));
409 
410 	ret = xe_reg_is_valid(xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, index)) ? attr->mode : 0;
411 
412 	xe_pm_runtime_put(gt_to_xe(hwmon->gt));
413 
414 	return ret;
415 }
416 
417 static const struct attribute_group hwmon_attrgroup = {
418 	.attrs = hwmon_attributes,
419 	.is_visible = xe_hwmon_attributes_visible,
420 };
421 
422 static const struct attribute_group *hwmon_groups[] = {
423 	&hwmon_attrgroup,
424 	NULL
425 };
426 
427 static const struct hwmon_channel_info * const hwmon_info[] = {
428 	HWMON_CHANNEL_INFO(power, HWMON_P_MAX | HWMON_P_RATED_MAX | HWMON_P_LABEL,
429 			   HWMON_P_MAX | HWMON_P_RATED_MAX | HWMON_P_CRIT | HWMON_P_LABEL),
430 	HWMON_CHANNEL_INFO(curr, HWMON_C_LABEL, HWMON_C_CRIT | HWMON_C_LABEL),
431 	HWMON_CHANNEL_INFO(in, HWMON_I_INPUT | HWMON_I_LABEL, HWMON_I_INPUT | HWMON_I_LABEL),
432 	HWMON_CHANNEL_INFO(energy, HWMON_E_INPUT | HWMON_E_LABEL, HWMON_E_INPUT | HWMON_E_LABEL),
433 	NULL
434 };
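/*
 * Editorial note (hwmon core convention, not from the original source):
 * power/curr/energy channels are numbered from 1 and voltage ("in")
 * channels from 0, so CHANNEL_CARD is exposed as power1_*, energy1_* and
 * in0_*, while CHANNEL_PKG is exposed as power2_*, curr2_*, energy2_* and
 * in1_*.
 */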
435 
436 /* I1 is exposed as power_crit or as curr_crit depending on bit 31 */
437 static int xe_hwmon_pcode_read_i1(struct xe_gt *gt, u32 *uval)
438 {
439 	/* Avoid Illegal Subcommand error */
440 	if (gt_to_xe(gt)->info.platform == XE_DG2)
441 		return -ENXIO;
442 
443 	return xe_pcode_read(gt, PCODE_MBOX(PCODE_POWER_SETUP,
444 			     POWER_SETUP_SUBCOMMAND_READ_I1, 0),
445 			     uval, NULL);
446 }
447 
448 static int xe_hwmon_pcode_write_i1(struct xe_gt *gt, u32 uval)
449 {
450 	return xe_pcode_write(gt, PCODE_MBOX(PCODE_POWER_SETUP,
451 			      POWER_SETUP_SUBCOMMAND_WRITE_I1, 0),
452 			      uval);
453 }
454 
455 static int xe_hwmon_power_curr_crit_read(struct xe_hwmon *hwmon, int channel,
456 					 long *value, u32 scale_factor)
457 {
458 	int ret;
459 	u32 uval;
460 
461 	mutex_lock(&hwmon->hwmon_lock);
462 
463 	ret = xe_hwmon_pcode_read_i1(hwmon->gt, &uval);
464 	if (ret)
465 		goto unlock;
466 
467 	*value = mul_u64_u32_shr(REG_FIELD_GET(POWER_SETUP_I1_DATA_MASK, uval),
468 				 scale_factor, POWER_SETUP_I1_SHIFT);
469 unlock:
470 	mutex_unlock(&hwmon->hwmon_lock);
471 	return ret;
472 }
473 
474 static int xe_hwmon_power_curr_crit_write(struct xe_hwmon *hwmon, int channel,
475 					  long value, u32 scale_factor)
476 {
477 	int ret;
478 	u32 uval;
479 
480 	mutex_lock(&hwmon->hwmon_lock);
481 
482 	uval = DIV_ROUND_CLOSEST_ULL(value << POWER_SETUP_I1_SHIFT, scale_factor);
483 	ret = xe_hwmon_pcode_write_i1(hwmon->gt, uval);
484 
485 	mutex_unlock(&hwmon->hwmon_lock);
486 	return ret;
487 }
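/*
 * Worked example for the two I1 helpers above (editorial sketch, assuming
 * POWER_SETUP_I1_SHIFT is 6, i.e. an x.6 fixed point mailbox format): an I1
 * data field of 640 reads as 640 * SF_CURR >> 6 = 10000 mA (10 A) when
 * exposed as curr2_crit, or 640 * SF_POWER >> 6 = 10000000 uW (10 W) when
 * exposed as power2_crit, depending on the POWER_SETUP_I1_WATTS bit.
 */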
488 
489 static void xe_hwmon_get_voltage(struct xe_hwmon *hwmon, int channel, long *value)
490 {
491 	u64 reg_val;
492 
493 	reg_val = xe_mmio_read32(hwmon->gt, xe_hwmon_get_reg(hwmon, REG_GT_PERF_STATUS, channel));
494 	/* HW register value in units of 2.5 millivolt */
495 	*value = DIV_ROUND_CLOSEST(REG_FIELD_GET(VOLTAGE_MASK, reg_val) * 2500, SF_VOLTAGE);
496 }
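/*
 * Decode example for the voltage path above (editorial sketch): a
 * VOLTAGE_MASK field of 300, in units of 2.5 mV, yields
 * DIV_ROUND_CLOSEST(300 * 2500, SF_VOLTAGE) = 750, so in1_input reports
 * 750 mV.
 */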
497 
498 static umode_t
499 xe_hwmon_power_is_visible(struct xe_hwmon *hwmon, u32 attr, int channel)
500 {
501 	u32 uval;
502 
503 	switch (attr) {
504 	case hwmon_power_max:
505 		return xe_reg_is_valid(xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT,
506 				       channel)) ? 0664 : 0;
507 	case hwmon_power_rated_max:
508 		return xe_reg_is_valid(xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU,
509 				       channel)) ? 0444 : 0;
510 	case hwmon_power_crit:
511 		if (channel == CHANNEL_PKG)
512 			return (xe_hwmon_pcode_read_i1(hwmon->gt, &uval) ||
513 				!(uval & POWER_SETUP_I1_WATTS)) ? 0 : 0644;
514 		break;
515 	case hwmon_power_label:
516 		return xe_reg_is_valid(xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU_UNIT,
517 				       channel)) ? 0444 : 0;
518 	default:
519 		return 0;
520 	}
521 	return 0;
522 }
523 
524 static int
525 xe_hwmon_power_read(struct xe_hwmon *hwmon, u32 attr, int channel, long *val)
526 {
527 	switch (attr) {
528 	case hwmon_power_max:
529 		xe_hwmon_power_max_read(hwmon, channel, val);
530 		return 0;
531 	case hwmon_power_rated_max:
532 		xe_hwmon_power_rated_max_read(hwmon, channel, val);
533 		return 0;
534 	case hwmon_power_crit:
535 		return xe_hwmon_power_curr_crit_read(hwmon, channel, val, SF_POWER);
536 	default:
537 		return -EOPNOTSUPP;
538 	}
539 }
540 
541 static int
542 xe_hwmon_power_write(struct xe_hwmon *hwmon, u32 attr, int channel, long val)
543 {
544 	switch (attr) {
545 	case hwmon_power_max:
546 		return xe_hwmon_power_max_write(hwmon, channel, val);
547 	case hwmon_power_crit:
548 		return xe_hwmon_power_curr_crit_write(hwmon, channel, val, SF_POWER);
549 	default:
550 		return -EOPNOTSUPP;
551 	}
552 }
553 
554 static umode_t
555 xe_hwmon_curr_is_visible(const struct xe_hwmon *hwmon, u32 attr, int channel)
556 {
557 	u32 uval;
558 
559 	/* Current-related hwmon sysfs attributes are available only for the package channel */
560 	if (channel != CHANNEL_PKG)
561 		return 0;
562 
563 	switch (attr) {
564 	case hwmon_curr_crit:
565 		return (xe_hwmon_pcode_read_i1(hwmon->gt, &uval) ||
566 			(uval & POWER_SETUP_I1_WATTS)) ? 0 : 0644;
567 	case hwmon_curr_label:
568 		return (xe_hwmon_pcode_read_i1(hwmon->gt, &uval) ||
569 			(uval & POWER_SETUP_I1_WATTS)) ? 0 : 0444;
571 	default:
572 		return 0;
573 	}
574 	return 0;
575 }
576 
577 static int
578 xe_hwmon_curr_read(struct xe_hwmon *hwmon, u32 attr, int channel, long *val)
579 {
580 	switch (attr) {
581 	case hwmon_curr_crit:
582 		return xe_hwmon_power_curr_crit_read(hwmon, channel, val, SF_CURR);
583 	default:
584 		return -EOPNOTSUPP;
585 	}
586 }
587 
588 static int
589 xe_hwmon_curr_write(struct xe_hwmon *hwmon, u32 attr, int channel, long val)
590 {
591 	switch (attr) {
592 	case hwmon_curr_crit:
593 		return xe_hwmon_power_curr_crit_write(hwmon, channel, val, SF_CURR);
594 	default:
595 		return -EOPNOTSUPP;
596 	}
597 }
598 
599 static umode_t
600 xe_hwmon_in_is_visible(struct xe_hwmon *hwmon, u32 attr, int channel)
601 {
602 	switch (attr) {
603 	case hwmon_in_input:
604 	case hwmon_in_label:
605 		return xe_reg_is_valid(xe_hwmon_get_reg(hwmon, REG_GT_PERF_STATUS,
606 				       channel)) ? 0444 : 0;
607 	default:
608 		return 0;
609 	}
610 }
611 
612 static int
613 xe_hwmon_in_read(struct xe_hwmon *hwmon, u32 attr, int channel, long *val)
614 {
615 	switch (attr) {
616 	case hwmon_in_input:
617 		xe_hwmon_get_voltage(hwmon, channel, val);
618 		return 0;
619 	default:
620 		return -EOPNOTSUPP;
621 	}
622 }
623 
624 static umode_t
625 xe_hwmon_energy_is_visible(struct xe_hwmon *hwmon, u32 attr, int channel)
626 {
627 	switch (attr) {
628 	case hwmon_energy_input:
629 	case hwmon_energy_label:
630 		return xe_reg_is_valid(xe_hwmon_get_reg(hwmon, REG_PKG_ENERGY_STATUS,
631 				       channel)) ? 0444 : 0;
632 	default:
633 		return 0;
634 	}
635 }
636 
637 static int
638 xe_hwmon_energy_read(struct xe_hwmon *hwmon, u32 attr, int channel, long *val)
639 {
640 	switch (attr) {
641 	case hwmon_energy_input:
642 		xe_hwmon_energy_get(hwmon, channel, val);
643 		return 0;
644 	default:
645 		return -EOPNOTSUPP;
646 	}
647 }
648 
649 static umode_t
650 xe_hwmon_is_visible(const void *drvdata, enum hwmon_sensor_types type,
651 		    u32 attr, int channel)
652 {
653 	struct xe_hwmon *hwmon = (struct xe_hwmon *)drvdata;
654 	int ret;
655 
656 	xe_pm_runtime_get(gt_to_xe(hwmon->gt));
657 
658 	switch (type) {
659 	case hwmon_power:
660 		ret = xe_hwmon_power_is_visible(hwmon, attr, channel);
661 		break;
662 	case hwmon_curr:
663 		ret = xe_hwmon_curr_is_visible(hwmon, attr, channel);
664 		break;
665 	case hwmon_in:
666 		ret = xe_hwmon_in_is_visible(hwmon, attr, channel);
667 		break;
668 	case hwmon_energy:
669 		ret = xe_hwmon_energy_is_visible(hwmon, attr, channel);
670 		break;
671 	default:
672 		ret = 0;
673 		break;
674 	}
675 
676 	xe_pm_runtime_put(gt_to_xe(hwmon->gt));
677 
678 	return ret;
679 }
680 
681 static int
682 xe_hwmon_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
683 	      int channel, long *val)
684 {
685 	struct xe_hwmon *hwmon = dev_get_drvdata(dev);
686 	int ret;
687 
688 	xe_pm_runtime_get(gt_to_xe(hwmon->gt));
689 
690 	switch (type) {
691 	case hwmon_power:
692 		ret = xe_hwmon_power_read(hwmon, attr, channel, val);
693 		break;
694 	case hwmon_curr:
695 		ret = xe_hwmon_curr_read(hwmon, attr, channel, val);
696 		break;
697 	case hwmon_in:
698 		ret = xe_hwmon_in_read(hwmon, attr, channel, val);
699 		break;
700 	case hwmon_energy:
701 		ret = xe_hwmon_energy_read(hwmon, attr, channel, val);
702 		break;
703 	default:
704 		ret = -EOPNOTSUPP;
705 		break;
706 	}
707 
708 	xe_pm_runtime_put(gt_to_xe(hwmon->gt));
709 
710 	return ret;
711 }
712 
713 static int
714 xe_hwmon_write(struct device *dev, enum hwmon_sensor_types type, u32 attr,
715 	       int channel, long val)
716 {
717 	struct xe_hwmon *hwmon = dev_get_drvdata(dev);
718 	int ret;
719 
720 	xe_pm_runtime_get(gt_to_xe(hwmon->gt));
721 
722 	switch (type) {
723 	case hwmon_power:
724 		ret = xe_hwmon_power_write(hwmon, attr, channel, val);
725 		break;
726 	case hwmon_curr:
727 		ret = xe_hwmon_curr_write(hwmon, attr, channel, val);
728 		break;
729 	default:
730 		ret = -EOPNOTSUPP;
731 		break;
732 	}
733 
734 	xe_pm_runtime_put(gt_to_xe(hwmon->gt));
735 
736 	return ret;
737 }
738 
739 static int xe_hwmon_read_label(struct device *dev,
740 			       enum hwmon_sensor_types type,
741 			       u32 attr, int channel, const char **str)
742 {
743 	switch (type) {
744 	case hwmon_power:
745 	case hwmon_energy:
746 	case hwmon_curr:
747 	case hwmon_in:
748 		if (channel == CHANNEL_CARD)
749 			*str = "card";
750 		else if (channel == CHANNEL_PKG)
751 			*str = "pkg";
752 		return 0;
753 	default:
754 		return -EOPNOTSUPP;
755 	}
756 }
757 
758 static const struct hwmon_ops hwmon_ops = {
759 	.is_visible = xe_hwmon_is_visible,
760 	.read = xe_hwmon_read,
761 	.write = xe_hwmon_write,
762 	.read_string = xe_hwmon_read_label,
763 };
764 
765 static const struct hwmon_chip_info hwmon_chip_info = {
766 	.ops = &hwmon_ops,
767 	.info = hwmon_info,
768 };
769 
770 static void
771 xe_hwmon_get_preregistration_info(struct xe_device *xe)
772 {
773 	struct xe_hwmon *hwmon = xe->hwmon;
774 	long energy;
775 	u64 val_sku_unit = 0;
776 	int channel;
777 	struct xe_reg pkg_power_sku_unit;
778 
779 	/*
780 	 * The contents of register PKG_POWER_SKU_UNIT do not change,
781 	 * so read it once and store the shift values.
782 	 */
783 	pkg_power_sku_unit = xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU_UNIT, 0);
784 	if (xe_reg_is_valid(pkg_power_sku_unit)) {
785 		val_sku_unit = xe_mmio_read32(hwmon->gt, pkg_power_sku_unit);
786 		hwmon->scl_shift_power = REG_FIELD_GET(PKG_PWR_UNIT, val_sku_unit);
787 		hwmon->scl_shift_energy = REG_FIELD_GET(PKG_ENERGY_UNIT, val_sku_unit);
788 		hwmon->scl_shift_time = REG_FIELD_GET(PKG_TIME_UNIT, val_sku_unit);
789 	}
790 
791 	/*
792 	 * Initialize 'struct xe_hwmon_energy_info', i.e. seed its fields with the
793 	 * first value read from the energy register.
794 	 */
795 	for (channel = 0; channel < CHANNEL_MAX; channel++)
796 		if (xe_hwmon_is_visible(hwmon, hwmon_energy, hwmon_energy_input, channel))
797 			xe_hwmon_energy_get(hwmon, channel, &energy);
798 }
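/*
 * Illustrative decode of the unit register (editorial sketch, field values
 * assumed to match the examples used in the comments above): a
 * PKG_POWER_SKU_UNIT value with PKG_PWR_UNIT = 3, PKG_ENERGY_UNIT = 14 and
 * PKG_TIME_UNIT = 0xa means power fields are in 1/8 W steps, energy in
 * 1/16384 J steps and time in 1/1024 s steps; these are the shifts applied
 * by mul_u64_u32_shr() throughout this file.
 */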
799 
800 static void xe_hwmon_mutex_destroy(void *arg)
801 {
802 	struct xe_hwmon *hwmon = arg;
803 
804 	mutex_destroy(&hwmon->hwmon_lock);
805 }
806 
807 void xe_hwmon_register(struct xe_device *xe)
808 {
809 	struct device *dev = xe->drm.dev;
810 	struct xe_hwmon *hwmon;
811 
812 	/* hwmon is available only for dGfx */
813 	if (!IS_DGFX(xe))
814 		return;
815 
816 	/* hwmon is not available on VFs */
817 	if (IS_SRIOV_VF(xe))
818 		return;
819 
820 	hwmon = devm_kzalloc(dev, sizeof(*hwmon), GFP_KERNEL);
821 	if (!hwmon)
822 		return;
823 
824 	xe->hwmon = hwmon;
825 
826 	mutex_init(&hwmon->hwmon_lock);
827 	if (devm_add_action_or_reset(dev, xe_hwmon_mutex_destroy, hwmon))
828 		return;
829 
830 	/* Use the primary GT to access device-level properties */
831 	hwmon->gt = xe->tiles[0].primary_gt;
832 
833 	xe_hwmon_get_preregistration_info(xe);
834 
835 	drm_dbg(&xe->drm, "Register xe hwmon interface\n");
836 
837 	/* hwmon_dev points to the device hwmon<i> */
838 	hwmon->hwmon_dev = devm_hwmon_device_register_with_info(dev, "xe", hwmon,
839 								&hwmon_chip_info,
840 								hwmon_groups);
841 
842 	if (IS_ERR(hwmon->hwmon_dev)) {
843 		drm_warn(&xe->drm, "Failed to register xe hwmon (%pe)\n", hwmon->hwmon_dev);
844 		xe->hwmon = NULL;
845 		return;
846 	}
847 }
848 
849