// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023 Intel Corporation
 */

#include <linux/hwmon-sysfs.h>
#include <linux/hwmon.h>
#include <linux/types.h>

#include <drm/drm_managed.h>
#include "regs/xe_gt_regs.h"
#include "regs/xe_mchbar_regs.h"
#include "regs/xe_pcode_regs.h"
#include "xe_device.h"
#include "xe_gt.h"
#include "xe_hwmon.h"
#include "xe_mmio.h"
#include "xe_pcode.h"
#include "xe_pcode_api.h"
#include "xe_sriov.h"
#include "xe_pm.h"

enum xe_hwmon_reg {
	REG_PKG_RAPL_LIMIT,
	REG_PKG_POWER_SKU,
	REG_PKG_POWER_SKU_UNIT,
	REG_GT_PERF_STATUS,
	REG_PKG_ENERGY_STATUS,
};

enum xe_hwmon_reg_operation {
	REG_READ32,
	REG_RMW32,
	REG_READ64,
};

enum xe_hwmon_channel {
	CHANNEL_CARD,
	CHANNEL_PKG,
	CHANNEL_MAX,
};

/*
 * SF_* - scale factors for particular quantities according to hwmon spec.
 */
#define SF_POWER	1000000		/* microwatts */
#define SF_CURR		1000		/* milliamperes */
#define SF_VOLTAGE	1000		/* millivolts */
#define SF_ENERGY	1000000		/* microjoules */
#define SF_TIME		1000		/* milliseconds */

/**
 * struct xe_hwmon_energy_info - to accumulate energy
 */
struct xe_hwmon_energy_info {
	/** @reg_val_prev: previous energy reg val */
	u32 reg_val_prev;
	/** @accum_energy: accumulated energy */
	long accum_energy;
};

/**
 * struct xe_hwmon - xe hwmon data structure
 */
struct xe_hwmon {
	/** @hwmon_dev: hwmon device for xe */
	struct device *hwmon_dev;
	/** @gt: primary gt */
	struct xe_gt *gt;
	/** @hwmon_lock: lock for rw attributes */
	struct mutex hwmon_lock;
	/** @scl_shift_power: pkg power unit */
	int scl_shift_power;
	/** @scl_shift_energy: pkg energy unit */
	int scl_shift_energy;
	/** @scl_shift_time: pkg time unit */
	int scl_shift_time;
	/** @ei: Energy info for energyN_input */
	struct xe_hwmon_energy_info ei[CHANNEL_MAX];
};

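/*
 * Map an abstract hwmon register id and channel to the platform specific
 * register. XE_REG(0) is returned when the register is not applicable to the
 * running platform/channel; callers detect this with xe_reg_is_valid() and
 * hide the corresponding attribute.
 */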
static struct xe_reg xe_hwmon_get_reg(struct xe_hwmon *hwmon, enum xe_hwmon_reg hwmon_reg,
				      int channel)
{
	struct xe_device *xe = gt_to_xe(hwmon->gt);

	switch (hwmon_reg) {
	case REG_PKG_RAPL_LIMIT:
		if (xe->info.platform == XE_PVC && channel == CHANNEL_PKG)
			return PVC_GT0_PACKAGE_RAPL_LIMIT;
		else if ((xe->info.platform == XE_DG2) && (channel == CHANNEL_PKG))
			return PCU_CR_PACKAGE_RAPL_LIMIT;
		break;
	case REG_PKG_POWER_SKU:
		if (xe->info.platform == XE_PVC && channel == CHANNEL_PKG)
			return PVC_GT0_PACKAGE_POWER_SKU;
		else if ((xe->info.platform == XE_DG2) && (channel == CHANNEL_PKG))
			return PCU_CR_PACKAGE_POWER_SKU;
		break;
	case REG_PKG_POWER_SKU_UNIT:
		if (xe->info.platform == XE_PVC)
			return PVC_GT0_PACKAGE_POWER_SKU_UNIT;
		else if (xe->info.platform == XE_DG2)
			return PCU_CR_PACKAGE_POWER_SKU_UNIT;
		break;
	case REG_GT_PERF_STATUS:
		if (xe->info.platform == XE_DG2 && channel == CHANNEL_PKG)
			return GT_PERF_STATUS;
		break;
	case REG_PKG_ENERGY_STATUS:
		if (xe->info.platform == XE_PVC && channel == CHANNEL_PKG)
			return PVC_GT0_PLATFORM_ENERGY_STATUS;
		else if ((xe->info.platform == XE_DG2) && (channel == CHANNEL_PKG))
			return PCU_CR_PACKAGE_ENERGY_STATUS;
		break;
	default:
		drm_warn(&xe->drm, "Unknown xe hwmon reg id: %d\n", hwmon_reg);
		break;
	}

	return XE_REG(0);
}

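/*
 * Resolve a hwmon register id and perform the requested MMIO access on it
 * (32 bit read, 32 bit read-modify-write or 64 bit read). If the register is
 * not valid for this platform/channel the access is silently skipped and
 * *value is left untouched.
 */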
static void xe_hwmon_process_reg(struct xe_hwmon *hwmon, enum xe_hwmon_reg hwmon_reg,
				 enum xe_hwmon_reg_operation operation, u64 *value,
				 u32 clr, u32 set, int channel)
{
	struct xe_reg reg;

	reg = xe_hwmon_get_reg(hwmon, hwmon_reg, channel);

	if (!xe_reg_is_valid(reg))
		return;

	switch (operation) {
	case REG_READ32:
		*value = xe_mmio_read32(hwmon->gt, reg);
		break;
	case REG_RMW32:
		*value = xe_mmio_rmw32(hwmon->gt, reg, clr, set);
		break;
	case REG_READ64:
		*value = xe_mmio_read64_2x32(hwmon->gt, reg);
		break;
	default:
		drm_warn(&gt_to_xe(hwmon->gt)->drm, "Invalid xe hwmon reg operation: %d\n",
			 operation);
		break;
	}
}

#define PL1_DISABLE 0

/*
 * HW allows arbitrary PL1 limits to be set but silently clamps these values to
 * "typical but not guaranteed" min/max values in REG_PKG_POWER_SKU. Follow the
 * same pattern for sysfs: allow arbitrary PL1 limits to be set but display
 * clamped values when read.
 */
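/*
 * Limit fields are converted to microwatts as field * SF_POWER >> scl_shift_power.
 * For illustration, with a hypothetical scl_shift_power of 3 (1/8 W units) a raw
 * PKG_PWR_LIM_1 field of 704 reads back as 704 * 1000000 >> 3 = 88000000 uW, i.e. 88 W.
 */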
static void xe_hwmon_power_max_read(struct xe_hwmon *hwmon, int channel, long *value)
{
	u64 reg_val, min, max;

	mutex_lock(&hwmon->hwmon_lock);

	xe_hwmon_process_reg(hwmon, REG_PKG_RAPL_LIMIT, REG_READ32, &reg_val, 0, 0, channel);
	/* Check if PL1 limit is disabled */
	if (!(reg_val & PKG_PWR_LIM_1_EN)) {
		*value = PL1_DISABLE;
		goto unlock;
	}

	reg_val = REG_FIELD_GET(PKG_PWR_LIM_1, reg_val);
	*value = mul_u64_u32_shr(reg_val, SF_POWER, hwmon->scl_shift_power);

	xe_hwmon_process_reg(hwmon, REG_PKG_POWER_SKU, REG_READ64, &reg_val, 0, 0, channel);
	min = REG_FIELD_GET(PKG_MIN_PWR, reg_val);
	min = mul_u64_u32_shr(min, SF_POWER, hwmon->scl_shift_power);
	max = REG_FIELD_GET(PKG_MAX_PWR, reg_val);
	max = mul_u64_u32_shr(max, SF_POWER, hwmon->scl_shift_power);

	if (min && max)
		*value = clamp_t(u64, *value, min, max);
unlock:
	mutex_unlock(&hwmon->hwmon_lock);
}

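/*
 * Writing power1/2_max: the value in microwatts is converted back to hardware
 * units with rounding and written to PKG_PWR_LIM_1 with PKG_PWR_LIM_1_EN set.
 * Writing 0 (PL1_DISABLE) instead attempts to clear PKG_PWR_LIM_1_EN and
 * returns -EOPNOTSUPP on platforms where the limit cannot be disabled.
 */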
static int xe_hwmon_power_max_write(struct xe_hwmon *hwmon, int channel, long value)
{
	int ret = 0;
	u64 reg_val;

	mutex_lock(&hwmon->hwmon_lock);

	/* Disable PL1 limit and verify, as limit cannot be disabled on all platforms */
	if (value == PL1_DISABLE) {
		xe_hwmon_process_reg(hwmon, REG_PKG_RAPL_LIMIT, REG_RMW32, &reg_val,
				     PKG_PWR_LIM_1_EN, 0, channel);
		xe_hwmon_process_reg(hwmon, REG_PKG_RAPL_LIMIT, REG_READ32, &reg_val,
				     PKG_PWR_LIM_1_EN, 0, channel);

		if (reg_val & PKG_PWR_LIM_1_EN)
			ret = -EOPNOTSUPP;
		/* Return here, otherwise the limit would be re-enabled below with a 0 value */
		goto unlock;
	}

	/* Computation in 64-bits to avoid overflow. Round to nearest. */
	reg_val = DIV_ROUND_CLOSEST_ULL((u64)value << hwmon->scl_shift_power, SF_POWER);
	reg_val = PKG_PWR_LIM_1_EN | REG_FIELD_PREP(PKG_PWR_LIM_1, reg_val);

	xe_hwmon_process_reg(hwmon, REG_PKG_RAPL_LIMIT, REG_RMW32, &reg_val,
			     PKG_PWR_LIM_1_EN | PKG_PWR_LIM_1, reg_val, channel);
unlock:
	mutex_unlock(&hwmon->hwmon_lock);
	return ret;
}

static void xe_hwmon_power_rated_max_read(struct xe_hwmon *hwmon, int channel, long *value)
{
	u64 reg_val;

	xe_hwmon_process_reg(hwmon, REG_PKG_POWER_SKU, REG_READ32, &reg_val, 0, 0, channel);
	reg_val = REG_FIELD_GET(PKG_TDP, reg_val);
	*value = mul_u64_u32_shr(reg_val, SF_POWER, hwmon->scl_shift_power);
}

/*
 * xe_hwmon_energy_get - Obtain energy value
 *
 * The underlying energy hardware register is 32-bits and is subject to
 * overflow. How long before overflow? For example, with an example
 * scaling bit shift of 14 bits (see register *PACKAGE_POWER_SKU_UNIT) and
 * a power draw of 1000 watts, the 32-bit counter will overflow in
 * approximately 4.36 minutes.
 *
 * Examples:
 *    1 watt:  (2^32 >> 14) /    1 W / (60 * 60 * 24) secs/day -> 3 days
 * 1000 watts: (2^32 >> 14) / 1000 W / 60             secs/min -> 4.36 minutes
 *
 * The function significantly increases overflow duration (from 4.36
 * minutes) by accumulating the energy register into a 'long' as allowed by
 * the hwmon API. Using x86_64 128 bit arithmetic (see mul_u64_u32_shr()),
 * a 'long' of 63 bits, SF_ENERGY of 1e6 (~20 bits) and
 * hwmon->scl_shift_energy of 14 bits we have 57 (63 - 20 + 14) bits before
 * energyN_input overflows. This at 1000 W is an overflow duration of 278 years.
 */
static void
xe_hwmon_energy_get(struct xe_hwmon *hwmon, int channel, long *energy)
{
	struct xe_hwmon_energy_info *ei = &hwmon->ei[channel];
	u64 reg_val;

	xe_hwmon_process_reg(hwmon, REG_PKG_ENERGY_STATUS, REG_READ32,
			     &reg_val, 0, 0, channel);

	if (reg_val >= ei->reg_val_prev)
		ei->accum_energy += reg_val - ei->reg_val_prev;
	else
		ei->accum_energy += UINT_MAX - ei->reg_val_prev + reg_val;

	ei->reg_val_prev = reg_val;

	*energy = mul_u64_u32_shr(ei->accum_energy, SF_ENERGY,
				  hwmon->scl_shift_energy);
}

static ssize_t
xe_hwmon_power_max_interval_show(struct device *dev, struct device_attribute *attr,
				 char *buf)
{
	struct xe_hwmon *hwmon = dev_get_drvdata(dev);
	u32 x, y, x_w = 2; /* 2 bits */
	u64 r, tau4, out;
	int sensor_index = to_sensor_dev_attr(attr)->index;

	xe_pm_runtime_get(gt_to_xe(hwmon->gt));

	mutex_lock(&hwmon->hwmon_lock);

	xe_hwmon_process_reg(hwmon, REG_PKG_RAPL_LIMIT,
			     REG_READ32, &r, 0, 0, sensor_index);

	mutex_unlock(&hwmon->hwmon_lock);

	xe_pm_runtime_put(gt_to_xe(hwmon->gt));

	x = REG_FIELD_GET(PKG_PWR_LIM_1_TIME_X, r);
	y = REG_FIELD_GET(PKG_PWR_LIM_1_TIME_Y, r);

	/*
	 * tau = 1.x * power(2,y), x = bits(23:22), y = bits(21:17)
	 *     = (4 | x) << (y - 2)
	 *
	 * Here (y - 2) ensures a 1.x fixed point representation of tau.
	 * As x is 2 bits, 1.x can be 1.0, 1.25, 1.50 or 1.75.
	 *
	 * As y can be < 2, we instead compute tau4 = (4 | x) << y
	 * and add 2 (i.e. x_w) to the final right shift to compensate.
	 */
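	/*
	 * Worked example with hypothetical values: x = 1, y = 7 and
	 * scl_shift_time = 0xa give tau4 = (4 | 1) << 7 = 640 and
	 * out = 640 * 1000 >> (0xa + 2) = 156 ms, i.e. tau = 1.25 * 2^7
	 * units of 1/1024 s.
	 */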
	tau4 = (u64)((1 << x_w) | x) << y;

	/* val in hwmon interface units (millisec) */
	out = mul_u64_u32_shr(tau4, SF_TIME, hwmon->scl_shift_time + x_w);

	return sysfs_emit(buf, "%llu\n", out);
}

static ssize_t
xe_hwmon_power_max_interval_store(struct device *dev, struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct xe_hwmon *hwmon = dev_get_drvdata(dev);
	u32 x, y, rxy, x_w = 2; /* 2 bits */
	u64 tau4, r, max_win;
	unsigned long val;
	int ret;
	int sensor_index = to_sensor_dev_attr(attr)->index;

	ret = kstrtoul(buf, 0, &val);
	if (ret)
		return ret;

	/*
	 * Max HW supported tau in '1.x * power(2,y)' format, x = 0, y = 0x12.
	 * The hwmon->scl_shift_time default of 0xa results in a max tau of 256 seconds.
	 *
	 * The ideal scenario is for PKG_MAX_WIN to be read from the PKG_PWR_SKU register.
	 * However, existing discrete GPUs have been observed not to provide a correct
	 * PKG_MAX_WIN value, so a default constant is used instead. For future discrete GPUs
	 * this may get resolved, in which case PKG_MAX_WIN should be obtained from PKG_PWR_SKU.
	 */
#define PKG_MAX_WIN_DEFAULT 0x12ull
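	/*
	 * Sanity check of the default: x = 0, y = 0x12 gives tau4 = 4 << 0x12 = 2^20,
	 * so with scl_shift_time = 0xa, max_win = 2^20 * 1000 >> (0xa + 2) = 256000 ms,
	 * i.e. the 256 seconds mentioned above.
	 */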

	/*
	 * val must be < max in hwmon interface units. The steps below are
	 * explained in xe_hwmon_power_max_interval_show()
	 */
	r = FIELD_PREP(PKG_MAX_WIN, PKG_MAX_WIN_DEFAULT);
	x = REG_FIELD_GET(PKG_MAX_WIN_X, r);
	y = REG_FIELD_GET(PKG_MAX_WIN_Y, r);
	tau4 = (u64)((1 << x_w) | x) << y;
	max_win = mul_u64_u32_shr(tau4, SF_TIME, hwmon->scl_shift_time + x_w);

	if (val > max_win)
		return -EINVAL;

	/* val in hw units */
	val = DIV_ROUND_CLOSEST_ULL((u64)val << hwmon->scl_shift_time, SF_TIME);

	/*
	 * Convert val to 1.x * power(2,y)
	 * y = ilog2(val)
	 * x = (val - (1 << y)) >> (y - 2)
	 */
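	/*
	 * Continuing the hypothetical example from the show path: val = 156 ms
	 * with scl_shift_time = 0xa becomes DIV_ROUND_CLOSEST(156 * 1024, 1000) = 160
	 * hw units, so y = ilog2(160) = 7 and x = ((160 - 128) << 2) >> 7 = 1,
	 * i.e. 1.25 * 2^7 again.
	 */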
	if (!val) {
		y = 0;
		x = 0;
	} else {
		y = ilog2(val);
		x = (val - (1ul << y)) << x_w >> y;
	}

	rxy = REG_FIELD_PREP(PKG_PWR_LIM_1_TIME_X, x) | REG_FIELD_PREP(PKG_PWR_LIM_1_TIME_Y, y);

	xe_pm_runtime_get(gt_to_xe(hwmon->gt));

	mutex_lock(&hwmon->hwmon_lock);

	xe_hwmon_process_reg(hwmon, REG_PKG_RAPL_LIMIT, REG_RMW32, (u64 *)&r,
			     PKG_PWR_LIM_1_TIME, rxy, sensor_index);

	mutex_unlock(&hwmon->hwmon_lock);

	xe_pm_runtime_put(gt_to_xe(hwmon->gt));

	return count;
}

static SENSOR_DEVICE_ATTR(power1_max_interval, 0664,
			  xe_hwmon_power_max_interval_show,
			  xe_hwmon_power_max_interval_store, CHANNEL_CARD);

static SENSOR_DEVICE_ATTR(power2_max_interval, 0664,
			  xe_hwmon_power_max_interval_show,
			  xe_hwmon_power_max_interval_store, CHANNEL_PKG);

static struct attribute *hwmon_attributes[] = {
	&sensor_dev_attr_power1_max_interval.dev_attr.attr,
	&sensor_dev_attr_power2_max_interval.dev_attr.attr,
	NULL
};

static umode_t xe_hwmon_attributes_visible(struct kobject *kobj,
					   struct attribute *attr, int index)
{
	struct device *dev = kobj_to_dev(kobj);
	struct xe_hwmon *hwmon = dev_get_drvdata(dev);
	int ret = 0;

	xe_pm_runtime_get(gt_to_xe(hwmon->gt));

	ret = xe_reg_is_valid(xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, index)) ? attr->mode : 0;

	xe_pm_runtime_put(gt_to_xe(hwmon->gt));

	return ret;
}

static const struct attribute_group hwmon_attrgroup = {
	.attrs = hwmon_attributes,
	.is_visible = xe_hwmon_attributes_visible,
};

static const struct attribute_group *hwmon_groups[] = {
	&hwmon_attrgroup,
	NULL
};

static const struct hwmon_channel_info * const hwmon_info[] = {
	HWMON_CHANNEL_INFO(power, HWMON_P_MAX | HWMON_P_RATED_MAX | HWMON_P_LABEL,
			   HWMON_P_MAX | HWMON_P_RATED_MAX | HWMON_P_CRIT | HWMON_P_LABEL),
	HWMON_CHANNEL_INFO(curr, HWMON_C_LABEL, HWMON_C_CRIT | HWMON_C_LABEL),
	HWMON_CHANNEL_INFO(in, HWMON_I_INPUT | HWMON_I_LABEL, HWMON_I_INPUT | HWMON_I_LABEL),
	HWMON_CHANNEL_INFO(energy, HWMON_E_INPUT | HWMON_E_LABEL, HWMON_E_INPUT | HWMON_E_LABEL),
	NULL
};

/* I1 is exposed as power_crit or as curr_crit depending on bit 31 */
static int xe_hwmon_pcode_read_i1(struct xe_gt *gt, u32 *uval)
{
	/* Avoid Illegal Subcommand error */
	if (gt_to_xe(gt)->info.platform == XE_DG2)
		return -ENXIO;

	return xe_pcode_read(gt, PCODE_MBOX(PCODE_POWER_SETUP,
			     POWER_SETUP_SUBCOMMAND_READ_I1, 0),
			     uval, NULL);
}

static int xe_hwmon_pcode_write_i1(struct xe_gt *gt, u32 uval)
{
	return xe_pcode_write(gt, PCODE_MBOX(PCODE_POWER_SETUP,
			      POWER_SETUP_SUBCOMMAND_WRITE_I1, 0),
			      uval);
}

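/*
 * The I1 value exchanged with pcode is treated here as a fixed point number
 * with POWER_SETUP_I1_SHIFT fractional bits; depending on the
 * POWER_SETUP_I1_WATTS flag it is exposed through the power or curr crit
 * attributes.
 */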
static int xe_hwmon_power_curr_crit_read(struct xe_hwmon *hwmon, int channel,
					 long *value, u32 scale_factor)
{
	int ret;
	u32 uval;

	mutex_lock(&hwmon->hwmon_lock);

	ret = xe_hwmon_pcode_read_i1(hwmon->gt, &uval);
	if (ret)
		goto unlock;

	*value = mul_u64_u32_shr(REG_FIELD_GET(POWER_SETUP_I1_DATA_MASK, uval),
				 scale_factor, POWER_SETUP_I1_SHIFT);
unlock:
	mutex_unlock(&hwmon->hwmon_lock);
	return ret;
}

static int xe_hwmon_power_curr_crit_write(struct xe_hwmon *hwmon, int channel,
					  long value, u32 scale_factor)
{
	int ret;
	u32 uval;

	mutex_lock(&hwmon->hwmon_lock);

	uval = DIV_ROUND_CLOSEST_ULL(value << POWER_SETUP_I1_SHIFT, scale_factor);
	ret = xe_hwmon_pcode_write_i1(hwmon->gt, uval);

	mutex_unlock(&hwmon->hwmon_lock);
	return ret;
}

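/*
 * GT_PERF_STATUS reports voltage in 2.5 mV units; convert to millivolts for
 * hwmon. For illustration, a hypothetical raw field value of 480 would read
 * as 480 * 2500 / 1000 = 1200 mV.
 */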
static void xe_hwmon_get_voltage(struct xe_hwmon *hwmon, int channel, long *value)
{
	u64 reg_val;

	xe_hwmon_process_reg(hwmon, REG_GT_PERF_STATUS,
			     REG_READ32, &reg_val, 0, 0, channel);
	/* HW register value in units of 2.5 millivolt */
	*value = DIV_ROUND_CLOSEST(REG_FIELD_GET(VOLTAGE_MASK, reg_val) * 2500, SF_VOLTAGE);
}

static umode_t
xe_hwmon_power_is_visible(struct xe_hwmon *hwmon, u32 attr, int channel)
{
	u32 uval;

	switch (attr) {
	case hwmon_power_max:
		return xe_reg_is_valid(xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT,
				       channel)) ? 0664 : 0;
	case hwmon_power_rated_max:
		return xe_reg_is_valid(xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU,
				       channel)) ? 0444 : 0;
	case hwmon_power_crit:
		if (channel == CHANNEL_PKG)
			return (xe_hwmon_pcode_read_i1(hwmon->gt, &uval) ||
				!(uval & POWER_SETUP_I1_WATTS)) ? 0 : 0644;
		break;
	case hwmon_power_label:
		return xe_reg_is_valid(xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU_UNIT,
				       channel)) ? 0444 : 0;
	default:
		return 0;
	}
	return 0;
}

static int
xe_hwmon_power_read(struct xe_hwmon *hwmon, u32 attr, int channel, long *val)
{
	switch (attr) {
	case hwmon_power_max:
		xe_hwmon_power_max_read(hwmon, channel, val);
		return 0;
	case hwmon_power_rated_max:
		xe_hwmon_power_rated_max_read(hwmon, channel, val);
		return 0;
	case hwmon_power_crit:
		return xe_hwmon_power_curr_crit_read(hwmon, channel, val, SF_POWER);
	default:
		return -EOPNOTSUPP;
	}
}

static int
xe_hwmon_power_write(struct xe_hwmon *hwmon, u32 attr, int channel, long val)
{
	switch (attr) {
	case hwmon_power_max:
		return xe_hwmon_power_max_write(hwmon, channel, val);
	case hwmon_power_crit:
		return xe_hwmon_power_curr_crit_write(hwmon, channel, val, SF_POWER);
	default:
		return -EOPNOTSUPP;
	}
}

static umode_t
xe_hwmon_curr_is_visible(const struct xe_hwmon *hwmon, u32 attr, int channel)
{
	u32 uval;

	switch (attr) {
	case hwmon_curr_crit:
	case hwmon_curr_label:
		if (channel == CHANNEL_PKG)
			return (xe_hwmon_pcode_read_i1(hwmon->gt, &uval) ||
				(uval & POWER_SETUP_I1_WATTS)) ? 0 : 0644;
		break;
	default:
		return 0;
	}
	return 0;
}

static int
xe_hwmon_curr_read(struct xe_hwmon *hwmon, u32 attr, int channel, long *val)
{
	switch (attr) {
	case hwmon_curr_crit:
		return xe_hwmon_power_curr_crit_read(hwmon, channel, val, SF_CURR);
	default:
		return -EOPNOTSUPP;
	}
}

static int
xe_hwmon_curr_write(struct xe_hwmon *hwmon, u32 attr, int channel, long val)
{
	switch (attr) {
	case hwmon_curr_crit:
		return xe_hwmon_power_curr_crit_write(hwmon, channel, val, SF_CURR);
	default:
		return -EOPNOTSUPP;
	}
}

static umode_t
xe_hwmon_in_is_visible(struct xe_hwmon *hwmon, u32 attr, int channel)
{
	switch (attr) {
	case hwmon_in_input:
	case hwmon_in_label:
		return xe_reg_is_valid(xe_hwmon_get_reg(hwmon, REG_GT_PERF_STATUS,
				       channel)) ? 0444 : 0;
	default:
		return 0;
	}
}

static int
xe_hwmon_in_read(struct xe_hwmon *hwmon, u32 attr, int channel, long *val)
{
	switch (attr) {
	case hwmon_in_input:
		xe_hwmon_get_voltage(hwmon, channel, val);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static umode_t
xe_hwmon_energy_is_visible(struct xe_hwmon *hwmon, u32 attr, int channel)
{
	switch (attr) {
	case hwmon_energy_input:
	case hwmon_energy_label:
		return xe_reg_is_valid(xe_hwmon_get_reg(hwmon, REG_PKG_ENERGY_STATUS,
				       channel)) ? 0444 : 0;
	default:
		return 0;
	}
}

static int
xe_hwmon_energy_read(struct xe_hwmon *hwmon, u32 attr, int channel, long *val)
{
	switch (attr) {
	case hwmon_energy_input:
		xe_hwmon_energy_get(hwmon, channel, val);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

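/*
 * Top level hwmon callbacks. Each takes a runtime PM reference so that the
 * register and pcode accesses done by the helpers above happen with the
 * device awake.
 */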
static umode_t
xe_hwmon_is_visible(const void *drvdata, enum hwmon_sensor_types type,
		    u32 attr, int channel)
{
	struct xe_hwmon *hwmon = (struct xe_hwmon *)drvdata;
	int ret;

	xe_pm_runtime_get(gt_to_xe(hwmon->gt));

	switch (type) {
	case hwmon_power:
		ret = xe_hwmon_power_is_visible(hwmon, attr, channel);
		break;
	case hwmon_curr:
		ret = xe_hwmon_curr_is_visible(hwmon, attr, channel);
		break;
	case hwmon_in:
		ret = xe_hwmon_in_is_visible(hwmon, attr, channel);
		break;
	case hwmon_energy:
		ret = xe_hwmon_energy_is_visible(hwmon, attr, channel);
		break;
	default:
		ret = 0;
		break;
	}

	xe_pm_runtime_put(gt_to_xe(hwmon->gt));

	return ret;
}

static int
xe_hwmon_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
	      int channel, long *val)
{
	struct xe_hwmon *hwmon = dev_get_drvdata(dev);
	int ret;

	xe_pm_runtime_get(gt_to_xe(hwmon->gt));

	switch (type) {
	case hwmon_power:
		ret = xe_hwmon_power_read(hwmon, attr, channel, val);
		break;
	case hwmon_curr:
		ret = xe_hwmon_curr_read(hwmon, attr, channel, val);
		break;
	case hwmon_in:
		ret = xe_hwmon_in_read(hwmon, attr, channel, val);
		break;
	case hwmon_energy:
		ret = xe_hwmon_energy_read(hwmon, attr, channel, val);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	xe_pm_runtime_put(gt_to_xe(hwmon->gt));

	return ret;
}

static int
xe_hwmon_write(struct device *dev, enum hwmon_sensor_types type, u32 attr,
	       int channel, long val)
{
	struct xe_hwmon *hwmon = dev_get_drvdata(dev);
	int ret;

	xe_pm_runtime_get(gt_to_xe(hwmon->gt));

	switch (type) {
	case hwmon_power:
		ret = xe_hwmon_power_write(hwmon, attr, channel, val);
		break;
	case hwmon_curr:
		ret = xe_hwmon_curr_write(hwmon, attr, channel, val);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	xe_pm_runtime_put(gt_to_xe(hwmon->gt));

	return ret;
}

static int xe_hwmon_read_label(struct device *dev,
			       enum hwmon_sensor_types type,
			       u32 attr, int channel, const char **str)
{
	switch (type) {
	case hwmon_power:
	case hwmon_energy:
	case hwmon_curr:
	case hwmon_in:
		if (channel == CHANNEL_CARD)
			*str = "card";
		else if (channel == CHANNEL_PKG)
			*str = "pkg";
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static const struct hwmon_ops hwmon_ops = {
	.is_visible = xe_hwmon_is_visible,
	.read = xe_hwmon_read,
	.write = xe_hwmon_write,
	.read_string = xe_hwmon_read_label,
};

static const struct hwmon_chip_info hwmon_chip_info = {
	.ops = &hwmon_ops,
	.info = hwmon_info,
};

static void
xe_hwmon_get_preregistration_info(struct xe_device *xe)
{
	struct xe_hwmon *hwmon = xe->hwmon;
	long energy;
	u64 val_sku_unit = 0;
	int channel;

	/*
	 * The contents of register PKG_POWER_SKU_UNIT do not change,
	 * so read it once and store the shift values.
	 */
	if (xe_reg_is_valid(xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU_UNIT, 0))) {
		xe_hwmon_process_reg(hwmon, REG_PKG_POWER_SKU_UNIT,
				     REG_READ32, &val_sku_unit, 0, 0, 0);
		hwmon->scl_shift_power = REG_FIELD_GET(PKG_PWR_UNIT, val_sku_unit);
		hwmon->scl_shift_energy = REG_FIELD_GET(PKG_ENERGY_UNIT, val_sku_unit);
		hwmon->scl_shift_time = REG_FIELD_GET(PKG_TIME_UNIT, val_sku_unit);
	}

	/*
	 * Initialize 'struct xe_hwmon_energy_info', i.e. set fields to the
	 * first value of the energy register read
	 */
	for (channel = 0; channel < CHANNEL_MAX; channel++)
		if (xe_hwmon_is_visible(hwmon, hwmon_energy, hwmon_energy_input, channel))
			xe_hwmon_energy_get(hwmon, channel, &energy);
}

static void xe_hwmon_mutex_destroy(void *arg)
{
	struct xe_hwmon *hwmon = arg;

	mutex_destroy(&hwmon->hwmon_lock);
}

void xe_hwmon_register(struct xe_device *xe)
{
	struct device *dev = xe->drm.dev;
	struct xe_hwmon *hwmon;

	/* hwmon is available only for dGfx */
	if (!IS_DGFX(xe))
		return;

	/* hwmon is not available on VFs */
	if (IS_SRIOV_VF(xe))
		return;

	hwmon = devm_kzalloc(dev, sizeof(*hwmon), GFP_KERNEL);
	if (!hwmon)
		return;

	xe->hwmon = hwmon;

	mutex_init(&hwmon->hwmon_lock);
	if (devm_add_action_or_reset(dev, xe_hwmon_mutex_destroy, hwmon))
		return;

	/* primary GT to access device level properties */
	hwmon->gt = xe->tiles[0].primary_gt;

	xe_hwmon_get_preregistration_info(xe);

	drm_dbg(&xe->drm, "Register xe hwmon interface\n");

	/* hwmon_dev points to device hwmon<i> */
	hwmon->hwmon_dev = devm_hwmon_device_register_with_info(dev, "xe", hwmon,
								&hwmon_chip_info,
								hwmon_groups);

	if (IS_ERR(hwmon->hwmon_dev)) {
		drm_warn(&xe->drm, "Failed to register xe hwmon (%pe)\n", hwmon->hwmon_dev);
		xe->hwmon = NULL;
		return;
	}
}