xref: /linux/drivers/gpu/drm/xe/xe_hwmon.c (revision b4db9f840283caca0d904436f187ef56a9126eaa)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2023 Intel Corporation
4  */
5 
6 #include <linux/hwmon-sysfs.h>
7 #include <linux/hwmon.h>
8 #include <linux/types.h>
9 
10 #include <drm/drm_managed.h>
11 #include "regs/xe_gt_regs.h"
12 #include "regs/xe_mchbar_regs.h"
13 #include "regs/xe_pcode_regs.h"
14 #include "xe_device.h"
15 #include "xe_gt.h"
16 #include "xe_hwmon.h"
17 #include "xe_mmio.h"
18 #include "xe_pcode.h"
19 #include "xe_pcode_api.h"
20 #include "xe_sriov.h"
21 
22 enum xe_hwmon_reg {
23 	REG_PKG_RAPL_LIMIT,
24 	REG_PKG_POWER_SKU,
25 	REG_PKG_POWER_SKU_UNIT,
26 	REG_GT_PERF_STATUS,
27 	REG_PKG_ENERGY_STATUS,
28 };
29 
30 enum xe_hwmon_reg_operation {
31 	REG_READ32,
32 	REG_RMW32,
33 	REG_READ64,
34 };
35 
36 /*
37  * SF_* - scale factors for particular quantities according to hwmon spec.
38  */
39 #define SF_POWER	1000000		/* microwatts */
40 #define SF_CURR		1000		/* milliamperes */
41 #define SF_VOLTAGE	1000		/* millivolts */
42 #define SF_ENERGY	1000000		/* microjoules */
43 #define SF_TIME		1000		/* milliseconds */
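/*
 * For example, a 25 W PL1 limit is exposed through power1_max as
 * 25000000 (microwatts) and a 0.25 s time window is exposed through
 * power1_max_interval as 250 (milliseconds).
 */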
44 
45 /**
46  * struct xe_hwmon_energy_info - State for accumulating the energy counter
47  */
48 struct xe_hwmon_energy_info {
49 	/** @reg_val_prev: previous energy reg val */
50 	u32 reg_val_prev;
51 	/** @accum_energy: accumulated energy */
52 	long accum_energy;
53 };
54 
55 /**
56  * struct xe_hwmon - xe hwmon data structure
57  */
58 struct xe_hwmon {
59 	/** @hwmon_dev: hwmon device for xe */
60 	struct device *hwmon_dev;
61 	/** @gt: primary gt */
62 	struct xe_gt *gt;
63 	/** @hwmon_lock: lock for rw attributes */
64 	struct mutex hwmon_lock;
65 	/** @scl_shift_power: pkg power unit */
66 	int scl_shift_power;
67 	/** @scl_shift_energy: pkg energy unit */
68 	int scl_shift_energy;
69 	/** @scl_shift_time: pkg time unit */
70 	int scl_shift_time;
71 	/** @ei: Energy info for energy1_input */
72 	struct xe_hwmon_energy_info ei;
73 };
74 
75 static u32 xe_hwmon_get_reg(struct xe_hwmon *hwmon, enum xe_hwmon_reg hwmon_reg)
76 {
77 	struct xe_device *xe = gt_to_xe(hwmon->gt);
78 	struct xe_reg reg = XE_REG(0);
79 
80 	switch (hwmon_reg) {
81 	case REG_PKG_RAPL_LIMIT:
82 		if (xe->info.platform == XE_PVC)
83 			reg = PVC_GT0_PACKAGE_RAPL_LIMIT;
84 		else if (xe->info.platform == XE_DG2)
85 			reg = PCU_CR_PACKAGE_RAPL_LIMIT;
86 		break;
87 	case REG_PKG_POWER_SKU:
88 		if (xe->info.platform == XE_PVC)
89 			reg = PVC_GT0_PACKAGE_POWER_SKU;
90 		else if (xe->info.platform == XE_DG2)
91 			reg = PCU_CR_PACKAGE_POWER_SKU;
92 		break;
93 	case REG_PKG_POWER_SKU_UNIT:
94 		if (xe->info.platform == XE_PVC)
95 			reg = PVC_GT0_PACKAGE_POWER_SKU_UNIT;
96 		else if (xe->info.platform == XE_DG2)
97 			reg = PCU_CR_PACKAGE_POWER_SKU_UNIT;
98 		break;
99 	case REG_GT_PERF_STATUS:
100 		if (xe->info.platform == XE_DG2)
101 			reg = GT_PERF_STATUS;
102 		break;
103 	case REG_PKG_ENERGY_STATUS:
104 		if (xe->info.platform == XE_PVC)
105 			reg = PVC_GT0_PLATFORM_ENERGY_STATUS;
106 		else if (xe->info.platform == XE_DG2)
107 			reg = PCU_CR_PACKAGE_ENERGY_STATUS;
108 		break;
109 	default:
110 		drm_warn(&xe->drm, "Unknown xe hwmon reg id: %d\n", hwmon_reg);
111 		break;
112 	}
113 
114 	return reg.raw;
115 }
116 
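/*
 * xe_hwmon_get_reg() returns 0 (the raw value of XE_REG(0)) when the
 * requested register has no mapping on the running platform, e.g.
 * REG_GT_PERF_STATUS on anything other than DG2. xe_hwmon_process_reg()
 * below treats that as a no-op and leaves *value untouched, and the
 * *_is_visible() helpers use the same check to hide the corresponding
 * sysfs attributes.
 *
 * Minimal usage sketch, mirroring the callers further down: read a 32-bit
 * register into a u64, with clr/set left at 0 for a plain read:
 *
 *	u64 val = 0;
 *
 *	xe_hwmon_process_reg(hwmon, REG_PKG_POWER_SKU_UNIT, REG_READ32,
 *			     &val, 0, 0);
 */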
117 static void xe_hwmon_process_reg(struct xe_hwmon *hwmon, enum xe_hwmon_reg hwmon_reg,
118 				 enum xe_hwmon_reg_operation operation, u64 *value,
119 				 u32 clr, u32 set)
120 {
121 	struct xe_reg reg;
122 
123 	reg.raw = xe_hwmon_get_reg(hwmon, hwmon_reg);
124 
125 	if (!reg.raw)
126 		return;
127 
128 	switch (operation) {
129 	case REG_READ32:
130 		*value = xe_mmio_read32(hwmon->gt, reg);
131 		break;
132 	case REG_RMW32:
133 		*value = xe_mmio_rmw32(hwmon->gt, reg, clr, set);
134 		break;
135 	case REG_READ64:
136 		*value = xe_mmio_read64_2x32(hwmon->gt, reg);
137 		break;
138 	default:
139 		drm_warn(&gt_to_xe(hwmon->gt)->drm, "Invalid xe hwmon reg operation: %d\n",
140 			 operation);
141 		break;
142 	}
143 }
144 
145 #define PL1_DISABLE 0
146 
147 /*
148  * HW allows arbitrary PL1 limits to be set but silently clamps these values to
149  * "typical but not guaranteed" min/max values in REG_PKG_POWER_SKU. Follow the
150  * same pattern for sysfs: allow arbitrary PL1 limits to be set but display
151  * clamped values when read.
152  */
153 static void xe_hwmon_power_max_read(struct xe_hwmon *hwmon, long *value)
154 {
155 	u64 reg_val, min, max;
156 
157 	mutex_lock(&hwmon->hwmon_lock);
158 
159 	xe_hwmon_process_reg(hwmon, REG_PKG_RAPL_LIMIT, REG_READ32, &reg_val, 0, 0);
160 	/* Check if PL1 limit is disabled */
161 	if (!(reg_val & PKG_PWR_LIM_1_EN)) {
162 		*value = PL1_DISABLE;
163 		goto unlock;
164 	}
165 
166 	reg_val = REG_FIELD_GET(PKG_PWR_LIM_1, reg_val);
167 	*value = mul_u64_u32_shr(reg_val, SF_POWER, hwmon->scl_shift_power);
168 
169 	xe_hwmon_process_reg(hwmon, REG_PKG_POWER_SKU, REG_READ64, &reg_val, 0, 0);
170 	min = REG_FIELD_GET(PKG_MIN_PWR, reg_val);
171 	min = mul_u64_u32_shr(min, SF_POWER, hwmon->scl_shift_power);
172 	max = REG_FIELD_GET(PKG_MAX_PWR, reg_val);
173 	max = mul_u64_u32_shr(max, SF_POWER, hwmon->scl_shift_power);
174 
175 	if (min && max)
176 		*value = clamp_t(u64, *value, min, max);
177 unlock:
178 	mutex_unlock(&hwmon->hwmon_lock);
179 }
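/*
 * Worked example for the read path above, assuming (for illustration only)
 * a power unit of 1/8 W, i.e. hwmon->scl_shift_power == 3: a PKG_PWR_LIM_1
 * field value of 200 decodes to 200 * 1000000 >> 3 = 25000000 uW (25 W),
 * and if PKG_MIN_PWR/PKG_MAX_PWR decode to 10 W and 120 W, any programmed
 * limit outside that range is reported clamped to it.
 */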
180 
181 static int xe_hwmon_power_max_write(struct xe_hwmon *hwmon, long value)
182 {
183 	int ret = 0;
184 	u64 reg_val;
185 
186 	mutex_lock(&hwmon->hwmon_lock);
187 
188 	/* Disable PL1 limit and verify, as limit cannot be disabled on all platforms */
189 	if (value == PL1_DISABLE) {
190 		xe_hwmon_process_reg(hwmon, REG_PKG_RAPL_LIMIT, REG_RMW32, &reg_val,
191 				     PKG_PWR_LIM_1_EN, 0);
192 		xe_hwmon_process_reg(hwmon, REG_PKG_RAPL_LIMIT, REG_READ32, &reg_val,
193 				     PKG_PWR_LIM_1_EN, 0);
194 
195 		if (reg_val & PKG_PWR_LIM_1_EN) {
196 			ret = -EOPNOTSUPP;
197 			goto unlock;
198 		}
199 	}
200 
201 	/* Computation in 64-bits to avoid overflow. Round to nearest. */
202 	reg_val = DIV_ROUND_CLOSEST_ULL((u64)value << hwmon->scl_shift_power, SF_POWER);
203 	reg_val = PKG_PWR_LIM_1_EN | REG_FIELD_PREP(PKG_PWR_LIM_1, reg_val);
204 
205 	xe_hwmon_process_reg(hwmon, REG_PKG_RAPL_LIMIT, REG_RMW32, &reg_val,
206 			     PKG_PWR_LIM_1_EN | PKG_PWR_LIM_1, reg_val);
207 unlock:
208 	mutex_unlock(&hwmon->hwmon_lock);
209 	return ret;
210 }
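/*
 * The write path is the inverse conversion: with the same illustrative
 * 1/8 W unit, echoing 25000000 into power1_max programs
 * DIV_ROUND_CLOSEST_ULL(25000000 << 3, 1000000) = 200 into PKG_PWR_LIM_1,
 * while echoing 0 tries to clear PKG_PWR_LIM_1_EN and returns -EOPNOTSUPP
 * on parts where the limit cannot actually be disabled.
 */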
211 
212 static void xe_hwmon_power_rated_max_read(struct xe_hwmon *hwmon, long *value)
213 {
214 	u64 reg_val;
215 
216 	xe_hwmon_process_reg(hwmon, REG_PKG_POWER_SKU, REG_READ32, &reg_val, 0, 0);
217 	reg_val = REG_FIELD_GET(PKG_TDP, reg_val);
218 	*value = mul_u64_u32_shr(reg_val, SF_POWER, hwmon->scl_shift_power);
219 }
220 
221 /*
222  * xe_hwmon_energy_get - Obtain energy value
223  *
224  * The underlying energy hardware register is 32-bits and is subject to
225  * overflow. How long before overflow? For example, with a scaling bit
226  * shift of 14 bits (see register *PACKAGE_POWER_SKU_UNIT) and a power
227  * draw of 1000 watts, the 32-bit counter will overflow in
228  * approximately 4.36 minutes.
229  *
230  * Examples:
231  *    1 watt:  (2^32 >> 14) /    1 W / (60 * 60 * 24) secs/day -> 3 days
232  * 1000 watts: (2^32 >> 14) / 1000 W / 60             secs/min -> 4.36 minutes
233  *
234  * The function significantly increases overflow duration (from 4.36
235  * minutes) by accumulating the energy register into a 'long' as allowed by
236  * the hwmon API. Using x86_64 128 bit arithmetic (see mul_u64_u32_shr()),
237  * a 'long' of 63 bits, SF_ENERGY of 1e6 (~20 bits) and
238  * hwmon->scl_shift_energy of 14 bits we have 57 (63 - 20 + 14) bits before
239  * energy1_input overflows. This at 1000 W is an overflow duration of 278 years.
240  */
241 static void
242 xe_hwmon_energy_get(struct xe_hwmon *hwmon, long *energy)
243 {
244 	struct xe_hwmon_energy_info *ei = &hwmon->ei;
245 	u64 reg_val;
246 
247 	xe_hwmon_process_reg(hwmon, REG_PKG_ENERGY_STATUS, REG_READ32,
248 			     &reg_val, 0, 0);
249 
250 	if (reg_val >= ei->reg_val_prev)
251 		ei->accum_energy += reg_val - ei->reg_val_prev;
252 	else
253 		ei->accum_energy += UINT_MAX - ei->reg_val_prev + reg_val;
254 
255 	ei->reg_val_prev = reg_val;
256 
257 	*energy = mul_u64_u32_shr(ei->accum_energy, SF_ENERGY,
258 				  hwmon->scl_shift_energy);
259 }
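/*
 * With the 14-bit energy unit used as an example in the comment above, an
 * accumulated count of 16384 (2^14) corresponds to exactly
 * 16384 * 1000000 >> 14 = 1000000 uJ, i.e. energy1_input reports 1 J.
 */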
260 
261 static ssize_t
262 xe_hwmon_power1_max_interval_show(struct device *dev, struct device_attribute *attr,
263 				  char *buf)
264 {
265 	struct xe_hwmon *hwmon = dev_get_drvdata(dev);
266 	u32 x, y, x_w = 2; /* 2 bits */
267 	u64 r, tau4, out;
268 
269 	xe_device_mem_access_get(gt_to_xe(hwmon->gt));
270 
271 	mutex_lock(&hwmon->hwmon_lock);
272 
273 	xe_hwmon_process_reg(hwmon, REG_PKG_RAPL_LIMIT,
274 			     REG_READ32, &r, 0, 0);
275 
276 	mutex_unlock(&hwmon->hwmon_lock);
277 
278 	xe_device_mem_access_put(gt_to_xe(hwmon->gt));
279 
280 	x = REG_FIELD_GET(PKG_PWR_LIM_1_TIME_X, r);
281 	y = REG_FIELD_GET(PKG_PWR_LIM_1_TIME_Y, r);
282 
283 	/*
284 	 * tau = 1.x * power(2,y), x = bits(23:22), y = bits(21:17)
285 	 *     = (4 | x) << (y - 2)
286 	 *
287 	 * Here (y - 2) compensates for representing the 1.x fraction as the integer (4 | x)
288 	 * As x is 2 bits, 1.x can be 1.0, 1.25, 1.50 or 1.75
289 	 *
290 	 * As y can be < 2, we compute tau4 = (4 | x) << y
291 	 * and then add 2 when doing the final right shift to account for units
292 	 */
293 	tau4 = ((1 << x_w) | x) << y;
294 
295 	/* val in hwmon interface units (millisec) */
296 	out = mul_u64_u32_shr(tau4, SF_TIME, hwmon->scl_shift_time + x_w);
297 
298 	return sysfs_emit(buf, "%llu\n", out);
299 }
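/*
 * Decode example for the show path above: x = 1, y = 7 encodes
 * tau = 1.25 * 2^7 = 160 hardware time units. With the default time unit
 * shift of 0xa (see the PKG_MAX_WIN comment below), tau4 = (4 | 1) << 7 = 640
 * and 640 * 1000 >> (10 + 2) = 156, so power1_max_interval reports 156 ms
 * (1.25 * 128 / 1024 s = 156.25 ms, truncated).
 */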
300 
301 static ssize_t
302 xe_hwmon_power1_max_interval_store(struct device *dev, struct device_attribute *attr,
303 				   const char *buf, size_t count)
304 {
305 	struct xe_hwmon *hwmon = dev_get_drvdata(dev);
306 	u32 x, y, rxy, x_w = 2; /* 2 bits */
307 	u64 tau4, r, max_win;
308 	unsigned long val;
309 	int ret;
310 
311 	ret = kstrtoul(buf, 0, &val);
312 	if (ret)
313 		return ret;
314 
315 	/*
316 	 * Max HW supported tau in '1.x * power(2,y)' format, x = 0, y = 0x12.
317 	 * The hwmon->scl_shift_time default of 0xa results in a max tau of 256 seconds.
318 	 *
319 	 * The ideal scenario is for PKG_MAX_WIN to be read from the PKG_PWR_SKU register.
320 	 * However, it is observed that existing discrete GPUs do not provide a correct
321 	 * PKG_MAX_WIN value, so a default constant value is used instead. For future discrete
322 	 * GPUs this may get resolved, in which case PKG_MAX_WIN should be obtained from PKG_PWR_SKU.
323 	 */
324 #define PKG_MAX_WIN_DEFAULT 0x12ull
325 
326 	/*
327 	 * val must be < max in hwmon interface units. The steps below are
328 	 * explained in xe_hwmon_power1_max_interval_show()
329 	 */
330 	r = FIELD_PREP(PKG_MAX_WIN, PKG_MAX_WIN_DEFAULT);
331 	x = REG_FIELD_GET(PKG_MAX_WIN_X, r);
332 	y = REG_FIELD_GET(PKG_MAX_WIN_Y, r);
333 	tau4 = ((1 << x_w) | x) << y;
334 	max_win = mul_u64_u32_shr(tau4, SF_TIME, hwmon->scl_shift_time + x_w);
335 
336 	if (val > max_win)
337 		return -EINVAL;
338 
339 	/* val in hw units */
340 	val = DIV_ROUND_CLOSEST_ULL((u64)val << hwmon->scl_shift_time, SF_TIME);
341 
342 	/*
343 	 * Convert val to 1.x * power(2,y)
344 	 * y = ilog2(val)
345 	 * x = (val - (1 << y)) >> (y - 2)
346 	 */
347 	if (!val) {
348 		y = 0;
349 		x = 0;
350 	} else {
351 		y = ilog2(val);
352 		x = (val - (1ul << y)) << x_w >> y;
353 	}
354 
355 	rxy = REG_FIELD_PREP(PKG_PWR_LIM_1_TIME_X, x) | REG_FIELD_PREP(PKG_PWR_LIM_1_TIME_Y, y);
356 
357 	xe_device_mem_access_get(gt_to_xe(hwmon->gt));
358 
359 	mutex_lock(&hwmon->hwmon_lock);
360 
361 	xe_hwmon_process_reg(hwmon, REG_PKG_RAPL_LIMIT, REG_RMW32, (u64 *)&r,
362 			     PKG_PWR_LIM_1_TIME, rxy);
363 
364 	mutex_unlock(&hwmon->hwmon_lock);
365 
366 	xe_device_mem_access_put(gt_to_xe(hwmon->gt));
367 
368 	return count;
369 }
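/*
 * Encode example for the store path above, again with a time unit shift of
 * 0xa: writing 500 (ms) gives val = DIV_ROUND_CLOSEST_ULL(500 << 10, 1000) =
 * 512 hardware units, hence y = ilog2(512) = 9 and x = 0, i.e. tau = 1.0 * 2^9.
 * Values that are not exactly representable are rounded down to the nearest
 * 1.x * 2^y step, and anything above the 256 s window derived from
 * PKG_MAX_WIN_DEFAULT is rejected with -EINVAL.
 */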
370 
371 static SENSOR_DEVICE_ATTR(power1_max_interval, 0664,
372 			  xe_hwmon_power1_max_interval_show,
373 			  xe_hwmon_power1_max_interval_store, 0);
374 
375 static struct attribute *hwmon_attributes[] = {
376 	&sensor_dev_attr_power1_max_interval.dev_attr.attr,
377 	NULL
378 };
379 
380 static umode_t xe_hwmon_attributes_visible(struct kobject *kobj,
381 					   struct attribute *attr, int index)
382 {
383 	struct device *dev = kobj_to_dev(kobj);
384 	struct xe_hwmon *hwmon = dev_get_drvdata(dev);
385 	int ret = 0;
386 
387 	xe_device_mem_access_get(gt_to_xe(hwmon->gt));
388 
389 	if (attr == &sensor_dev_attr_power1_max_interval.dev_attr.attr)
390 		ret = xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT) ? attr->mode : 0;
391 
392 	xe_device_mem_access_put(gt_to_xe(hwmon->gt));
393 
394 	return ret;
395 }
396 
397 static const struct attribute_group hwmon_attrgroup = {
398 	.attrs = hwmon_attributes,
399 	.is_visible = xe_hwmon_attributes_visible,
400 };
401 
402 static const struct attribute_group *hwmon_groups[] = {
403 	&hwmon_attrgroup,
404 	NULL
405 };
406 
407 static const struct hwmon_channel_info * const hwmon_info[] = {
408 	HWMON_CHANNEL_INFO(power, HWMON_P_MAX | HWMON_P_RATED_MAX | HWMON_P_CRIT),
409 	HWMON_CHANNEL_INFO(curr, HWMON_C_CRIT),
410 	HWMON_CHANNEL_INFO(in, HWMON_I_INPUT),
411 	HWMON_CHANNEL_INFO(energy, HWMON_E_INPUT),
412 	NULL
413 };
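/*
 * With the channel configuration above, the hwmon core creates (when
 * visible) power1_max, power1_rated_max, power1_crit, curr1_crit,
 * in0_input and energy1_input, plus the custom power1_max_interval
 * attribute from hwmon_groups.
 */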
414 
415 /* I1 is exposed as power_crit or as curr_crit depending on bit 31 */
416 static int xe_hwmon_pcode_read_i1(struct xe_gt *gt, u32 *uval)
417 {
418 	/* Avoid Illegal Subcommand error */
419 	if (gt_to_xe(gt)->info.platform == XE_DG2)
420 		return -ENXIO;
421 
422 	return xe_pcode_read(gt, PCODE_MBOX(PCODE_POWER_SETUP,
423 			     POWER_SETUP_SUBCOMMAND_READ_I1, 0),
424 			     uval, NULL);
425 }
426 
427 static int xe_hwmon_pcode_write_i1(struct xe_gt *gt, u32 uval)
428 {
429 	return xe_pcode_write(gt, PCODE_MBOX(PCODE_POWER_SETUP,
430 			      POWER_SETUP_SUBCOMMAND_WRITE_I1, 0),
431 			      uval);
432 }
433 
434 static int xe_hwmon_power_curr_crit_read(struct xe_hwmon *hwmon, long *value, u32 scale_factor)
435 {
436 	int ret;
437 	u32 uval;
438 
439 	mutex_lock(&hwmon->hwmon_lock);
440 
441 	ret = xe_hwmon_pcode_read_i1(hwmon->gt, &uval);
442 	if (ret)
443 		goto unlock;
444 
445 	*value = mul_u64_u32_shr(REG_FIELD_GET(POWER_SETUP_I1_DATA_MASK, uval),
446 				 scale_factor, POWER_SETUP_I1_SHIFT);
447 unlock:
448 	mutex_unlock(&hwmon->hwmon_lock);
449 	return ret;
450 }
451 
452 static int xe_hwmon_power_curr_crit_write(struct xe_hwmon *hwmon, long value, u32 scale_factor)
453 {
454 	int ret;
455 	u32 uval;
456 
457 	mutex_lock(&hwmon->hwmon_lock);
458 
459 	uval = DIV_ROUND_CLOSEST_ULL(value << POWER_SETUP_I1_SHIFT, scale_factor);
460 	ret = xe_hwmon_pcode_write_i1(hwmon->gt, uval);
461 
462 	mutex_unlock(&hwmon->hwmon_lock);
463 	return ret;
464 }
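/*
 * I1 is a fixed point value with POWER_SETUP_I1_SHIFT fractional bits.
 * Purely as an illustration, assuming a shift of 6 (1/64 units): a
 * curr1_crit write of 20000 mA becomes uval = 20000 * 64 / 1000 = 1280,
 * and reading the same uval back yields 1280 * 1000 >> 6 = 20000 mA.
 */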
465 
466 static void xe_hwmon_get_voltage(struct xe_hwmon *hwmon, long *value)
467 {
468 	u64 reg_val;
469 
470 	xe_hwmon_process_reg(hwmon, REG_GT_PERF_STATUS,
471 			     REG_READ32, &reg_val, 0, 0);
472 	/* HW register value in units of 2.5 millivolt */
473 	*value = DIV_ROUND_CLOSEST(REG_FIELD_GET(VOLTAGE_MASK, reg_val) * 2500, SF_VOLTAGE);
474 }
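/*
 * For example, a VOLTAGE_MASK field value of 440 corresponds to
 * 440 * 2500 / 1000 = 1100, i.e. in0_input reports 1100 mV (1.1 V).
 */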
475 
476 static umode_t
477 xe_hwmon_power_is_visible(struct xe_hwmon *hwmon, u32 attr, int chan)
478 {
479 	u32 uval;
480 
481 	switch (attr) {
482 	case hwmon_power_max:
483 		return xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT) ? 0664 : 0;
484 	case hwmon_power_rated_max:
485 		return xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU) ? 0444 : 0;
486 	case hwmon_power_crit:
487 		return (xe_hwmon_pcode_read_i1(hwmon->gt, &uval) ||
488 			!(uval & POWER_SETUP_I1_WATTS)) ? 0 : 0644;
489 	default:
490 		return 0;
491 	}
492 }
493 
494 static int
495 xe_hwmon_power_read(struct xe_hwmon *hwmon, u32 attr, int chan, long *val)
496 {
497 	switch (attr) {
498 	case hwmon_power_max:
499 		xe_hwmon_power_max_read(hwmon, val);
500 		return 0;
501 	case hwmon_power_rated_max:
502 		xe_hwmon_power_rated_max_read(hwmon, val);
503 		return 0;
504 	case hwmon_power_crit:
505 		return xe_hwmon_power_curr_crit_read(hwmon, val, SF_POWER);
506 	default:
507 		return -EOPNOTSUPP;
508 	}
509 }
510 
511 static int
512 xe_hwmon_power_write(struct xe_hwmon *hwmon, u32 attr, int chan, long val)
513 {
514 	switch (attr) {
515 	case hwmon_power_max:
516 		return xe_hwmon_power_max_write(hwmon, val);
517 	case hwmon_power_crit:
518 		return xe_hwmon_power_curr_crit_write(hwmon, val, SF_POWER);
519 	default:
520 		return -EOPNOTSUPP;
521 	}
522 }
523 
524 static umode_t
525 xe_hwmon_curr_is_visible(const struct xe_hwmon *hwmon, u32 attr)
526 {
527 	u32 uval;
528 
529 	switch (attr) {
530 	case hwmon_curr_crit:
531 		return (xe_hwmon_pcode_read_i1(hwmon->gt, &uval) ||
532 			(uval & POWER_SETUP_I1_WATTS)) ? 0 : 0644;
533 	default:
534 		return 0;
535 	}
536 }
537 
538 static int
539 xe_hwmon_curr_read(struct xe_hwmon *hwmon, u32 attr, long *val)
540 {
541 	switch (attr) {
542 	case hwmon_curr_crit:
543 		return xe_hwmon_power_curr_crit_read(hwmon, val, SF_CURR);
544 	default:
545 		return -EOPNOTSUPP;
546 	}
547 }
548 
549 static int
550 xe_hwmon_curr_write(struct xe_hwmon *hwmon, u32 attr, long val)
551 {
552 	switch (attr) {
553 	case hwmon_curr_crit:
554 		return xe_hwmon_power_curr_crit_write(hwmon, val, SF_CURR);
555 	default:
556 		return -EOPNOTSUPP;
557 	}
558 }
559 
560 static umode_t
561 xe_hwmon_in_is_visible(struct xe_hwmon *hwmon, u32 attr)
562 {
563 	switch (attr) {
564 	case hwmon_in_input:
565 		return xe_hwmon_get_reg(hwmon, REG_GT_PERF_STATUS) ? 0444 : 0;
566 	default:
567 		return 0;
568 	}
569 }
570 
571 static int
572 xe_hwmon_in_read(struct xe_hwmon *hwmon, u32 attr, long *val)
573 {
574 	switch (attr) {
575 	case hwmon_in_input:
576 		xe_hwmon_get_voltage(hwmon, val);
577 		return 0;
578 	default:
579 		return -EOPNOTSUPP;
580 	}
581 }
582 
583 static umode_t
584 xe_hwmon_energy_is_visible(struct xe_hwmon *hwmon, u32 attr)
585 {
586 	switch (attr) {
587 	case hwmon_energy_input:
588 		return xe_hwmon_get_reg(hwmon, REG_PKG_ENERGY_STATUS) ? 0444 : 0;
589 	default:
590 		return 0;
591 	}
592 }
593 
594 static int
595 xe_hwmon_energy_read(struct xe_hwmon *hwmon, u32 attr, long *val)
596 {
597 	switch (attr) {
598 	case hwmon_energy_input:
599 		xe_hwmon_energy_get(hwmon, val);
600 		return 0;
601 	default:
602 		return -EOPNOTSUPP;
603 	}
604 }
605 
606 static umode_t
607 xe_hwmon_is_visible(const void *drvdata, enum hwmon_sensor_types type,
608 		    u32 attr, int channel)
609 {
610 	struct xe_hwmon *hwmon = (struct xe_hwmon *)drvdata;
611 	int ret;
612 
613 	xe_device_mem_access_get(gt_to_xe(hwmon->gt));
614 
615 	switch (type) {
616 	case hwmon_power:
617 		ret = xe_hwmon_power_is_visible(hwmon, attr, channel);
618 		break;
619 	case hwmon_curr:
620 		ret = xe_hwmon_curr_is_visible(hwmon, attr);
621 		break;
622 	case hwmon_in:
623 		ret = xe_hwmon_in_is_visible(hwmon, attr);
624 		break;
625 	case hwmon_energy:
626 		ret = xe_hwmon_energy_is_visible(hwmon, attr);
627 		break;
628 	default:
629 		ret = 0;
630 		break;
631 	}
632 
633 	xe_device_mem_access_put(gt_to_xe(hwmon->gt));
634 
635 	return ret;
636 }
637 
638 static int
639 xe_hwmon_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
640 	      int channel, long *val)
641 {
642 	struct xe_hwmon *hwmon = dev_get_drvdata(dev);
643 	int ret;
644 
645 	xe_device_mem_access_get(gt_to_xe(hwmon->gt));
646 
647 	switch (type) {
648 	case hwmon_power:
649 		ret = xe_hwmon_power_read(hwmon, attr, channel, val);
650 		break;
651 	case hwmon_curr:
652 		ret = xe_hwmon_curr_read(hwmon, attr, val);
653 		break;
654 	case hwmon_in:
655 		ret = xe_hwmon_in_read(hwmon, attr, val);
656 		break;
657 	case hwmon_energy:
658 		ret = xe_hwmon_energy_read(hwmon, attr, val);
659 		break;
660 	default:
661 		ret = -EOPNOTSUPP;
662 		break;
663 	}
664 
665 	xe_device_mem_access_put(gt_to_xe(hwmon->gt));
666 
667 	return ret;
668 }
669 
670 static int
671 xe_hwmon_write(struct device *dev, enum hwmon_sensor_types type, u32 attr,
672 	       int channel, long val)
673 {
674 	struct xe_hwmon *hwmon = dev_get_drvdata(dev);
675 	int ret;
676 
677 	xe_device_mem_access_get(gt_to_xe(hwmon->gt));
678 
679 	switch (type) {
680 	case hwmon_power:
681 		ret = xe_hwmon_power_write(hwmon, attr, channel, val);
682 		break;
683 	case hwmon_curr:
684 		ret = xe_hwmon_curr_write(hwmon, attr, val);
685 		break;
686 	default:
687 		ret = -EOPNOTSUPP;
688 		break;
689 	}
690 
691 	xe_device_mem_access_put(gt_to_xe(hwmon->gt));
692 
693 	return ret;
694 }
695 
696 static const struct hwmon_ops hwmon_ops = {
697 	.is_visible = xe_hwmon_is_visible,
698 	.read = xe_hwmon_read,
699 	.write = xe_hwmon_write,
700 };
701 
702 static const struct hwmon_chip_info hwmon_chip_info = {
703 	.ops = &hwmon_ops,
704 	.info = hwmon_info,
705 };
706 
707 static void
708 xe_hwmon_get_preregistration_info(struct xe_device *xe)
709 {
710 	struct xe_hwmon *hwmon = xe->hwmon;
711 	long energy;
712 	u64 val_sku_unit = 0;
713 
714 	/*
715 	 * The contents of register PKG_POWER_SKU_UNIT do not change,
716 	 * so read it once and store the shift values.
717 	 */
718 	if (xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU_UNIT)) {
719 		xe_hwmon_process_reg(hwmon, REG_PKG_POWER_SKU_UNIT,
720 				     REG_READ32, &val_sku_unit, 0, 0);
721 		hwmon->scl_shift_power = REG_FIELD_GET(PKG_PWR_UNIT, val_sku_unit);
722 		hwmon->scl_shift_energy = REG_FIELD_GET(PKG_ENERGY_UNIT, val_sku_unit);
723 		hwmon->scl_shift_time = REG_FIELD_GET(PKG_TIME_UNIT, val_sku_unit);
724 	}
725 
726 	/*
727 	 * Initialize 'struct xe_hwmon_energy_info', i.e. seed its fields with
728 	 * the first reading of the energy register
729 	 */
730 	if (xe_hwmon_is_visible(hwmon, hwmon_energy, hwmon_energy_input, 0))
731 		xe_hwmon_energy_get(hwmon, &energy);
732 }
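/*
 * As an illustration, if PKG_POWER_SKU_UNIT reported power, energy and time
 * units of 3, 14 and 10 respectively, limits would be programmed in 1/8 W
 * steps, energy accumulated in 1/16384 J steps and time windows expressed
 * in 1/1024 s steps - the shifts assumed by the examples earlier in this
 * file.
 */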
733 
734 static void xe_hwmon_mutex_destroy(void *arg)
735 {
736 	struct xe_hwmon *hwmon = arg;
737 
738 	mutex_destroy(&hwmon->hwmon_lock);
739 }
740 
741 void xe_hwmon_register(struct xe_device *xe)
742 {
743 	struct device *dev = xe->drm.dev;
744 	struct xe_hwmon *hwmon;
745 
746 	/* hwmon is available only for dGfx */
747 	if (!IS_DGFX(xe))
748 		return;
749 
750 	/* hwmon is not available on VFs */
751 	if (IS_SRIOV_VF(xe))
752 		return;
753 
754 	hwmon = devm_kzalloc(dev, sizeof(*hwmon), GFP_KERNEL);
755 	if (!hwmon)
756 		return;
757 
758 	xe->hwmon = hwmon;
759 
760 	mutex_init(&hwmon->hwmon_lock);
761 	if (devm_add_action_or_reset(dev, xe_hwmon_mutex_destroy, hwmon))
762 		return;
763 
764 	/* primary GT to access device level properties */
765 	hwmon->gt = xe->tiles[0].primary_gt;
766 
767 	xe_hwmon_get_preregistration_info(xe);
768 
769 	drm_dbg(&xe->drm, "Register xe hwmon interface\n");
770 
771 	/* hwmon_dev points to device hwmon<i> */
772 	hwmon->hwmon_dev = devm_hwmon_device_register_with_info(dev, "xe", hwmon,
773 								&hwmon_chip_info,
774 								hwmon_groups);
775 
776 	if (IS_ERR(hwmon->hwmon_dev)) {
777 		drm_warn(&xe->drm, "Failed to register xe hwmon (%pe)\n", hwmon->hwmon_dev);
778 		xe->hwmon = NULL;
779 		return;
780 	}
781 }
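/*
 * Once registered, the interface appears as a standard hwmon chip named
 * "xe", typically under /sys/class/hwmon/hwmon<i>/. A usage sketch (the
 * hwmon index and available attributes depend on the system and platform):
 *
 *	$ cat /sys/class/hwmon/hwmon2/name
 *	xe
 *	$ cat /sys/class/hwmon/hwmon2/power1_max
 *	25000000
 *	$ echo 250 > /sys/class/hwmon/hwmon2/power1_max_interval
 */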
782 
783