// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * k10temp.c - AMD Family 10h/11h/12h/14h/15h/16h/17h
 *		processor hardware monitoring
 *
 * Copyright (c) 2009 Clemens Ladisch <clemens@ladisch.de>
 * Copyright (c) 2020 Guenter Roeck <linux@roeck-us.net>
 *
 * Implementation notes:
 * - CCD register address information as well as the calculation to
 *   convert raw register values is from https://github.com/ocerman/zenpower.
 *   The information is not confirmed from chip datasheets, but experiments
 *   suggest that it provides reasonable temperature values.
 */

#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/hwmon.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <asm/amd_nb.h>
#include <asm/processor.h>

MODULE_DESCRIPTION("AMD Family 10h+ CPU core temperature monitor");
MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>");
MODULE_LICENSE("GPL");

static bool force;
module_param(force, bool, 0444);
MODULE_PARM_DESC(force, "force loading on processors with erratum 319");

/* Provide lock for writing to NB_SMU_IND_ADDR */
static DEFINE_MUTEX(nb_smu_ind_mutex);

#ifndef PCI_DEVICE_ID_AMD_15H_M70H_NB_F3
#define PCI_DEVICE_ID_AMD_15H_M70H_NB_F3	0x15b3
#endif

/* CPUID function 0x80000001, ebx */
#define CPUID_PKGTYPE_MASK	GENMASK(31, 28)
#define CPUID_PKGTYPE_F		0x00000000
#define CPUID_PKGTYPE_AM2R2_AM3	0x10000000

/* DRAM controller (PCI function 2) */
#define REG_DCT0_CONFIG_HIGH		0x094
#define  DDR3_MODE			BIT(8)

/* miscellaneous (PCI function 3) */
#define REG_HARDWARE_THERMAL_CONTROL	0x64
#define  HTC_ENABLE			BIT(0)

#define REG_REPORTED_TEMPERATURE	0xa4

#define REG_NORTHBRIDGE_CAPABILITIES	0xe8
#define  NB_CAP_HTC			BIT(10)

/*
 * For F15h M60h and M70h, REG_HARDWARE_THERMAL_CONTROL
 * and REG_REPORTED_TEMPERATURE have been moved to
 * D0F0xBC_xD820_0C64 [Hardware Temperature Control]
 * D0F0xBC_xD820_0CA4 [Reported Temperature Control]
 */
#define F15H_M60H_HARDWARE_TEMP_CTRL_OFFSET	0xd8200c64
#define F15H_M60H_REPORTED_TEMP_CTRL_OFFSET	0xd8200ca4

/* Common for Zen CPU families (Family 17h and 18h and 19h) */
#define ZEN_REPORTED_TEMP_CTRL_BASE		0x00059800
#define ZEN_CCD_TEMP(offset, x)			(ZEN_REPORTED_TEMP_CTRL_BASE + \
						 (offset) + ((x) * 4))
#define ZEN_CCD_TEMP_VALID			BIT(11)
#define ZEN_CCD_TEMP_MASK			GENMASK(10, 0)

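/*
 * In the reported temperature control register, bits 31:21 hold the
 * current temperature in 0.125 degC steps. get_raw_temp() below subtracts
 * a 49 degC offset when the range select bit is set or the Tj select
 * field is fully set.
 */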
#define ZEN_CUR_TEMP_SHIFT			21
#define ZEN_CUR_TEMP_RANGE_SEL_MASK		BIT(19)
#define ZEN_CUR_TEMP_TJ_SEL_MASK		GENMASK(17, 16)

struct k10temp_data {
	struct pci_dev *pdev;
	void (*read_htcreg)(struct pci_dev *pdev, u32 *regval);
	void (*read_tempreg)(struct pci_dev *pdev, u32 *regval);
	int temp_offset;
	u32 temp_adjust_mask;
	u32 show_temp;
	bool is_zen;
	u32 ccd_offset;
};

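/*
 * Bit positions in show_temp, matching the hwmon channel numbering and
 * k10temp_temp_label[] below: bit 0 is Tctl, bit 1 is Tdie, and bits 2..13
 * are Tccd1..Tccd12.
 */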
#define TCTL_BIT	0
#define TDIE_BIT	1
#define TCCD_BIT(x)	((x) + 2)

#define HAVE_TEMP(d, channel)	((d)->show_temp & BIT(channel))
#define HAVE_TDIE(d)		HAVE_TEMP(d, TDIE_BIT)

struct tctl_offset {
	u8 model;
	char const *id;
	int offset;
};

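/*
 * Per-model Tctl offsets in millidegrees Celsius. When the running CPU
 * matches an entry, the offset is subtracted from the raw reading to
 * report Tdie in addition to Tctl.
 */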
static const struct tctl_offset tctl_offset_table[] = {
	{ 0x17, "AMD Ryzen 5 1600X", 20000 },
	{ 0x17, "AMD Ryzen 7 1700X", 20000 },
	{ 0x17, "AMD Ryzen 7 1800X", 20000 },
	{ 0x17, "AMD Ryzen 7 2700X", 10000 },
	{ 0x17, "AMD Ryzen Threadripper 19", 27000 }, /* 19{00,20,50}X */
	{ 0x17, "AMD Ryzen Threadripper 29", 27000 }, /* 29{20,50,70,90}[W]X */
};

static void read_htcreg_pci(struct pci_dev *pdev, u32 *regval)
{
	pci_read_config_dword(pdev, REG_HARDWARE_THERMAL_CONTROL, regval);
}

static void read_tempreg_pci(struct pci_dev *pdev, u32 *regval)
{
	pci_read_config_dword(pdev, REG_REPORTED_TEMPERATURE, regval);
}

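/*
 * Indirect read through the northbridge index/data register pair on PCI
 * function 0: write the target offset to the index register at @base,
 * then read the result from the data register at @base + 4. Accesses are
 * serialized with nb_smu_ind_mutex.
 */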
static void amd_nb_index_read(struct pci_dev *pdev, unsigned int devfn,
			      unsigned int base, int offset, u32 *val)
{
	mutex_lock(&nb_smu_ind_mutex);
	pci_bus_write_config_dword(pdev->bus, devfn,
				   base, offset);
	pci_bus_read_config_dword(pdev->bus, devfn,
				  base + 4, val);
	mutex_unlock(&nb_smu_ind_mutex);
}

static void read_htcreg_nb_f15(struct pci_dev *pdev, u32 *regval)
{
	amd_nb_index_read(pdev, PCI_DEVFN(0, 0), 0xb8,
			  F15H_M60H_HARDWARE_TEMP_CTRL_OFFSET, regval);
}

static void read_tempreg_nb_f15(struct pci_dev *pdev, u32 *regval)
{
	amd_nb_index_read(pdev, PCI_DEVFN(0, 0), 0xb8,
			  F15H_M60H_REPORTED_TEMP_CTRL_OFFSET, regval);
}

static void read_tempreg_nb_zen(struct pci_dev *pdev, u32 *regval)
{
	amd_smn_read(amd_pci_dev_to_node_id(pdev),
		     ZEN_REPORTED_TEMP_CTRL_BASE, regval);
}

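/*
 * Read the reported temperature register and return Tctl in millidegrees
 * Celsius, applying the 49 degC offset when the range/Tj select bits
 * require it.
 */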
static long get_raw_temp(struct k10temp_data *data)
{
	u32 regval;
	long temp;

	data->read_tempreg(data->pdev, &regval);
	temp = (regval >> ZEN_CUR_TEMP_SHIFT) * 125;
	if ((regval & data->temp_adjust_mask) ||
	    (regval & ZEN_CUR_TEMP_TJ_SEL_MASK) == ZEN_CUR_TEMP_TJ_SEL_MASK)
		temp -= 49000;
	return temp;
}

static const char *k10temp_temp_label[] = {
	"Tctl",
	"Tdie",
	"Tccd1",
	"Tccd2",
	"Tccd3",
	"Tccd4",
	"Tccd5",
	"Tccd6",
	"Tccd7",
	"Tccd8",
	"Tccd9",
	"Tccd10",
	"Tccd11",
	"Tccd12",
};

static int k10temp_read_labels(struct device *dev,
			       enum hwmon_sensor_types type,
			       u32 attr, int channel, const char **str)
{
	switch (type) {
	case hwmon_temp:
		*str = k10temp_temp_label[channel];
		break;
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

static int k10temp_read_temp(struct device *dev, u32 attr, int channel,
			     long *val)
{
	struct k10temp_data *data = dev_get_drvdata(dev);
	u32 regval;

	switch (attr) {
	case hwmon_temp_input:
		switch (channel) {
		case 0:		/* Tctl */
			*val = get_raw_temp(data);
			if (*val < 0)
				*val = 0;
			break;
		case 1:		/* Tdie */
			*val = get_raw_temp(data) - data->temp_offset;
			if (*val < 0)
				*val = 0;
			break;
		case 2 ... 13:		/* Tccd{1-12} */
			amd_smn_read(amd_pci_dev_to_node_id(data->pdev),
				     ZEN_CCD_TEMP(data->ccd_offset, channel - 2),
						  &regval);
			*val = (regval & ZEN_CCD_TEMP_MASK) * 125 - 49000;
			break;
		default:
			return -EOPNOTSUPP;
		}
		break;
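	/*
	 * Fixed 70 degC limit, only exposed on pre-Zen CPUs
	 * (see k10temp_is_visible()).
	 */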
	case hwmon_temp_max:
		*val = 70 * 1000;
		break;
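	/*
	 * The HTC limit and hysteresis fields are in 0.5 degC steps with a
	 * 52 degC offset, hence the "* 500 + 52000" conversion to
	 * millidegrees Celsius below.
	 */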
	case hwmon_temp_crit:
		data->read_htcreg(data->pdev, &regval);
		*val = ((regval >> 16) & 0x7f) * 500 + 52000;
		break;
	case hwmon_temp_crit_hyst:
		data->read_htcreg(data->pdev, &regval);
		*val = (((regval >> 16) & 0x7f)
			- ((regval >> 24) & 0xf)) * 500 + 52000;
		break;
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

static int k10temp_read(struct device *dev, enum hwmon_sensor_types type,
			u32 attr, int channel, long *val)
{
	switch (type) {
	case hwmon_temp:
		return k10temp_read_temp(dev, attr, channel, val);
	default:
		return -EOPNOTSUPP;
	}
}

static umode_t k10temp_is_visible(const void *_data,
				  enum hwmon_sensor_types type,
				  u32 attr, int channel)
{
	const struct k10temp_data *data = _data;
	struct pci_dev *pdev = data->pdev;
	u32 reg;

	switch (type) {
	case hwmon_temp:
		switch (attr) {
		case hwmon_temp_input:
			if (!HAVE_TEMP(data, channel))
				return 0;
			break;
		case hwmon_temp_max:
			if (channel || data->is_zen)
				return 0;
			break;
		case hwmon_temp_crit:
		case hwmon_temp_crit_hyst:
			if (channel || !data->read_htcreg)
				return 0;

			pci_read_config_dword(pdev,
					      REG_NORTHBRIDGE_CAPABILITIES,
					      &reg);
			if (!(reg & NB_CAP_HTC))
				return 0;

			data->read_htcreg(data->pdev, &reg);
			if (!(reg & HTC_ENABLE))
				return 0;
			break;
		case hwmon_temp_label:
			/* Show temperature labels only on Zen CPUs */
			if (!data->is_zen || !HAVE_TEMP(data, channel))
				return 0;
			break;
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}
	return 0444;
}

static bool has_erratum_319(struct pci_dev *pdev)
{
	u32 pkg_type, reg_dram_cfg;

	if (boot_cpu_data.x86 != 0x10)
		return false;

	/*
	 * Erratum 319: The thermal sensor of Socket F/AM2+ processors
	 *              may be unreliable.
	 */
	pkg_type = cpuid_ebx(0x80000001) & CPUID_PKGTYPE_MASK;
	if (pkg_type == CPUID_PKGTYPE_F)
		return true;
	if (pkg_type != CPUID_PKGTYPE_AM2R2_AM3)
		return false;

	/* DDR3 memory implies socket AM3, which is good */
	pci_bus_read_config_dword(pdev->bus,
				  PCI_DEVFN(PCI_SLOT(pdev->devfn), 2),
				  REG_DCT0_CONFIG_HIGH, &reg_dram_cfg);
	if (reg_dram_cfg & DDR3_MODE)
		return false;

	/*
	 * Unfortunately it is possible to run a socket AM3 CPU with DDR2
	 * memory. We blacklist all the cores which do exist in socket AM2+
	 * format. It still isn't perfect, as RB-C2 cores exist in both AM2+
	 * and AM3 formats, but that's the best we can do.
	 */
	return boot_cpu_data.x86_model < 4 ||
	       (boot_cpu_data.x86_model == 4 && boot_cpu_data.x86_stepping <= 2);
}

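/*
 * Channel 0 is Tctl (with max/crit/crit_hyst where supported), channel 1
 * is Tdie, and channels 2..13 are Tccd1..Tccd12. Which channels are
 * actually exposed is decided in k10temp_is_visible().
 */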
static const struct hwmon_channel_info * const k10temp_info[] = {
	HWMON_CHANNEL_INFO(temp,
			   HWMON_T_INPUT | HWMON_T_MAX |
			   HWMON_T_CRIT | HWMON_T_CRIT_HYST |
			   HWMON_T_LABEL,
			   HWMON_T_INPUT | HWMON_T_LABEL,
			   HWMON_T_INPUT | HWMON_T_LABEL,
			   HWMON_T_INPUT | HWMON_T_LABEL,
			   HWMON_T_INPUT | HWMON_T_LABEL,
			   HWMON_T_INPUT | HWMON_T_LABEL,
			   HWMON_T_INPUT | HWMON_T_LABEL,
			   HWMON_T_INPUT | HWMON_T_LABEL,
			   HWMON_T_INPUT | HWMON_T_LABEL,
			   HWMON_T_INPUT | HWMON_T_LABEL,
			   HWMON_T_INPUT | HWMON_T_LABEL,
			   HWMON_T_INPUT | HWMON_T_LABEL,
			   HWMON_T_INPUT | HWMON_T_LABEL,
			   HWMON_T_INPUT | HWMON_T_LABEL),
	NULL
};

static const struct hwmon_ops k10temp_hwmon_ops = {
	.is_visible = k10temp_is_visible,
	.read = k10temp_read,
	.read_string = k10temp_read_labels,
};

static const struct hwmon_chip_info k10temp_chip_info = {
	.ops = &k10temp_hwmon_ops,
	.info = k10temp_info,
};

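/*
 * Probe up to @limit per-CCD temperature registers and mark the CCDs whose
 * valid bit is set, so that only populated CCDs are exposed as Tccd
 * channels.
 */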
static void k10temp_get_ccd_support(struct pci_dev *pdev,
				    struct k10temp_data *data, int limit)
{
	u32 regval;
	int i;

	for (i = 0; i < limit; i++) {
		amd_smn_read(amd_pci_dev_to_node_id(pdev),
			     ZEN_CCD_TEMP(data->ccd_offset, i), &regval);
		if (regval & ZEN_CCD_TEMP_VALID)
			data->show_temp |= BIT(TCCD_BIT(i));
	}
}

static int k10temp_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int unreliable = has_erratum_319(pdev);
	struct device *dev = &pdev->dev;
	struct k10temp_data *data;
	struct device *hwmon_dev;
	int i;

	if (unreliable) {
		if (!force) {
			dev_err(dev,
				"unreliable CPU thermal sensor; monitoring disabled\n");
			return -ENODEV;
		}
		dev_warn(dev,
			 "unreliable CPU thermal sensor; check erratum 319\n");
	}

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->pdev = pdev;
	data->show_temp |= BIT(TCTL_BIT);	/* Always show Tctl */

	if (boot_cpu_data.x86 == 0x15 &&
	    ((boot_cpu_data.x86_model & 0xf0) == 0x60 ||
	     (boot_cpu_data.x86_model & 0xf0) == 0x70)) {
		data->read_htcreg = read_htcreg_nb_f15;
		data->read_tempreg = read_tempreg_nb_f15;
	} else if (boot_cpu_data.x86 == 0x17 || boot_cpu_data.x86 == 0x18) {
		data->temp_adjust_mask = ZEN_CUR_TEMP_RANGE_SEL_MASK;
		data->read_tempreg = read_tempreg_nb_zen;
		data->is_zen = true;

		switch (boot_cpu_data.x86_model) {
		case 0x1:	/* Zen */
		case 0x8:	/* Zen+ */
		case 0x11:	/* Zen APU */
		case 0x18:	/* Zen+ APU */
			data->ccd_offset = 0x154;
			k10temp_get_ccd_support(pdev, data, 4);
			break;
		case 0x31:	/* Zen2 Threadripper */
		case 0x60:	/* Renoir */
		case 0x68:	/* Lucienne */
		case 0x71:	/* Zen2 */
			data->ccd_offset = 0x154;
			k10temp_get_ccd_support(pdev, data, 8);
			break;
		case 0xa0 ... 0xaf:
			data->ccd_offset = 0x300;
			k10temp_get_ccd_support(pdev, data, 8);
			break;
		}
	} else if (boot_cpu_data.x86 == 0x19) {
		data->temp_adjust_mask = ZEN_CUR_TEMP_RANGE_SEL_MASK;
		data->read_tempreg = read_tempreg_nb_zen;
		data->is_zen = true;

		switch (boot_cpu_data.x86_model) {
		case 0x0 ... 0x1:	/* Zen3 SP3/TR */
		case 0x21:		/* Zen3 Ryzen Desktop */
		case 0x50 ... 0x5f:	/* Green Sardine */
			data->ccd_offset = 0x154;
			k10temp_get_ccd_support(pdev, data, 8);
			break;
		case 0x40 ... 0x4f:	/* Yellow Carp */
			data->ccd_offset = 0x300;
			k10temp_get_ccd_support(pdev, data, 8);
			break;
		case 0x60 ... 0x6f:
		case 0x70 ... 0x7f:
			data->ccd_offset = 0x308;
			k10temp_get_ccd_support(pdev, data, 8);
			break;
		case 0x10 ... 0x1f:
		case 0xa0 ... 0xaf:
			data->ccd_offset = 0x300;
			k10temp_get_ccd_support(pdev, data, 12);
			break;
		}
	} else {
		data->read_htcreg = read_htcreg_pci;
		data->read_tempreg = read_tempreg_pci;
	}

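	/*
	 * If the CPU model has a known Tctl offset, record it and expose
	 * Tdie in addition to Tctl.
	 */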
	for (i = 0; i < ARRAY_SIZE(tctl_offset_table); i++) {
		const struct tctl_offset *entry = &tctl_offset_table[i];

		if (boot_cpu_data.x86 == entry->model &&
		    strstr(boot_cpu_data.x86_model_id, entry->id)) {
			data->show_temp |= BIT(TDIE_BIT);	/* show Tdie */
			data->temp_offset = entry->offset;
			break;
		}
	}

	hwmon_dev = devm_hwmon_device_register_with_info(dev, "k10temp", data,
							 &k10temp_chip_info,
							 NULL);
	return PTR_ERR_OR_ZERO(hwmon_dev);
}

static const struct pci_device_id k10temp_id_table[] = {
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_11H_NB_MISC) },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F3) },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F3) },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M70H_NB_F3) },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F3) },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M60H_DF_F3) },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F3) },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_MA0H_DF_F3) },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_19H_DF_F3) },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_19H_M10H_DF_F3) },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_19H_M40H_DF_F3) },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_19H_M50H_DF_F3) },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_19H_M60H_DF_F3) },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_19H_M70H_DF_F3) },
	{ PCI_VDEVICE(HYGON, PCI_DEVICE_ID_AMD_17H_DF_F3) },
	{}
};
MODULE_DEVICE_TABLE(pci, k10temp_id_table);

static struct pci_driver k10temp_driver = {
	.name = "k10temp",
	.id_table = k10temp_id_table,
	.probe = k10temp_probe,
};

module_pci_driver(k10temp_driver);