xref: /linux/drivers/hwmon/coretemp.c (revision 23b0f90ba871f096474e1c27c3d14f455189d2d9)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * coretemp.c - Linux kernel module for hardware monitoring
4  *
5  * Copyright (C) 2007 Rudolf Marek <r.marek@assembler.cz>
6  *
7  * Inspired from many hwmon drivers
8  */
9 
10 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
11 
12 #include <linux/module.h>
13 #include <linux/init.h>
14 #include <linux/slab.h>
15 #include <linux/jiffies.h>
16 #include <linux/hwmon.h>
17 #include <linux/sysfs.h>
18 #include <linux/hwmon-sysfs.h>
19 #include <linux/err.h>
20 #include <linux/mutex.h>
21 #include <linux/list.h>
22 #include <linux/platform_device.h>
23 #include <linux/cpu.h>
24 #include <linux/smp.h>
25 #include <linux/moduleparam.h>
26 #include <linux/pci.h>
27 #include <asm/msr.h>
28 #include <asm/processor.h>
29 #include <asm/cpu_device_id.h>
30 #include <linux/sched/isolation.h>
31 
32 #define DRVNAME	"coretemp"
33 
34 /*
35  * force_tjmax only matters when TjMax can't be read from the CPU itself.
36  * When set, it replaces the driver's suboptimal heuristic.
37  */
38 static int force_tjmax;
39 module_param_named(tjmax, force_tjmax, int, 0444);
40 MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius");
41 
42 #define NUM_REAL_CORES		512	/* Number of Real cores per cpu */
43 #define CORETEMP_NAME_LENGTH	28	/* String Length of attrs */
44 
45 enum coretemp_attr_index {
46 	ATTR_LABEL,
47 	ATTR_CRIT_ALARM,
48 	ATTR_TEMP,
49 	ATTR_TJMAX,
50 	ATTR_TTARGET,
51 	MAX_CORE_ATTRS = ATTR_TJMAX + 1,	/* Maximum no of basic attrs */
52 	TOTAL_ATTRS = ATTR_TTARGET + 1		/* Maximum no of possible attrs */
53 };
54 
55 #ifdef CONFIG_SMP
56 #define for_each_sibling(i, cpu) \
57 	for_each_cpu(i, topology_sibling_cpumask(cpu))
58 #else
59 #define for_each_sibling(i, cpu)	for (i = 0; false; )
60 #endif
61 
/*
 * Per-Core Temperature Data
 * @temp:	Cached temperature (millidegrees Celsius) from the last
 *		MSR read in show_temp().
 * @tjmax:	The static tjmax value when tjmax cannot be retrieved from
 *		IA32_TEMPERATURE_TARGET MSR. Zero while the MSR is usable.
 * @last_updated: The time when the current temperature value was updated
 *		earlier (in jiffies).
 * @cpu:	The CPU from which temperature values should be read.
 *		This value is passed as "id" field to rdmsr/wrmsr functions.
 * @index:	Slot in platform_data->core_data[]; negative marks the
 *		per-package instance (see is_pkg_temp_data()).
 * @cpu_core_id: Core id of @cpu; used for the sysfs attr number and label.
 * @status_reg: One of IA32_THERM_STATUS or IA32_PACKAGE_THERM_STATUS,
 *		from where the temperature values should be read.
 * @attr_size:  Total number of per-core attrs displayed in the sysfs.
 * @sd_attrs:	device_attribute instances backing the sysfs files.
 * @attr_name:	Backing storage for the attribute names.
 * @attrs:	Attribute pointer array for @attr_group; the extra slot
 *		keeps it NULL-terminated (struct is kzalloc'ed).
 * @attr_group: The sysfs attribute group registered on the hwmon device.
 * @update_lock: Serializes MSR reads and @temp/@tjmax/@cpu updates.
 */
struct temp_data {
	int temp;
	int tjmax;
	unsigned long last_updated;
	unsigned int cpu;
	int index;
	u32 cpu_core_id;
	u32 status_reg;
	int attr_size;
	struct device_attribute sd_attrs[TOTAL_ATTRS];
	char attr_name[TOTAL_ATTRS][CORETEMP_NAME_LENGTH];
	struct attribute *attrs[TOTAL_ATTRS + 1];
	struct attribute_group attr_group;
	struct mutex update_lock;
};
89 
/*
 * Platform Data per Physical CPU (one instance per package/die zone)
 * @hwmon_dev:	Registered hwmon device; NULL until the first CPU of the
 *		zone comes online (see coretemp_cpu_online()).
 * @pkg_id:	Zone id, shown by temp1_label as "Package id".
 * @nr_cores:	Size of @core_data (currently hardcoded to NUM_REAL_CORES).
 * @ida:	Allocator for slots in @core_data.
 * @cpumask:	Online CPUs belonging to this zone.
 * @pkg_data:	temp_data of the package sensor, if created.
 * @core_data:	Lazily allocated array of per-core temp_data pointers.
 * @name_attr:	Device name attribute.
 */
struct platform_data {
	struct device		*hwmon_dev;
	u16			pkg_id;
	int			nr_cores;
	struct ida		ida;
	struct cpumask		cpumask;
	struct temp_data	*pkg_data;
	struct temp_data	**core_data;
	struct device_attribute name_attr;
};
101 
102 struct tjmax_pci {
103 	unsigned int device;
104 	int tjmax;
105 };
106 
107 static const struct tjmax_pci tjmax_pci_table[] = {
108 	{ 0x0708, 110000 },	/* CE41x0 (Sodaville ) */
109 	{ 0x0c72, 102000 },	/* Atom S1240 (Centerton) */
110 	{ 0x0c73, 95000 },	/* Atom S1220 (Centerton) */
111 	{ 0x0c75, 95000 },	/* Atom S1260 (Centerton) */
112 };
113 
114 struct tjmax {
115 	char const *id;
116 	int tjmax;
117 };
118 
119 static const struct tjmax tjmax_table[] = {
120 	{ "CPU  230", 100000 },		/* Model 0x1c, stepping 2	*/
121 	{ "CPU  330", 125000 },		/* Model 0x1c, stepping 2	*/
122 };
123 
124 struct tjmax_model {
125 	u32 vfm;
126 	u8 stepping_mask;
127 	int tjmax;
128 };
129 
130 #define ANY 0xff
131 
132 static const struct tjmax_model tjmax_model_table[] = {
133 	{ INTEL_ATOM_BONNELL,	      10,  100000 },	/* D4xx, K4xx, N4xx, D5xx, K5xx, N5xx */
134 	{ INTEL_ATOM_BONNELL,	      ANY, 90000 },	/* Z5xx, N2xx, possibly others
135 							 * Note: Also matches 230 and 330,
136 							 * which are covered by tjmax_table
137 							 */
138 	{ INTEL_ATOM_BONNELL_MID,     ANY, 90000 },	/* Atom Tunnel Creek (Exx), Lincroft (Z6xx)
139 							 * Note: TjMax for E6xxT is 110C, but CPU type
140 							 * is undetectable by software
141 							 */
142 	{ INTEL_ATOM_SALTWELL_MID,    ANY, 90000 },	/* Atom Medfield (Z2460) */
143 	{ INTEL_ATOM_SALTWELL_TABLET, ANY, 90000 },	/* Atom Clover Trail/Cloverview (Z27x0) */
144 	{ INTEL_ATOM_SALTWELL,	      ANY, 100000 },	/* Atom Cedar Trail/Cedarview (N2xxx, D2xxx)
145 							 * Also matches S12x0 (stepping 9), covered by
146 							 * PCI table
147 							 */
148 	{ INTEL_ATOM_SILVERMONT,      9, 110000 },	/* Atom Bay Trail E38xx (embedded) */
149 	{ INTEL_ATOM_SILVERMONT,      ANY, 90000 },	/* Atom Bay Trail Z37xx (tablet) */
150 	{ INTEL_ATOM_SILVERMONT_MID,  ANY, 90000 },	/* Atom Merrifield (Z34xx) */
151 	{ INTEL_ATOM_SILVERMONT_MID2, ANY, 90000 },	/* Atom Moorefield (Z35xx) */
152 	{ INTEL_ATOM_AIRMONT,	      ANY, 90000 },	/* Atom Cherry Trail (Z8xxx) */
153 	{ INTEL_ATOM_GOLDMONT,	      ANY, 105000 },	/* Atom Apollo Lake (J3xxx, N3xxx, E39xx) */
154 	{ INTEL_ATOM_GOLDMONT_PLUS,   ANY, 105000 },	/* Atom Gemini Lake (J4xxx, N4xxx, N5xxx) */
155 	{ INTEL_ATOM_TREMONT,	      ANY, 105000 },	/* Atom Elkhart Lake */
156 	{ INTEL_ATOM_TREMONT_L,	      ANY, 105000 },	/* Atom Jasper Lake */
157 };
158 
/* Package temp_data is marked with a negative index in init_temp_data(). */
static bool is_pkg_temp_data(struct temp_data *tdata)
{
	return tdata->index < 0;
}
163 
/*
 * Heuristically determine TjMax (in millidegrees Celsius) for CPUs that
 * do not report it via the IA32_TEMPERATURE_TARGET MSR. @id is the CPU
 * number passed to the MSR read helpers; @dev is used for diagnostics
 * only. Quirk tables are consulted first, then MSR-based guesswork.
 */
static int adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
{
	/* The 100C is default for both mobile and non mobile CPUs */

	int tjmax = 100000;
	int tjmax_ee = 85000;	/* TjMax assumed when the MSR 0xEE bit is set */
	int usemsr_ee = 1;
	int err;
	u32 eax, edx;
	int i;
	u16 devfn = PCI_DEVFN(0, 0);
	struct pci_dev *host_bridge = pci_get_domain_bus_and_slot(0, 0, devfn);

	/*
	 * Explicit tjmax table entries override heuristics.
	 * First try PCI host bridge IDs, followed by model ID strings
	 * and model/stepping information.
	 */
	if (host_bridge && host_bridge->vendor == PCI_VENDOR_ID_INTEL) {
		for (i = 0; i < ARRAY_SIZE(tjmax_pci_table); i++) {
			if (host_bridge->device == tjmax_pci_table[i].device) {
				pci_dev_put(host_bridge);
				return tjmax_pci_table[i].tjmax;
			}
		}
	}
	pci_dev_put(host_bridge);	/* pci_dev_put(NULL) is a no-op */

	/*
	 * This is literally looking for "CPU  XXX" in the model string.
	 * Not checking it against the model as well. Just purely a
	 * string search.
	 */
	for (i = 0; i < ARRAY_SIZE(tjmax_table); i++) {
		if (strstr(c->x86_model_id, tjmax_table[i].id))
			return tjmax_table[i].tjmax;
	}

	for (i = 0; i < ARRAY_SIZE(tjmax_model_table); i++) {
		const struct tjmax_model *tm = &tjmax_model_table[i];
		if (c->x86_vfm == tm->vfm &&
		    (tm->stepping_mask == ANY ||
		     tm->stepping_mask == c->x86_stepping))
			return tm->tjmax;
	}

	/* Early chips have no MSR for TjMax */

	if (c->x86_vfm == INTEL_CORE2_MEROM && c->x86_stepping < 4)
		usemsr_ee = 0;

	if (c->x86_vfm > INTEL_CORE_YONAH && usemsr_ee) {
		u8 platform_id;

		/*
		 * Now we can detect the mobile CPU using Intel provided table
		 * http://softwarecommunity.intel.com/Wiki/Mobility/720.htm
		 * For Core2 cores, check MSR 0x17, bit 28 1 = Mobile CPU
		 */
		err = rdmsr_safe_on_cpu(id, 0x17, &eax, &edx);
		if (err) {
			dev_warn(dev,
				 "Unable to access MSR 0x17, assuming desktop"
				 " CPU\n");
			usemsr_ee = 0;
		} else if (c->x86_vfm < INTEL_CORE2_PENRYN &&
			   !(eax & 0x10000000)) {
			/*
			 * Trust bit 28 up to Penryn, I could not find any
			 * documentation on that; if you happen to know
			 * someone at Intel please ask
			 */
			usemsr_ee = 0;
		} else {
			/* Platform ID bits 52:50 (EDX starts at bit 32) */
			platform_id = (edx >> 18) & 0x7;

			/*
			 * Mobile Penryn CPU seems to be platform ID 7 or 5
			 * (guesswork)
			 */
			if (c->x86_vfm == INTEL_CORE2_PENRYN &&
			    (platform_id == 5 || platform_id == 7)) {
				/*
				 * If MSR EE bit is set, set it to 90 degrees C,
				 * otherwise 105 degrees C
				 */
				tjmax_ee = 90000;
				tjmax = 105000;
			}
		}
	}

	if (usemsr_ee) {
		/* Bit 30 of MSR 0xEE selects the lower "EE" TjMax */
		err = rdmsr_safe_on_cpu(id, 0xee, &eax, &edx);
		if (err) {
			dev_warn(dev,
				 "Unable to access MSR 0xEE, for Tjmax, left"
				 " at default\n");
		} else if (eax & 0x40000000) {
			tjmax = tjmax_ee;
		}
	} else if (tjmax == 100000) {
		/*
		 * If we don't use msr EE it means we are desktop CPU
		 * (with exception of Atom)
		 */
		dev_warn(dev, "Using relative temperature scale!\n");
	}

	return tjmax;
}
276 
/*
 * Return TjMax in millidegrees Celsius for @tdata's CPU.
 *
 * Prefers MSR_IA32_TEMPERATURE_TARGET, re-read on every call while
 * usable. If the MSR is unreadable or reports zero, a static fallback
 * (the "tjmax" module parameter or the adjust_tjmax() heuristic) is
 * computed once and cached in tdata->tjmax; a non-zero tdata->tjmax
 * thereafter marks "static tjmax in use" (see get_ttarget()).
 */
static int get_tjmax(struct temp_data *tdata, struct device *dev)
{
	struct cpuinfo_x86 *c = &cpu_data(tdata->cpu);
	int err;
	u32 eax, edx;
	u32 val;

	/* use static tjmax once it is set */
	if (tdata->tjmax)
		return tdata->tjmax;

	/*
	 * A new feature of current Intel(R) processors, the
	 * IA32_TEMPERATURE_TARGET contains the TjMax value
	 */
	err = rdmsr_safe_on_cpu(tdata->cpu, MSR_IA32_TEMPERATURE_TARGET, &eax, &edx);
	if (err) {
		dev_warn_once(dev, "Unable to read TjMax from CPU %u\n", tdata->cpu);
	} else {
		val = (eax >> 16) & 0xff;	/* TjMax in degrees Celsius */
		if (val)
			return val * 1000;
	}

	if (force_tjmax) {
		dev_notice(dev, "TjMax forced to %d degrees C by user\n",
			   force_tjmax);
		tdata->tjmax = force_tjmax * 1000;
	} else {
		/*
		 * An assumption is made for early CPUs and unreadable MSR.
		 * NOTE: the calculated value may not be correct.
		 */
		tdata->tjmax = adjust_tjmax(c, tdata->cpu, dev);
	}
	return tdata->tjmax;
}
314 
/*
 * Return the target/throttle temperature (temp%d_max) in millidegrees
 * Celsius, or a negative errno. Only meaningful when TjMax comes from
 * MSR_IA32_TEMPERATURE_TARGET; a non-zero tdata->tjmax means the static
 * fallback is in use, so -ENODEV is returned.
 */
static int get_ttarget(struct temp_data *tdata, struct device *dev)
{
	u32 eax, edx;
	int tjmax, ttarget_offset, ret;

	/*
	 * ttarget is valid only if tjmax can be retrieved from
	 * MSR_IA32_TEMPERATURE_TARGET
	 */
	if (tdata->tjmax)
		return -ENODEV;

	ret = rdmsr_safe_on_cpu(tdata->cpu, MSR_IA32_TEMPERATURE_TARGET, &eax, &edx);
	if (ret)
		return ret;

	tjmax = (eax >> 16) & 0xff;	/* degrees Celsius */

	/* Read the still undocumented bits 8:15 of IA32_TEMPERATURE_TARGET. */
	ttarget_offset = (eax >> 8) & 0xff;

	return (tjmax - ttarget_offset) * 1000;
}
338 
339 /* Keep track of how many zone pointers we allocated in init() */
340 static int max_zones __read_mostly;
341 /* Array of zone pointers. Serialized by cpu hotplug lock */
342 static struct platform_device **zone_devices;
343 
344 static ssize_t show_label(struct device *dev,
345 				struct device_attribute *devattr, char *buf)
346 {
347 	struct platform_data *pdata = dev_get_drvdata(dev);
348 	struct temp_data *tdata = container_of(devattr, struct temp_data, sd_attrs[ATTR_LABEL]);
349 
350 	if (is_pkg_temp_data(tdata))
351 		return sprintf(buf, "Package id %u\n", pdata->pkg_id);
352 
353 	return sprintf(buf, "Core %u\n", tdata->cpu_core_id);
354 }
355 
356 static ssize_t show_crit_alarm(struct device *dev,
357 				struct device_attribute *devattr, char *buf)
358 {
359 	u32 eax, edx;
360 	struct temp_data *tdata = container_of(devattr, struct temp_data,
361 						sd_attrs[ATTR_CRIT_ALARM]);
362 
363 	mutex_lock(&tdata->update_lock);
364 	rdmsr_on_cpu(tdata->cpu, tdata->status_reg, &eax, &edx);
365 	mutex_unlock(&tdata->update_lock);
366 
367 	return sprintf(buf, "%d\n", (eax >> 5) & 1);
368 }
369 
370 static ssize_t show_tjmax(struct device *dev,
371 			struct device_attribute *devattr, char *buf)
372 {
373 	struct temp_data *tdata = container_of(devattr, struct temp_data, sd_attrs[ATTR_TJMAX]);
374 	int tjmax;
375 
376 	mutex_lock(&tdata->update_lock);
377 	tjmax = get_tjmax(tdata, dev);
378 	mutex_unlock(&tdata->update_lock);
379 
380 	return sprintf(buf, "%d\n", tjmax);
381 }
382 
383 static ssize_t show_ttarget(struct device *dev,
384 				struct device_attribute *devattr, char *buf)
385 {
386 	struct temp_data *tdata = container_of(devattr, struct temp_data, sd_attrs[ATTR_TTARGET]);
387 	int ttarget;
388 
389 	mutex_lock(&tdata->update_lock);
390 	ttarget = get_ttarget(tdata, dev);
391 	mutex_unlock(&tdata->update_lock);
392 
393 	if (ttarget < 0)
394 		return ttarget;
395 	return sprintf(buf, "%d\n", ttarget);
396 }
397 
/*
 * sysfs show callback for temp%d_input. The temperature is cached in
 * tdata->temp and refreshed from the status MSR at most once per second.
 */
static ssize_t show_temp(struct device *dev,
			struct device_attribute *devattr, char *buf)
{
	u32 eax, edx;
	struct temp_data *tdata = container_of(devattr, struct temp_data, sd_attrs[ATTR_TEMP]);
	int tjmax;

	mutex_lock(&tdata->update_lock);

	tjmax = get_tjmax(tdata, dev);
	/* Check whether the time interval (HZ jiffies = 1s) has elapsed */
	if (time_after(jiffies, tdata->last_updated + HZ)) {
		rdmsr_on_cpu(tdata->cpu, tdata->status_reg, &eax, &edx);
		/*
		 * Ignore the valid bit. In all observed cases the register
		 * value is either low or zero if the valid bit is 0.
		 * Return it instead of reporting an error which doesn't
		 * really help at all.
		 */
		/* Bits 23:16 hold the readout as a delta below TjMax. */
		tdata->temp = tjmax - ((eax >> 16) & 0xff) * 1000;
		tdata->last_updated = jiffies;
	}

	mutex_unlock(&tdata->update_lock);
	return sprintf(buf, "%d\n", tdata->temp);
}
424 
/*
 * Build and register the sysfs attribute group for one temp_data
 * instance on the hwmon device. rd_ptr[] and suffixes[] are indexed by
 * enum coretemp_attr_index and must stay in that order; only the first
 * tdata->attr_size entries are created (ttarget/"max" is optional).
 */
static int create_core_attrs(struct temp_data *tdata, struct device *dev)
{
	int i;
	static ssize_t (*const rd_ptr[TOTAL_ATTRS]) (struct device *dev,
			struct device_attribute *devattr, char *buf) = {
			show_label, show_crit_alarm, show_temp, show_tjmax,
			show_ttarget };
	static const char *const suffixes[TOTAL_ATTRS] = {
		"label", "crit_alarm", "input", "crit", "max"
	};

	for (i = 0; i < tdata->attr_size; i++) {
		/*
		 * We map the attr number to core id of the CPU
		 * The attr number is always core id + 2
		 * The Pkgtemp will always show up as temp1_*, if available
		 */
		int attr_no = is_pkg_temp_data(tdata) ? 1 : tdata->cpu_core_id + 2;

		snprintf(tdata->attr_name[i], CORETEMP_NAME_LENGTH,
			 "temp%d_%s", attr_no, suffixes[i]);
		sysfs_attr_init(&tdata->sd_attrs[i].attr);
		tdata->sd_attrs[i].attr.name = tdata->attr_name[i];
		tdata->sd_attrs[i].attr.mode = 0444;	/* read-only */
		tdata->sd_attrs[i].show = rd_ptr[i];
		tdata->attrs[i] = &tdata->sd_attrs[i].attr;
	}
	/* attrs[] stays NULL-terminated: tdata is zero-allocated. */
	tdata->attr_group.attrs = tdata->attrs;
	return sysfs_create_group(&dev->kobj, &tdata->attr_group);
}
455 
456 
457 static int chk_ucode_version(unsigned int cpu)
458 {
459 	struct cpuinfo_x86 *c = &cpu_data(cpu);
460 
461 	/*
462 	 * Check if we have problem with errata AE18 of Core processors:
463 	 * Readings might stop update when processor visited too deep sleep,
464 	 * fixed for stepping D0 (6EC).
465 	 */
466 	if (c->x86_vfm == INTEL_CORE_YONAH && c->x86_stepping < 0xc && c->microcode < 0x39) {
467 		pr_err("Errata AE18 not fixed, update BIOS or microcode of the CPU!\n");
468 		return -ENODEV;
469 	}
470 	return 0;
471 }
472 
473 static struct platform_device *coretemp_get_pdev(unsigned int cpu)
474 {
475 	int id = topology_logical_die_id(cpu);
476 
477 	if (id >= 0 && id < max_zones)
478 		return zone_devices[id];
479 	return NULL;
480 }
481 
/*
 * Allocate and initialize a temp_data instance for @cpu. With @pkg_flag
 * set, the instance represents the whole package (index = -1, package
 * thermal status MSR); otherwise a per-core slot in pdata->core_data[]
 * is reserved via the IDA. core_data[] itself is allocated lazily on
 * first use. Returns NULL on allocation or IDA failure.
 */
static struct temp_data *
init_temp_data(struct platform_data *pdata, unsigned int cpu, int pkg_flag)
{
	struct temp_data *tdata;

	if (!pdata->core_data) {
		/*
		 * TODO:
		 * The information of actual possible cores in a package is broken for now.
		 * Will replace hardcoded NUM_REAL_CORES with actual per package core count
		 * when this information becomes available.
		 */
		pdata->nr_cores = NUM_REAL_CORES;
		pdata->core_data = kcalloc(pdata->nr_cores, sizeof(struct temp_data *),
					   GFP_KERNEL);
		if (!pdata->core_data)
			return NULL;
	}

	tdata = kzalloc(sizeof(struct temp_data), GFP_KERNEL);
	if (!tdata)
		return NULL;

	if (pkg_flag) {
		pdata->pkg_data = tdata;
		/* Use tdata->index as indicator of package temp data */
		tdata->index = -1;
	} else {
		tdata->index = ida_alloc_max(&pdata->ida, pdata->nr_cores - 1, GFP_KERNEL);
		if (tdata->index < 0) {
			kfree(tdata);
			return NULL;
		}
		pdata->core_data[tdata->index] = tdata;
	}

	tdata->status_reg = pkg_flag ? MSR_IA32_PACKAGE_THERM_STATUS :
							MSR_IA32_THERM_STATUS;
	tdata->cpu = cpu;
	tdata->cpu_core_id = topology_core_id(cpu);
	tdata->attr_size = MAX_CORE_ATTRS;
	mutex_init(&tdata->update_lock);
	return tdata;
}
526 
/*
 * Undo init_temp_data(): free @tdata after releasing its IDA slot (core
 * instance) or tearing down the package bookkeeping. The core_data[]
 * array is owned by the package instance and freed with it.
 */
static void destroy_temp_data(struct platform_data *pdata, struct temp_data *tdata)
{
	if (is_pkg_temp_data(tdata)) {
		pdata->pkg_data = NULL;
		kfree(pdata->core_data);
		pdata->core_data = NULL;
		pdata->nr_cores = 0;
	} else {
		pdata->core_data[tdata->index] = NULL;
		ida_free(&pdata->ida, tdata->index);
	}
	kfree(tdata);
}
540 
541 static struct temp_data *get_temp_data(struct platform_data *pdata, int cpu)
542 {
543 	int i;
544 
545 	/* cpu < 0 means get pkg temp_data */
546 	if (cpu < 0)
547 		return pdata->pkg_data;
548 
549 	for (i = 0; i < pdata->nr_cores; i++) {
550 		if (pdata->core_data[i] &&
551 		    pdata->core_data[i]->cpu_core_id == topology_core_id(cpu))
552 			return pdata->core_data[i];
553 	}
554 	return NULL;
555 }
556 
/*
 * Create the temp_data and sysfs attributes for one core, or for the
 * package when @pkg_flag is set. CPUs excluded from HK_TYPE_MISC
 * housekeeping are silently skipped. Returns 0 on success or skip,
 * negative errno on failure (everything is rolled back then).
 */
static int create_core_data(struct platform_device *pdev, unsigned int cpu,
			    int pkg_flag)
{
	struct temp_data *tdata;
	struct platform_data *pdata = platform_get_drvdata(pdev);
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	u32 eax, edx;
	int err;

	if (!housekeeping_cpu(cpu, HK_TYPE_MISC))
		return 0;

	tdata = init_temp_data(pdata, cpu, pkg_flag);
	if (!tdata)
		return -ENOMEM;

	/* Test if we can access the status register */
	err = rdmsr_safe_on_cpu(cpu, tdata->status_reg, &eax, &edx);
	if (err)
		goto err;

	/* Make sure tdata->tjmax is a valid indicator for dynamic/static tjmax */
	get_tjmax(tdata, &pdev->dev);

	/*
	 * The target temperature is available on older CPUs but not in the
	 * MSR_IA32_TEMPERATURE_TARGET register. Atoms don't have the register
	 * at all.
	 */
	if (c->x86_vfm > INTEL_CORE_YONAH && c->x86_vfm != INTEL_ATOM_BONNELL)
		if (get_ttarget(tdata, &pdev->dev) >= 0)
			tdata->attr_size++;	/* expose temp%d_max too */

	/* Create sysfs interfaces */
	err = create_core_attrs(tdata, pdata->hwmon_dev);
	if (err)
		goto err;

	return 0;

err:
	destroy_temp_data(pdata, tdata);
	return err;
}
601 
602 static void
603 coretemp_add_core(struct platform_device *pdev, unsigned int cpu, int pkg_flag)
604 {
605 	if (create_core_data(pdev, cpu, pkg_flag))
606 		dev_err(&pdev->dev, "Adding Core %u failed\n", cpu);
607 }
608 
/*
 * Remove the sysfs attributes and free @tdata. @tdata may be NULL when
 * the corresponding add failed earlier; that is a no-op.
 */
static void coretemp_remove_core(struct platform_data *pdata, struct temp_data *tdata)
{
	/* if we errored on add then this is already gone */
	if (!tdata)
		return;

	/* Remove the sysfs attributes */
	sysfs_remove_group(&pdata->hwmon_dev->kobj, &tdata->attr_group);

	destroy_temp_data(pdata, tdata);
}
620 
/*
 * Allocate and register the platform device and platform_data for one
 * zone. Called from coretemp_init() for every possible package/die; the
 * hwmon device itself is registered later, when the zone's first CPU
 * comes online. Returns 0 or a negative errno.
 */
static int coretemp_device_add(int zoneid)
{
	struct platform_device *pdev;
	struct platform_data *pdata;
	int err;

	/* Initialize the per-zone data structures */
	pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	pdata->pkg_id = zoneid;
	ida_init(&pdata->ida);

	pdev = platform_device_alloc(DRVNAME, zoneid);
	if (!pdev) {
		err = -ENOMEM;
		goto err_free_pdata;
	}

	err = platform_device_add(pdev);
	if (err)
		goto err_put_dev;

	platform_set_drvdata(pdev, pdata);
	zone_devices[zoneid] = pdev;
	return 0;

err_put_dev:
	platform_device_put(pdev);
err_free_pdata:
	kfree(pdata);
	return err;
}
655 
/*
 * Counterpart of coretemp_device_add(): free the zone bookkeeping and
 * unregister its platform device.
 */
static void coretemp_device_remove(int zoneid)
{
	struct platform_device *pdev = zone_devices[zoneid];
	struct platform_data *pdata = platform_get_drvdata(pdev);

	ida_destroy(&pdata->ida);
	kfree(pdata);
	platform_device_unregister(pdev);
}
665 
/*
 * CPU hotplug "online" callback. Registers the hwmon device (plus the
 * package sensor, if supported) when the first CPU of a zone comes up,
 * and the per-core sensor when the first thread of a core comes up.
 */
static int coretemp_cpu_online(unsigned int cpu)
{
	struct platform_device *pdev = coretemp_get_pdev(cpu);
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	struct platform_data *pdata;

	/*
	 * Don't execute this on resume as the offline callback did
	 * not get executed on suspend.
	 */
	if (cpuhp_tasks_frozen)
		return 0;

	/*
	 * CPUID.06H.EAX[0] indicates whether the CPU has thermal
	 * sensors. We check this bit only, all the early CPUs
	 * without thermal sensors will be filtered out.
	 */
	if (!cpu_has(c, X86_FEATURE_DTHERM))
		return -ENODEV;

	pdata = platform_get_drvdata(pdev);
	if (!pdata->hwmon_dev) {
		struct device *hwmon;

		/* Check the microcode version of the CPU */
		if (chk_ucode_version(cpu))
			return -EINVAL;

		/*
		 * Alright, we have DTS support.
		 * We are bringing the _first_ core in this pkg
		 * online. So, initialize per-pkg data structures and
		 * then bring this core online.
		 */
		hwmon = hwmon_device_register_with_groups(&pdev->dev, DRVNAME,
							  pdata, NULL);
		if (IS_ERR(hwmon))
			return PTR_ERR(hwmon);
		pdata->hwmon_dev = hwmon;

		/*
		 * Check whether pkgtemp support is available.
		 * If so, add interfaces for pkgtemp.
		 */
		if (cpu_has(c, X86_FEATURE_PTS))
			coretemp_add_core(pdev, cpu, 1);
	}

	/*
	 * Check whether a thread sibling is already online. If not add the
	 * interface for this CPU core.
	 */
	if (!cpumask_intersects(&pdata->cpumask, topology_sibling_cpumask(cpu)))
		coretemp_add_core(pdev, cpu, 0);

	/* Record this CPU as online in its zone. */
	cpumask_set_cpu(cpu, &pdata->cpumask);
	return 0;
}
725 
/*
 * CPU hotplug "offline" callback. Tears down the core interface when
 * the last thread of a core goes down, re-targets core and package
 * sensors that were reading MSRs on this CPU, and unregisters the
 * hwmon device when the zone's last CPU goes down.
 */
static int coretemp_cpu_offline(unsigned int cpu)
{
	struct platform_device *pdev = coretemp_get_pdev(cpu);
	struct platform_data *pd;
	struct temp_data *tdata;
	int target;

	/* No need to tear down any interfaces for suspend */
	if (cpuhp_tasks_frozen)
		return 0;

	/* If the physical CPU device does not exist, just return */
	pd = platform_get_drvdata(pdev);
	if (!pd->hwmon_dev)
		return 0;

	tdata = get_temp_data(pd, cpu);

	cpumask_clear_cpu(cpu, &pd->cpumask);

	/*
	 * If this is the last thread sibling, remove the CPU core
	 * interface, If there is still a sibling online, transfer the
	 * target cpu of that core interface to it.
	 */
	target = cpumask_any_and(&pd->cpumask, topology_sibling_cpumask(cpu));
	if (target >= nr_cpu_ids) {
		coretemp_remove_core(pd, tdata);
	} else if (tdata && tdata->cpu == cpu) {
		mutex_lock(&tdata->update_lock);
		tdata->cpu = target;
		mutex_unlock(&tdata->update_lock);
	}

	/*
	 * If all cores in this pkg are offline, remove the interface.
	 */
	tdata = get_temp_data(pd, -1);	/* the package instance */
	if (cpumask_empty(&pd->cpumask)) {
		if (tdata)
			coretemp_remove_core(pd, tdata);
		hwmon_device_unregister(pd->hwmon_dev);
		pd->hwmon_dev = NULL;
		return 0;
	}

	/*
	 * Check whether this core is the target for the package
	 * interface. We need to assign it to some other cpu.
	 */
	if (tdata && tdata->cpu == cpu) {
		target = cpumask_first(&pd->cpumask);
		mutex_lock(&tdata->update_lock);
		tdata->cpu = target;
		mutex_unlock(&tdata->update_lock);
	}
	return 0;
}
784 static const struct x86_cpu_id __initconst coretemp_ids[] = {
785 	X86_MATCH_VENDOR_FEATURE(INTEL, X86_FEATURE_DTHERM, NULL),
786 	{}
787 };
788 MODULE_DEVICE_TABLE(x86cpu, coretemp_ids);
789 
790 static enum cpuhp_state coretemp_hp_online;
791 
/*
 * Module init: allocate one platform device per possible package/die
 * zone, then install the CPU hotplug callbacks which perform the
 * actual per-CPU sensor setup as CPUs come online.
 */
static int __init coretemp_init(void)
{
	int i, err;

	/*
	 * CPUID.06H.EAX[0] indicates whether the CPU has thermal
	 * sensors. We check this bit only, all the early CPUs
	 * without thermal sensors will be filtered out. This
	 * includes all the Family 5 and Family 15 (Pentium 4)
	 * models, since they never set the CPUID bit.
	 */
	if (!x86_match_cpu(coretemp_ids))
		return -ENODEV;

	max_zones = topology_max_packages() * topology_max_dies_per_package();
	zone_devices = kcalloc(max_zones, sizeof(struct platform_device *),
			      GFP_KERNEL);
	if (!zone_devices)
		return -ENOMEM;

	for (i = 0; i < max_zones; i++) {
		err = coretemp_device_add(i);
		if (err)
			goto outzone;
	}

	err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "hwmon/coretemp:online",
				coretemp_cpu_online, coretemp_cpu_offline);
	if (err < 0)
		goto outzone;
	coretemp_hp_online = err;	/* dynamic state id, for removal */
	return 0;

outzone:
	/* Unwind only the zones added so far (i is the failing index). */
	while (i--)
		coretemp_device_remove(i);
	kfree(zone_devices);
	return err;
}
831 module_init(coretemp_init)
832 
833 static void __exit coretemp_exit(void)
834 {
835 	int i;
836 
837 	cpuhp_remove_state(coretemp_hp_online);
838 	for (i = 0; i < max_zones; i++)
839 		coretemp_device_remove(i);
840 	kfree(zone_devices);
841 }
842 module_exit(coretemp_exit)
843 
844 MODULE_AUTHOR("Rudolf Marek <r.marek@assembler.cz>");
845 MODULE_DESCRIPTION("Intel Core temperature monitor");
846 MODULE_LICENSE("GPL");
847