xref: /linux/drivers/hwmon/coretemp.c (revision 07fdad3a93756b872da7b53647715c48d0f4a2d0)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * coretemp.c - Linux kernel module for hardware monitoring
4  *
5  * Copyright (C) 2007 Rudolf Marek <r.marek@assembler.cz>
6  *
7  * Inspired from many hwmon drivers
8  */
9 
10 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
11 
12 #include <linux/module.h>
13 #include <linux/init.h>
14 #include <linux/slab.h>
15 #include <linux/jiffies.h>
16 #include <linux/hwmon.h>
17 #include <linux/sysfs.h>
18 #include <linux/hwmon-sysfs.h>
19 #include <linux/err.h>
20 #include <linux/mutex.h>
21 #include <linux/list.h>
22 #include <linux/platform_device.h>
23 #include <linux/cpu.h>
24 #include <linux/smp.h>
25 #include <linux/moduleparam.h>
26 #include <linux/pci.h>
27 #include <asm/msr.h>
28 #include <asm/processor.h>
29 #include <asm/cpu_device_id.h>
30 #include <linux/sched/isolation.h>
31 
#define DRVNAME	"coretemp"

/*
 * force_tjmax only matters when TjMax can't be read from the CPU itself.
 * When set, it replaces the driver's suboptimal heuristic.
 */
static int force_tjmax;
module_param_named(tjmax, force_tjmax, int, 0444);	/* read-only in sysfs */
MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius");

#define NUM_REAL_CORES		512	/* Number of Real cores per cpu */
#define CORETEMP_NAME_LENGTH	28	/* String Length of attrs */
44 
/* Index of each sysfs attribute within temp_data->sd_attrs[] */
enum coretemp_attr_index {
	ATTR_LABEL,	/* tempX_label */
	ATTR_CRIT_ALARM,	/* tempX_crit_alarm */
	ATTR_TEMP,	/* tempX_input */
	ATTR_TJMAX,	/* tempX_crit */
	ATTR_TTARGET,	/* tempX_max, only when ttarget is readable */
	MAX_CORE_ATTRS = ATTR_TJMAX + 1,	/* Maximum no of basic attrs */
	TOTAL_ATTRS = ATTR_TTARGET + 1		/* Maximum no of possible attrs */
};
54 
#ifdef CONFIG_SMP
#define for_each_sibling(i, cpu) \
	for_each_cpu(i, topology_sibling_cpumask(cpu))
#else
/* UP build: a CPU has no siblings, so the loop body never executes */
#define for_each_sibling(i, cpu)	for (i = 0; false; )
#endif
61 
/*
 * Per-Core Temperature Data
 * @temp:	Cached temperature reading in millidegrees Celsius.
 * @tjmax:	The static tjmax value when tjmax cannot be retrieved from
 *		IA32_TEMPERATURE_TARGET MSR.
 * @last_updated: The time when the current temperature value was updated
 *		earlier (in jiffies).
 * @cpu:	CPU number the MSRs are read on (passed to rdmsr*_on_cpu());
 *		re-targeted on CPU offline to a surviving sibling.
 * @index:	Slot in platform_data->core_data[], or negative for the
 *		package sensor (see is_pkg_temp_data()).
 * @cpu_core_id: Topology core id; used for the sysfs label and for the
 *		tempX_* attribute numbering.
 * @status_reg: One of IA32_THERM_STATUS or IA32_PACKAGE_THERM_STATUS,
 *		from where the temperature values should be read.
 * @attr_size:  Total number of per-core attrs displayed in the sysfs.
 * @sd_attrs:	device_attribute descriptors backing the sysfs files.
 * @attr_name:	Storage for the generated attribute names.
 * @attrs:	NULL-terminated pointer array feeding @attr_group.
 * @attr_group: The sysfs group created for this sensor.
 * @update_lock: Serializes MSR access and updates of @temp and @cpu.
 */
struct temp_data {
	int temp;
	int tjmax;
	unsigned long last_updated;
	unsigned int cpu;
	int index;
	u32 cpu_core_id;
	u32 status_reg;
	int attr_size;
	struct device_attribute sd_attrs[TOTAL_ATTRS];
	char attr_name[TOTAL_ATTRS][CORETEMP_NAME_LENGTH];
	struct attribute *attrs[TOTAL_ATTRS + 1];
	struct attribute_group attr_group;
	struct mutex update_lock;
};
89 
/*
 * Platform Data per Physical CPU (one instance per hwmon zone/die)
 * @hwmon_dev:	hwmon device registered for this zone.
 * @pkg_id:	Zone (package/die) id shown in the package label.
 * @nr_cores:	Size of @core_data (currently hardcoded NUM_REAL_CORES).
 * @ida:	Allocator for slots in @core_data.
 * @cpumask:	Online CPUs belonging to this zone.
 * @pkg_data:	Package-level sensor data, if PTS is supported.
 * @core_data:	Per-core sensor data, indexed by temp_data->index.
 */
struct platform_data {
	struct device		*hwmon_dev;
	u16			pkg_id;
	int			nr_cores;
	struct ida		ida;
	struct cpumask		cpumask;
	struct temp_data	*pkg_data;
	struct temp_data	**core_data;
	struct device_attribute name_attr;
};
101 
/* TjMax keyed on the PCI host bridge device id */
struct tjmax_pci {
	unsigned int device;	/* PCI device id of the host bridge */
	int tjmax;		/* TjMax in millidegrees Celsius */
};
106 
static const struct tjmax_pci tjmax_pci_table[] = {
	{ 0x0708, 110000 },	/* CE41x0 (Sodaville) */
	{ 0x0c72, 102000 },	/* Atom S1240 (Centerton) */
	{ 0x0c73, 95000 },	/* Atom S1220 (Centerton) */
	{ 0x0c75, 95000 },	/* Atom S1260 (Centerton) */
};
113 
/* TjMax keyed on a substring of the CPU model string */
struct tjmax {
	char const *id;		/* substring searched in x86_model_id */
	int tjmax;		/* TjMax in millidegrees Celsius */
};
118 
static const struct tjmax tjmax_table[] = {
	{ "CPU  230", 100000 },		/* Model 0x1c, stepping 2	*/
	{ "CPU  330", 125000 },		/* Model 0x1c, stepping 2	*/
};
123 
/* TjMax keyed on vendor/family/model plus an optional stepping match */
struct tjmax_model {
	u32 vfm;		/* vendor/family/model, as in x86_vfm */
	u8 stepping_mask;	/* exact stepping to match, or ANY */
	int tjmax;		/* TjMax in millidegrees Celsius */
};
129 
/* Wildcard stepping_mask: entry matches any stepping */
#define ANY 0xff

static const struct tjmax_model tjmax_model_table[] = {
	{ INTEL_ATOM_BONNELL,	      10,  100000 },	/* D4xx, K4xx, N4xx, D5xx, K5xx, N5xx */
	{ INTEL_ATOM_BONNELL,	      ANY, 90000 },	/* Z5xx, N2xx, possibly others
							 * Note: Also matches 230 and 330,
							 * which are covered by tjmax_table
							 */
	{ INTEL_ATOM_BONNELL_MID,     ANY, 90000 },	/* Atom Tunnel Creek (Exx), Lincroft (Z6xx)
							 * Note: TjMax for E6xxT is 110C, but CPU type
							 * is undetectable by software
							 */
	{ INTEL_ATOM_SALTWELL_MID,    ANY, 90000 },	/* Atom Medfield (Z2460) */
	{ INTEL_ATOM_SALTWELL_TABLET, ANY, 90000 },	/* Atom Clover Trail/Cloverview (Z27x0) */
	{ INTEL_ATOM_SALTWELL,	      ANY, 100000 },	/* Atom Cedar Trail/Cedarview (N2xxx, D2xxx)
							 * Also matches S12x0 (stepping 9), covered by
							 * PCI table
							 */
};
149 
150 static bool is_pkg_temp_data(struct temp_data *tdata)
151 {
152 	return tdata->index < 0;
153 }
154 
/*
 * adjust_tjmax - heuristically determine TjMax for CPUs where it cannot
 * be read from IA32_TEMPERATURE_TARGET.
 * @c:   cpuinfo of the CPU being probed
 * @id:  CPU number used for rdmsr_safe_on_cpu()
 * @dev: device used for diagnostics
 *
 * Lookup order: PCI host bridge id table, model-string table,
 * model/stepping table, then a mobile-CPU heuristic based on
 * MSRs 0x17 and 0xEE. Returns TjMax in millidegrees Celsius.
 */
static int adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
{
	/* The 100C is default for both mobile and non mobile CPUs */

	int tjmax = 100000;
	int tjmax_ee = 85000;
	int usemsr_ee = 1;
	int err;
	u32 eax, edx;
	int i;
	u16 devfn = PCI_DEVFN(0, 0);
	struct pci_dev *host_bridge = pci_get_domain_bus_and_slot(0, 0, devfn);

	/*
	 * Explicit tjmax table entries override heuristics.
	 * First try PCI host bridge IDs, followed by model ID strings
	 * and model/stepping information.
	 */
	if (host_bridge && host_bridge->vendor == PCI_VENDOR_ID_INTEL) {
		for (i = 0; i < ARRAY_SIZE(tjmax_pci_table); i++) {
			if (host_bridge->device == tjmax_pci_table[i].device) {
				pci_dev_put(host_bridge);
				return tjmax_pci_table[i].tjmax;
			}
		}
	}
	/* pci_dev_put() is NULL-safe, so no check is needed here */
	pci_dev_put(host_bridge);

	/*
	 * This is literally looking for "CPU  XXX" in the model string.
	 * Not checking it against the model as well. Just purely a
	 * string search.
	 */
	for (i = 0; i < ARRAY_SIZE(tjmax_table); i++) {
		if (strstr(c->x86_model_id, tjmax_table[i].id))
			return tjmax_table[i].tjmax;
	}

	for (i = 0; i < ARRAY_SIZE(tjmax_model_table); i++) {
		const struct tjmax_model *tm = &tjmax_model_table[i];
		if (c->x86_vfm == tm->vfm &&
		    (tm->stepping_mask == ANY ||
		     tm->stepping_mask == c->x86_stepping))
			return tm->tjmax;
	}

	/* Early chips have no MSR for TjMax */

	if (c->x86_vfm == INTEL_CORE2_MEROM && c->x86_stepping < 4)
		usemsr_ee = 0;

	if (c->x86_vfm > INTEL_CORE_YONAH && usemsr_ee) {
		u8 platform_id;

		/*
		 * Now we can detect the mobile CPU using Intel provided table
		 * http://softwarecommunity.intel.com/Wiki/Mobility/720.htm
		 * For Core2 cores, check MSR 0x17, bit 28 1 = Mobile CPU
		 */
		err = rdmsr_safe_on_cpu(id, 0x17, &eax, &edx);
		if (err) {
			dev_warn(dev,
				 "Unable to access MSR 0x17, assuming desktop"
				 " CPU\n");
			usemsr_ee = 0;
		} else if (c->x86_vfm < INTEL_CORE2_PENRYN &&
			   !(eax & 0x10000000)) {
			/*
			 * Trust bit 28 up to Penryn, I could not find any
			 * documentation on that; if you happen to know
			 * someone at Intel please ask
			 */
			usemsr_ee = 0;
		} else {
			/* Platform ID bits 52:50 (EDX starts at bit 32) */
			platform_id = (edx >> 18) & 0x7;

			/*
			 * Mobile Penryn CPU seems to be platform ID 7 or 5
			 * (guesswork)
			 */
			if (c->x86_vfm == INTEL_CORE2_PENRYN &&
			    (platform_id == 5 || platform_id == 7)) {
				/*
				 * If MSR EE bit is set, set it to 90 degrees C,
				 * otherwise 105 degrees C
				 */
				tjmax_ee = 90000;
				tjmax = 105000;
			}
		}
	}

	if (usemsr_ee) {
		err = rdmsr_safe_on_cpu(id, 0xee, &eax, &edx);
		if (err) {
			dev_warn(dev,
				 "Unable to access MSR 0xEE, for Tjmax, left"
				 " at default\n");
		} else if (eax & 0x40000000) {
			tjmax = tjmax_ee;
		}
	} else if (tjmax == 100000) {
		/*
		 * If we don't use msr EE it means we are desktop CPU
		 * (with exception of Atom)
		 */
		dev_warn(dev, "Using relative temperature scale!\n");
	}

	return tjmax;
}
267 
/*
 * get_tjmax - return TjMax in millidegrees Celsius for a sensor
 * @tdata: sensor data; tdata->tjmax caches a static fallback value
 * @dev:   device used for diagnostics
 *
 * Prefers the live value from IA32_TEMPERATURE_TARGET (re-read on every
 * call while readable). Only when the MSR yields nothing does it fall
 * back to the user-forced value or the adjust_tjmax() heuristic, which
 * is then cached in tdata->tjmax. Must be called with tdata->update_lock
 * held by sysfs readers (see show_tjmax()/show_temp()).
 */
static int get_tjmax(struct temp_data *tdata, struct device *dev)
{
	struct cpuinfo_x86 *c = &cpu_data(tdata->cpu);
	int err;
	u32 eax, edx;
	u32 val;

	/* use static tjmax once it is set */
	if (tdata->tjmax)
		return tdata->tjmax;

	/*
	 * A new feature of current Intel(R) processors, the
	 * IA32_TEMPERATURE_TARGET contains the TjMax value
	 */
	err = rdmsr_safe_on_cpu(tdata->cpu, MSR_IA32_TEMPERATURE_TARGET, &eax, &edx);
	if (err) {
		dev_warn_once(dev, "Unable to read TjMax from CPU %u\n", tdata->cpu);
	} else {
		/* TjMax is encoded in bits 23:16, in degrees Celsius */
		val = (eax >> 16) & 0xff;
		if (val)
			return val * 1000;
	}

	if (force_tjmax) {
		dev_notice(dev, "TjMax forced to %d degrees C by user\n",
			   force_tjmax);
		tdata->tjmax = force_tjmax * 1000;
	} else {
		/*
		 * An assumption is made for early CPUs and unreadable MSR.
		 * NOTE: the calculated value may not be correct.
		 */
		tdata->tjmax = adjust_tjmax(c, tdata->cpu, dev);
	}
	return tdata->tjmax;
}
305 
/*
 * get_ttarget - return the thermal target temperature in millidegrees C
 * @tdata: sensor data
 * @dev:   device (unused here, kept for symmetry with get_tjmax())
 *
 * Returns a negative errno when the target is unavailable: -ENODEV if
 * TjMax came from the static fallback (tdata->tjmax set), or the
 * rdmsr error code.
 */
static int get_ttarget(struct temp_data *tdata, struct device *dev)
{
	u32 eax, edx;
	int tjmax, ttarget_offset, ret;

	/*
	 * ttarget is valid only if tjmax can be retrieved from
	 * MSR_IA32_TEMPERATURE_TARGET
	 */
	if (tdata->tjmax)
		return -ENODEV;

	ret = rdmsr_safe_on_cpu(tdata->cpu, MSR_IA32_TEMPERATURE_TARGET, &eax, &edx);
	if (ret)
		return ret;

	/* TjMax in bits 23:16, in degrees Celsius */
	tjmax = (eax >> 16) & 0xff;

	/* Read the still undocumented bits 8:15 of IA32_TEMPERATURE_TARGET. */
	ttarget_offset = (eax >> 8) & 0xff;

	return (tjmax - ttarget_offset) * 1000;
}
329 
/* Keep track of how many zone pointers we allocated in init() */
static int max_zones __read_mostly;
/* Array of zone pointers, one per package/die. Serialized by cpu hotplug lock */
static struct platform_device **zone_devices;
334 
335 static ssize_t show_label(struct device *dev,
336 				struct device_attribute *devattr, char *buf)
337 {
338 	struct platform_data *pdata = dev_get_drvdata(dev);
339 	struct temp_data *tdata = container_of(devattr, struct temp_data, sd_attrs[ATTR_LABEL]);
340 
341 	if (is_pkg_temp_data(tdata))
342 		return sprintf(buf, "Package id %u\n", pdata->pkg_id);
343 
344 	return sprintf(buf, "Core %u\n", tdata->cpu_core_id);
345 }
346 
347 static ssize_t show_crit_alarm(struct device *dev,
348 				struct device_attribute *devattr, char *buf)
349 {
350 	u32 eax, edx;
351 	struct temp_data *tdata = container_of(devattr, struct temp_data,
352 						sd_attrs[ATTR_CRIT_ALARM]);
353 
354 	mutex_lock(&tdata->update_lock);
355 	rdmsr_on_cpu(tdata->cpu, tdata->status_reg, &eax, &edx);
356 	mutex_unlock(&tdata->update_lock);
357 
358 	return sprintf(buf, "%d\n", (eax >> 5) & 1);
359 }
360 
361 static ssize_t show_tjmax(struct device *dev,
362 			struct device_attribute *devattr, char *buf)
363 {
364 	struct temp_data *tdata = container_of(devattr, struct temp_data, sd_attrs[ATTR_TJMAX]);
365 	int tjmax;
366 
367 	mutex_lock(&tdata->update_lock);
368 	tjmax = get_tjmax(tdata, dev);
369 	mutex_unlock(&tdata->update_lock);
370 
371 	return sprintf(buf, "%d\n", tjmax);
372 }
373 
374 static ssize_t show_ttarget(struct device *dev,
375 				struct device_attribute *devattr, char *buf)
376 {
377 	struct temp_data *tdata = container_of(devattr, struct temp_data, sd_attrs[ATTR_TTARGET]);
378 	int ttarget;
379 
380 	mutex_lock(&tdata->update_lock);
381 	ttarget = get_ttarget(tdata, dev);
382 	mutex_unlock(&tdata->update_lock);
383 
384 	if (ttarget < 0)
385 		return ttarget;
386 	return sprintf(buf, "%d\n", ttarget);
387 }
388 
/*
 * sysfs tempX_input: temperature in millidegrees Celsius, computed as
 * TjMax minus the digital readout. Readings are cached for one second
 * (HZ jiffies) in tdata->temp to limit cross-CPU MSR traffic.
 */
static ssize_t show_temp(struct device *dev,
			struct device_attribute *devattr, char *buf)
{
	u32 eax, edx;
	struct temp_data *tdata = container_of(devattr, struct temp_data, sd_attrs[ATTR_TEMP]);
	int tjmax;

	mutex_lock(&tdata->update_lock);

	tjmax = get_tjmax(tdata, dev);
	/* Check whether the time interval has elapsed */
	if (time_after(jiffies, tdata->last_updated + HZ)) {
		rdmsr_on_cpu(tdata->cpu, tdata->status_reg, &eax, &edx);
		/*
		 * Ignore the valid bit. In all observed cases the register
		 * value is either low or zero if the valid bit is 0.
		 * Return it instead of reporting an error which doesn't
		 * really help at all.
		 */
		/* Digital readout is in bits 22:16, in degrees below TjMax */
		tdata->temp = tjmax - ((eax >> 16) & 0xff) * 1000;
		tdata->last_updated = jiffies;
	}

	mutex_unlock(&tdata->update_lock);
	return sprintf(buf, "%d\n", tdata->temp);
}
415 
/*
 * create_core_attrs - build and register the sysfs group for one sensor
 * @tdata: sensor data; tdata->attr_size selects how many attrs to create
 *         (MAX_CORE_ATTRS, plus ATTR_TTARGET when available)
 * @dev:   hwmon device the group is attached to
 *
 * Returns 0 on success or the sysfs_create_group() error.
 */
static int create_core_attrs(struct temp_data *tdata, struct device *dev)
{
	int i;
	/* Show callbacks, indexed by enum coretemp_attr_index */
	static ssize_t (*const rd_ptr[TOTAL_ATTRS]) (struct device *dev,
			struct device_attribute *devattr, char *buf) = {
			show_label, show_crit_alarm, show_temp, show_tjmax,
			show_ttarget };
	/* Attribute name suffixes, same order as rd_ptr[] */
	static const char *const suffixes[TOTAL_ATTRS] = {
		"label", "crit_alarm", "input", "crit", "max"
	};

	for (i = 0; i < tdata->attr_size; i++) {
		/*
		 * We map the attr number to core id of the CPU
		 * The attr number is always core id + 2
		 * The Pkgtemp will always show up as temp1_*, if available
		 */
		int attr_no = is_pkg_temp_data(tdata) ? 1 : tdata->cpu_core_id + 2;

		snprintf(tdata->attr_name[i], CORETEMP_NAME_LENGTH,
			 "temp%d_%s", attr_no, suffixes[i]);
		sysfs_attr_init(&tdata->sd_attrs[i].attr);
		tdata->sd_attrs[i].attr.name = tdata->attr_name[i];
		tdata->sd_attrs[i].attr.mode = 0444;
		tdata->sd_attrs[i].show = rd_ptr[i];
		tdata->attrs[i] = &tdata->sd_attrs[i].attr;
	}
	/* tdata->attrs is zero-initialized, so the array is NULL-terminated */
	tdata->attr_group.attrs = tdata->attrs;
	return sysfs_create_group(&dev->kobj, &tdata->attr_group);
}
446 
447 
448 static int chk_ucode_version(unsigned int cpu)
449 {
450 	struct cpuinfo_x86 *c = &cpu_data(cpu);
451 
452 	/*
453 	 * Check if we have problem with errata AE18 of Core processors:
454 	 * Readings might stop update when processor visited too deep sleep,
455 	 * fixed for stepping D0 (6EC).
456 	 */
457 	if (c->x86_vfm == INTEL_CORE_YONAH && c->x86_stepping < 0xc && c->microcode < 0x39) {
458 		pr_err("Errata AE18 not fixed, update BIOS or microcode of the CPU!\n");
459 		return -ENODEV;
460 	}
461 	return 0;
462 }
463 
464 static struct platform_device *coretemp_get_pdev(unsigned int cpu)
465 {
466 	int id = topology_logical_die_id(cpu);
467 
468 	if (id >= 0 && id < max_zones)
469 		return zone_devices[id];
470 	return NULL;
471 }
472 
/*
 * init_temp_data - allocate and initialize a temp_data instance
 * @pdata:    zone data; core_data[] is lazily allocated on first use
 * @cpu:      CPU the sensor's MSRs will be read on
 * @pkg_flag: non-zero to create the package sensor instead of a core one
 *
 * On success the instance is linked into @pdata (pkg_data or a
 * core_data[] slot) and returned; on allocation failure returns NULL.
 */
static struct temp_data *
init_temp_data(struct platform_data *pdata, unsigned int cpu, int pkg_flag)
{
	struct temp_data *tdata;

	if (!pdata->core_data) {
		/*
		 * TODO:
		 * The information of actual possible cores in a package is broken for now.
		 * Will replace hardcoded NUM_REAL_CORES with actual per package core count
		 * when this information becomes available.
		 */
		pdata->nr_cores = NUM_REAL_CORES;
		pdata->core_data = kcalloc(pdata->nr_cores, sizeof(struct temp_data *),
					   GFP_KERNEL);
		if (!pdata->core_data)
			return NULL;
	}

	tdata = kzalloc(sizeof(struct temp_data), GFP_KERNEL);
	if (!tdata)
		return NULL;

	if (pkg_flag) {
		pdata->pkg_data = tdata;
		/* Use tdata->index as indicator of package temp data */
		tdata->index = -1;
	} else {
		tdata->index = ida_alloc_max(&pdata->ida, pdata->nr_cores - 1, GFP_KERNEL);
		if (tdata->index < 0) {
			kfree(tdata);
			return NULL;
		}
		pdata->core_data[tdata->index] = tdata;
	}

	/* Package sensor reads the package-level MSR, cores the per-core one */
	tdata->status_reg = pkg_flag ? MSR_IA32_PACKAGE_THERM_STATUS :
							MSR_IA32_THERM_STATUS;
	tdata->cpu = cpu;
	tdata->cpu_core_id = topology_core_id(cpu);
	tdata->attr_size = MAX_CORE_ATTRS;
	mutex_init(&tdata->update_lock);
	return tdata;
}
517 
518 static void destroy_temp_data(struct platform_data *pdata, struct temp_data *tdata)
519 {
520 	if (is_pkg_temp_data(tdata)) {
521 		pdata->pkg_data = NULL;
522 		kfree(pdata->core_data);
523 		pdata->core_data = NULL;
524 		pdata->nr_cores = 0;
525 	} else {
526 		pdata->core_data[tdata->index] = NULL;
527 		ida_free(&pdata->ida, tdata->index);
528 	}
529 	kfree(tdata);
530 }
531 
532 static struct temp_data *get_temp_data(struct platform_data *pdata, int cpu)
533 {
534 	int i;
535 
536 	/* cpu < 0 means get pkg temp_data */
537 	if (cpu < 0)
538 		return pdata->pkg_data;
539 
540 	for (i = 0; i < pdata->nr_cores; i++) {
541 		if (pdata->core_data[i] &&
542 		    pdata->core_data[i]->cpu_core_id == topology_core_id(cpu))
543 			return pdata->core_data[i];
544 	}
545 	return NULL;
546 }
547 
/*
 * create_core_data - create one sensor (core or package) and its sysfs files
 * @pdev:     zone platform device
 * @cpu:      CPU backing the sensor
 * @pkg_flag: non-zero for the package sensor
 *
 * Returns 0 on success (including the no-op case of a non-housekeeping
 * CPU) or a negative errno; on error the partially initialized
 * temp_data is destroyed.
 */
static int create_core_data(struct platform_device *pdev, unsigned int cpu,
			    int pkg_flag)
{
	struct temp_data *tdata;
	struct platform_data *pdata = platform_get_drvdata(pdev);
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	u32 eax, edx;
	int err;

	/* Don't create sensors on CPUs excluded from housekeeping work */
	if (!housekeeping_cpu(cpu, HK_TYPE_MISC))
		return 0;

	tdata = init_temp_data(pdata, cpu, pkg_flag);
	if (!tdata)
		return -ENOMEM;

	/* Test if we can access the status register */
	err = rdmsr_safe_on_cpu(cpu, tdata->status_reg, &eax, &edx);
	if (err)
		goto err;

	/* Make sure tdata->tjmax is a valid indicator for dynamic/static tjmax */
	get_tjmax(tdata, &pdev->dev);

	/*
	 * The target temperature is available on older CPUs but not in the
	 * MSR_IA32_TEMPERATURE_TARGET register. Atoms don't have the register
	 * at all.
	 */
	if (c->x86_vfm > INTEL_CORE_YONAH && c->x86_vfm != INTEL_ATOM_BONNELL)
		if (get_ttarget(tdata, &pdev->dev) >= 0)
			tdata->attr_size++;

	/* Create sysfs interfaces */
	err = create_core_attrs(tdata, pdata->hwmon_dev);
	if (err)
		goto err;

	return 0;

err:
	destroy_temp_data(pdata, tdata);
	return err;
}
592 
593 static void
594 coretemp_add_core(struct platform_device *pdev, unsigned int cpu, int pkg_flag)
595 {
596 	if (create_core_data(pdev, cpu, pkg_flag))
597 		dev_err(&pdev->dev, "Adding Core %u failed\n", cpu);
598 }
599 
/*
 * coretemp_remove_core - remove a sensor's sysfs files and free it
 * @pdata: zone data
 * @tdata: sensor to remove; NULL is tolerated (failed/skipped add)
 */
static void coretemp_remove_core(struct platform_data *pdata, struct temp_data *tdata)
{
	/* if we errored on add then this is already gone */
	if (!tdata)
		return;

	/* Remove the sysfs attributes */
	sysfs_remove_group(&pdata->hwmon_dev->kobj, &tdata->attr_group);

	destroy_temp_data(pdata, tdata);
}
611 
/*
 * coretemp_device_add - allocate and register the platform device for a zone
 * @zoneid: logical package/die id; doubles as platform device id and pkg_id
 *
 * Returns 0 on success and stores the device in zone_devices[zoneid];
 * on failure everything is rolled back and a negative errno returned.
 */
static int coretemp_device_add(int zoneid)
{
	struct platform_device *pdev;
	struct platform_data *pdata;
	int err;

	/* Initialize the per-zone data structures */
	pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	pdata->pkg_id = zoneid;
	ida_init(&pdata->ida);

	pdev = platform_device_alloc(DRVNAME, zoneid);
	if (!pdev) {
		err = -ENOMEM;
		goto err_free_pdata;
	}

	err = platform_device_add(pdev);
	if (err)
		goto err_put_dev;

	platform_set_drvdata(pdev, pdata);
	zone_devices[zoneid] = pdev;
	return 0;

err_put_dev:
	/* platform_device_put() also frees the device allocation */
	platform_device_put(pdev);
err_free_pdata:
	kfree(pdata);
	return err;
}
646 
/*
 * coretemp_device_remove - tear down one zone's platform device
 * @zoneid: index into zone_devices[]
 *
 * NOTE(review): pdata is freed before platform_device_unregister(); no
 * platform_driver is registered by this file, so no driver callback
 * dereferences drvdata during unregister — confirm nothing else does.
 */
static void coretemp_device_remove(int zoneid)
{
	struct platform_device *pdev = zone_devices[zoneid];
	struct platform_data *pdata = platform_get_drvdata(pdev);

	ida_destroy(&pdata->ida);
	kfree(pdata);
	platform_device_unregister(pdev);
}
656 
/*
 * coretemp_cpu_online - cpuhp callback for a CPU coming online
 * @cpu: the CPU being brought up
 *
 * Registers the zone's hwmon device (plus the package sensor, if PTS is
 * supported) on the first CPU of the zone, then adds the per-core
 * sensor unless a thread sibling already provides it.
 */
static int coretemp_cpu_online(unsigned int cpu)
{
	struct platform_device *pdev = coretemp_get_pdev(cpu);
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	struct platform_data *pdata;

	/*
	 * Don't execute this on resume as the offline callback did
	 * not get executed on suspend.
	 */
	if (cpuhp_tasks_frozen)
		return 0;

	/*
	 * CPUID.06H.EAX[0] indicates whether the CPU has thermal
	 * sensors. We check this bit only, all the early CPUs
	 * without thermal sensors will be filtered out.
	 */
	if (!cpu_has(c, X86_FEATURE_DTHERM))
		return -ENODEV;

	pdata = platform_get_drvdata(pdev);
	if (!pdata->hwmon_dev) {
		struct device *hwmon;

		/* Check the microcode version of the CPU */
		if (chk_ucode_version(cpu))
			return -EINVAL;

		/*
		 * Alright, we have DTS support.
		 * We are bringing the _first_ core in this pkg
		 * online. So, initialize per-pkg data structures and
		 * then bring this core online.
		 */
		hwmon = hwmon_device_register_with_groups(&pdev->dev, DRVNAME,
							  pdata, NULL);
		if (IS_ERR(hwmon))
			return PTR_ERR(hwmon);
		pdata->hwmon_dev = hwmon;

		/*
		 * Check whether pkgtemp support is available.
		 * If so, add interfaces for pkgtemp.
		 */
		if (cpu_has(c, X86_FEATURE_PTS))
			coretemp_add_core(pdev, cpu, 1);
	}

	/*
	 * Check whether a thread sibling is already online. If not add the
	 * interface for this CPU core.
	 */
	if (!cpumask_intersects(&pdata->cpumask, topology_sibling_cpumask(cpu)))
		coretemp_add_core(pdev, cpu, 0);

	/* Record this CPU as online in its zone */
	cpumask_set_cpu(cpu, &pdata->cpumask);
	return 0;
}
716 
/*
 * coretemp_cpu_offline - cpuhp callback for a CPU going offline
 * @cpu: the CPU being taken down
 *
 * Removes the core sensor when its last thread sibling goes away (or
 * re-targets it to a surviving sibling), and tears down the package
 * sensor and hwmon device when the whole zone empties.
 */
static int coretemp_cpu_offline(unsigned int cpu)
{
	struct platform_device *pdev = coretemp_get_pdev(cpu);
	struct platform_data *pd;
	struct temp_data *tdata;
	int target;

	/* No need to tear down any interfaces for suspend */
	if (cpuhp_tasks_frozen)
		return 0;

	/* If the physical CPU device does not exist, just return */
	pd = platform_get_drvdata(pdev);
	if (!pd->hwmon_dev)
		return 0;

	tdata = get_temp_data(pd, cpu);

	cpumask_clear_cpu(cpu, &pd->cpumask);

	/*
	 * If this is the last thread sibling, remove the CPU core
	 * interface, If there is still a sibling online, transfer the
	 * target cpu of that core interface to it.
	 */
	target = cpumask_any_and(&pd->cpumask, topology_sibling_cpumask(cpu));
	if (target >= nr_cpu_ids) {
		coretemp_remove_core(pd, tdata);
	} else if (tdata && tdata->cpu == cpu) {
		/* Hold the lock so in-flight sysfs reads see a valid cpu */
		mutex_lock(&tdata->update_lock);
		tdata->cpu = target;
		mutex_unlock(&tdata->update_lock);
	}

	/*
	 * If all cores in this pkg are offline, remove the interface.
	 */
	tdata = get_temp_data(pd, -1);
	if (cpumask_empty(&pd->cpumask)) {
		if (tdata)
			coretemp_remove_core(pd, tdata);
		hwmon_device_unregister(pd->hwmon_dev);
		pd->hwmon_dev = NULL;
		return 0;
	}

	/*
	 * Check whether this core is the target for the package
	 * interface. We need to assign it to some other cpu.
	 */
	if (tdata && tdata->cpu == cpu) {
		target = cpumask_first(&pd->cpumask);
		mutex_lock(&tdata->update_lock);
		tdata->cpu = target;
		mutex_unlock(&tdata->update_lock);
	}
	return 0;
}
/* Bind to any Intel CPU advertising the digital thermal sensor feature */
static const struct x86_cpu_id __initconst coretemp_ids[] = {
	X86_MATCH_VENDOR_FEATURE(INTEL, X86_FEATURE_DTHERM, NULL),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, coretemp_ids);

/* Dynamic cpuhp state from cpuhp_setup_state(), needed for teardown */
static enum cpuhp_state coretemp_hp_online;
782 
/*
 * coretemp_init - module entry point
 *
 * Allocates one platform device per package/die zone, then installs the
 * CPU hotplug callbacks which create the actual hwmon devices as CPUs
 * come online. Returns 0 on success or a negative errno after undoing
 * all zone registrations.
 */
static int __init coretemp_init(void)
{
	int i, err;

	/*
	 * CPUID.06H.EAX[0] indicates whether the CPU has thermal
	 * sensors. We check this bit only, all the early CPUs
	 * without thermal sensors will be filtered out. This
	 * includes all the Family 5 and Family 15 (Pentium 4)
	 * models, since they never set the CPUID bit.
	 */
	if (!x86_match_cpu(coretemp_ids))
		return -ENODEV;

	max_zones = topology_max_packages() * topology_max_dies_per_package();
	zone_devices = kcalloc(max_zones, sizeof(struct platform_device *),
			      GFP_KERNEL);
	if (!zone_devices)
		return -ENOMEM;

	for (i = 0; i < max_zones; i++) {
		err = coretemp_device_add(i);
		if (err)
			goto outzone;
	}

	err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "hwmon/coretemp:online",
				coretemp_cpu_online, coretemp_cpu_offline);
	if (err < 0)
		goto outzone;
	coretemp_hp_online = err;
	return 0;

outzone:
	/* i is the number of successfully added zones; remove them in reverse */
	while (i--)
		coretemp_device_remove(i);
	kfree(zone_devices);
	return err;
}
module_init(coretemp_init)
823 
/*
 * coretemp_exit - module exit point
 *
 * Removing the cpuhp state runs the offline callback on every online
 * CPU, tearing down all hwmon devices before the zones are removed.
 */
static void __exit coretemp_exit(void)
{
	int i;

	cpuhp_remove_state(coretemp_hp_online);
	for (i = 0; i < max_zones; i++)
		coretemp_device_remove(i);
	kfree(zone_devices);
}
module_exit(coretemp_exit)
834 
835 MODULE_AUTHOR("Rudolf Marek <r.marek@assembler.cz>");
836 MODULE_DESCRIPTION("Intel Core temperature monitor");
837 MODULE_LICENSE("GPL");
838