/*
 * coretemp.c - Linux kernel module for hardware monitoring
 *
 * Copyright (C) 2007 Rudolf Marek <r.marek@assembler.cz>
 *
 * Inspired from many hwmon drivers
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301 USA.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/hwmon.h>
#include <linux/sysfs.h>
#include <linux/hwmon-sysfs.h>
#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/platform_device.h>
#include <linux/cpu.h>
#include <linux/pci.h>
#include <linux/smp.h>
#include <asm/msr.h>
#include <asm/processor.h>

#define DRVNAME	"coretemp"

#define BASE_SYSFS_ATTR_NO	2	/* Sysfs Base attr no for coretemp */
#define NUM_REAL_CORES		16	/* Number of Real cores per cpu */
#define CORETEMP_NAME_LENGTH	17	/* String Length of attrs */
#define MAX_ATTRS		5	/* Maximum no of per-core attrs */
#define MAX_CORE_DATA		(NUM_REAL_CORES + BASE_SYSFS_ATTR_NO)

#ifdef CONFIG_SMP
#define TO_PHYS_ID(cpu)		cpu_data(cpu).phys_proc_id
#define TO_CORE_ID(cpu)		cpu_data(cpu).cpu_core_id
#define TO_ATTR_NO(cpu)		(TO_CORE_ID(cpu) + BASE_SYSFS_ATTR_NO)
#define for_each_sibling(i, cpu)	for_each_cpu(i, cpu_sibling_mask(cpu))
#else
#define TO_PHYS_ID(cpu)		(cpu)
#define TO_CORE_ID(cpu)		(cpu)
#define TO_ATTR_NO(cpu)		(cpu)
#define for_each_sibling(i, cpu)	for (i = 0; false; )
#endif
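
/*
 * Sysfs attribute numbering: each core is exported as tempX_* with
 * X = core ID + BASE_SYSFS_ATTR_NO; attribute number 1 (temp1_*) is
 * reserved for the package temperature, when available.
 */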

/*
 * Per-Core Temperature Data
 * @last_updated: The time when the current temperature value was updated
 *		earlier (in jiffies).
 * @cpu_core_id: The CPU Core from which temperature values should be read
 *		This value is passed as "id" field to rdmsr/wrmsr functions.
 * @status_reg: One of IA32_THERM_STATUS or IA32_PACKAGE_THERM_STATUS,
 *		from where the temperature values should be read.
 * @is_pkg_data: If this is 1, the temp_data holds pkgtemp data.
 *		Otherwise, temp_data holds coretemp data.
 * @valid: If this is 1, the current temperature is valid.
 * @temp: The last temperature reading, in millidegrees Celsius.
 * @ttarget: The temperature target (TjMax minus an offset), exported as
 *		tempX_max.
 * @tjmax: The maximum junction temperature, exported as tempX_crit.
 * @cpu: The CPU whose MSRs are read to update this data.
 * @sd_attrs: The per-core sysfs attributes.
 * @attr_name: Buffers holding the names of the sysfs attributes.
 * @update_lock: Mutex serializing the temperature updates.
 */
struct temp_data {
	int temp;
	int ttarget;
	int tjmax;
	unsigned long last_updated;
	unsigned int cpu;
	u32 cpu_core_id;
	u32 status_reg;
	bool is_pkg_data;
	bool valid;
	struct sensor_device_attribute sd_attrs[MAX_ATTRS];
	char attr_name[MAX_ATTRS][CORETEMP_NAME_LENGTH];
	struct mutex update_lock;
};

/* Platform Data per Physical CPU */
struct platform_data {
	struct device *hwmon_dev;
	u16 phys_proc_id;
	struct temp_data *core_data[MAX_CORE_DATA];
	struct device_attribute name_attr;
};

struct pdev_entry {
	struct list_head list;
	struct platform_device *pdev;
	unsigned int cpu;
	u16 phys_proc_id;
	u16 cpu_core_id;
};

static LIST_HEAD(pdev_list);
static DEFINE_MUTEX(pdev_list_mutex);

static ssize_t show_name(struct device *dev,
			struct device_attribute *devattr, char *buf)
{
	return sprintf(buf, "%s\n", DRVNAME);
}

static ssize_t show_label(struct device *dev,
				struct device_attribute *devattr, char *buf)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	struct platform_data *pdata = dev_get_drvdata(dev);
	struct temp_data *tdata = pdata->core_data[attr->index];

	if (tdata->is_pkg_data)
		return sprintf(buf, "Physical id %u\n", pdata->phys_proc_id);

	return sprintf(buf, "Core %u\n", tdata->cpu_core_id);
}

static ssize_t show_crit_alarm(struct device *dev,
				struct device_attribute *devattr, char *buf)
{
	u32 eax, edx;
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	struct platform_data *pdata = dev_get_drvdata(dev);
	struct temp_data *tdata = pdata->core_data[attr->index];

	rdmsr_on_cpu(tdata->cpu, tdata->status_reg, &eax, &edx);

	return sprintf(buf, "%d\n", (eax >> 5) & 1);
}

static ssize_t show_tjmax(struct device *dev,
			struct device_attribute *devattr, char *buf)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	struct platform_data *pdata = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", pdata->core_data[attr->index]->tjmax);
}

static ssize_t show_ttarget(struct device *dev,
			struct device_attribute *devattr, char *buf)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	struct platform_data *pdata = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", pdata->core_data[attr->index]->ttarget);
}

static ssize_t show_temp(struct device *dev,
			struct device_attribute *devattr, char *buf)
{
	u32 eax, edx;
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	struct platform_data *pdata = dev_get_drvdata(dev);
	struct temp_data *tdata = pdata->core_data[attr->index];

	mutex_lock(&tdata->update_lock);

	/* Check whether the time interval has elapsed */
	if (!tdata->valid || time_after(jiffies, tdata->last_updated + HZ)) {
		rdmsr_on_cpu(tdata->cpu, tdata->status_reg, &eax, &edx);
		tdata->valid = 0;
		/* Check whether the data is valid */
		if (eax & 0x80000000) {
			tdata->temp = tdata->tjmax -
					((eax >> 16) & 0x7f) * 1000;
			tdata->valid = 1;
		}
		tdata->last_updated = jiffies;
	}

	mutex_unlock(&tdata->update_lock);
	return tdata->valid ? sprintf(buf, "%d\n", tdata->temp) : -EAGAIN;
}
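
/*
 * adjust_tjmax() estimates TjMax for CPUs that do not report it in
 * IA32_TEMPERATURE_TARGET: Atom (model 0x1c) is judged by the host bridge
 * PCI ID, mobile Core2 parts by the undocumented MSRs 0x17 and 0xEE.
 * The returned value, in millidegrees C, is a best-effort guess.
 */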

static int adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
{
	/* 100 degrees C is the default for both mobile and non-mobile CPUs */

	int tjmax = 100000;
	int tjmax_ee = 85000;
	int usemsr_ee = 1;
	int err;
	u32 eax, edx;
	struct pci_dev *host_bridge;

	/* Early chips have no MSR for TjMax */

	if (c->x86_model == 0xf && c->x86_mask < 4)
		usemsr_ee = 0;

	/* Atom CPUs */

	if (c->x86_model == 0x1c) {
		usemsr_ee = 0;

		host_bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));

		if (host_bridge && host_bridge->vendor == PCI_VENDOR_ID_INTEL
		    && (host_bridge->device == 0xa000	/* NM10 based nettop */
		    || host_bridge->device == 0xa010))	/* NM10 based netbook */
			tjmax = 100000;
		else
			tjmax = 90000;

		pci_dev_put(host_bridge);
	}

	if (c->x86_model > 0xe && usemsr_ee) {
		u8 platform_id;

		/*
		 * Now we can detect the mobile CPU using the Intel provided
		 * table http://softwarecommunity.intel.com/Wiki/Mobility/720.htm
		 * For Core2 cores, check MSR 0x17, bit 28: 1 = Mobile CPU
		 */
		err = rdmsr_safe_on_cpu(id, 0x17, &eax, &edx);
		if (err) {
			dev_warn(dev,
				 "Unable to access MSR 0x17, assuming desktop"
				 " CPU\n");
			usemsr_ee = 0;
		} else if (c->x86_model < 0x17 && !(eax & 0x10000000)) {
			/*
			 * Trust bit 28 up to Penryn, I could not find any
			 * documentation on that; if you happen to know
			 * someone at Intel please ask
			 */
			usemsr_ee = 0;
		} else {
			/* Platform ID bits 52:50 (EDX starts at bit 32) */
			platform_id = (edx >> 18) & 0x7;

			/*
			 * Mobile Penryn CPU seems to be platform ID 7 or 5
			 * (guesswork)
			 */
			if (c->x86_model == 0x17 &&
			    (platform_id == 5 || platform_id == 7)) {
				/*
				 * If MSR EE bit is set, set it to 90 degrees C,
				 * otherwise 105 degrees C
				 */
				tjmax_ee = 90000;
				tjmax = 105000;
			}
		}
	}

	if (usemsr_ee) {
		err = rdmsr_safe_on_cpu(id, 0xee, &eax, &edx);
		if (err) {
			dev_warn(dev,
				 "Unable to access MSR 0xEE, for Tjmax, left"
				 " at default\n");
		} else if (eax & 0x40000000) {
			tjmax = tjmax_ee;
		}
	} else if (tjmax == 100000) {
		/*
		 * If we do not use MSR EE it means we are a desktop CPU
		 * (with the exception of Atom)
		 */
		dev_warn(dev, "Using relative temperature scale!\n");
	}

	return tjmax;
}
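
/*
 * get_tjmax() reads TjMax from IA32_TEMPERATURE_TARGET when the reported
 * value is plausible; otherwise it falls back to a per-model default or,
 * for models 0x17 and 0x1c, to the heuristics in adjust_tjmax().
 */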

static int get_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
{
	/* 100 degrees C is the default for both mobile and non-mobile CPUs */
	int err;
	u32 eax, edx;
	u32 val;

	/*
	 * A new feature of current Intel(R) processors, the
	 * IA32_TEMPERATURE_TARGET contains the TjMax value
	 */
	err = rdmsr_safe_on_cpu(id, MSR_IA32_TEMPERATURE_TARGET, &eax, &edx);
	if (err) {
		dev_warn(dev, "Unable to read TjMax from CPU.\n");
	} else {
		val = (eax >> 16) & 0xff;
		/*
		 * If the TjMax is not plausible, an assumption
		 * will be used
		 */
		if (val > 80 && val < 120) {
			dev_info(dev, "TjMax is %d C.\n", val);
			return val * 1000;
		}
	}

	/*
	 * An assumption is made for early CPUs and unreadable MSR.
	 * NOTE: the given value may not be correct.
	 */

	switch (c->x86_model) {
	case 0xe:
	case 0xf:
	case 0x16:
	case 0x1a:
		dev_warn(dev, "TjMax is assumed as 100 C!\n");
		return 100000;
	case 0x17:
	case 0x1c:		/* Atom CPUs */
		return adjust_tjmax(c, id, dev);
	default:
		dev_warn(dev, "CPU (model=0x%x) is not supported yet,"
			 " using default TjMax of 100C.\n", c->x86_model);
		return 100000;
	}
}

static void __devinit get_ucode_rev_on_cpu(void *edx)
{
	u32 eax;

	wrmsr(MSR_IA32_UCODE_REV, 0, 0);
	sync_core();
	rdmsr(MSR_IA32_UCODE_REV, eax, *(u32 *)edx);
}

static int get_pkg_tjmax(unsigned int cpu, struct device *dev)
{
	int err;
	u32 eax, edx, val;

	err = rdmsr_safe_on_cpu(cpu, MSR_IA32_TEMPERATURE_TARGET, &eax, &edx);
	if (!err) {
		val = (eax >> 16) & 0xff;
		if (val > 80 && val < 120)
			return val * 1000;
	}
	dev_warn(dev, "Unable to read Pkg-TjMax from CPU:%u\n", cpu);
	return 100000; /* Default TjMax: 100 degrees Celsius */
}

static int create_name_attr(struct platform_data *pdata, struct device *dev)
{
	pdata->name_attr.attr.name = "name";
	pdata->name_attr.attr.mode = S_IRUGO;
	pdata->name_attr.show = show_name;
	return device_create_file(dev, &pdata->name_attr);
}

static int create_core_attrs(struct temp_data *tdata, struct device *dev,
				int attr_no)
{
	int err, i;
	static ssize_t (*rd_ptr[MAX_ATTRS]) (struct device *dev,
			struct device_attribute *devattr, char *buf) = {
			show_label, show_crit_alarm, show_ttarget,
			show_temp, show_tjmax };
	static const char *names[MAX_ATTRS] = {
					"temp%d_label", "temp%d_crit_alarm",
					"temp%d_max", "temp%d_input",
					"temp%d_crit" };

	for (i = 0; i < MAX_ATTRS; i++) {
		snprintf(tdata->attr_name[i], CORETEMP_NAME_LENGTH, names[i],
			attr_no);
		tdata->sd_attrs[i].dev_attr.attr.name = tdata->attr_name[i];
		tdata->sd_attrs[i].dev_attr.attr.mode = S_IRUGO;
		tdata->sd_attrs[i].dev_attr.show = rd_ptr[i];
		tdata->sd_attrs[i].dev_attr.store = NULL;
		tdata->sd_attrs[i].index = attr_no;
		err = device_create_file(dev, &tdata->sd_attrs[i].dev_attr);
		if (err)
			goto exit_free;
	}
	return 0;

exit_free:
	while (--i >= 0)
		device_remove_file(dev, &tdata->sd_attrs[i].dev_attr);
	return err;
}
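
/*
 * update_ttarget() sets the threshold exported as tempX_max: TjMax minus
 * the target offset from IA32_TEMPERATURE_TARGET when that MSR is
 * readable, or TjMax minus 20 degrees C otherwise.
 */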

static void update_ttarget(__u8 cpu_model, struct temp_data *tdata,
				struct device *dev)
{
	int err;
	u32 eax, edx;

	/*
	 * Initialize ttarget value. Eventually this will be
	 * initialized with the value from MSR_IA32_THERM_INTERRUPT
	 * register. If IA32_TEMPERATURE_TARGET is supported, this
	 * value will be overwritten below.
	 * To Do: Patch to initialize ttarget from MSR_IA32_THERM_INTERRUPT
	 */
	tdata->ttarget = tdata->tjmax - 20000;

	/*
	 * Read the still undocumented IA32_TEMPERATURE_TARGET. It exists
	 * on older CPUs but not in this register;
	 * Atoms don't have it either.
	 */
	if (cpu_model > 0xe && cpu_model != 0x1c) {
		err = rdmsr_safe_on_cpu(tdata->cpu,
				MSR_IA32_TEMPERATURE_TARGET, &eax, &edx);
		if (err) {
			dev_warn(dev,
				 "Unable to read IA32_TEMPERATURE_TARGET MSR\n");
		} else {
			tdata->ttarget = tdata->tjmax -
					((eax >> 8) & 0xff) * 1000;
		}
	}
}

static int chk_ucode_version(struct platform_device *pdev)
{
	struct cpuinfo_x86 *c = &cpu_data(pdev->id);
	int err;
	u32 edx;

	/*
	 * Check if we have a problem with errata AE18 of Core processors:
	 * readings might stop updating when the processor visited too deep
	 * a sleep state; fixed for stepping D0 (6EC).
	 */
	if (c->x86_model == 0xe && c->x86_mask < 0xc) {
		/* check for microcode update */
		err = smp_call_function_single(pdev->id, get_ucode_rev_on_cpu,
					       &edx, 1);
		if (err) {
			dev_err(&pdev->dev,
				"Cannot determine microcode revision of "
				"CPU#%u (%d)!\n", pdev->id, err);
			return -ENODEV;
		} else if (edx < 0x39) {
			dev_err(&pdev->dev,
				"Errata AE18 not fixed, update BIOS or "
				"microcode of the CPU!\n");
			return -ENODEV;
		}
	}
	return 0;
}

static struct platform_device *coretemp_get_pdev(unsigned int cpu)
{
	u16 phys_proc_id = TO_PHYS_ID(cpu);
	struct pdev_entry *p;

	mutex_lock(&pdev_list_mutex);

	list_for_each_entry(p, &pdev_list, list)
		if (p->phys_proc_id == phys_proc_id) {
			mutex_unlock(&pdev_list_mutex);
			return p->pdev;
		}

	mutex_unlock(&pdev_list_mutex);
	return NULL;
}

static struct temp_data *init_temp_data(unsigned int cpu, int pkg_flag)
{
	struct temp_data *tdata;

	tdata = kzalloc(sizeof(struct temp_data), GFP_KERNEL);
	if (!tdata)
		return NULL;

	tdata->status_reg = pkg_flag ? MSR_IA32_PACKAGE_THERM_STATUS :
							MSR_IA32_THERM_STATUS;
	tdata->is_pkg_data = pkg_flag;
	tdata->cpu = cpu;
	tdata->cpu_core_id = TO_CORE_ID(cpu);
	mutex_init(&tdata->update_lock);
	return tdata;
}
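
/*
 * create_core_data() allocates the temp_data for one core (or for the
 * package, if pkg_flag is set), verifies that its status MSR is readable,
 * determines TjMax and ttarget, and creates the sysfs attributes.
 */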

static int create_core_data(struct platform_data *pdata,
				struct platform_device *pdev,
				unsigned int cpu, int pkg_flag)
{
	struct temp_data *tdata;
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	u32 eax, edx;
	int err, attr_no;

	/*
	 * Find attr number for sysfs:
	 * We map the attr number to core id of the CPU.
	 * The attr number is always core id + 2.
	 * The Pkgtemp will always show up as temp1_*, if available.
	 */
	attr_no = pkg_flag ? 1 : TO_ATTR_NO(cpu);

	if (attr_no > MAX_CORE_DATA - 1)
		return -ERANGE;

	/*
	 * Provide a single set of attributes for all HT siblings of a core
	 * to avoid duplicate sensors (the processor ID and core ID of all
	 * HT siblings of a core is the same).
	 * Skip if a HT sibling of this core is already online.
	 * This is not an error.
	 */
	if (pdata->core_data[attr_no] != NULL)
		return 0;

	tdata = init_temp_data(cpu, pkg_flag);
	if (!tdata)
		return -ENOMEM;

	/* Test if we can access the status register */
	err = rdmsr_safe_on_cpu(cpu, tdata->status_reg, &eax, &edx);
	if (err)
		goto exit_free;

	/* We can access the status register. Get the critical temperature. */
	if (pkg_flag)
		tdata->tjmax = get_pkg_tjmax(pdev->id, &pdev->dev);
	else
		tdata->tjmax = get_tjmax(c, cpu, &pdev->dev);

	update_ttarget(c->x86_model, tdata, &pdev->dev);
	pdata->core_data[attr_no] = tdata;

	/* Create sysfs interfaces */
	err = create_core_attrs(tdata, &pdev->dev, attr_no);
	if (err)
		goto exit_free;

	return 0;
exit_free:
	kfree(tdata);
	return err;
}

static void coretemp_add_core(unsigned int cpu, int pkg_flag)
{
	struct platform_data *pdata;
	struct platform_device *pdev = coretemp_get_pdev(cpu);
	int err;

	if (!pdev)
		return;

	pdata = platform_get_drvdata(pdev);

	err = create_core_data(pdata, pdev, cpu, pkg_flag);
	if (err)
		dev_err(&pdev->dev, "Adding Core %u failed\n", cpu);
}

static void coretemp_remove_core(struct platform_data *pdata,
				struct device *dev, int indx)
{
	int i;
	struct temp_data *tdata = pdata->core_data[indx];

	/* Remove the sysfs attributes */
	for (i = 0; i < MAX_ATTRS; i++)
		device_remove_file(dev, &tdata->sd_attrs[i].dev_attr);

	kfree(pdata->core_data[indx]);
	pdata->core_data[indx] = NULL;
}

static int __devinit coretemp_probe(struct platform_device *pdev)
{
	struct platform_data *pdata;
	int err;

	/* Check the microcode version of the CPU */
	err = chk_ucode_version(pdev);
	if (err)
		return err;

	/* Initialize the per-package data structures */
	pdata = kzalloc(sizeof(struct platform_data), GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	err = create_name_attr(pdata, &pdev->dev);
	if (err)
		goto exit_free;

	pdata->phys_proc_id = TO_PHYS_ID(pdev->id);
	platform_set_drvdata(pdev, pdata);

	pdata->hwmon_dev = hwmon_device_register(&pdev->dev);
	if (IS_ERR(pdata->hwmon_dev)) {
		err = PTR_ERR(pdata->hwmon_dev);
		dev_err(&pdev->dev, "Class registration failed (%d)\n", err);
		goto exit_name;
	}
	return 0;

exit_name:
	device_remove_file(&pdev->dev, &pdata->name_attr);
	platform_set_drvdata(pdev, NULL);
exit_free:
	kfree(pdata);
	return err;
}

static int __devexit coretemp_remove(struct platform_device *pdev)
{
	struct platform_data *pdata = platform_get_drvdata(pdev);
	int i;

	for (i = MAX_CORE_DATA - 1; i >= 0; --i)
		if (pdata->core_data[i])
			coretemp_remove_core(pdata, &pdev->dev, i);

	device_remove_file(&pdev->dev, &pdata->name_attr);
	hwmon_device_unregister(pdata->hwmon_dev);
	platform_set_drvdata(pdev, NULL);
	kfree(pdata);
	return 0;
}

static struct platform_driver coretemp_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.name = DRVNAME,
	},
	.probe = coretemp_probe,
	.remove = __devexit_p(coretemp_remove),
};
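
/*
 * coretemp_device_add() registers the platform device for a physical
 * package and tracks it in pdev_list, so that all cores of the package
 * attach their sensors to the same hwmon device.
 */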

static int __cpuinit coretemp_device_add(unsigned int cpu)
{
	int err;
	struct platform_device *pdev;
	struct pdev_entry *pdev_entry;

	mutex_lock(&pdev_list_mutex);

	pdev = platform_device_alloc(DRVNAME, cpu);
	if (!pdev) {
		err = -ENOMEM;
		pr_err("Device allocation failed\n");
		goto exit;
	}

	pdev_entry = kzalloc(sizeof(struct pdev_entry), GFP_KERNEL);
	if (!pdev_entry) {
		err = -ENOMEM;
		goto exit_device_put;
	}

	err = platform_device_add(pdev);
	if (err) {
		pr_err("Device addition failed (%d)\n", err);
		goto exit_device_free;
	}

	pdev_entry->pdev = pdev;
	pdev_entry->cpu = cpu;
	pdev_entry->phys_proc_id = TO_PHYS_ID(cpu);
	pdev_entry->cpu_core_id = TO_CORE_ID(cpu);

	list_add_tail(&pdev_entry->list, &pdev_list);
	mutex_unlock(&pdev_list_mutex);

	return 0;

exit_device_free:
	kfree(pdev_entry);
exit_device_put:
	platform_device_put(pdev);
exit:
	mutex_unlock(&pdev_list_mutex);
	return err;
}

static void coretemp_device_remove(unsigned int cpu)
{
	struct pdev_entry *p, *n;
	u16 phys_proc_id = TO_PHYS_ID(cpu);

	mutex_lock(&pdev_list_mutex);
	list_for_each_entry_safe(p, n, &pdev_list, list) {
		if (p->phys_proc_id != phys_proc_id)
			continue;
		platform_device_unregister(p->pdev);
		list_del(&p->list);
		kfree(p);
	}
	mutex_unlock(&pdev_list_mutex);
}

static bool is_any_core_online(struct platform_data *pdata)
{
	int i;

	/* Find online cores, except pkgtemp data */
	for (i = MAX_CORE_DATA - 1; i >= 0; --i) {
		if (pdata->core_data[i] &&
			!pdata->core_data[i]->is_pkg_data) {
			return true;
		}
	}
	return false;
}

static void __cpuinit get_core_online(unsigned int cpu)
{
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	struct platform_device *pdev = coretemp_get_pdev(cpu);
	int err;

	/*
	 * CPUID.06H.EAX[0] indicates whether the CPU has thermal
	 * sensors. We check this bit only, all the early CPUs
	 * without thermal sensors will be filtered out.
	 */
	if (!cpu_has(c, X86_FEATURE_DTS))
		return;

	if (!pdev) {
		/*
		 * Alright, we have DTS support.
		 * We are bringing the _first_ core in this pkg
		 * online. So, initialize per-pkg data structures and
		 * then bring this core online.
		 */
		err = coretemp_device_add(cpu);
		if (err)
			return;
		/*
		 * Check whether pkgtemp support is available.
		 * If so, add interfaces for pkgtemp.
		 */
		if (cpu_has(c, X86_FEATURE_PTS))
			coretemp_add_core(cpu, 1);
	}
	/*
	 * Physical CPU device already exists.
	 * So, just add interfaces for this core.
	 */
	coretemp_add_core(cpu, 0);
}
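
/*
 * put_core_offline() removes the sysfs attributes of a core going offline.
 * If an HT sibling of that core is still online, the sibling's attributes
 * are registered instead; once no core of the package is left online, the
 * package's platform device is removed as well.
 */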

static void __cpuinit put_core_offline(unsigned int cpu)
{
	int i, indx;
	struct platform_data *pdata;
	struct platform_device *pdev = coretemp_get_pdev(cpu);

	/* If the physical CPU device does not exist, just return */
	if (!pdev)
		return;

	pdata = platform_get_drvdata(pdev);

	indx = TO_ATTR_NO(cpu);

	if (pdata->core_data[indx] && pdata->core_data[indx]->cpu == cpu)
		coretemp_remove_core(pdata, &pdev->dev, indx);

	/*
	 * If a core is taken offline, but a HT sibling of the same core is
	 * still online, register the alternate sibling. This ensures that
	 * exactly one set of attributes is provided as long as at least one
	 * HT sibling of a core is online.
	 */
	for_each_sibling(i, cpu) {
		if (i != cpu) {
			get_core_online(i);
			/*
			 * Display temperature sensor data for one HT sibling
			 * per core only, so abort the loop after one such
			 * sibling has been found.
			 */
			break;
		}
	}
	/*
	 * If all cores in this pkg are offline, remove the device.
	 * coretemp_device_remove calls platform_device_unregister,
	 * which in turn calls coretemp_remove. This removes the
	 * pkgtemp entry and does other clean ups.
	 */
	if (!is_any_core_online(pdata))
		coretemp_device_remove(cpu);
}

static int __cpuinit coretemp_cpu_callback(struct notifier_block *nfb,
				unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long) hcpu;

	switch (action) {
	case CPU_ONLINE:
	case CPU_DOWN_FAILED:
		get_core_online(cpu);
		break;
	case CPU_DOWN_PREPARE:
		put_core_offline(cpu);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block coretemp_cpu_notifier __refdata = {
	.notifier_call = coretemp_cpu_callback,
};

static int __init coretemp_init(void)
{
	int i, err = -ENODEV;

	/* quick check if we run Intel */
	if (cpu_data(0).x86_vendor != X86_VENDOR_INTEL)
		goto exit;

	err = platform_driver_register(&coretemp_driver);
	if (err)
		goto exit;

	for_each_online_cpu(i)
		get_core_online(i);

#ifndef CONFIG_HOTPLUG_CPU
	if (list_empty(&pdev_list)) {
		err = -ENODEV;
		goto exit_driver_unreg;
	}
#endif

	register_hotcpu_notifier(&coretemp_cpu_notifier);
	return 0;

#ifndef CONFIG_HOTPLUG_CPU
exit_driver_unreg:
	platform_driver_unregister(&coretemp_driver);
#endif
exit:
	return err;
}

static void __exit coretemp_exit(void)
{
	struct pdev_entry *p, *n;

	unregister_hotcpu_notifier(&coretemp_cpu_notifier);
	mutex_lock(&pdev_list_mutex);
	list_for_each_entry_safe(p, n, &pdev_list, list) {
		platform_device_unregister(p->pdev);
		list_del(&p->list);
		kfree(p);
	}
	mutex_unlock(&pdev_list_mutex);
	platform_driver_unregister(&coretemp_driver);
}

MODULE_AUTHOR("Rudolf Marek <r.marek@assembler.cz>");
MODULE_DESCRIPTION("Intel Core temperature monitor");
MODULE_LICENSE("GPL");

module_init(coretemp_init)
module_exit(coretemp_exit)