// SPDX-License-Identifier: GPL-2.0-only
// Copyright (c) 2018-2021 Intel Corporation

#include <linux/auxiliary_bus.h>
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/hwmon.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/peci.h>
#include <linux/peci-cpu.h>
#include <linux/units.h>

#include "common.h"

#define CORE_NUMS_MAX		64

#define BASE_CHANNEL_NUMS	5
#define CPUTEMP_CHANNEL_NUMS	(BASE_CHANNEL_NUMS + CORE_NUMS_MAX)

#define TEMP_TARGET_FAN_TEMP_MASK	GENMASK(15, 8)
#define TEMP_TARGET_REF_TEMP_MASK	GENMASK(23, 16)
#define TEMP_TARGET_TJ_OFFSET_MASK	GENMASK(29, 24)

#define DTS_MARGIN_MASK		GENMASK(15, 0)
#define PCS_MODULE_TEMP_MASK	GENMASK(15, 0)

struct resolved_cores_reg {
	u8 bus;
	u8 dev;
	u8 func;
	u8 offset;
};

struct cpu_info {
	struct resolved_cores_reg *reg;
	u8 min_peci_revision;
	s32 (*thermal_margin_to_millidegree)(u16 val);
};

struct peci_temp_target {
	s32 tcontrol;
	s32 tthrottle;
	s32 tjmax;
	struct peci_sensor_state state;
};

enum peci_temp_target_type {
	tcontrol_type,
	tthrottle_type,
	tjmax_type,
	crit_hyst_type,
};

struct peci_cputemp {
	struct peci_device *peci_dev;
	struct device *dev;
	const char *name;
	const struct cpu_info *gen_info;
	struct {
		struct peci_temp_target target;
		struct peci_sensor_data die;
		struct peci_sensor_data dts;
		struct peci_sensor_data core[CORE_NUMS_MAX];
	} temp;
	const char **coretemp_label;
	DECLARE_BITMAP(core_mask, CORE_NUMS_MAX);
};

enum cputemp_channels {
	channel_die,
	channel_dts,
	channel_tcontrol,
	channel_tthrottle,
	channel_tjmax,
	channel_core,
};

static const char * const cputemp_label[BASE_CHANNEL_NUMS] = {
	"Die",
	"DTS",
	"Tcontrol",
	"Tthrottle",
	"Tjmax",
};

static int update_temp_target(struct peci_cputemp *priv)
{
	s32 tthrottle_offset, tcontrol_margin;
	u32 pcs;
	int ret;

	if (!peci_sensor_need_update(&priv->temp.target.state))
		return 0;

	ret = peci_pcs_read(priv->peci_dev, PECI_PCS_TEMP_TARGET, 0, &pcs);
	if (ret)
		return ret;

	priv->temp.target.tjmax =
		FIELD_GET(TEMP_TARGET_REF_TEMP_MASK, pcs) * MILLIDEGREE_PER_DEGREE;

	tcontrol_margin = FIELD_GET(TEMP_TARGET_FAN_TEMP_MASK, pcs);
	tcontrol_margin = sign_extend32(tcontrol_margin, 7) * MILLIDEGREE_PER_DEGREE;
	priv->temp.target.tcontrol = priv->temp.target.tjmax - tcontrol_margin;

	tthrottle_offset = FIELD_GET(TEMP_TARGET_TJ_OFFSET_MASK, pcs) * MILLIDEGREE_PER_DEGREE;
	priv->temp.target.tthrottle = priv->temp.target.tjmax - tthrottle_offset;

	peci_sensor_mark_updated(&priv->temp.target.state);

	return 0;
}
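
/*
 * Illustrative decode of the TEMP_TARGET fields read in
 * update_temp_target() above (the value is made up, not taken from any
 * specific SKU): pcs = 0x05620a00 yields Tjmax = 0x62 (98 degrees C) =
 * 98000 millidegrees, a Tcontrol margin of 0x0a (10 degrees C), so
 * Tcontrol = 88000, and a Tj offset of 0x05 (5 degrees C), so
 * Tthrottle = 93000.
 */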

static int get_temp_target(struct peci_cputemp *priv, enum peci_temp_target_type type, long *val)
{
	int ret;

	mutex_lock(&priv->temp.target.state.lock);

	ret = update_temp_target(priv);
	if (ret)
		goto unlock;

	switch (type) {
	case tcontrol_type:
		*val = priv->temp.target.tcontrol;
		break;
	case tthrottle_type:
		*val = priv->temp.target.tthrottle;
		break;
	case tjmax_type:
		*val = priv->temp.target.tjmax;
		break;
	case crit_hyst_type:
		*val = priv->temp.target.tjmax - priv->temp.target.tcontrol;
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}
unlock:
	mutex_unlock(&priv->temp.target.state.lock);

	return ret;
}

/*
 * Error codes:
 *   0x8000: General sensor error
 *   0x8001: Reserved
 *   0x8002: Underflow on reading value
 *   0x8003-0x81ff: Reserved
 */
static bool dts_valid(u16 val)
{
	return val < 0x8000 || val > 0x81ff;
}

/*
 * Processors return a value of DTS reading in S10.6 fixed point format
 * (16 bits: 10-bit signed magnitude, 6-bit fraction).
 */
static s32 dts_ten_dot_six_to_millidegree(u16 val)
{
	return sign_extend32(val, 15) * MILLIDEGREE_PER_DEGREE / 64;
}

/*
 * For older processors, thermal margin reading is returned in S8.8 fixed
 * point format (16 bits: 8-bit signed magnitude, 8-bit fraction).
 */
static s32 dts_eight_dot_eight_to_millidegree(u16 val)
{
	return sign_extend32(val, 15) * MILLIDEGREE_PER_DEGREE / 256;
}
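
/*
 * Quick sanity examples for the conversions above (values chosen purely
 * for illustration): in S10.6, 0x0040 is 64/64 = 1 degree C = 1000
 * millidegrees, and 0xff80 sign-extends to -128, i.e. -2000 millidegrees;
 * in S8.8, 0xff00 sign-extends to -256, i.e. -1000 millidegrees.
 */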

static int get_die_temp(struct peci_cputemp *priv, long *val)
{
	int ret = 0;
	long tjmax;
	u16 temp;

	mutex_lock(&priv->temp.die.state.lock);
	if (!peci_sensor_need_update(&priv->temp.die.state))
		goto skip_update;

	ret = peci_temp_read(priv->peci_dev, &temp);
	if (ret)
		goto err_unlock;

	if (!dts_valid(temp)) {
		ret = -EIO;
		goto err_unlock;
	}

	ret = get_temp_target(priv, tjmax_type, &tjmax);
	if (ret)
		goto err_unlock;

	priv->temp.die.value = (s32)tjmax + dts_ten_dot_six_to_millidegree(temp);

	peci_sensor_mark_updated(&priv->temp.die.state);

skip_update:
	*val = priv->temp.die.value;
err_unlock:
	mutex_unlock(&priv->temp.die.state.lock);
	return ret;
}

static int get_dts(struct peci_cputemp *priv, long *val)
{
	int ret = 0;
	u16 thermal_margin;
	long tcontrol;
	u32 pcs;

	mutex_lock(&priv->temp.dts.state.lock);
	if (!peci_sensor_need_update(&priv->temp.dts.state))
		goto skip_update;

	ret = peci_pcs_read(priv->peci_dev, PECI_PCS_THERMAL_MARGIN, 0, &pcs);
	if (ret)
		goto err_unlock;

	thermal_margin = FIELD_GET(DTS_MARGIN_MASK, pcs);
	if (!dts_valid(thermal_margin)) {
		ret = -EIO;
		goto err_unlock;
	}

	ret = get_temp_target(priv, tcontrol_type, &tcontrol);
	if (ret)
		goto err_unlock;

	/* Note that tcontrol must be available before this calculation */
	priv->temp.dts.value =
		(s32)tcontrol - priv->gen_info->thermal_margin_to_millidegree(thermal_margin);

	peci_sensor_mark_updated(&priv->temp.dts.state);

skip_update:
	*val = priv->temp.dts.value;
err_unlock:
	mutex_unlock(&priv->temp.dts.state.lock);
	return ret;
}

static int get_core_temp(struct peci_cputemp *priv, int core_index, long *val)
{
	int ret = 0;
	u16 core_dts_margin;
	long tjmax;
	u32 pcs;

	mutex_lock(&priv->temp.core[core_index].state.lock);
	if (!peci_sensor_need_update(&priv->temp.core[core_index].state))
		goto skip_update;

	ret = peci_pcs_read(priv->peci_dev, PECI_PCS_MODULE_TEMP, core_index, &pcs);
	if (ret)
		goto err_unlock;

	core_dts_margin = FIELD_GET(PCS_MODULE_TEMP_MASK, pcs);
	if (!dts_valid(core_dts_margin)) {
		ret = -EIO;
		goto err_unlock;
	}

	ret = get_temp_target(priv, tjmax_type, &tjmax);
	if (ret)
		goto err_unlock;

	/* Note that tjmax must be available before this calculation */
	priv->temp.core[core_index].value =
		(s32)tjmax + dts_ten_dot_six_to_millidegree(core_dts_margin);

	peci_sensor_mark_updated(&priv->temp.core[core_index].state);

skip_update:
	*val = priv->temp.core[core_index].value;
err_unlock:
	mutex_unlock(&priv->temp.core[core_index].state.lock);
	return ret;
}

static int cputemp_read_string(struct device *dev, enum hwmon_sensor_types type,
			       u32 attr, int channel, const char **str)
{
	struct peci_cputemp *priv = dev_get_drvdata(dev);

	if (attr != hwmon_temp_label)
		return -EOPNOTSUPP;

	*str = channel < channel_core ?
		cputemp_label[channel] : priv->coretemp_label[channel - channel_core];

	return 0;
}

static int cputemp_read(struct device *dev, enum hwmon_sensor_types type,
			u32 attr, int channel, long *val)
{
	struct peci_cputemp *priv = dev_get_drvdata(dev);

	switch (attr) {
	case hwmon_temp_input:
		switch (channel) {
		case channel_die:
			return get_die_temp(priv, val);
		case channel_dts:
			return get_dts(priv, val);
		case channel_tcontrol:
			return get_temp_target(priv, tcontrol_type, val);
		case channel_tthrottle:
			return get_temp_target(priv, tthrottle_type, val);
		case channel_tjmax:
			return get_temp_target(priv, tjmax_type, val);
		default:
			return get_core_temp(priv, channel - channel_core, val);
		}
		break;
	case hwmon_temp_max:
		return get_temp_target(priv, tcontrol_type, val);
	case hwmon_temp_crit:
		return get_temp_target(priv, tjmax_type, val);
	case hwmon_temp_crit_hyst:
		return get_temp_target(priv, crit_hyst_type, val);
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static umode_t cputemp_is_visible(const void *data, enum hwmon_sensor_types type,
				  u32 attr, int channel)
{
	const struct peci_cputemp *priv = data;

	if (channel > CPUTEMP_CHANNEL_NUMS)
		return 0;

	if (channel < channel_core)
		return 0444;

	if (test_bit(channel - channel_core, priv->core_mask))
		return 0444;

	return 0;
}

static int init_core_mask(struct peci_cputemp *priv)
{
	struct peci_device *peci_dev = priv->peci_dev;
	struct resolved_cores_reg *reg = priv->gen_info->reg;
	u64 core_mask;
	u32 data;
	int ret;

	/* Get the RESOLVED_CORES register value */
	switch (peci_dev->info.model) {
	case INTEL_FAM6_ICELAKE_X:
	case INTEL_FAM6_ICELAKE_D:
	case INTEL_FAM6_SAPPHIRERAPIDS_X:
		ret = peci_ep_pci_local_read(peci_dev, 0, reg->bus, reg->dev,
					     reg->func, reg->offset + 4, &data);
		if (ret)
			return ret;

		core_mask = (u64)data << 32;

		ret = peci_ep_pci_local_read(peci_dev, 0, reg->bus, reg->dev,
					     reg->func, reg->offset, &data);
		if (ret)
			return ret;

		core_mask |= data;

		break;
	default:
		ret = peci_pci_local_read(peci_dev, reg->bus, reg->dev,
					  reg->func, reg->offset, &data);
		if (ret)
			return ret;

		core_mask = data;

		break;
	}

	if (!core_mask)
		return -EIO;

	bitmap_from_u64(priv->core_mask, core_mask);

	return 0;
}

static int create_temp_label(struct peci_cputemp *priv)
{
	unsigned long core_max = find_last_bit(priv->core_mask, CORE_NUMS_MAX);
	int i;

	priv->coretemp_label = devm_kzalloc(priv->dev, (core_max + 1) * sizeof(char *), GFP_KERNEL);
	if (!priv->coretemp_label)
		return -ENOMEM;

	for_each_set_bit(i, priv->core_mask, CORE_NUMS_MAX) {
		priv->coretemp_label[i] = devm_kasprintf(priv->dev, GFP_KERNEL, "Core %d", i);
		if (!priv->coretemp_label[i])
			return -ENOMEM;
	}

	return 0;
}
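
/*
 * Example of the mask/label relationship above (the mask value is purely
 * illustrative): a RESOLVED_CORES value of 0x5 means cores 0 and 2 are
 * populated, so "Core 0" and "Core 2" labels get created while
 * coretemp_label[1] stays NULL; the corresponding channel is kept hidden
 * by cputemp_is_visible(), so the NULL label is never dereferenced.
 */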

static void check_resolved_cores(struct peci_cputemp *priv)
{
	/*
	 * Failure to resolve cores is non-critical, we're still able to
	 * provide other sensor data.
	 */

	if (init_core_mask(priv))
		return;

	if (create_temp_label(priv))
		bitmap_zero(priv->core_mask, CORE_NUMS_MAX);
}

static void sensor_init(struct peci_cputemp *priv)
{
	int i;

	mutex_init(&priv->temp.target.state.lock);
	mutex_init(&priv->temp.die.state.lock);
	mutex_init(&priv->temp.dts.state.lock);

	for_each_set_bit(i, priv->core_mask, CORE_NUMS_MAX)
		mutex_init(&priv->temp.core[i].state.lock);
}

static const struct hwmon_ops peci_cputemp_ops = {
	.is_visible = cputemp_is_visible,
	.read_string = cputemp_read_string,
	.read = cputemp_read,
};

static const struct hwmon_channel_info * const peci_cputemp_info[] = {
	HWMON_CHANNEL_INFO(temp,
			   /* Die temperature */
			   HWMON_T_LABEL | HWMON_T_INPUT | HWMON_T_MAX |
			   HWMON_T_CRIT | HWMON_T_CRIT_HYST,
			   /* DTS margin */
			   HWMON_T_LABEL | HWMON_T_INPUT | HWMON_T_MAX |
			   HWMON_T_CRIT | HWMON_T_CRIT_HYST,
			   /* Tcontrol temperature */
			   HWMON_T_LABEL | HWMON_T_INPUT | HWMON_T_CRIT,
			   /* Tthrottle temperature */
			   HWMON_T_LABEL | HWMON_T_INPUT,
			   /* Tjmax temperature */
			   HWMON_T_LABEL | HWMON_T_INPUT,
			   /* Core temperature - for all core channels */
			   [channel_core ... CPUTEMP_CHANNEL_NUMS - 1] =
						HWMON_T_LABEL | HWMON_T_INPUT),
	NULL
};
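
/*
 * With the channel layout above, the five base channels map to
 * temp1..temp5 in sysfs (hwmon attribute names are 1-based), and the
 * per-core channels follow from channel_core onwards, e.g. core 0 is
 * exposed as temp6_*.
 */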

static const struct hwmon_chip_info peci_cputemp_chip_info = {
	.ops = &peci_cputemp_ops,
	.info = peci_cputemp_info,
};

static int peci_cputemp_probe(struct auxiliary_device *adev,
			      const struct auxiliary_device_id *id)
{
	struct device *dev = &adev->dev;
	struct peci_device *peci_dev = to_peci_device(dev->parent);
	struct peci_cputemp *priv;
	struct device *hwmon_dev;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->name = devm_kasprintf(dev, GFP_KERNEL, "peci_cputemp.cpu%d",
				    peci_dev->info.socket_id);
	if (!priv->name)
		return -ENOMEM;

	priv->dev = dev;
	priv->peci_dev = peci_dev;
	priv->gen_info = (const struct cpu_info *)id->driver_data;

	/*
	 * This is just a sanity check. Since we're using commands that are
	 * guaranteed to be supported on a given platform, we should never see
	 * a revision lower than expected.
	 */
	if (peci_dev->info.peci_revision < priv->gen_info->min_peci_revision)
		dev_warn(priv->dev,
			 "Unexpected PECI revision %#x, some features may be unavailable\n",
			 peci_dev->info.peci_revision);

	check_resolved_cores(priv);

	sensor_init(priv);

	hwmon_dev = devm_hwmon_device_register_with_info(priv->dev, priv->name,
							 priv, &peci_cputemp_chip_info, NULL);

	return PTR_ERR_OR_ZERO(hwmon_dev);
}

/*
 * The RESOLVED_CORES PCI configuration register may be found at different
 * locations on different platforms.
 */
static struct resolved_cores_reg resolved_cores_reg_hsx = {
	.bus = 1,
	.dev = 30,
	.func = 3,
	.offset = 0xb4,
};

static struct resolved_cores_reg resolved_cores_reg_icx = {
	.bus = 14,
	.dev = 30,
	.func = 3,
	.offset = 0xd0,
};

static struct resolved_cores_reg resolved_cores_reg_spr = {
	.bus = 31,
	.dev = 30,
	.func = 6,
	.offset = 0x80,
};

static const struct cpu_info cpu_hsx = {
	.reg = &resolved_cores_reg_hsx,
	.min_peci_revision = 0x33,
	.thermal_margin_to_millidegree = &dts_eight_dot_eight_to_millidegree,
};

static const struct cpu_info cpu_skx = {
	.reg = &resolved_cores_reg_hsx,
	.min_peci_revision = 0x33,
	.thermal_margin_to_millidegree = &dts_ten_dot_six_to_millidegree,
};

static const struct cpu_info cpu_icx = {
	.reg = &resolved_cores_reg_icx,
	.min_peci_revision = 0x40,
	.thermal_margin_to_millidegree = &dts_ten_dot_six_to_millidegree,
};

static const struct cpu_info cpu_spr = {
	.reg = &resolved_cores_reg_spr,
	.min_peci_revision = 0x40,
	.thermal_margin_to_millidegree = &dts_ten_dot_six_to_millidegree,
};

static const struct auxiliary_device_id peci_cputemp_ids[] = {
	{
		.name = "peci_cpu.cputemp.hsx",
		.driver_data = (kernel_ulong_t)&cpu_hsx,
	},
	{
		.name = "peci_cpu.cputemp.bdx",
		.driver_data = (kernel_ulong_t)&cpu_hsx,
	},
	{
		.name = "peci_cpu.cputemp.bdxd",
		.driver_data = (kernel_ulong_t)&cpu_hsx,
	},
	{
		.name = "peci_cpu.cputemp.skx",
		.driver_data = (kernel_ulong_t)&cpu_skx,
	},
	{
		.name = "peci_cpu.cputemp.icx",
		.driver_data = (kernel_ulong_t)&cpu_icx,
	},
	{
		.name = "peci_cpu.cputemp.icxd",
		.driver_data = (kernel_ulong_t)&cpu_icx,
	},
	{
		.name = "peci_cpu.cputemp.spr",
		.driver_data = (kernel_ulong_t)&cpu_spr,
	},
	{ }
};
MODULE_DEVICE_TABLE(auxiliary, peci_cputemp_ids);

static struct auxiliary_driver peci_cputemp_driver = {
	.probe = peci_cputemp_probe,
	.id_table = peci_cputemp_ids,
};

module_auxiliary_driver(peci_cputemp_driver);

MODULE_AUTHOR("Jae Hyun Yoo <jae.hyun.yoo@linux.intel.com>");
MODULE_AUTHOR("Iwona Winiarska <iwona.winiarska@intel.com>");
MODULE_DESCRIPTION("PECI cputemp driver");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(PECI_CPU);