// SPDX-License-Identifier: GPL-2.0
/*
 * Intel Core SoC Power Management Controller Driver
 *
 * Copyright (c) 2016, Intel Corporation.
 * All Rights Reserved.
 *
 * Authors: Rajneesh Bhardwaj <rajneesh.bhardwaj@intel.com>
 *          Vishwanath Somayaji <vishwanath.somayaji@intel.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitfield.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/dmi.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/suspend.h>
#include <linux/units.h>

#include <asm/cpuid/api.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include <asm/msr.h>
#include <asm/tsc.h>

#include "core.h"
#include "ssram_telemetry.h"
#include "../pmt/telemetry.h"

enum header_type {
	HEADER_STATUS,
	HEADER_VALUE,
};

/* Maximum number of modes supported by platforms that have low power mode capability */
const char *pmc_lpm_modes[] = {
	"S0i2.0",
	"S0i2.1",
	"S0i2.2",
	"S0i3.0",
	"S0i3.1",
	"S0i3.2",
	"S0i3.3",
	"S0i3.4",
	NULL
};

/* PKGC MSRs are common across Intel Core SoCs */
const struct pmc_bit_map msr_map[] = {
	{"Package C2",	MSR_PKG_C2_RESIDENCY},
	{"Package C3",	MSR_PKG_C3_RESIDENCY},
	{"Package C6",	MSR_PKG_C6_RESIDENCY},
	{"Package C7",	MSR_PKG_C7_RESIDENCY},
	{"Package C8",	MSR_PKG_C8_RESIDENCY},
	{"Package C9",	MSR_PKG_C9_RESIDENCY},
	{"Package C10",	MSR_PKG_C10_RESIDENCY},
	{}
};

static inline u32 pmc_core_reg_read(struct pmc *pmc, int reg_offset)
{
	return readl(pmc->regbase + reg_offset);
}

static inline void pmc_core_reg_write(struct pmc *pmc, int reg_offset,
				      u32 val)
{
	writel(val, pmc->regbase + reg_offset);
}

static inline u64 pmc_core_adjust_slp_s0_step(struct pmc *pmc, u32 value)
{
	/*
	 * ADL PCH does not have the SLP_S0 counter, so the LPM residency
	 * counters are used as a workaround; those use a 30.5 usec tick.
	 * All other client platforms have the legacy SLP_S0 residency
	 * counter that uses the 122 usec tick.
	 */
	const int lpm_adj_x2 = pmc->map->lpm_res_counter_step_x2;

	if (pmc->map == &adl_reg_map)
		return (u64)value * GET_X2_COUNTER((u64)lpm_adj_x2);
	else
		return (u64)value * pmc->map->slp_s0_res_counter_step;
}

static int set_etr3(struct pmc_dev *pmcdev)
{
	struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
	const struct pmc_reg_map *map = pmc->map;
	u32 reg;

	if (!map->etr3_offset)
		return -EOPNOTSUPP;

	guard(mutex)(&pmcdev->lock);

	/* check if CF9 is locked */
	reg = pmc_core_reg_read(pmc, map->etr3_offset);
	if (reg & ETR3_CF9LOCK)
		return -EACCES;

	/* write CF9 global reset bit */
	reg |= ETR3_CF9GR;
	pmc_core_reg_write(pmc, map->etr3_offset, reg);

	reg = pmc_core_reg_read(pmc, map->etr3_offset);
	if (!(reg & ETR3_CF9GR))
		return -EIO;

	return 0;
}

static umode_t etr3_is_visible(struct kobject *kobj,
			       struct attribute *attr,
			       int idx)
{
	struct device *dev = kobj_to_dev(kobj);
	struct pmc_dev *pmcdev = dev_get_drvdata(dev);
	struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
	const struct pmc_reg_map *map = pmc->map;
	u32 reg;

	scoped_guard(mutex, &pmcdev->lock)
		reg = pmc_core_reg_read(pmc, map->etr3_offset);

	return reg & ETR3_CF9LOCK ?
		attr->mode & (SYSFS_PREALLOC | 0444) : attr->mode;
}
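
/*
 * Note on the ETR3 flow above (editorial summary): ETR3 mirrors the legacy
 * reset control register at I/O port CF9h. set_etr3() refuses to act when
 * CF9LOCK is set, otherwise it sets CF9GR so that the next CF9h-initiated
 * reset is promoted to a global (platform) reset, and reads the register
 * back to confirm the write took effect. etr3_is_visible() drops write
 * permission from the sysfs attribute when the register is locked.
 */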

static ssize_t etr3_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct pmc_dev *pmcdev = dev_get_drvdata(dev);
	struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
	const struct pmc_reg_map *map = pmc->map;
	u32 reg;

	if (!map->etr3_offset)
		return -EOPNOTSUPP;

	scoped_guard(mutex, &pmcdev->lock) {
		reg = pmc_core_reg_read(pmc, map->etr3_offset);
		reg &= ETR3_CF9GR | ETR3_CF9LOCK;
	}

	return sysfs_emit(buf, "0x%08x\n", reg);
}

static ssize_t etr3_store(struct device *dev,
			  struct device_attribute *attr,
			  const char *buf, size_t len)
{
	struct pmc_dev *pmcdev = dev_get_drvdata(dev);
	int err;
	u32 reg;

	err = kstrtouint(buf, 16, &reg);
	if (err)
		return err;

	/* allow only CF9 writes */
	if (reg != ETR3_CF9GR)
		return -EINVAL;

	err = set_etr3(pmcdev);
	if (err)
		return err;

	return len;
}
static DEVICE_ATTR_RW(etr3);

static struct attribute *pmc_attrs[] = {
	&dev_attr_etr3.attr,
	NULL
};

static const struct attribute_group pmc_attr_group = {
	.attrs = pmc_attrs,
	.is_visible = etr3_is_visible,
};

static const struct attribute_group *pmc_dev_groups[] = {
	&pmc_attr_group,
	NULL
};

static int pmc_core_dev_state_get(void *data, u64 *val)
{
	struct pmc *pmc = data;
	const struct pmc_reg_map *map = pmc->map;
	u32 value;

	value = pmc_core_reg_read(pmc, map->slp_s0_offset);
	*val = pmc_core_adjust_slp_s0_step(pmc, value);

	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(pmc_core_dev_state, pmc_core_dev_state_get, NULL, "%llu\n");

static int pmc_core_pson_residency_get(void *data, u64 *val)
{
	struct pmc *pmc = data;
	const struct pmc_reg_map *map = pmc->map;
	u32 value;

	value = pmc_core_reg_read(pmc, map->pson_residency_offset);
	*val = (u64)value * map->pson_residency_counter_step;

	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(pmc_core_pson_residency, pmc_core_pson_residency_get, NULL, "%llu\n");

static int pmc_core_check_read_lock_bit(struct pmc *pmc)
{
	u32 value;

	value = pmc_core_reg_read(pmc, pmc->map->pm_cfg_offset);
	return value & BIT(pmc->map->pm_read_disable_bit);
}

static void pmc_core_slps0_display(struct pmc *pmc, struct device *dev,
				   struct seq_file *s)
{
	const struct pmc_bit_map **maps = pmc->map->slps0_dbg_maps;
	const struct pmc_bit_map *map;
	int offset = pmc->map->slps0_dbg_offset;
	u32 data;

	while (*maps) {
		map = *maps;
		data = pmc_core_reg_read(pmc, offset);
		offset += 4;
		while (map->name) {
			if (dev)
				dev_info(dev, "SLP_S0_DBG: %-32s\tState: %s\n",
					 map->name,
					 data & map->bit_mask ? "Yes" : "No");
			if (s)
				seq_printf(s, "SLP_S0_DBG: %-32s\tState: %s\n",
					   map->name,
					   data & map->bit_mask ? "Yes" : "No");
"Yes" : "No"); 250 ++map; 251 } 252 ++maps; 253 } 254 } 255 256 static unsigned int pmc_core_lpm_get_arr_size(const struct pmc_bit_map **maps) 257 { 258 unsigned int idx; 259 260 for (idx = 0; maps[idx]; idx++) 261 ;/* Nothing */ 262 263 return idx; 264 } 265 266 static void pmc_core_lpm_display(struct pmc *pmc, struct device *dev, 267 struct seq_file *s, u32 offset, int pmc_index, 268 const char *str, 269 const struct pmc_bit_map **maps) 270 { 271 unsigned int index, idx, len = 32, arr_size; 272 u32 bit_mask, *lpm_regs; 273 274 arr_size = pmc_core_lpm_get_arr_size(maps); 275 lpm_regs = kmalloc_array(arr_size, sizeof(*lpm_regs), GFP_KERNEL); 276 if (!lpm_regs) 277 return; 278 279 for (index = 0; index < arr_size; index++) { 280 lpm_regs[index] = pmc_core_reg_read(pmc, offset); 281 offset += 4; 282 } 283 284 for (idx = 0; idx < arr_size; idx++) { 285 if (dev) 286 dev_info(dev, "\nPMC%d:LPM_%s_%d:\t0x%x\n", pmc_index, str, idx, 287 lpm_regs[idx]); 288 if (s) 289 seq_printf(s, "\nPMC%d:LPM_%s_%d:\t0x%x\n", pmc_index, str, idx, 290 lpm_regs[idx]); 291 for (index = 0; maps[idx][index].name && index < len; index++) { 292 bit_mask = maps[idx][index].bit_mask; 293 if (dev) 294 dev_info(dev, "PMC%d:%-30s %-30d\n", pmc_index, 295 maps[idx][index].name, 296 lpm_regs[idx] & bit_mask ? 1 : 0); 297 if (s) 298 seq_printf(s, "PMC%d:%-30s %-30d\n", pmc_index, 299 maps[idx][index].name, 300 lpm_regs[idx] & bit_mask ? 1 : 0); 301 } 302 } 303 304 kfree(lpm_regs); 305 } 306 307 static bool slps0_dbg_latch; 308 309 static inline u8 pmc_core_reg_read_byte(struct pmc *pmc, int offset) 310 { 311 return readb(pmc->regbase + offset); 312 } 313 314 static void pmc_core_display_map(struct seq_file *s, int index, int idx, int ip, 315 int pmc_idx, u8 pf_reg, const struct pmc_bit_map **pf_map) 316 { 317 seq_printf(s, "PMC%d:PCH IP: %-2d - %-32s\tState: %s\n", 318 pmc_idx, ip, pf_map[idx][index].name, 319 pf_map[idx][index].bit_mask & pf_reg ? 
"Off" : "On"); 320 } 321 322 static int pmc_core_ppfear_show(struct seq_file *s, void *unused) 323 { 324 struct pmc_dev *pmcdev = s->private; 325 unsigned int pmc_idx; 326 327 for (pmc_idx = 0; pmc_idx < ARRAY_SIZE(pmcdev->pmcs); ++pmc_idx) { 328 struct pmc *pmc = pmcdev->pmcs[pmc_idx]; 329 const struct pmc_bit_map **maps; 330 u8 pf_regs[PPFEAR_MAX_NUM_ENTRIES]; 331 unsigned int index, iter, idx, ip = 0; 332 333 if (!pmc) 334 continue; 335 336 maps = pmc->map->pfear_sts; 337 iter = pmc->map->ppfear0_offset; 338 339 for (index = 0; index < pmc->map->ppfear_buckets && 340 index < PPFEAR_MAX_NUM_ENTRIES; index++, iter++) 341 pf_regs[index] = pmc_core_reg_read_byte(pmc, iter); 342 343 for (idx = 0; maps[idx]; idx++) { 344 for (index = 0; maps[idx][index].name && 345 index < pmc->map->ppfear_buckets * 8; ip++, index++) 346 pmc_core_display_map(s, index, idx, ip, pmc_idx, 347 pf_regs[index / 8], maps); 348 } 349 } 350 351 return 0; 352 } 353 DEFINE_SHOW_ATTRIBUTE(pmc_core_ppfear); 354 355 /* This function should return link status, 0 means ready */ 356 static int pmc_core_mtpmc_link_status(struct pmc *pmc) 357 { 358 u32 value; 359 360 value = pmc_core_reg_read(pmc, SPT_PMC_PM_STS_OFFSET); 361 return value & BIT(SPT_PMC_MSG_FULL_STS_BIT); 362 } 363 364 static int pmc_core_send_msg(struct pmc *pmc, u32 *addr_xram) 365 { 366 u32 dest; 367 int timeout; 368 369 for (timeout = NUM_RETRIES; timeout > 0; timeout--) { 370 if (pmc_core_mtpmc_link_status(pmc) == 0) 371 break; 372 msleep(5); 373 } 374 375 if (timeout <= 0 && pmc_core_mtpmc_link_status(pmc)) 376 return -EBUSY; 377 378 dest = (*addr_xram & MTPMC_MASK) | (1U << 1); 379 pmc_core_reg_write(pmc, SPT_PMC_MTPMC_OFFSET, dest); 380 return 0; 381 } 382 383 static int pmc_core_mphy_pg_show(struct seq_file *s, void *unused) 384 { 385 struct pmc_dev *pmcdev = s->private; 386 struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN]; 387 const struct pmc_bit_map *map = pmc->map->mphy_sts; 388 u32 mphy_core_reg_low, mphy_core_reg_high; 389 u32 val_low, val_high; 390 unsigned int index; 391 int err = 0; 392 393 if (pmcdev->pmc_xram_read_bit) { 394 seq_puts(s, "Access denied: please disable PMC_READ_DISABLE setting in BIOS."); 395 return 0; 396 } 397 398 mphy_core_reg_low = (SPT_PMC_MPHY_CORE_STS_0 << 16); 399 mphy_core_reg_high = (SPT_PMC_MPHY_CORE_STS_1 << 16); 400 401 guard(mutex)(&pmcdev->lock); 402 403 err = pmc_core_send_msg(pmc, &mphy_core_reg_low); 404 if (err) 405 return err; 406 407 msleep(10); 408 val_low = pmc_core_reg_read(pmc, SPT_PMC_MFPMC_OFFSET); 409 410 err = pmc_core_send_msg(pmc, &mphy_core_reg_high); 411 if (err) 412 return err; 413 414 msleep(10); 415 val_high = pmc_core_reg_read(pmc, SPT_PMC_MFPMC_OFFSET); 416 417 for (index = 0; index < 8 && map[index].name; index++) { 418 seq_printf(s, "%-32s\tState: %s\n", 419 map[index].name, 420 map[index].bit_mask & val_low ? "Not power gated" : 421 "Power gated"); 422 } 423 424 for (index = 8; map[index].name; index++) { 425 seq_printf(s, "%-32s\tState: %s\n", 426 map[index].name, 427 map[index].bit_mask & val_high ? 
"Not power gated" : 428 "Power gated"); 429 } 430 431 return 0; 432 } 433 DEFINE_SHOW_ATTRIBUTE(pmc_core_mphy_pg); 434 435 static int pmc_core_pll_show(struct seq_file *s, void *unused) 436 { 437 struct pmc_dev *pmcdev = s->private; 438 struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN]; 439 const struct pmc_bit_map *map = pmc->map->pll_sts; 440 u32 mphy_common_reg, val; 441 unsigned int index; 442 int err = 0; 443 444 if (pmcdev->pmc_xram_read_bit) { 445 seq_puts(s, "Access denied: please disable PMC_READ_DISABLE setting in BIOS."); 446 return 0; 447 } 448 449 mphy_common_reg = (SPT_PMC_MPHY_COM_STS_0 << 16); 450 guard(mutex)(&pmcdev->lock); 451 452 err = pmc_core_send_msg(pmc, &mphy_common_reg); 453 if (err) 454 return err; 455 456 /* Observed PMC HW response latency for MTPMC-MFPMC is ~10 ms */ 457 msleep(10); 458 val = pmc_core_reg_read(pmc, SPT_PMC_MFPMC_OFFSET); 459 460 for (index = 0; map[index].name ; index++) { 461 seq_printf(s, "%-32s\tState: %s\n", 462 map[index].name, 463 map[index].bit_mask & val ? "Active" : "Idle"); 464 } 465 466 return 0; 467 } 468 DEFINE_SHOW_ATTRIBUTE(pmc_core_pll); 469 470 int pmc_core_send_ltr_ignore(struct pmc_dev *pmcdev, u32 value, int ignore) 471 { 472 struct pmc *pmc; 473 const struct pmc_reg_map *map; 474 u32 reg; 475 unsigned int pmc_idx; 476 int ltr_index; 477 478 ltr_index = value; 479 /* For platforms with multiple pmcs, ltr index value given by user 480 * is based on the contiguous indexes from ltr_show output. 481 * pmc index and ltr index needs to be calculated from it. 482 */ 483 for (pmc_idx = 0; pmc_idx < ARRAY_SIZE(pmcdev->pmcs) && ltr_index >= 0; pmc_idx++) { 484 pmc = pmcdev->pmcs[pmc_idx]; 485 486 if (!pmc) 487 continue; 488 489 map = pmc->map; 490 if (ltr_index <= map->ltr_ignore_max) 491 break; 492 493 /* Along with IP names, ltr_show map includes CURRENT_PLATFORM 494 * and AGGREGATED_SYSTEM values per PMC. Take these two index 495 * values into account in ltr_index calculation. Also, to start 496 * ltr index from zero for next pmc, subtract it by 1. 

static ssize_t pmc_core_ltr_write(struct pmc_dev *pmcdev,
				  const char __user *userbuf,
				  size_t count, int ignore)
{
	u32 value;
	int err;

	err = kstrtou32_from_user(userbuf, count, 10, &value);
	if (err)
		return err;

	err = pmc_core_send_ltr_ignore(pmcdev, value, ignore);

	return err ?: count;
}

static ssize_t pmc_core_ltr_ignore_write(struct file *file,
					 const char __user *userbuf,
					 size_t count, loff_t *ppos)
{
	struct seq_file *s = file->private_data;
	struct pmc_dev *pmcdev = s->private;

	return pmc_core_ltr_write(pmcdev, userbuf, count, 1);
}

static int pmc_core_ltr_ignore_show(struct seq_file *s, void *unused)
{
	return 0;
}
DEFINE_SHOW_STORE_ATTRIBUTE(pmc_core_ltr_ignore);

static ssize_t pmc_core_ltr_restore_write(struct file *file,
					  const char __user *userbuf,
					  size_t count, loff_t *ppos)
{
	struct seq_file *s = file->private_data;
	struct pmc_dev *pmcdev = s->private;

	return pmc_core_ltr_write(pmcdev, userbuf, count, 0);
}

static int pmc_core_ltr_restore_show(struct seq_file *s, void *unused)
{
	return 0;
}
DEFINE_SHOW_STORE_ATTRIBUTE(pmc_core_ltr_restore);

static void pmc_core_slps0_dbg_latch(struct pmc_dev *pmcdev, bool reset)
{
	struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
	const struct pmc_reg_map *map = pmc->map;
	u32 fd;

	guard(mutex)(&pmcdev->lock);

	if (!reset && !slps0_dbg_latch)
		return;

	fd = pmc_core_reg_read(pmc, map->slps0_dbg_offset);
	if (reset)
		fd &= ~CNP_PMC_LATCH_SLPS0_EVENTS;
	else
		fd |= CNP_PMC_LATCH_SLPS0_EVENTS;
	pmc_core_reg_write(pmc, map->slps0_dbg_offset, fd);

	slps0_dbg_latch = false;
}

static int pmc_core_slps0_dbg_show(struct seq_file *s, void *unused)
{
	struct pmc_dev *pmcdev = s->private;

	pmc_core_slps0_dbg_latch(pmcdev, false);
	pmc_core_slps0_display(pmcdev->pmcs[PMC_IDX_MAIN], NULL, s);
	pmc_core_slps0_dbg_latch(pmcdev, true);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(pmc_core_slps0_dbg);

static u32 convert_ltr_scale(u32 val)
{
	/*
	 * As per the PCIe specification supporting document
	 * ECN_LatencyTolnReporting_14Aug08.pdf, the Latency
	 * Tolerance Reporting data payload is encoded in
	 * 3 bit scale and 10 bit value fields. Values are
	 * multiplied by the indicated scale to yield an absolute time
	 * value, expressible in a range from 1 nanosecond to
	 * 2^25 * (2^10 - 1) = 34,326,183,936 nanoseconds.
	 *
	 * scale encoding is as follows:
	 *
	 * ----------------------------------------------
	 * |scale factor	|	Multiplier (ns)	|
	 * ----------------------------------------------
	 * |	0		|	1		|
	 * |	1		|	32		|
	 * |	2		|	1024		|
	 * |	3		|	32768		|
	 * |	4		|	1048576		|
	 * |	5		|	33554432	|
	 * |	6		|	Invalid		|
	 * |	7		|	Invalid		|
	 * ----------------------------------------------
	 */
	if (val > 5) {
		pr_warn("Invalid LTR scale factor.\n");
		return 0;
	}

	return 1U << (5 * val);
}
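
/*
 * Worked LTR decode (illustrative, assuming the PCIe LTR layout with the
 * scale in bits 12:10 and the value in bits 9:0 of each 16-bit half): a
 * snoop field of 0x0864 gives scale = 2 (multiplier 1024 ns) and value =
 * 0x64 = 100, i.e. a decoded latency of 102400 ns.
 */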

static int pmc_core_ltr_show(struct seq_file *s, void *unused)
{
	struct pmc_dev *pmcdev = s->private;
	u64 decoded_snoop_ltr, decoded_non_snoop_ltr, val;
	u32 ltr_raw_data, scale;
	u16 snoop_ltr, nonsnoop_ltr;
	unsigned int pmc_idx, index, ltr_index = 0;

	for (pmc_idx = 0; pmc_idx < ARRAY_SIZE(pmcdev->pmcs); ++pmc_idx) {
		struct pmc *pmc;
		const struct pmc_bit_map *map;
		u32 ltr_ign_reg;

		pmc = pmcdev->pmcs[pmc_idx];
		if (!pmc)
			continue;

		scoped_guard(mutex, &pmcdev->lock)
			ltr_ign_reg = pmc_core_reg_read(pmc, pmc->map->ltr_ignore_offset);

		map = pmc->map->ltr_show_sts;
		for (index = 0; map[index].name; index++) {
			bool ltr_ign_data;

			if (index > pmc->map->ltr_ignore_max)
				ltr_ign_data = false;
			else
				ltr_ign_data = ltr_ign_reg & BIT(index);

			decoded_snoop_ltr = decoded_non_snoop_ltr = 0;
			ltr_raw_data = pmc_core_reg_read(pmc,
							 map[index].bit_mask);
			snoop_ltr = ltr_raw_data & ~MTPMC_MASK;
			nonsnoop_ltr = (ltr_raw_data >> 0x10) & ~MTPMC_MASK;

			if (FIELD_GET(LTR_REQ_NONSNOOP, ltr_raw_data)) {
				scale = FIELD_GET(LTR_DECODED_SCALE, nonsnoop_ltr);
				val = FIELD_GET(LTR_DECODED_VAL, nonsnoop_ltr);
				decoded_non_snoop_ltr = val * convert_ltr_scale(scale);
			}
			if (FIELD_GET(LTR_REQ_SNOOP, ltr_raw_data)) {
				scale = FIELD_GET(LTR_DECODED_SCALE, snoop_ltr);
				val = FIELD_GET(LTR_DECODED_VAL, snoop_ltr);
				decoded_snoop_ltr = val * convert_ltr_scale(scale);
			}

			seq_printf(s, "%d\tPMC%d:%-32s\tLTR: RAW: 0x%-16x\tNon-Snoop(ns): %-16llu\tSnoop(ns): %-16llu\tLTR_IGNORE: %d\n",
				   ltr_index, pmc_idx, map[index].name, ltr_raw_data,
				   decoded_non_snoop_ltr,
				   decoded_snoop_ltr, ltr_ign_data);
			ltr_index++;
		}
	}
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(pmc_core_ltr);

static int pmc_core_s0ix_blocker_show(struct seq_file *s, void *unused)
{
	struct pmc_dev *pmcdev = s->private;
	unsigned int pmc_idx;

	for (pmc_idx = 0; pmc_idx < ARRAY_SIZE(pmcdev->pmcs); pmc_idx++) {
		const struct pmc_bit_map **maps;
		unsigned int arr_size, r_idx;
		u32 offset, counter;
		struct pmc *pmc;

		pmc = pmcdev->pmcs[pmc_idx];
		if (!pmc)
			continue;
		maps = pmc->map->s0ix_blocker_maps;
		offset = pmc->map->s0ix_blocker_offset;
		arr_size = pmc_core_lpm_get_arr_size(maps);

		for (r_idx = 0; r_idx < arr_size; r_idx++) {
			const struct pmc_bit_map *map;

			for (map = maps[r_idx]; map->name; map++) {
				if (!map->blk)
					continue;
				counter = pmc_core_reg_read(pmc, offset);
				seq_printf(s, "PMC%d:%-30s %-30d\n", pmc_idx,
					   map->name, counter);
				offset += map->blk * S0IX_BLK_SIZE;
			}
		}
	}
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(pmc_core_s0ix_blocker);
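
/*
 * The blocker counters above are laid out back to back in register space:
 * each map entry with a counter consumes map->blk blocks of S0IX_BLK_SIZE
 * bytes, so the read offset simply advances by blk * S0IX_BLK_SIZE after
 * every entry that has one (blk != 0).
 */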

static void pmc_core_ltr_ignore_all(struct pmc_dev *pmcdev)
{
	unsigned int pmc_idx;

	for (pmc_idx = 0; pmc_idx < ARRAY_SIZE(pmcdev->pmcs); pmc_idx++) {
		struct pmc *pmc;
		u32 ltr_ign;

		pmc = pmcdev->pmcs[pmc_idx];
		if (!pmc)
			continue;

		guard(mutex)(&pmcdev->lock);
		pmc->ltr_ign = pmc_core_reg_read(pmc, pmc->map->ltr_ignore_offset);

		/* ltr_ignore_max is the max index value for the LTR ignore register */
		ltr_ign = pmc->ltr_ign | GENMASK(pmc->map->ltr_ignore_max, 0);
		pmc_core_reg_write(pmc, pmc->map->ltr_ignore_offset, ltr_ign);
	}

	/*
	 * Ignoring ME during suspend blocks platforms with an ADL PCH from
	 * reaching a deeper S0ix substate, so clear its ignore bit again.
	 */
	pmc_core_send_ltr_ignore(pmcdev, 6, 0);
}

static void pmc_core_ltr_restore_all(struct pmc_dev *pmcdev)
{
	unsigned int pmc_idx;

	for (pmc_idx = 0; pmc_idx < ARRAY_SIZE(pmcdev->pmcs); pmc_idx++) {
		struct pmc *pmc;

		pmc = pmcdev->pmcs[pmc_idx];
		if (!pmc)
			continue;

		guard(mutex)(&pmcdev->lock);
		pmc_core_reg_write(pmc, pmc->map->ltr_ignore_offset, pmc->ltr_ign);
	}
}

static inline u64 adjust_lpm_residency(struct pmc *pmc, u32 offset,
				       const int lpm_adj_x2)
{
	u64 lpm_res = pmc_core_reg_read(pmc, offset);

	return GET_X2_COUNTER((u64)lpm_adj_x2 * lpm_res);
}

static int pmc_core_substate_res_show(struct seq_file *s, void *unused)
{
	struct pmc_dev *pmcdev = s->private;
	unsigned int pmc_idx;

	for (pmc_idx = 0; pmc_idx < ARRAY_SIZE(pmcdev->pmcs); ++pmc_idx) {
		int lpm_adj_x2;
		struct pmc *pmc;
		u32 offset;
		u8 mode;

		pmc = pmcdev->pmcs[pmc_idx];
		if (!pmc)
			continue;

		lpm_adj_x2 = pmc->map->lpm_res_counter_step_x2;
		offset = pmc->map->lpm_residency_offset;

		seq_printf(s, "pmc%u %10s %15s\n", pmc_idx, "Substate", "Residency");
		pmc_for_each_mode(mode, pmc) {
			seq_printf(s, "%15s %15llu\n", pmc_lpm_modes[mode],
				   adjust_lpm_residency(pmc, offset + (4 * mode), lpm_adj_x2));
		}
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(pmc_core_substate_res);
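
/*
 * Worked residency conversion (illustrative): lpm_res_counter_step_x2
 * holds twice the counter tick so that integer math can be used, i.e. 61
 * for the 30.5 us tick mentioned in pmc_core_adjust_slp_s0_step().
 * GET_X2_COUNTER() divides the product back by two, so a raw count of
 * 1000 becomes 1000 * 61 / 2 = 30500 us.
 */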
seq_printf(s, " %9s |", "Status"); 860 seq_printf(s, " %11s |\n", "Live Status"); 861 } else { 862 seq_printf(s, " %9s |\n", "Value"); 863 } 864 } 865 866 static int pmc_core_substate_blk_req_show(struct seq_file *s, void *unused) 867 { 868 struct pmc_dev *pmcdev = s->private; 869 unsigned int pmc_idx; 870 871 for (pmc_idx = 0; pmc_idx < ARRAY_SIZE(pmcdev->pmcs); pmc_idx++) { 872 const struct pmc_bit_map **maps; 873 unsigned int arr_size, r_idx; 874 u32 offset, counter; 875 u32 *lpm_req_regs; 876 struct pmc *pmc; 877 878 pmc = pmcdev->pmcs[pmc_idx]; 879 if (!pmc || !pmc->lpm_req_regs) 880 continue; 881 882 lpm_req_regs = pmc->lpm_req_regs; 883 maps = pmc->map->s0ix_blocker_maps; 884 offset = pmc->map->s0ix_blocker_offset; 885 arr_size = pmc_core_lpm_get_arr_size(maps); 886 887 /* Display the header */ 888 pmc_core_substate_req_header_show(s, pmc_idx, HEADER_VALUE); 889 890 for (r_idx = 0; r_idx < arr_size; r_idx++) { 891 const struct pmc_bit_map *map; 892 893 for (map = maps[r_idx]; map->name; map++) { 894 u8 mode; 895 896 if (!map->blk) 897 continue; 898 899 counter = pmc_core_reg_read(pmc, offset); 900 seq_printf(s, "pmc%u: %34s |", pmc_idx, map->name); 901 pmc_for_each_mode(mode, pmc) { 902 bool required = *lpm_req_regs & BIT(mode); 903 904 seq_printf(s, " %9s |", required ? "Required" : " "); 905 } 906 seq_printf(s, " %9u |\n", counter); 907 offset += map->blk * S0IX_BLK_SIZE; 908 lpm_req_regs++; 909 } 910 } 911 } 912 return 0; 913 } 914 915 static int pmc_core_substate_blk_req_open(struct inode *inode, struct file *file) 916 { 917 return single_open(file, pmc_core_substate_blk_req_show, inode->i_private); 918 } 919 920 const struct file_operations pmc_core_substate_blk_req_fops = { 921 .owner = THIS_MODULE, 922 .open = pmc_core_substate_blk_req_open, 923 .read = seq_read, 924 .llseek = seq_lseek, 925 .release = single_release, 926 }; 927 928 static int pmc_core_substate_req_regs_show(struct seq_file *s, void *unused) 929 { 930 struct pmc_dev *pmcdev = s->private; 931 u32 sts_offset; 932 u32 sts_offset_live; 933 u32 *lpm_req_regs; 934 unsigned int mp, pmc_idx; 935 int num_maps; 936 937 for (pmc_idx = 0; pmc_idx < ARRAY_SIZE(pmcdev->pmcs); ++pmc_idx) { 938 struct pmc *pmc = pmcdev->pmcs[pmc_idx]; 939 const struct pmc_bit_map **maps; 940 941 if (!pmc) 942 continue; 943 944 maps = pmc->map->lpm_sts; 945 num_maps = pmc->map->lpm_num_maps; 946 sts_offset = pmc->map->lpm_status_offset; 947 sts_offset_live = pmc->map->lpm_live_status_offset; 948 lpm_req_regs = pmc->lpm_req_regs; 949 950 /* 951 * When there are multiple PMCs, though the PMC may exist, the 952 * requirement register discovery could have failed so check 953 * before accessing. 

		/*
		 * When there are multiple PMCs, though the PMC may exist, the
		 * requirement register discovery could have failed, so check
		 * before accessing.
		 */
		if (!lpm_req_regs)
			continue;

		/* Display the header */
		pmc_core_substate_req_header_show(s, pmc_idx, HEADER_STATUS);

		/* Loop over maps */
		for (mp = 0; mp < num_maps; mp++) {
			u32 req_mask = 0;
			u32 lpm_status;
			u32 lpm_status_live;
			const struct pmc_bit_map *map;
			int i, len = 32;
			u8 mode;

			/*
			 * Capture the requirements and create a mask so that we only
			 * show an element if it's required for at least one of the
			 * enabled low power modes
			 */
			pmc_for_each_mode(mode, pmc)
				req_mask |= lpm_req_regs[mp + (mode * num_maps)];

			/* Get the last latched status for this map */
			lpm_status = pmc_core_reg_read(pmc, sts_offset + (mp * 4));

			/* Get the runtime status for this map */
			lpm_status_live = pmc_core_reg_read(pmc, sts_offset_live + (mp * 4));

			/* Loop over elements in this map */
			map = maps[mp];
			for (i = 0; map[i].name && i < len; i++) {
				u32 bit_mask = map[i].bit_mask;

				if (!(bit_mask & req_mask)) {
					/*
					 * Not required for any enabled states
					 * so don't display
					 */
					continue;
				}

				/* Display the element name in the first column */
				seq_printf(s, "pmc%d: %34s |", pmc_idx, map[i].name);

				/* Loop over the enabled states and display if required */
				pmc_for_each_mode(mode, pmc) {
					bool required = lpm_req_regs[mp + (mode * num_maps)] &
							bit_mask;
					seq_printf(s, " %9s |", required ? "Required" : " ");
				}

				/* In the Status column, show the last captured state of this agent */
				seq_printf(s, " %9s |", lpm_status & bit_mask ? "Yes" : " ");

				/* In the Live status column, show the live state of this agent */
				seq_printf(s, " %11s |", lpm_status_live & bit_mask ? "Yes" : " ");

				seq_puts(s, "\n");
			}
		}
	}
	return 0;
}

static int pmc_core_substate_req_regs_open(struct inode *inode, struct file *file)
{
	return single_open(file, pmc_core_substate_req_regs_show, inode->i_private);
}

const struct file_operations pmc_core_substate_req_regs_fops = {
	.owner = THIS_MODULE,
	.open = pmc_core_substate_req_regs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static unsigned int pmc_core_get_crystal_freq(void)
{
	unsigned int eax_denominator, ebx_numerator, ecx_hz, edx;

	if (boot_cpu_data.cpuid_level < CPUID_LEAF_TSC)
		return 0;

	eax_denominator = ebx_numerator = ecx_hz = edx = 0;

	/* TSC/Crystal ratio, plus optionally Crystal Hz */
	cpuid(CPUID_LEAF_TSC, &eax_denominator, &ebx_numerator, &ecx_hz, &edx);

	if (ebx_numerator == 0 || eax_denominator == 0)
		return 0;

	return ecx_hz;
}

static int pmc_core_die_c6_us_show(struct seq_file *s, void *unused)
{
	struct pmc_dev *pmcdev = s->private;
	u64 die_c6_res, count;
	int ret;

	if (!pmcdev->crystal_freq) {
		dev_warn_once(&pmcdev->pdev->dev, "Crystal frequency unavailable\n");
		return -ENXIO;
	}

	ret = pmt_telem_read(pmcdev->punit_ep, pmcdev->die_c6_offset,
			     &count, 1);
	if (ret)
		return ret;

	die_c6_res = div64_u64(count * HZ_PER_MHZ, pmcdev->crystal_freq);
	seq_printf(s, "%llu\n", die_c6_res);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(pmc_core_die_c6_us);
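
/*
 * Worked die C6 conversion (crystal frequency assumed for the example):
 * the telemetry counter ticks at the crystal rate, so
 * count * HZ_PER_MHZ / crystal_freq yields microseconds. With a 38.4 MHz
 * crystal, a count of 384000000 ticks reads back as 10000000 us (10 s)
 * of die C6 residency.
 */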

static int pmc_core_lpm_latch_mode_show(struct seq_file *s, void *unused)
{
	struct pmc_dev *pmcdev = s->private;
	struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
	bool c10;
	u32 reg;
	u8 mode;

	reg = pmc_core_reg_read(pmc, pmc->map->lpm_sts_latch_en_offset);
	if (reg & LPM_STS_LATCH_MODE) {
		seq_puts(s, "c10");
		c10 = false;
	} else {
		seq_puts(s, "[c10]");
		c10 = true;
	}

	pmc_for_each_mode(mode, pmc) {
		if ((BIT(mode) & reg) && !c10)
			seq_printf(s, " [%s]", pmc_lpm_modes[mode]);
		else
			seq_printf(s, " %s", pmc_lpm_modes[mode]);
	}

	seq_puts(s, " clear\n");

	return 0;
}

static ssize_t pmc_core_lpm_latch_mode_write(struct file *file,
					     const char __user *userbuf,
					     size_t count, loff_t *ppos)
{
	struct seq_file *s = file->private_data;
	struct pmc_dev *pmcdev = s->private;
	struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
	bool clear = false, c10 = false;
	unsigned char buf[8];
	int mode;
	u32 reg;
	u8 m;

	if (count > sizeof(buf) - 1)
		return -EINVAL;
	if (copy_from_user(buf, userbuf, count))
		return -EFAULT;
	buf[count] = '\0';

	/*
	 * Allowed strings are:
	 *	Any enabled substate, e.g. 'S0i2.0'
	 *	'c10'
	 *	'clear'
	 */
	mode = sysfs_match_string(pmc_lpm_modes, buf);

	/* Check string matches enabled mode */
	pmc_for_each_mode(m, pmc)
		if (mode == m)
			break;

	if (mode != m || mode < 0) {
		if (sysfs_streq(buf, "clear"))
			clear = true;
		else if (sysfs_streq(buf, "c10"))
			c10 = true;
		else
			return -EINVAL;
	}

	if (clear) {
		guard(mutex)(&pmcdev->lock);

		reg = pmc_core_reg_read(pmc, pmc->map->etr3_offset);
		reg |= ETR3_CLEAR_LPM_EVENTS;
		pmc_core_reg_write(pmc, pmc->map->etr3_offset, reg);

		return count;
	}

	if (c10) {
		guard(mutex)(&pmcdev->lock);

		reg = pmc_core_reg_read(pmc, pmc->map->lpm_sts_latch_en_offset);
		reg &= ~LPM_STS_LATCH_MODE;
		pmc_core_reg_write(pmc, pmc->map->lpm_sts_latch_en_offset, reg);

		return count;
	}

	/*
	 * For LPM mode latching we set the latch enable bit and selected mode
	 * and clear everything else.
	 */
	reg = LPM_STS_LATCH_MODE | BIT(mode);
	guard(mutex)(&pmcdev->lock);
	pmc_core_reg_write(pmc, pmc->map->lpm_sts_latch_en_offset, reg);

	return count;
}
DEFINE_PMC_CORE_ATTR_WRITE(pmc_core_lpm_latch_mode);

static int pmc_core_pkgc_show(struct seq_file *s, void *unused)
{
	struct pmc *pmc = s->private;
	const struct pmc_bit_map *map = pmc->map->msr_sts;
	u64 pcstate_count;
	unsigned int index;

	for (index = 0; map[index].name; index++) {
		if (rdmsrq_safe(map[index].bit_mask, &pcstate_count))
			continue;

		pcstate_count *= 1000;
		do_div(pcstate_count, tsc_khz);
		seq_printf(s, "%-8s : %llu\n", map[index].name,
			   pcstate_count);
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(pmc_core_pkgc);
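
/*
 * Worked package C-state conversion (illustrative tsc_khz): the
 * conversion above assumes the residency MSRs count at the TSC rate, so
 * count * 1000 / tsc_khz is microseconds. With tsc_khz = 2000000 (a
 * 2 GHz TSC), a count of 4000000000 prints as 2000000 us.
 */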

static bool pmc_core_pri_verify(u32 lpm_pri, u8 *mode_order)
{
	unsigned int i, j;

	if (!lpm_pri)
		return false;
	/*
	 * Each byte contains the priority level for 2 modes (7:4 and 3:0).
	 * In a 32 bit register this allows for describing 8 modes. Store the
	 * levels and look for values out of range.
	 */
	for (i = 0; i < 8; i++) {
		int level = lpm_pri & GENMASK(3, 0);

		if (level >= LPM_MAX_NUM_MODES)
			return false;

		mode_order[i] = level;
		lpm_pri >>= 4;
	}

	/* Check that we have unique values */
	for (i = 0; i < LPM_MAX_NUM_MODES - 1; i++)
		for (j = i + 1; j < LPM_MAX_NUM_MODES; j++)
			if (mode_order[i] == mode_order[j])
				return false;

	return true;
}

static void pmc_core_pmc_get_low_power_modes(struct pmc_dev *pmcdev, struct pmc *pmc)
{
	u8 pri_order[LPM_MAX_NUM_MODES] = LPM_DEFAULT_PRI;
	u8 mode_order[LPM_MAX_NUM_MODES];
	u32 lpm_pri;
	u32 lpm_en;
	u8 mode;
	unsigned int i;
	int p;

	/* Use LPM Maps to indicate support for substates */
	if (!pmc->map->lpm_num_maps)
		return;

	lpm_en = pmc_core_reg_read(pmc, pmc->map->lpm_en_offset);
	/*
	 * For MTL, BIT 31 is not an lpm mode but an enable bit. The lower
	 * byte is enough to cover the number of lpm modes for all platforms,
	 * hence mask the upper 3 bytes.
	 */
	pmc->num_lpm_modes = hweight32(lpm_en & 0xFF);

	/* Read the 32 bit LPM_PRI register */
	lpm_pri = pmc_core_reg_read(pmc, pmc->map->lpm_priority_offset);

	/*
	 * If the lpm_pri value passes verification, then override the default
	 * modes here. Otherwise stick with the default.
	 */
	if (pmc_core_pri_verify(lpm_pri, mode_order))
		/* Get list of modes in priority order */
		for (mode = 0; mode < LPM_MAX_NUM_MODES; mode++)
			pri_order[mode_order[mode]] = mode;
	else
		dev_dbg(&pmcdev->pdev->dev,
			"Assuming a default substate order for this platform\n");

	/*
	 * Loop through all modes from lowest to highest priority,
	 * and capture all enabled modes in order
	 */
	i = 0;
	for (p = LPM_MAX_NUM_MODES - 1; p >= 0; p--) {
		u8 mode = pri_order[p];

		if (!(BIT(mode) & lpm_en))
			continue;

		pmc->lpm_en_modes[i++] = mode;
	}
}

static void pmc_core_get_low_power_modes(struct pmc_dev *pmcdev)
{
	unsigned int pmc_idx;

	for (pmc_idx = 0; pmc_idx < ARRAY_SIZE(pmcdev->pmcs); pmc_idx++) {
		struct pmc *pmc;

		pmc = pmcdev->pmcs[pmc_idx];
		if (!pmc)
			continue;

		pmc_core_pmc_get_low_power_modes(pmcdev, pmc);
	}
}
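
/*
 * Worked priority decode (illustrative register values): lpm_pri =
 * 0x76543210 stores level 0 for mode 0 in the lowest nibble up to level 7
 * for mode 7, so mode_order = {0, 1, ..., 7} and pri_order ends up as the
 * identity mapping. The final loop then walks p = 7..0 and, with, say,
 * lpm_en = 0x09 (modes 0 and 3 enabled), captures lpm_en_modes = {3, 0}.
 */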

int get_primary_reg_base(struct pmc *pmc)
{
	u64 slp_s0_addr;

	if (lpit_read_residency_count_address(&slp_s0_addr)) {
		pmc->base_addr = PMC_BASE_ADDR_DEFAULT;

		if (page_is_ram(PHYS_PFN(pmc->base_addr)))
			return -ENODEV;
	} else {
		pmc->base_addr = slp_s0_addr - pmc->map->slp_s0_offset;
	}

	pmc->regbase = ioremap(pmc->base_addr, pmc->map->regmap_length);
	if (!pmc->regbase)
		return -ENOMEM;
	return 0;
}

static struct telem_endpoint *pmc_core_register_endpoint(struct pci_dev *pcidev, u32 *guids)
{
	struct telem_endpoint *ep;
	unsigned int i;

	for (i = 0; guids[i]; i++) {
		ep = pmt_telem_find_and_register_endpoint(pcidev, guids[i], 0);
		if (!IS_ERR(ep))
			return ep;
	}
	return ERR_PTR(-ENODEV);
}

void pmc_core_punit_pmt_init(struct pmc_dev *pmcdev, u32 *guids)
{
	struct telem_endpoint *ep;
	struct pci_dev *pcidev;

	pcidev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(10, 0));
	if (!pcidev) {
		dev_err(&pmcdev->pdev->dev, "PUNIT PMT device not found.");
		return;
	}

	ep = pmc_core_register_endpoint(pcidev, guids);
	pci_dev_put(pcidev);
	if (IS_ERR(ep)) {
		dev_err(&pmcdev->pdev->dev,
			"pmc_core: couldn't get DMU telem endpoint %ld",
			PTR_ERR(ep));
		return;
	}

	pmcdev->punit_ep = ep;
	pmcdev->die_c6_offset = MTL_PMT_DMU_DIE_C6_OFFSET;
}

void pmc_core_set_device_d3(unsigned int device)
{
	struct pci_dev *pcidev;

	pcidev = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
	if (pcidev) {
		if (!device_trylock(&pcidev->dev)) {
			pci_dev_put(pcidev);
			return;
		}
		if (!pcidev->dev.driver) {
			dev_info(&pcidev->dev, "Setting to D3hot\n");
			pci_set_power_state(pcidev, PCI_D3hot);
		}
		device_unlock(&pcidev->dev);
		pci_dev_put(pcidev);
	}
}

static bool pmc_core_is_pson_residency_enabled(struct pmc_dev *pmcdev)
{
	struct platform_device *pdev = pmcdev->pdev;
	struct acpi_device *adev = ACPI_COMPANION(&pdev->dev);
	u8 val;

	if (!adev)
		return false;

	if (fwnode_property_read_u8(acpi_fwnode_handle(adev),
				    "intel-cec-pson-switching-enabled-in-s0",
				    &val))
		return false;

	return val == 1;
}

static void pmc_core_dbgfs_unregister(struct pmc_dev *pmcdev)
{
	debugfs_remove_recursive(pmcdev->dbgfs_dir);
}

static void pmc_core_dbgfs_register(struct pmc_dev *pmcdev, struct pmc_dev_info *pmc_dev_info)
{
	struct pmc *primary_pmc = pmcdev->pmcs[PMC_IDX_MAIN];
	struct dentry *dir;

	dir = debugfs_create_dir("pmc_core", NULL);
	pmcdev->dbgfs_dir = dir;

	debugfs_create_file("slp_s0_residency_usec", 0444, dir, primary_pmc,
			    &pmc_core_dev_state);

	if (primary_pmc->map->pfear_sts)
		debugfs_create_file("pch_ip_power_gating_status", 0444, dir,
				    pmcdev, &pmc_core_ppfear_fops);

	debugfs_create_file("ltr_ignore", 0644, dir, pmcdev,
			    &pmc_core_ltr_ignore_fops);

	debugfs_create_file("ltr_restore", 0200, dir, pmcdev, &pmc_core_ltr_restore_fops);

	debugfs_create_file("ltr_show", 0444, dir, pmcdev, &pmc_core_ltr_fops);

	if (primary_pmc->map->s0ix_blocker_maps)
		debugfs_create_file("s0ix_blocker", 0444, dir, pmcdev, &pmc_core_s0ix_blocker_fops);

	debugfs_create_file("package_cstate_show", 0444, dir, primary_pmc,
			    &pmc_core_pkgc_fops);

	if (primary_pmc->map->pll_sts)
		debugfs_create_file("pll_status", 0444, dir, pmcdev,
				    &pmc_core_pll_fops);

	if (primary_pmc->map->mphy_sts)
		debugfs_create_file("mphy_core_lanes_power_gating_status",
				    0444, dir, pmcdev,
				    &pmc_core_mphy_pg_fops);

	if (primary_pmc->map->slps0_dbg_maps) {
		debugfs_create_file("slp_s0_debug_status", 0444,
				    dir, pmcdev,
				    &pmc_core_slps0_dbg_fops);

		debugfs_create_bool("slp_s0_dbg_latch", 0644,
				    dir, &slps0_dbg_latch);
	}

	if (primary_pmc->map->lpm_en_offset) {
		debugfs_create_file("substate_residencies", 0444,
				    pmcdev->dbgfs_dir, pmcdev,
				    &pmc_core_substate_res_fops);
	}

	if (primary_pmc->map->lpm_status_offset) {
		debugfs_create_file("substate_status_registers", 0444,
				    pmcdev->dbgfs_dir, pmcdev,
				    &pmc_core_substate_sts_regs_fops);
		debugfs_create_file("substate_live_status_registers", 0444,
				    pmcdev->dbgfs_dir, pmcdev,
				    &pmc_core_substate_l_sts_regs_fops);
&pmc_core_lpm_latch_mode_fops); 1452 } 1453 1454 if (primary_pmc->lpm_req_regs) { 1455 debugfs_create_file("substate_requirements", 0444, 1456 pmcdev->dbgfs_dir, pmcdev, 1457 pmc_dev_info->sub_req_show); 1458 } 1459 1460 if (primary_pmc->map->pson_residency_offset && pmc_core_is_pson_residency_enabled(pmcdev)) { 1461 debugfs_create_file("pson_residency_usec", 0444, 1462 pmcdev->dbgfs_dir, primary_pmc, &pmc_core_pson_residency); 1463 } 1464 1465 if (pmcdev->punit_ep) { 1466 debugfs_create_file("die_c6_us_show", 0444, 1467 pmcdev->dbgfs_dir, pmcdev, 1468 &pmc_core_die_c6_us_fops); 1469 } 1470 } 1471 1472 /* 1473 * This function retrieves low power mode requirement data from PMC Low 1474 * Power Mode (LPM) table. 1475 * 1476 * In telemetry space, the LPM table contains a 4 byte header followed 1477 * by 8 consecutive mode blocks (one for each LPM mode). Each block 1478 * has a 4 byte header followed by a set of registers that describe the 1479 * IP state requirements for the given mode. The IP mapping is platform 1480 * specific but the same for each block, making for easy analysis. 1481 * Platforms only use a subset of the space to track the requirements 1482 * for their IPs. Callers provide the requirement registers they use as 1483 * a list of indices. Each requirement register is associated with an 1484 * IP map that's maintained by the caller. 1485 * 1486 * Header 1487 * +----+----------------------------+----------------------------+ 1488 * | 0 | REVISION | ENABLED MODES | 1489 * +----+--------------+-------------+-------------+--------------+ 1490 * 1491 * Low Power Mode 0 Block 1492 * +----+--------------+-------------+-------------+--------------+ 1493 * | 1 | SUB ID | SIZE | MAJOR | MINOR | 1494 * +----+--------------+-------------+-------------+--------------+ 1495 * | 2 | LPM0 Requirements 0 | 1496 * +----+---------------------------------------------------------+ 1497 * | | ... | 1498 * +----+---------------------------------------------------------+ 1499 * | 29 | LPM0 Requirements 27 | 1500 * +----+---------------------------------------------------------+ 1501 * 1502 * ... 1503 * 1504 * Low Power Mode 7 Block 1505 * +----+--------------+-------------+-------------+--------------+ 1506 * | | SUB ID | SIZE | MAJOR | MINOR | 1507 * +----+--------------+-------------+-------------+--------------+ 1508 * | 60 | LPM7 Requirements 0 | 1509 * +----+---------------------------------------------------------+ 1510 * | | ... 

/*
 * This function retrieves low power mode requirement data from the PMC Low
 * Power Mode (LPM) table.
 *
 * In telemetry space, the LPM table contains a 4 byte header followed
 * by 8 consecutive mode blocks (one for each LPM mode). Each block
 * has a 4 byte header followed by a set of registers that describe the
 * IP state requirements for the given mode. The IP mapping is platform
 * specific but the same for each block, making for easy analysis.
 * Platforms only use a subset of the space to track the requirements
 * for their IPs. Callers provide the requirement registers they use as
 * a list of indices. Each requirement register is associated with an
 * IP map that's maintained by the caller.
 *
 * Header
 * +----+----------------------------+----------------------------+
 * |  0 |         REVISION           |       ENABLED MODES        |
 * +----+--------------+-------------+-------------+--------------+
 *
 * Low Power Mode 0 Block
 * +----+--------------+-------------+-------------+--------------+
 * |  1 |    SUB ID    |     SIZE    |    MAJOR    |    MINOR     |
 * +----+--------------+-------------+-------------+--------------+
 * |  2 |                  LPM0 Requirements 0                    |
 * +----+---------------------------------------------------------+
 * |    |                          ...                            |
 * +----+---------------------------------------------------------+
 * | 29 |                  LPM0 Requirements 27                   |
 * +----+---------------------------------------------------------+
 *
 * ...
 *
 * Low Power Mode 7 Block
 * +----+--------------+-------------+-------------+--------------+
 * |    |    SUB ID    |     SIZE    |    MAJOR    |    MINOR     |
 * +----+--------------+-------------+-------------+--------------+
 * | 60 |                  LPM7 Requirements 0                    |
 * +----+---------------------------------------------------------+
 * |    |                          ...                            |
 * +----+---------------------------------------------------------+
 * | 87 |                  LPM7 Requirements 27                   |
 * +----+---------------------------------------------------------+
 *
 */
int pmc_core_pmt_get_lpm_req(struct pmc_dev *pmcdev, struct pmc *pmc, struct telem_endpoint *ep)
{
	const u8 *lpm_indices;
	int num_maps, mode_offset = 0;
	int ret, lpm_size;
	u8 mode;

	lpm_indices = pmc->map->lpm_reg_index;
	num_maps = pmc->map->lpm_num_maps;
	lpm_size = LPM_MAX_NUM_MODES * num_maps;

	pmc->lpm_req_regs = devm_kzalloc(&pmcdev->pdev->dev,
					 lpm_size * sizeof(u32),
					 GFP_KERNEL);
	if (!pmc->lpm_req_regs)
		return -ENOMEM;

	mode_offset = LPM_HEADER_OFFSET + LPM_MODE_OFFSET;
	pmc_for_each_mode(mode, pmc) {
		u32 *req_offset = pmc->lpm_req_regs + (mode * num_maps);
		int m;

		for (m = 0; m < num_maps; m++) {
			u8 sample_id = lpm_indices[m] + mode_offset;

			ret = pmt_telem_read32(ep, sample_id, req_offset, 1);
			if (ret) {
				dev_err(&pmcdev->pdev->dev,
					"couldn't read Low Power Mode requirements: %d\n", ret);
				return ret;
			}
			++req_offset;
		}
		mode_offset += LPM_REG_COUNT + LPM_MODE_OFFSET;
	}
	return 0;
}

int pmc_core_pmt_get_blk_sub_req(struct pmc_dev *pmcdev, struct pmc *pmc,
				 struct telem_endpoint *ep)
{
	u32 num_blocker, sample_offset;
	unsigned int index;
	u32 *req_offset;
	int ret;

	num_blocker = pmc->map->num_s0ix_blocker;
	sample_offset = pmc->map->blocker_req_offset;

	pmc->lpm_req_regs = devm_kcalloc(&pmcdev->pdev->dev, num_blocker,
					 sizeof(u32), GFP_KERNEL);
	if (!pmc->lpm_req_regs)
		return -ENOMEM;

	req_offset = pmc->lpm_req_regs;
	for (index = 0; index < num_blocker; index++, req_offset++) {
		ret = pmt_telem_read32(ep, index + sample_offset, req_offset, 1);
		if (ret) {
			dev_err(&pmcdev->pdev->dev,
				"couldn't read Low Power Mode requirements: %d\n", ret);
			return ret;
		}
	}
	return 0;
}
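
/*
 * Note on the telemetry walk in pmc_core_pmt_get_lpm_req() above: only
 * the requirement registers a platform actually uses are read.
 * lpm_reg_index lists their positions inside a mode block, and
 * mode_offset advances by LPM_REG_COUNT plus LPM_MODE_OFFSET samples
 * (a full requirements block plus, presumably, its per-mode header) to
 * step from one enabled mode's block to the next.
 */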

static int pmc_core_get_telem_info(struct pmc_dev *pmcdev, struct pmc_dev_info *pmc_dev_info)
{
	struct pci_dev *pcidev __free(pci_dev_put) = NULL;
	struct telem_endpoint *ep;
	unsigned int pmc_idx;
	int ret;

	pcidev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(20, pmc_dev_info->pci_func));
	if (!pcidev)
		return -ENODEV;

	for (pmc_idx = 0; pmc_idx < ARRAY_SIZE(pmcdev->pmcs); ++pmc_idx) {
		struct pmc *pmc;

		pmc = pmcdev->pmcs[pmc_idx];
		if (!pmc)
			continue;

		if (!pmc->map->lpm_req_guid)
			return -ENXIO;

		ep = pmt_telem_find_and_register_endpoint(pcidev, pmc->map->lpm_req_guid, 0);
		if (IS_ERR(ep)) {
			dev_dbg(&pmcdev->pdev->dev, "couldn't get telem endpoint %pe", ep);
			return -EPROBE_DEFER;
		}

		ret = pmc_dev_info->sub_req(pmcdev, pmc, ep);
		pmt_telem_unregister_endpoint(ep);
		if (ret)
			return ret;
	}

	return 0;
}

static const struct pmc_reg_map *pmc_core_find_regmap(struct pmc_info *list, u16 devid)
{
	for (; list->map; ++list)
		if (devid == list->devid)
			return list->map;

	return NULL;
}

static int pmc_core_pmc_add(struct pmc_dev *pmcdev, unsigned int pmc_idx)
{
	struct pmc_ssram_telemetry pmc_ssram_telemetry;
	const struct pmc_reg_map *map;
	struct pmc *pmc;
	int ret;

	ret = pmc_ssram_telemetry_get_pmc_info(pmc_idx, &pmc_ssram_telemetry);
	if (ret)
		return ret;

	map = pmc_core_find_regmap(pmcdev->regmap_list, pmc_ssram_telemetry.devid);
	if (!map)
		return -ENODEV;

	pmc = pmcdev->pmcs[pmc_idx];
	/* Memory for the primary PMC has already been allocated */
	if (!pmc) {
		pmc = devm_kzalloc(&pmcdev->pdev->dev, sizeof(*pmc), GFP_KERNEL);
		if (!pmc)
			return -ENOMEM;
	}

	pmc->map = map;
	pmc->base_addr = pmc_ssram_telemetry.base_addr;
	pmc->regbase = ioremap(pmc->base_addr, pmc->map->regmap_length);

	if (!pmc->regbase) {
		devm_kfree(&pmcdev->pdev->dev, pmc);
		return -ENOMEM;
	}

	pmcdev->pmcs[pmc_idx] = pmc;

	return 0;
}

static int pmc_core_ssram_get_reg_base(struct pmc_dev *pmcdev)
{
	int ret;

	ret = pmc_core_pmc_add(pmcdev, PMC_IDX_MAIN);
	if (ret)
		return ret;

	pmc_core_pmc_add(pmcdev, PMC_IDX_IOE);
	pmc_core_pmc_add(pmcdev, PMC_IDX_PCH);

	return 0;
}

/*
 * When supported, SSRAM init is used to discover all available PMCs.
 * If SSRAM init fails, this function falls back to the legacy method to
 * at least get the primary PMC.
 */
int generic_core_init(struct pmc_dev *pmcdev, struct pmc_dev_info *pmc_dev_info)
{
	struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
	bool ssram;
	int ret;

	pmcdev->suspend = pmc_dev_info->suspend;
	pmcdev->resume = pmc_dev_info->resume;

	ssram = pmc_dev_info->regmap_list != NULL;
	if (ssram) {
		pmcdev->regmap_list = pmc_dev_info->regmap_list;
		ret = pmc_core_ssram_get_reg_base(pmcdev);
		/*
		 * An EAGAIN error code indicates that the Intel PMC SSRAM
		 * Telemetry driver has not finished probing and PMC info is
		 * not available yet. Try again later.
		 */
		if (ret == -EAGAIN)
			return -EPROBE_DEFER;

		if (ret) {
			dev_warn(&pmcdev->pdev->dev,
				 "Failed to get PMC info from SSRAM, %d, using legacy init\n", ret);
			ssram = false;
		}
	}

	if (!ssram) {
		pmc->map = pmc_dev_info->map;
		ret = get_primary_reg_base(pmc);
		if (ret)
			return ret;
	}

	pmc_core_get_low_power_modes(pmcdev);
	if (pmc_dev_info->dmu_guids)
		pmc_core_punit_pmt_init(pmcdev, pmc_dev_info->dmu_guids);

	if (ssram) {
		ret = pmc_core_get_telem_info(pmcdev, pmc_dev_info);
		if (ret)
			goto unmap_regbase;
	}

	return 0;

unmap_regbase:
	for (unsigned int pmc_idx = 0; pmc_idx < ARRAY_SIZE(pmcdev->pmcs); ++pmc_idx) {
		struct pmc *pmc = pmcdev->pmcs[pmc_idx];

		if (pmc && pmc->regbase)
			iounmap(pmc->regbase);
	}

	if (pmcdev->punit_ep)
		pmt_telem_unregister_endpoint(pmcdev->punit_ep);

	return ret;
}

static const struct x86_cpu_id intel_pmc_core_ids[] = {
	X86_MATCH_VFM(INTEL_SKYLAKE_L,		&spt_pmc_dev),
	X86_MATCH_VFM(INTEL_SKYLAKE,		&spt_pmc_dev),
	X86_MATCH_VFM(INTEL_KABYLAKE_L,		&spt_pmc_dev),
	X86_MATCH_VFM(INTEL_KABYLAKE,		&spt_pmc_dev),
	X86_MATCH_VFM(INTEL_CANNONLAKE_L,	&cnp_pmc_dev),
	X86_MATCH_VFM(INTEL_ICELAKE_L,		&icl_pmc_dev),
	X86_MATCH_VFM(INTEL_ICELAKE_NNPI,	&icl_pmc_dev),
	X86_MATCH_VFM(INTEL_COMETLAKE,		&cnp_pmc_dev),
	X86_MATCH_VFM(INTEL_COMETLAKE_L,	&cnp_pmc_dev),
	X86_MATCH_VFM(INTEL_TIGERLAKE_L,	&tgl_l_pmc_dev),
	X86_MATCH_VFM(INTEL_TIGERLAKE,		&tgl_pmc_dev),
	X86_MATCH_VFM(INTEL_ATOM_TREMONT,	&tgl_l_pmc_dev),
	X86_MATCH_VFM(INTEL_ATOM_TREMONT_L,	&icl_pmc_dev),
	X86_MATCH_VFM(INTEL_ROCKETLAKE,		&tgl_pmc_dev),
	X86_MATCH_VFM(INTEL_ALDERLAKE_L,	&tgl_l_pmc_dev),
	X86_MATCH_VFM(INTEL_ATOM_GRACEMONT,	&tgl_l_pmc_dev),
	X86_MATCH_VFM(INTEL_ALDERLAKE,		&adl_pmc_dev),
	X86_MATCH_VFM(INTEL_RAPTORLAKE_P,	&tgl_l_pmc_dev),
	X86_MATCH_VFM(INTEL_RAPTORLAKE,		&adl_pmc_dev),
	X86_MATCH_VFM(INTEL_RAPTORLAKE_S,	&adl_pmc_dev),
	X86_MATCH_VFM(INTEL_BARTLETTLAKE,	&adl_pmc_dev),
	X86_MATCH_VFM(INTEL_METEORLAKE_L,	&mtl_pmc_dev),
	X86_MATCH_VFM(INTEL_ARROWLAKE,		&arl_pmc_dev),
	X86_MATCH_VFM(INTEL_ARROWLAKE_H,	&arl_h_pmc_dev),
	X86_MATCH_VFM(INTEL_ARROWLAKE_U,	&arl_h_pmc_dev),
	X86_MATCH_VFM(INTEL_LUNARLAKE_M,	&lnl_pmc_dev),
	X86_MATCH_VFM(INTEL_PANTHERLAKE_L,	&ptl_pmc_dev),
	X86_MATCH_VFM(INTEL_WILDCATLAKE_L,	&wcl_pmc_dev),
	{}
};

MODULE_DEVICE_TABLE(x86cpu, intel_pmc_core_ids);

/*
 * This quirk can be used on those platforms where the platform BIOS
 * enforces a 24 MHz crystal shutdown before the PMC can assert SLP_S0#.
 */
static bool xtal_ignore;
static int quirk_xtal_ignore(const struct dmi_system_id *id)
{
	xtal_ignore = true;
	return 0;
}

static void pmc_core_xtal_ignore(struct pmc *pmc)
{
	u32 value;

	value = pmc_core_reg_read(pmc, pmc->map->pm_vric1_offset);
	/* 24MHz Crystal Shutdown Qualification Disable */
	value |= SPT_PMC_VRIC1_XTALSDQDIS;
	/* Low Voltage Mode Enable */
	value &= ~SPT_PMC_VRIC1_SLPS0LVEN;
	pmc_core_reg_write(pmc, pmc->map->pm_vric1_offset, value);
}

static const struct dmi_system_id pmc_core_dmi_table[] = {
	{
	.callback = quirk_xtal_ignore,
	.ident = "HP Elite x2 1013 G3",
	.matches = {
		DMI_MATCH(DMI_SYS_VENDOR, "HP"),
		DMI_MATCH(DMI_PRODUCT_NAME, "HP Elite x2 1013 G3"),
		},
	},
	{}
};

static void pmc_core_do_dmi_quirks(struct pmc *pmc)
{
	dmi_check_system(pmc_core_dmi_table);

	if (xtal_ignore)
		pmc_core_xtal_ignore(pmc);
}

static void pmc_core_clean_structure(struct platform_device *pdev)
{
	struct pmc_dev *pmcdev = platform_get_drvdata(pdev);
	unsigned int pmc_idx;

	for (pmc_idx = 0; pmc_idx < ARRAY_SIZE(pmcdev->pmcs); ++pmc_idx) {
		struct pmc *pmc = pmcdev->pmcs[pmc_idx];

		if (pmc && pmc->regbase)
			iounmap(pmc->regbase);
	}

	if (pmcdev->punit_ep)
		pmt_telem_unregister_endpoint(pmcdev->punit_ep);

	platform_set_drvdata(pdev, NULL);
}

static int pmc_core_probe(struct platform_device *pdev)
{
	static bool device_initialized;
	struct pmc_dev *pmcdev;
	const struct x86_cpu_id *cpu_id;
	struct pmc_dev_info *pmc_dev_info;
	struct pmc *primary_pmc;
	int ret;

	if (device_initialized)
		return -ENODEV;

	pmcdev = devm_kzalloc(&pdev->dev, sizeof(*pmcdev), GFP_KERNEL);
	if (!pmcdev)
		return -ENOMEM;

	pmcdev->crystal_freq = pmc_core_get_crystal_freq();

	platform_set_drvdata(pdev, pmcdev);
	pmcdev->pdev = pdev;

	cpu_id = x86_match_cpu(intel_pmc_core_ids);
	if (!cpu_id)
		return -ENODEV;

	pmc_dev_info = (struct pmc_dev_info *)cpu_id->driver_data;

	/* Primary PMC */
	primary_pmc = devm_kzalloc(&pdev->dev, sizeof(*primary_pmc), GFP_KERNEL);
	if (!primary_pmc)
		return -ENOMEM;
	pmcdev->pmcs[PMC_IDX_MAIN] = primary_pmc;

	/* The last element in msr_map is empty */
	pmcdev->num_of_pkgc = ARRAY_SIZE(msr_map) - 1;
	pmcdev->pkgc_res_cnt = devm_kcalloc(&pdev->dev,
					    pmcdev->num_of_pkgc,
					    sizeof(*pmcdev->pkgc_res_cnt),
					    GFP_KERNEL);
	if (!pmcdev->pkgc_res_cnt)
		return -ENOMEM;

	ret = devm_mutex_init(&pdev->dev, &pmcdev->lock);
	if (ret)
		return ret;

	if (pmc_dev_info->init)
		ret = pmc_dev_info->init(pmcdev, pmc_dev_info);
	else
		ret = generic_core_init(pmcdev, pmc_dev_info);

	if (ret) {
		platform_set_drvdata(pdev, NULL);
		return ret;
	}

	pmcdev->pmc_xram_read_bit = pmc_core_check_read_lock_bit(primary_pmc);
	pmc_core_do_dmi_quirks(primary_pmc);

	pmc_core_dbgfs_register(pmcdev, pmc_dev_info);
	pm_report_max_hw_sleep(FIELD_MAX(SLP_S0_RES_COUNTER_MASK) *
			       pmc_core_adjust_slp_s0_step(primary_pmc, 1));

	device_initialized = true;
	dev_info(&pdev->dev, " initialized\n");

	return 0;
}
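
/*
 * Worked bound for the pm_report_max_hw_sleep() call above (illustrative,
 * assuming a 32-bit SLP_S0 counter and the legacy 122 us step): the
 * counter can represent at most 0xFFFFFFFF * 122 us, roughly 524000
 * seconds or about six days of hardware sleep before it wraps.
 */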

static void pmc_core_remove(struct platform_device *pdev)
{
	struct pmc_dev *pmcdev = platform_get_drvdata(pdev);

	pmc_core_dbgfs_unregister(pmcdev);
	pmc_core_clean_structure(pdev);
}

static bool warn_on_s0ix_failures;
module_param(warn_on_s0ix_failures, bool, 0644);
MODULE_PARM_DESC(warn_on_s0ix_failures, "Check and warn for S0ix failures");

static bool ltr_ignore_all_suspend = true;
module_param(ltr_ignore_all_suspend, bool, 0644);
MODULE_PARM_DESC(ltr_ignore_all_suspend, "Ignore all LTRs during suspend");

static __maybe_unused int pmc_core_suspend(struct device *dev)
{
	struct pmc_dev *pmcdev = dev_get_drvdata(dev);
	struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
	unsigned int i;

	if (pmcdev->suspend)
		pmcdev->suspend(pmcdev);

	if (ltr_ignore_all_suspend)
		pmc_core_ltr_ignore_all(pmcdev);

	/* Check if the suspend will actually use S0ix */
	if (pm_suspend_via_firmware())
		return 0;

	/* Save PKGC residency for checking later */
	for (i = 0; i < pmcdev->num_of_pkgc; i++) {
		if (rdmsrq_safe(msr_map[i].bit_mask, &pmcdev->pkgc_res_cnt[i]))
			return -EIO;
	}

	/* Save S0ix residency for checking later */
	if (pmc_core_dev_state_get(pmc, &pmcdev->s0ix_counter))
		return -EIO;

	return 0;
}

static inline bool pmc_core_is_deepest_pkgc_failed(struct pmc_dev *pmcdev)
{
	u32 deepest_pkgc_msr = msr_map[pmcdev->num_of_pkgc - 1].bit_mask;
	u64 deepest_pkgc_residency;

	if (rdmsrq_safe(deepest_pkgc_msr, &deepest_pkgc_residency))
		return false;

	if (deepest_pkgc_residency == pmcdev->pkgc_res_cnt[pmcdev->num_of_pkgc - 1])
		return true;

	return false;
}

static inline bool pmc_core_is_s0ix_failed(struct pmc_dev *pmcdev)
{
	u64 s0ix_counter;

	if (pmc_core_dev_state_get(pmcdev->pmcs[PMC_IDX_MAIN], &s0ix_counter))
		return false;

	pm_report_hw_sleep_time((u32)(s0ix_counter - pmcdev->s0ix_counter));

	if (s0ix_counter == pmcdev->s0ix_counter)
		return true;

	return false;
}
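
/*
 * Triage logic for the resume path below: if the SLP_S0 counter did not
 * advance across suspend, S0ix failed. When the deepest package C-state
 * counter also did not move, the CPU side never reached the deepest PKGC
 * state and that is reported instead; otherwise the PMC debug registers
 * are dumped to show which IPs blocked SLP_S0 assertion.
 */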
(S0ix cnt=%llu)\n", 2022 pmcdev->s0ix_counter); 2023 2024 if (pmc->map->slps0_dbg_maps) 2025 pmc_core_slps0_display(pmc, dev, NULL); 2026 2027 for (pmc_idx = 0; pmc_idx < ARRAY_SIZE(pmcdev->pmcs); ++pmc_idx) { 2028 struct pmc *pmc = pmcdev->pmcs[pmc_idx]; 2029 2030 if (!pmc) 2031 continue; 2032 if (pmc->map->lpm_sts) 2033 pmc_core_lpm_display(pmc, dev, NULL, offset, pmc_idx, "STATUS", maps); 2034 } 2035 2036 return 0; 2037 } 2038 2039 static __maybe_unused int pmc_core_resume(struct device *dev) 2040 { 2041 struct pmc_dev *pmcdev = dev_get_drvdata(dev); 2042 2043 if (ltr_ignore_all_suspend) 2044 pmc_core_ltr_restore_all(pmcdev); 2045 2046 if (pmcdev->resume) 2047 return pmcdev->resume(pmcdev); 2048 2049 return pmc_core_resume_common(pmcdev); 2050 } 2051 2052 static const struct dev_pm_ops pmc_core_pm_ops = { 2053 SET_LATE_SYSTEM_SLEEP_PM_OPS(pmc_core_suspend, pmc_core_resume) 2054 }; 2055 2056 static const struct acpi_device_id pmc_core_acpi_ids[] = { 2057 {"INT33A1", 0}, /* _HID for Intel Power Engine, _CID PNP0D80*/ 2058 { } 2059 }; 2060 MODULE_DEVICE_TABLE(acpi, pmc_core_acpi_ids); 2061 2062 static struct platform_driver pmc_core_driver = { 2063 .driver = { 2064 .name = "intel_pmc_core", 2065 .acpi_match_table = ACPI_PTR(pmc_core_acpi_ids), 2066 .pm = &pmc_core_pm_ops, 2067 .dev_groups = pmc_dev_groups, 2068 }, 2069 .probe = pmc_core_probe, 2070 .remove = pmc_core_remove, 2071 }; 2072 2073 module_platform_driver(pmc_core_driver); 2074 2075 MODULE_IMPORT_NS("INTEL_PMT_TELEMETRY"); 2076 MODULE_LICENSE("GPL v2"); 2077 MODULE_DESCRIPTION("Intel PMC Core Driver"); 2078