1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * AMD SoC Power Management Controller Driver 4 * 5 * Copyright (c) 2020, Advanced Micro Devices, Inc. 6 * All Rights Reserved. 7 * 8 * Author: Shyam Sundar S K <Shyam-sundar.S-k@amd.com> 9 */ 10 11 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 12 13 #include <linux/acpi.h> 14 #include <linux/bitfield.h> 15 #include <linux/bits.h> 16 #include <linux/debugfs.h> 17 #include <linux/delay.h> 18 #include <linux/io.h> 19 #include <linux/iopoll.h> 20 #include <linux/limits.h> 21 #include <linux/module.h> 22 #include <linux/pci.h> 23 #include <linux/platform_device.h> 24 #include <linux/rtc.h> 25 #include <linux/serio.h> 26 #include <linux/suspend.h> 27 #include <linux/seq_file.h> 28 #include <linux/uaccess.h> 29 30 #include <asm/amd_node.h> 31 32 #include "pmc.h" 33 34 /* SMU communication registers */ 35 #define AMD_PMC_REGISTER_RESPONSE 0x980 36 #define AMD_PMC_REGISTER_ARGUMENT 0x9BC 37 38 /* PMC Scratch Registers */ 39 #define AMD_PMC_SCRATCH_REG_CZN 0x94 40 #define AMD_PMC_SCRATCH_REG_YC 0xD14 41 #define AMD_PMC_SCRATCH_REG_1AH 0xF14 42 43 /* STB Registers */ 44 #define AMD_PMC_STB_PMI_0 0x03E30600 45 #define AMD_PMC_STB_S2IDLE_PREPARE 0xC6000001 46 #define AMD_PMC_STB_S2IDLE_RESTORE 0xC6000002 47 #define AMD_PMC_STB_S2IDLE_CHECK 0xC6000003 48 #define AMD_PMC_STB_DUMMY_PC 0xC6000007 49 50 /* STB S2D(Spill to DRAM) has different message port offset */ 51 #define AMD_S2D_REGISTER_MESSAGE 0xA20 52 #define AMD_S2D_REGISTER_RESPONSE 0xA80 53 #define AMD_S2D_REGISTER_ARGUMENT 0xA88 54 55 /* STB Spill to DRAM Parameters */ 56 #define S2D_TELEMETRY_BYTES_MAX 0x100000U 57 #define S2D_RSVD_RAM_SPACE 0x100000 58 #define S2D_TELEMETRY_DRAMBYTES_MAX 0x1000000 59 60 /* STB Spill to DRAM Message Definition */ 61 #define STB_FORCE_FLUSH_DATA 0xCF 62 63 /* Base address of SMU for mapping physical address to virtual address */ 64 #define AMD_PMC_MAPPING_SIZE 0x01000 65 #define AMD_PMC_BASE_ADDR_OFFSET 0x10000 66 #define 
AMD_PMC_BASE_ADDR_LO 0x13B102E8 67 #define AMD_PMC_BASE_ADDR_HI 0x13B102EC 68 #define AMD_PMC_BASE_ADDR_LO_MASK GENMASK(15, 0) 69 #define AMD_PMC_BASE_ADDR_HI_MASK GENMASK(31, 20) 70 71 /* SMU Response Codes */ 72 #define AMD_PMC_RESULT_OK 0x01 73 #define AMD_PMC_RESULT_CMD_REJECT_BUSY 0xFC 74 #define AMD_PMC_RESULT_CMD_REJECT_PREREQ 0xFD 75 #define AMD_PMC_RESULT_CMD_UNKNOWN 0xFE 76 #define AMD_PMC_RESULT_FAILED 0xFF 77 78 /* FCH SSC Registers */ 79 #define FCH_S0I3_ENTRY_TIME_L_OFFSET 0x30 80 #define FCH_S0I3_ENTRY_TIME_H_OFFSET 0x34 81 #define FCH_S0I3_EXIT_TIME_L_OFFSET 0x38 82 #define FCH_S0I3_EXIT_TIME_H_OFFSET 0x3C 83 #define FCH_SSC_MAPPING_SIZE 0x800 84 #define FCH_BASE_PHY_ADDR_LOW 0xFED81100 85 #define FCH_BASE_PHY_ADDR_HIGH 0x00000000 86 87 /* SMU Message Definations */ 88 #define SMU_MSG_GETSMUVERSION 0x02 89 #define SMU_MSG_LOG_GETDRAM_ADDR_HI 0x04 90 #define SMU_MSG_LOG_GETDRAM_ADDR_LO 0x05 91 #define SMU_MSG_LOG_START 0x06 92 #define SMU_MSG_LOG_RESET 0x07 93 #define SMU_MSG_LOG_DUMP_DATA 0x08 94 #define SMU_MSG_GET_SUP_CONSTRAINTS 0x09 95 96 #define PMC_MSG_DELAY_MIN_US 50 97 #define RESPONSE_REGISTER_LOOP_MAX 20000 98 99 #define DELAY_MIN_US 2000 100 #define DELAY_MAX_US 3000 101 #define FIFO_SIZE 4096 102 103 enum amd_pmc_def { 104 MSG_TEST = 0x01, 105 MSG_OS_HINT_PCO, 106 MSG_OS_HINT_RN, 107 }; 108 109 enum s2d_arg { 110 S2D_TELEMETRY_SIZE = 0x01, 111 S2D_PHYS_ADDR_LOW, 112 S2D_PHYS_ADDR_HIGH, 113 S2D_NUM_SAMPLES, 114 S2D_DRAM_SIZE, 115 }; 116 117 struct amd_pmc_stb_v2_data { 118 size_t size; 119 u8 data[] __counted_by(size); 120 }; 121 122 struct amd_pmc_bit_map { 123 const char *name; 124 u32 bit_mask; 125 }; 126 127 static const struct amd_pmc_bit_map soc15_ip_blk[] = { 128 {"DISPLAY", BIT(0)}, 129 {"CPU", BIT(1)}, 130 {"GFX", BIT(2)}, 131 {"VDD", BIT(3)}, 132 {"ACP", BIT(4)}, 133 {"VCN", BIT(5)}, 134 {"ISP", BIT(6)}, 135 {"NBIO", BIT(7)}, 136 {"DF", BIT(8)}, 137 {"USB3_0", BIT(9)}, 138 {"USB3_1", BIT(10)}, 139 {"LAPIC", BIT(11)}, 140 
{"USB3_2", BIT(12)}, 141 {"USB3_3", BIT(13)}, 142 {"USB3_4", BIT(14)}, 143 {"USB4_0", BIT(15)}, 144 {"USB4_1", BIT(16)}, 145 {"MPM", BIT(17)}, 146 {"JPEG", BIT(18)}, 147 {"IPU", BIT(19)}, 148 {"UMSCH", BIT(20)}, 149 {"VPE", BIT(21)}, 150 {} 151 }; 152 153 static bool enable_stb; 154 module_param(enable_stb, bool, 0644); 155 MODULE_PARM_DESC(enable_stb, "Enable the STB debug mechanism"); 156 157 static bool disable_workarounds; 158 module_param(disable_workarounds, bool, 0644); 159 MODULE_PARM_DESC(disable_workarounds, "Disable workarounds for platform bugs"); 160 161 static bool dump_custom_stb; 162 module_param(dump_custom_stb, bool, 0644); 163 MODULE_PARM_DESC(dump_custom_stb, "Enable to dump full STB buffer"); 164 165 static struct amd_pmc_dev pmc; 166 static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, u32 arg, u32 *data, u8 msg, bool ret); 167 static int amd_pmc_read_stb(struct amd_pmc_dev *dev, u32 *buf); 168 static int amd_pmc_write_stb(struct amd_pmc_dev *dev, u32 data); 169 170 static inline u32 amd_pmc_reg_read(struct amd_pmc_dev *dev, int reg_offset) 171 { 172 return ioread32(dev->regbase + reg_offset); 173 } 174 175 static inline void amd_pmc_reg_write(struct amd_pmc_dev *dev, int reg_offset, u32 val) 176 { 177 iowrite32(val, dev->regbase + reg_offset); 178 } 179 180 struct smu_metrics { 181 u32 table_version; 182 u32 hint_count; 183 u32 s0i3_last_entry_status; 184 u32 timein_s0i2; 185 u64 timeentering_s0i3_lastcapture; 186 u64 timeentering_s0i3_totaltime; 187 u64 timeto_resume_to_os_lastcapture; 188 u64 timeto_resume_to_os_totaltime; 189 u64 timein_s0i3_lastcapture; 190 u64 timein_s0i3_totaltime; 191 u64 timein_swdrips_lastcapture; 192 u64 timein_swdrips_totaltime; 193 u64 timecondition_notmet_lastcapture[32]; 194 u64 timecondition_notmet_totaltime[32]; 195 } __packed; 196 197 static int amd_pmc_stb_debugfs_open(struct inode *inode, struct file *filp) 198 { 199 struct amd_pmc_dev *dev = filp->f_inode->i_private; 200 u32 size = FIFO_SIZE * sizeof(u32); 
201 u32 *buf; 202 int rc; 203 204 buf = kzalloc(size, GFP_KERNEL); 205 if (!buf) 206 return -ENOMEM; 207 208 rc = amd_pmc_read_stb(dev, buf); 209 if (rc) { 210 kfree(buf); 211 return rc; 212 } 213 214 filp->private_data = buf; 215 return rc; 216 } 217 218 static ssize_t amd_pmc_stb_debugfs_read(struct file *filp, char __user *buf, size_t size, 219 loff_t *pos) 220 { 221 if (!filp->private_data) 222 return -EINVAL; 223 224 return simple_read_from_buffer(buf, size, pos, filp->private_data, 225 FIFO_SIZE * sizeof(u32)); 226 } 227 228 static int amd_pmc_stb_debugfs_release(struct inode *inode, struct file *filp) 229 { 230 kfree(filp->private_data); 231 return 0; 232 } 233 234 static const struct file_operations amd_pmc_stb_debugfs_fops = { 235 .owner = THIS_MODULE, 236 .open = amd_pmc_stb_debugfs_open, 237 .read = amd_pmc_stb_debugfs_read, 238 .release = amd_pmc_stb_debugfs_release, 239 }; 240 241 /* Enhanced STB Firmware Reporting Mechanism */ 242 static int amd_pmc_stb_handle_efr(struct file *filp) 243 { 244 struct amd_pmc_dev *dev = filp->f_inode->i_private; 245 struct amd_pmc_stb_v2_data *stb_data_arr; 246 u32 fsize; 247 248 fsize = dev->dram_size - S2D_RSVD_RAM_SPACE; 249 stb_data_arr = kmalloc(struct_size(stb_data_arr, data, fsize), GFP_KERNEL); 250 if (!stb_data_arr) 251 return -ENOMEM; 252 253 stb_data_arr->size = fsize; 254 memcpy_fromio(stb_data_arr->data, dev->stb_virt_addr, fsize); 255 filp->private_data = stb_data_arr; 256 257 return 0; 258 } 259 260 static int amd_pmc_stb_debugfs_open_v2(struct inode *inode, struct file *filp) 261 { 262 struct amd_pmc_dev *dev = filp->f_inode->i_private; 263 u32 fsize, num_samples, val, stb_rdptr_offset = 0; 264 struct amd_pmc_stb_v2_data *stb_data_arr; 265 int ret; 266 267 /* Write dummy postcode while reading the STB buffer */ 268 ret = amd_pmc_write_stb(dev, AMD_PMC_STB_DUMMY_PC); 269 if (ret) 270 dev_err(dev->dev, "error writing to STB: %d\n", ret); 271 272 /* Spill to DRAM num_samples uses separate SMU message port 
*/ 273 dev->msg_port = 1; 274 275 ret = amd_pmc_send_cmd(dev, 0, &val, STB_FORCE_FLUSH_DATA, 1); 276 if (ret) 277 dev_dbg_once(dev->dev, "S2D force flush not supported: %d\n", ret); 278 279 /* 280 * We have a custom stb size and the PMFW is supposed to give 281 * the enhanced dram size. Note that we land here only for the 282 * platforms that support enhanced dram size reporting. 283 */ 284 if (dump_custom_stb) 285 return amd_pmc_stb_handle_efr(filp); 286 287 /* Get the num_samples to calculate the last push location */ 288 ret = amd_pmc_send_cmd(dev, S2D_NUM_SAMPLES, &num_samples, dev->s2d_msg_id, true); 289 /* Clear msg_port for other SMU operation */ 290 dev->msg_port = 0; 291 if (ret) { 292 dev_err(dev->dev, "error: S2D_NUM_SAMPLES not supported : %d\n", ret); 293 return ret; 294 } 295 296 fsize = min(num_samples, S2D_TELEMETRY_BYTES_MAX); 297 stb_data_arr = kmalloc(struct_size(stb_data_arr, data, fsize), GFP_KERNEL); 298 if (!stb_data_arr) 299 return -ENOMEM; 300 301 stb_data_arr->size = fsize; 302 303 /* 304 * Start capturing data from the last push location. 305 * This is for general cases, where the stb limits 306 * are meant for standard usage. 
307 */ 308 if (num_samples > S2D_TELEMETRY_BYTES_MAX) { 309 /* First read oldest data starting 1 behind last write till end of ringbuffer */ 310 stb_rdptr_offset = num_samples % S2D_TELEMETRY_BYTES_MAX; 311 fsize = S2D_TELEMETRY_BYTES_MAX - stb_rdptr_offset; 312 313 memcpy_fromio(stb_data_arr->data, dev->stb_virt_addr + stb_rdptr_offset, fsize); 314 /* Second copy the newer samples from offset 0 - last write */ 315 memcpy_fromio(stb_data_arr->data + fsize, dev->stb_virt_addr, stb_rdptr_offset); 316 } else { 317 memcpy_fromio(stb_data_arr->data, dev->stb_virt_addr, fsize); 318 } 319 320 filp->private_data = stb_data_arr; 321 322 return 0; 323 } 324 325 static ssize_t amd_pmc_stb_debugfs_read_v2(struct file *filp, char __user *buf, size_t size, 326 loff_t *pos) 327 { 328 struct amd_pmc_stb_v2_data *data = filp->private_data; 329 330 return simple_read_from_buffer(buf, size, pos, data->data, data->size); 331 } 332 333 static int amd_pmc_stb_debugfs_release_v2(struct inode *inode, struct file *filp) 334 { 335 kfree(filp->private_data); 336 return 0; 337 } 338 339 static const struct file_operations amd_pmc_stb_debugfs_fops_v2 = { 340 .owner = THIS_MODULE, 341 .open = amd_pmc_stb_debugfs_open_v2, 342 .read = amd_pmc_stb_debugfs_read_v2, 343 .release = amd_pmc_stb_debugfs_release_v2, 344 }; 345 346 static void amd_pmc_get_ip_info(struct amd_pmc_dev *dev) 347 { 348 switch (dev->cpu_id) { 349 case AMD_CPU_ID_PCO: 350 case AMD_CPU_ID_RN: 351 case AMD_CPU_ID_YC: 352 case AMD_CPU_ID_CB: 353 dev->num_ips = 12; 354 dev->s2d_msg_id = 0xBE; 355 dev->smu_msg = 0x538; 356 break; 357 case AMD_CPU_ID_PS: 358 dev->num_ips = 21; 359 dev->s2d_msg_id = 0x85; 360 dev->smu_msg = 0x538; 361 break; 362 case PCI_DEVICE_ID_AMD_1AH_M20H_ROOT: 363 case PCI_DEVICE_ID_AMD_1AH_M60H_ROOT: 364 dev->num_ips = 22; 365 dev->s2d_msg_id = 0xDE; 366 dev->smu_msg = 0x938; 367 break; 368 } 369 } 370 371 static int amd_pmc_setup_smu_logging(struct amd_pmc_dev *dev) 372 { 373 if (dev->cpu_id == AMD_CPU_ID_PCO) 
{ 374 dev_warn_once(dev->dev, "SMU debugging info not supported on this platform\n"); 375 return -EINVAL; 376 } 377 378 /* Get Active devices list from SMU */ 379 if (!dev->active_ips) 380 amd_pmc_send_cmd(dev, 0, &dev->active_ips, SMU_MSG_GET_SUP_CONSTRAINTS, true); 381 382 /* Get dram address */ 383 if (!dev->smu_virt_addr) { 384 u32 phys_addr_low, phys_addr_hi; 385 u64 smu_phys_addr; 386 387 amd_pmc_send_cmd(dev, 0, &phys_addr_low, SMU_MSG_LOG_GETDRAM_ADDR_LO, true); 388 amd_pmc_send_cmd(dev, 0, &phys_addr_hi, SMU_MSG_LOG_GETDRAM_ADDR_HI, true); 389 smu_phys_addr = ((u64)phys_addr_hi << 32 | phys_addr_low); 390 391 dev->smu_virt_addr = devm_ioremap(dev->dev, smu_phys_addr, 392 sizeof(struct smu_metrics)); 393 if (!dev->smu_virt_addr) 394 return -ENOMEM; 395 } 396 397 /* Start the logging */ 398 amd_pmc_send_cmd(dev, 0, NULL, SMU_MSG_LOG_RESET, false); 399 amd_pmc_send_cmd(dev, 0, NULL, SMU_MSG_LOG_START, false); 400 401 return 0; 402 } 403 404 static int get_metrics_table(struct amd_pmc_dev *pdev, struct smu_metrics *table) 405 { 406 if (!pdev->smu_virt_addr) { 407 int ret = amd_pmc_setup_smu_logging(pdev); 408 409 if (ret) 410 return ret; 411 } 412 413 if (pdev->cpu_id == AMD_CPU_ID_PCO) 414 return -ENODEV; 415 memcpy_fromio(table, pdev->smu_virt_addr, sizeof(struct smu_metrics)); 416 return 0; 417 } 418 419 static void amd_pmc_validate_deepest(struct amd_pmc_dev *pdev) 420 { 421 struct smu_metrics table; 422 423 if (get_metrics_table(pdev, &table)) 424 return; 425 426 if (!table.s0i3_last_entry_status) 427 dev_warn(pdev->dev, "Last suspend didn't reach deepest state\n"); 428 pm_report_hw_sleep_time(table.s0i3_last_entry_status ? 
429 table.timein_s0i3_lastcapture : 0); 430 } 431 432 static int amd_pmc_get_smu_version(struct amd_pmc_dev *dev) 433 { 434 int rc; 435 u32 val; 436 437 if (dev->cpu_id == AMD_CPU_ID_PCO) 438 return -ENODEV; 439 440 rc = amd_pmc_send_cmd(dev, 0, &val, SMU_MSG_GETSMUVERSION, true); 441 if (rc) 442 return rc; 443 444 dev->smu_program = (val >> 24) & GENMASK(7, 0); 445 dev->major = (val >> 16) & GENMASK(7, 0); 446 dev->minor = (val >> 8) & GENMASK(7, 0); 447 dev->rev = (val >> 0) & GENMASK(7, 0); 448 449 dev_dbg(dev->dev, "SMU program %u version is %u.%u.%u\n", 450 dev->smu_program, dev->major, dev->minor, dev->rev); 451 452 return 0; 453 } 454 455 static ssize_t smu_fw_version_show(struct device *d, struct device_attribute *attr, 456 char *buf) 457 { 458 struct amd_pmc_dev *dev = dev_get_drvdata(d); 459 460 if (!dev->major) { 461 int rc = amd_pmc_get_smu_version(dev); 462 463 if (rc) 464 return rc; 465 } 466 return sysfs_emit(buf, "%u.%u.%u\n", dev->major, dev->minor, dev->rev); 467 } 468 469 static ssize_t smu_program_show(struct device *d, struct device_attribute *attr, 470 char *buf) 471 { 472 struct amd_pmc_dev *dev = dev_get_drvdata(d); 473 474 if (!dev->major) { 475 int rc = amd_pmc_get_smu_version(dev); 476 477 if (rc) 478 return rc; 479 } 480 return sysfs_emit(buf, "%u\n", dev->smu_program); 481 } 482 483 static DEVICE_ATTR_RO(smu_fw_version); 484 static DEVICE_ATTR_RO(smu_program); 485 486 static umode_t pmc_attr_is_visible(struct kobject *kobj, struct attribute *attr, int idx) 487 { 488 struct device *dev = kobj_to_dev(kobj); 489 struct amd_pmc_dev *pdev = dev_get_drvdata(dev); 490 491 if (pdev->cpu_id == AMD_CPU_ID_PCO) 492 return 0; 493 return 0444; 494 } 495 496 static struct attribute *pmc_attrs[] = { 497 &dev_attr_smu_fw_version.attr, 498 &dev_attr_smu_program.attr, 499 NULL, 500 }; 501 502 static struct attribute_group pmc_attr_group = { 503 .attrs = pmc_attrs, 504 .is_visible = pmc_attr_is_visible, 505 }; 506 507 static const struct attribute_group 
*pmc_groups[] = { 508 &pmc_attr_group, 509 NULL, 510 }; 511 512 static int smu_fw_info_show(struct seq_file *s, void *unused) 513 { 514 struct amd_pmc_dev *dev = s->private; 515 struct smu_metrics table; 516 int idx; 517 518 if (get_metrics_table(dev, &table)) 519 return -EINVAL; 520 521 seq_puts(s, "\n=== SMU Statistics ===\n"); 522 seq_printf(s, "Table Version: %d\n", table.table_version); 523 seq_printf(s, "Hint Count: %d\n", table.hint_count); 524 seq_printf(s, "Last S0i3 Status: %s\n", table.s0i3_last_entry_status ? "Success" : 525 "Unknown/Fail"); 526 seq_printf(s, "Time (in us) to S0i3: %lld\n", table.timeentering_s0i3_lastcapture); 527 seq_printf(s, "Time (in us) in S0i3: %lld\n", table.timein_s0i3_lastcapture); 528 seq_printf(s, "Time (in us) to resume from S0i3: %lld\n", 529 table.timeto_resume_to_os_lastcapture); 530 531 seq_puts(s, "\n=== Active time (in us) ===\n"); 532 for (idx = 0 ; idx < dev->num_ips ; idx++) { 533 if (soc15_ip_blk[idx].bit_mask & dev->active_ips) 534 seq_printf(s, "%-8s : %lld\n", soc15_ip_blk[idx].name, 535 table.timecondition_notmet_lastcapture[idx]); 536 } 537 538 return 0; 539 } 540 DEFINE_SHOW_ATTRIBUTE(smu_fw_info); 541 542 static int s0ix_stats_show(struct seq_file *s, void *unused) 543 { 544 struct amd_pmc_dev *dev = s->private; 545 u64 entry_time, exit_time, residency; 546 547 /* Use FCH registers to get the S0ix stats */ 548 if (!dev->fch_virt_addr) { 549 u32 base_addr_lo = FCH_BASE_PHY_ADDR_LOW; 550 u32 base_addr_hi = FCH_BASE_PHY_ADDR_HIGH; 551 u64 fch_phys_addr = ((u64)base_addr_hi << 32 | base_addr_lo); 552 553 dev->fch_virt_addr = devm_ioremap(dev->dev, fch_phys_addr, FCH_SSC_MAPPING_SIZE); 554 if (!dev->fch_virt_addr) 555 return -ENOMEM; 556 } 557 558 entry_time = ioread32(dev->fch_virt_addr + FCH_S0I3_ENTRY_TIME_H_OFFSET); 559 entry_time = entry_time << 32 | ioread32(dev->fch_virt_addr + FCH_S0I3_ENTRY_TIME_L_OFFSET); 560 561 exit_time = ioread32(dev->fch_virt_addr + FCH_S0I3_EXIT_TIME_H_OFFSET); 562 exit_time = 
exit_time << 32 | ioread32(dev->fch_virt_addr + FCH_S0I3_EXIT_TIME_L_OFFSET); 563 564 /* It's in 48MHz. We need to convert it */ 565 residency = exit_time - entry_time; 566 do_div(residency, 48); 567 568 seq_puts(s, "=== S0ix statistics ===\n"); 569 seq_printf(s, "S0ix Entry Time: %lld\n", entry_time); 570 seq_printf(s, "S0ix Exit Time: %lld\n", exit_time); 571 seq_printf(s, "Residency Time: %lld\n", residency); 572 573 return 0; 574 } 575 DEFINE_SHOW_ATTRIBUTE(s0ix_stats); 576 577 static int amd_pmc_idlemask_read(struct amd_pmc_dev *pdev, struct device *dev, 578 struct seq_file *s) 579 { 580 u32 val; 581 int rc; 582 583 switch (pdev->cpu_id) { 584 case AMD_CPU_ID_CZN: 585 /* we haven't yet read SMU version */ 586 if (!pdev->major) { 587 rc = amd_pmc_get_smu_version(pdev); 588 if (rc) 589 return rc; 590 } 591 if (pdev->major > 56 || (pdev->major >= 55 && pdev->minor >= 37)) 592 val = amd_pmc_reg_read(pdev, AMD_PMC_SCRATCH_REG_CZN); 593 else 594 return -EINVAL; 595 break; 596 case AMD_CPU_ID_YC: 597 case AMD_CPU_ID_CB: 598 case AMD_CPU_ID_PS: 599 val = amd_pmc_reg_read(pdev, AMD_PMC_SCRATCH_REG_YC); 600 break; 601 case PCI_DEVICE_ID_AMD_1AH_M20H_ROOT: 602 case PCI_DEVICE_ID_AMD_1AH_M60H_ROOT: 603 val = amd_pmc_reg_read(pdev, AMD_PMC_SCRATCH_REG_1AH); 604 break; 605 default: 606 return -EINVAL; 607 } 608 609 if (dev) 610 pm_pr_dbg("SMU idlemask s0i3: 0x%x\n", val); 611 612 if (s) 613 seq_printf(s, "SMU idlemask : 0x%x\n", val); 614 615 return 0; 616 } 617 618 static int amd_pmc_idlemask_show(struct seq_file *s, void *unused) 619 { 620 return amd_pmc_idlemask_read(s->private, NULL, s); 621 } 622 DEFINE_SHOW_ATTRIBUTE(amd_pmc_idlemask); 623 624 static void amd_pmc_dbgfs_unregister(struct amd_pmc_dev *dev) 625 { 626 debugfs_remove_recursive(dev->dbgfs_dir); 627 } 628 629 static bool amd_pmc_is_stb_supported(struct amd_pmc_dev *dev) 630 { 631 switch (dev->cpu_id) { 632 case AMD_CPU_ID_YC: 633 case AMD_CPU_ID_CB: 634 case AMD_CPU_ID_PS: 635 case 
PCI_DEVICE_ID_AMD_1AH_M20H_ROOT: 636 case PCI_DEVICE_ID_AMD_1AH_M60H_ROOT: 637 return true; 638 default: 639 return false; 640 } 641 } 642 643 static void amd_pmc_dbgfs_register(struct amd_pmc_dev *dev) 644 { 645 dev->dbgfs_dir = debugfs_create_dir("amd_pmc", NULL); 646 debugfs_create_file("smu_fw_info", 0644, dev->dbgfs_dir, dev, 647 &smu_fw_info_fops); 648 debugfs_create_file("s0ix_stats", 0644, dev->dbgfs_dir, dev, 649 &s0ix_stats_fops); 650 debugfs_create_file("amd_pmc_idlemask", 0644, dev->dbgfs_dir, dev, 651 &amd_pmc_idlemask_fops); 652 /* Enable STB only when the module_param is set */ 653 if (enable_stb) { 654 if (amd_pmc_is_stb_supported(dev)) 655 debugfs_create_file("stb_read", 0644, dev->dbgfs_dir, dev, 656 &amd_pmc_stb_debugfs_fops_v2); 657 else 658 debugfs_create_file("stb_read", 0644, dev->dbgfs_dir, dev, 659 &amd_pmc_stb_debugfs_fops); 660 } 661 } 662 663 static void amd_pmc_dump_registers(struct amd_pmc_dev *dev) 664 { 665 u32 value, message, argument, response; 666 667 if (dev->msg_port) { 668 message = AMD_S2D_REGISTER_MESSAGE; 669 argument = AMD_S2D_REGISTER_ARGUMENT; 670 response = AMD_S2D_REGISTER_RESPONSE; 671 } else { 672 message = dev->smu_msg; 673 argument = AMD_PMC_REGISTER_ARGUMENT; 674 response = AMD_PMC_REGISTER_RESPONSE; 675 } 676 677 value = amd_pmc_reg_read(dev, response); 678 dev_dbg(dev->dev, "AMD_%s_REGISTER_RESPONSE:%x\n", dev->msg_port ? "S2D" : "PMC", value); 679 680 value = amd_pmc_reg_read(dev, argument); 681 dev_dbg(dev->dev, "AMD_%s_REGISTER_ARGUMENT:%x\n", dev->msg_port ? "S2D" : "PMC", value); 682 683 value = amd_pmc_reg_read(dev, message); 684 dev_dbg(dev->dev, "AMD_%s_REGISTER_MESSAGE:%x\n", dev->msg_port ? 
"S2D" : "PMC", value); 685 } 686 687 static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, u32 arg, u32 *data, u8 msg, bool ret) 688 { 689 int rc; 690 u32 val, message, argument, response; 691 692 mutex_lock(&dev->lock); 693 694 if (dev->msg_port) { 695 message = AMD_S2D_REGISTER_MESSAGE; 696 argument = AMD_S2D_REGISTER_ARGUMENT; 697 response = AMD_S2D_REGISTER_RESPONSE; 698 } else { 699 message = dev->smu_msg; 700 argument = AMD_PMC_REGISTER_ARGUMENT; 701 response = AMD_PMC_REGISTER_RESPONSE; 702 } 703 704 /* Wait until we get a valid response */ 705 rc = readx_poll_timeout(ioread32, dev->regbase + response, 706 val, val != 0, PMC_MSG_DELAY_MIN_US, 707 PMC_MSG_DELAY_MIN_US * RESPONSE_REGISTER_LOOP_MAX); 708 if (rc) { 709 dev_err(dev->dev, "failed to talk to SMU\n"); 710 goto out_unlock; 711 } 712 713 /* Write zero to response register */ 714 amd_pmc_reg_write(dev, response, 0); 715 716 /* Write argument into response register */ 717 amd_pmc_reg_write(dev, argument, arg); 718 719 /* Write message ID to message ID register */ 720 amd_pmc_reg_write(dev, message, msg); 721 722 /* Wait until we get a valid response */ 723 rc = readx_poll_timeout(ioread32, dev->regbase + response, 724 val, val != 0, PMC_MSG_DELAY_MIN_US, 725 PMC_MSG_DELAY_MIN_US * RESPONSE_REGISTER_LOOP_MAX); 726 if (rc) { 727 dev_err(dev->dev, "SMU response timed out\n"); 728 goto out_unlock; 729 } 730 731 switch (val) { 732 case AMD_PMC_RESULT_OK: 733 if (ret) { 734 /* PMFW may take longer time to return back the data */ 735 usleep_range(DELAY_MIN_US, 10 * DELAY_MAX_US); 736 *data = amd_pmc_reg_read(dev, argument); 737 } 738 break; 739 case AMD_PMC_RESULT_CMD_REJECT_BUSY: 740 dev_err(dev->dev, "SMU not ready. err: 0x%x\n", val); 741 rc = -EBUSY; 742 goto out_unlock; 743 case AMD_PMC_RESULT_CMD_UNKNOWN: 744 dev_err(dev->dev, "SMU cmd unknown. 
err: 0x%x\n", val); 745 rc = -EINVAL; 746 goto out_unlock; 747 case AMD_PMC_RESULT_CMD_REJECT_PREREQ: 748 case AMD_PMC_RESULT_FAILED: 749 default: 750 dev_err(dev->dev, "SMU cmd failed. err: 0x%x\n", val); 751 rc = -EIO; 752 goto out_unlock; 753 } 754 755 out_unlock: 756 mutex_unlock(&dev->lock); 757 amd_pmc_dump_registers(dev); 758 return rc; 759 } 760 761 static int amd_pmc_get_os_hint(struct amd_pmc_dev *dev) 762 { 763 switch (dev->cpu_id) { 764 case AMD_CPU_ID_PCO: 765 return MSG_OS_HINT_PCO; 766 case AMD_CPU_ID_RN: 767 case AMD_CPU_ID_YC: 768 case AMD_CPU_ID_CB: 769 case AMD_CPU_ID_PS: 770 case PCI_DEVICE_ID_AMD_1AH_M20H_ROOT: 771 case PCI_DEVICE_ID_AMD_1AH_M60H_ROOT: 772 return MSG_OS_HINT_RN; 773 } 774 return -EINVAL; 775 } 776 777 static int amd_pmc_wa_irq1(struct amd_pmc_dev *pdev) 778 { 779 struct device *d; 780 int rc; 781 782 /* cezanne platform firmware has a fix in 64.66.0 */ 783 if (pdev->cpu_id == AMD_CPU_ID_CZN) { 784 if (!pdev->major) { 785 rc = amd_pmc_get_smu_version(pdev); 786 if (rc) 787 return rc; 788 } 789 790 if (pdev->major > 64 || (pdev->major == 64 && pdev->minor > 65)) 791 return 0; 792 } 793 794 d = bus_find_device_by_name(&serio_bus, NULL, "serio0"); 795 if (!d) 796 return 0; 797 if (device_may_wakeup(d)) { 798 dev_info_once(d, "Disabling IRQ1 wakeup source to avoid platform firmware bug\n"); 799 disable_irq_wake(1); 800 device_set_wakeup_enable(d, false); 801 } 802 put_device(d); 803 804 return 0; 805 } 806 807 static int amd_pmc_verify_czn_rtc(struct amd_pmc_dev *pdev, u32 *arg) 808 { 809 struct rtc_device *rtc_device; 810 time64_t then, now, duration; 811 struct rtc_wkalrm alarm; 812 struct rtc_time tm; 813 int rc; 814 815 /* we haven't yet read SMU version */ 816 if (!pdev->major) { 817 rc = amd_pmc_get_smu_version(pdev); 818 if (rc) 819 return rc; 820 } 821 822 if (pdev->major < 64 || (pdev->major == 64 && pdev->minor < 53)) 823 return 0; 824 825 rtc_device = rtc_class_open("rtc0"); 826 if (!rtc_device) 827 return 0; 828 rc = 
rtc_read_alarm(rtc_device, &alarm); 829 if (rc) 830 return rc; 831 if (!alarm.enabled) { 832 dev_dbg(pdev->dev, "alarm not enabled\n"); 833 return 0; 834 } 835 rc = rtc_read_time(rtc_device, &tm); 836 if (rc) 837 return rc; 838 then = rtc_tm_to_time64(&alarm.time); 839 now = rtc_tm_to_time64(&tm); 840 duration = then-now; 841 842 /* in the past */ 843 if (then < now) 844 return 0; 845 846 /* will be stored in upper 16 bits of s0i3 hint argument, 847 * so timer wakeup from s0i3 is limited to ~18 hours or less 848 */ 849 if (duration <= 4 || duration > U16_MAX) 850 return -EINVAL; 851 852 *arg |= (duration << 16); 853 rc = rtc_alarm_irq_enable(rtc_device, 0); 854 pm_pr_dbg("wakeup timer programmed for %lld seconds\n", duration); 855 856 return rc; 857 } 858 859 static void amd_pmc_s2idle_prepare(void) 860 { 861 struct amd_pmc_dev *pdev = &pmc; 862 int rc; 863 u8 msg; 864 u32 arg = 1; 865 866 /* Reset and Start SMU logging - to monitor the s0i3 stats */ 867 amd_pmc_setup_smu_logging(pdev); 868 869 /* Activate CZN specific platform bug workarounds */ 870 if (pdev->cpu_id == AMD_CPU_ID_CZN && !disable_workarounds) { 871 rc = amd_pmc_verify_czn_rtc(pdev, &arg); 872 if (rc) { 873 dev_err(pdev->dev, "failed to set RTC: %d\n", rc); 874 return; 875 } 876 } 877 878 msg = amd_pmc_get_os_hint(pdev); 879 rc = amd_pmc_send_cmd(pdev, arg, NULL, msg, false); 880 if (rc) { 881 dev_err(pdev->dev, "suspend failed: %d\n", rc); 882 return; 883 } 884 885 rc = amd_pmc_write_stb(pdev, AMD_PMC_STB_S2IDLE_PREPARE); 886 if (rc) 887 dev_err(pdev->dev, "error writing to STB: %d\n", rc); 888 } 889 890 static void amd_pmc_s2idle_check(void) 891 { 892 struct amd_pmc_dev *pdev = &pmc; 893 struct smu_metrics table; 894 int rc; 895 896 /* CZN: Ensure that future s0i3 entry attempts at least 10ms passed */ 897 if (pdev->cpu_id == AMD_CPU_ID_CZN && !get_metrics_table(pdev, &table) && 898 table.s0i3_last_entry_status) 899 usleep_range(10000, 20000); 900 901 /* Dump the IdleMask before we add to the STB 
*/ 902 amd_pmc_idlemask_read(pdev, pdev->dev, NULL); 903 904 rc = amd_pmc_write_stb(pdev, AMD_PMC_STB_S2IDLE_CHECK); 905 if (rc) 906 dev_err(pdev->dev, "error writing to STB: %d\n", rc); 907 } 908 909 static int amd_pmc_dump_data(struct amd_pmc_dev *pdev) 910 { 911 if (pdev->cpu_id == AMD_CPU_ID_PCO) 912 return -ENODEV; 913 914 return amd_pmc_send_cmd(pdev, 0, NULL, SMU_MSG_LOG_DUMP_DATA, false); 915 } 916 917 static void amd_pmc_s2idle_restore(void) 918 { 919 struct amd_pmc_dev *pdev = &pmc; 920 int rc; 921 u8 msg; 922 923 msg = amd_pmc_get_os_hint(pdev); 924 rc = amd_pmc_send_cmd(pdev, 0, NULL, msg, false); 925 if (rc) 926 dev_err(pdev->dev, "resume failed: %d\n", rc); 927 928 /* Let SMU know that we are looking for stats */ 929 amd_pmc_dump_data(pdev); 930 931 rc = amd_pmc_write_stb(pdev, AMD_PMC_STB_S2IDLE_RESTORE); 932 if (rc) 933 dev_err(pdev->dev, "error writing to STB: %d\n", rc); 934 935 /* Notify on failed entry */ 936 amd_pmc_validate_deepest(pdev); 937 938 amd_pmc_process_restore_quirks(pdev); 939 } 940 941 static struct acpi_s2idle_dev_ops amd_pmc_s2idle_dev_ops = { 942 .prepare = amd_pmc_s2idle_prepare, 943 .check = amd_pmc_s2idle_check, 944 .restore = amd_pmc_s2idle_restore, 945 }; 946 947 static int amd_pmc_suspend_handler(struct device *dev) 948 { 949 struct amd_pmc_dev *pdev = dev_get_drvdata(dev); 950 951 /* 952 * Must be called only from the same set of dev_pm_ops handlers 953 * as i8042_pm_suspend() is called: currently just from .suspend. 
954 */ 955 if (pdev->disable_8042_wakeup && !disable_workarounds) { 956 int rc = amd_pmc_wa_irq1(pdev); 957 958 if (rc) { 959 dev_err(pdev->dev, "failed to adjust keyboard wakeup: %d\n", rc); 960 return rc; 961 } 962 } 963 964 return 0; 965 } 966 967 static const struct dev_pm_ops amd_pmc_pm = { 968 .suspend = amd_pmc_suspend_handler, 969 }; 970 971 static const struct pci_device_id pmc_pci_ids[] = { 972 { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_PS) }, 973 { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_CB) }, 974 { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_YC) }, 975 { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_CZN) }, 976 { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_RN) }, 977 { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_PCO) }, 978 { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_RV) }, 979 { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_SP) }, 980 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M20H_ROOT) }, 981 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M60H_ROOT) }, 982 { } 983 }; 984 985 static int amd_pmc_s2d_init(struct amd_pmc_dev *dev) 986 { 987 u32 phys_addr_low, phys_addr_hi; 988 u64 stb_phys_addr; 989 u32 size = 0; 990 int ret; 991 992 /* Spill to DRAM feature uses separate SMU message port */ 993 dev->msg_port = 1; 994 995 amd_pmc_send_cmd(dev, S2D_TELEMETRY_SIZE, &size, dev->s2d_msg_id, true); 996 if (size != S2D_TELEMETRY_BYTES_MAX) 997 return -EIO; 998 999 /* Get DRAM size */ 1000 ret = amd_pmc_send_cmd(dev, S2D_DRAM_SIZE, &dev->dram_size, dev->s2d_msg_id, true); 1001 if (ret || !dev->dram_size) 1002 dev->dram_size = S2D_TELEMETRY_DRAMBYTES_MAX; 1003 1004 /* Get STB DRAM address */ 1005 amd_pmc_send_cmd(dev, S2D_PHYS_ADDR_LOW, &phys_addr_low, dev->s2d_msg_id, true); 1006 amd_pmc_send_cmd(dev, S2D_PHYS_ADDR_HIGH, &phys_addr_hi, dev->s2d_msg_id, true); 1007 1008 if (!phys_addr_hi && !phys_addr_low) { 1009 dev_err(dev->dev, "STB is not enabled on the system; disable enable_stb or contact system vendor\n"); 1010 return -EINVAL; 1011 } 1012 1013 
stb_phys_addr = ((u64)phys_addr_hi << 32 | phys_addr_low); 1014 1015 /* Clear msg_port for other SMU operation */ 1016 dev->msg_port = 0; 1017 1018 dev->stb_virt_addr = devm_ioremap(dev->dev, stb_phys_addr, dev->dram_size); 1019 if (!dev->stb_virt_addr) 1020 return -ENOMEM; 1021 1022 return 0; 1023 } 1024 1025 static int amd_pmc_write_stb(struct amd_pmc_dev *dev, u32 data) 1026 { 1027 int err; 1028 1029 err = amd_smn_write(0, AMD_PMC_STB_PMI_0, data); 1030 if (err) { 1031 dev_err(dev->dev, "failed to write data in stb: 0x%X\n", AMD_PMC_STB_PMI_0); 1032 return pcibios_err_to_errno(err); 1033 } 1034 1035 return 0; 1036 } 1037 1038 static int amd_pmc_read_stb(struct amd_pmc_dev *dev, u32 *buf) 1039 { 1040 int i, err; 1041 1042 for (i = 0; i < FIFO_SIZE; i++) { 1043 err = amd_smn_read(0, AMD_PMC_STB_PMI_0, buf++); 1044 if (err) { 1045 dev_err(dev->dev, "error reading data from stb: 0x%X\n", AMD_PMC_STB_PMI_0); 1046 return pcibios_err_to_errno(err); 1047 } 1048 } 1049 1050 return 0; 1051 } 1052 1053 static int amd_pmc_probe(struct platform_device *pdev) 1054 { 1055 struct amd_pmc_dev *dev = &pmc; 1056 struct pci_dev *rdev; 1057 u32 base_addr_lo, base_addr_hi; 1058 u64 base_addr; 1059 int err; 1060 u32 val; 1061 1062 dev->dev = &pdev->dev; 1063 1064 rdev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0)); 1065 if (!rdev || !pci_match_id(pmc_pci_ids, rdev)) { 1066 err = -ENODEV; 1067 goto err_pci_dev_put; 1068 } 1069 1070 dev->cpu_id = rdev->device; 1071 1072 if (dev->cpu_id == AMD_CPU_ID_SP) { 1073 dev_warn_once(dev->dev, "S0i3 is not supported on this hardware\n"); 1074 err = -ENODEV; 1075 goto err_pci_dev_put; 1076 } 1077 1078 dev->rdev = rdev; 1079 err = amd_smn_read(0, AMD_PMC_BASE_ADDR_LO, &val); 1080 if (err) { 1081 dev_err(dev->dev, "error reading 0x%x\n", AMD_PMC_BASE_ADDR_LO); 1082 err = pcibios_err_to_errno(err); 1083 goto err_pci_dev_put; 1084 } 1085 1086 base_addr_lo = val & AMD_PMC_BASE_ADDR_HI_MASK; 1087 1088 err = amd_smn_read(0, AMD_PMC_BASE_ADDR_HI, 
&val); 1089 if (err) { 1090 dev_err(dev->dev, "error reading 0x%x\n", AMD_PMC_BASE_ADDR_HI); 1091 err = pcibios_err_to_errno(err); 1092 goto err_pci_dev_put; 1093 } 1094 1095 base_addr_hi = val & AMD_PMC_BASE_ADDR_LO_MASK; 1096 base_addr = ((u64)base_addr_hi << 32 | base_addr_lo); 1097 1098 dev->regbase = devm_ioremap(dev->dev, base_addr + AMD_PMC_BASE_ADDR_OFFSET, 1099 AMD_PMC_MAPPING_SIZE); 1100 if (!dev->regbase) { 1101 err = -ENOMEM; 1102 goto err_pci_dev_put; 1103 } 1104 1105 mutex_init(&dev->lock); 1106 1107 /* Get num of IP blocks within the SoC */ 1108 amd_pmc_get_ip_info(dev); 1109 1110 if (enable_stb && amd_pmc_is_stb_supported(dev)) { 1111 err = amd_pmc_s2d_init(dev); 1112 if (err) 1113 goto err_pci_dev_put; 1114 } 1115 1116 platform_set_drvdata(pdev, dev); 1117 if (IS_ENABLED(CONFIG_SUSPEND)) { 1118 err = acpi_register_lps0_dev(&amd_pmc_s2idle_dev_ops); 1119 if (err) 1120 dev_warn(dev->dev, "failed to register LPS0 sleep handler, expect increased power consumption\n"); 1121 if (!disable_workarounds) 1122 amd_pmc_quirks_init(dev); 1123 } 1124 1125 amd_pmc_dbgfs_register(dev); 1126 if (IS_ENABLED(CONFIG_AMD_MP2_STB)) 1127 amd_mp2_stb_init(dev); 1128 pm_report_max_hw_sleep(U64_MAX); 1129 return 0; 1130 1131 err_pci_dev_put: 1132 pci_dev_put(rdev); 1133 return err; 1134 } 1135 1136 static void amd_pmc_remove(struct platform_device *pdev) 1137 { 1138 struct amd_pmc_dev *dev = platform_get_drvdata(pdev); 1139 1140 if (IS_ENABLED(CONFIG_SUSPEND)) 1141 acpi_unregister_lps0_dev(&amd_pmc_s2idle_dev_ops); 1142 amd_pmc_dbgfs_unregister(dev); 1143 pci_dev_put(dev->rdev); 1144 if (IS_ENABLED(CONFIG_AMD_MP2_STB)) 1145 amd_mp2_stb_deinit(dev); 1146 mutex_destroy(&dev->lock); 1147 } 1148 1149 static const struct acpi_device_id amd_pmc_acpi_ids[] = { 1150 {"AMDI0005", 0}, 1151 {"AMDI0006", 0}, 1152 {"AMDI0007", 0}, 1153 {"AMDI0008", 0}, 1154 {"AMDI0009", 0}, 1155 {"AMDI000A", 0}, 1156 {"AMDI000B", 0}, 1157 {"AMD0004", 0}, 1158 {"AMD0005", 0}, 1159 { } 1160 }; 1161 
MODULE_DEVICE_TABLE(acpi, amd_pmc_acpi_ids);

/*
 * Platform driver binding: matched via the ACPI IDs above; sysfs groups
 * (SMU version/program) and the sleep PM ops are attached through the
 * driver core.
 */
static struct platform_driver amd_pmc_driver = {
	.driver = {
		.name = "amd_pmc",
		.acpi_match_table = amd_pmc_acpi_ids,
		.dev_groups = pmc_groups,
		/* PM ops are compiled out when CONFIG_PM_SLEEP is disabled */
		.pm = pm_sleep_ptr(&amd_pmc_pm),
	},
	.probe = amd_pmc_probe,
	.remove = amd_pmc_remove,
};
module_platform_driver(amd_pmc_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("AMD PMC Driver");