1 // SPDX-License-Identifier: MIT 2 /* 3 * Copyright © 2022 Intel Corporation 4 */ 5 6 #include "xe_guc_pc.h" 7 8 #include <linux/cleanup.h> 9 #include <linux/delay.h> 10 #include <linux/iopoll.h> 11 #include <linux/jiffies.h> 12 #include <linux/ktime.h> 13 #include <linux/wait_bit.h> 14 15 #include <drm/drm_managed.h> 16 #include <drm/drm_print.h> 17 #include <generated/xe_device_wa_oob.h> 18 #include <generated/xe_wa_oob.h> 19 20 #include "abi/guc_actions_slpc_abi.h" 21 #include "regs/xe_gt_regs.h" 22 #include "regs/xe_regs.h" 23 #include "xe_bo.h" 24 #include "xe_device.h" 25 #include "xe_force_wake.h" 26 #include "xe_gt.h" 27 #include "xe_gt_idle.h" 28 #include "xe_gt_printk.h" 29 #include "xe_gt_throttle.h" 30 #include "xe_gt_types.h" 31 #include "xe_guc.h" 32 #include "xe_guc_ct.h" 33 #include "xe_map.h" 34 #include "xe_mmio.h" 35 #include "xe_pcode.h" 36 #include "xe_pm.h" 37 #include "xe_sriov.h" 38 #include "xe_wa.h" 39 40 #define MCHBAR_MIRROR_BASE_SNB 0x140000 41 42 #define RP_STATE_CAP XE_REG(MCHBAR_MIRROR_BASE_SNB + 0x5998) 43 #define RP0_MASK REG_GENMASK(7, 0) 44 #define RP1_MASK REG_GENMASK(15, 8) 45 #define RPN_MASK REG_GENMASK(23, 16) 46 47 #define FREQ_INFO_REC XE_REG(MCHBAR_MIRROR_BASE_SNB + 0x5ef0) 48 #define RPE_MASK REG_GENMASK(15, 8) 49 #define RPA_MASK REG_GENMASK(31, 16) 50 51 #define GT_PERF_STATUS XE_REG(0x1381b4) 52 #define CAGF_MASK REG_GENMASK(19, 11) 53 54 #define GT_FREQUENCY_MULTIPLIER 50 55 #define GT_FREQUENCY_SCALER 3 56 57 #define LNL_MERT_FREQ_CAP 800 58 #define BMG_MERT_FREQ_CAP 2133 59 #define BMG_MIN_FREQ 1200 60 #define BMG_MERT_FLUSH_FREQ_CAP 2600 61 62 #define SLPC_RESET_TIMEOUT_MS 5 /* roughly 5ms, but no need for precision */ 63 #define SLPC_RESET_EXTENDED_TIMEOUT_MS 1000 /* To be used only at pc_start */ 64 #define SLPC_ACT_FREQ_TIMEOUT_MS 100 65 66 /** 67 * DOC: GuC Power Conservation (PC) 68 * 69 * GuC Power Conservation (PC) supports multiple features for the most 70 * efficient and performing use of the GT when GuC 
submission is enabled,
 * including frequency management, Render-C states management, and various
 * algorithms for power balancing.
 *
 * Single Loop Power Conservation (SLPC) is the name given to the suite of
 * connected power conservation features in the GuC firmware. The firmware
 * exposes a programming interface to the host for the control of SLPC.
 *
 * Frequency management:
 * ---------------------
 *
 * Xe driver enables SLPC with all of its default features and frequency
 * selection, which varies per platform.
 *
 * Power profiles add another level of control to SLPC. When the power-saving
 * profile is chosen, SLPC will use conservative thresholds to ramp frequency,
 * thus saving power. The base profile is the default and ensures balanced
 * performance for any workload.
 *
 * Render-C States:
 * ----------------
 *
 * Render-C states are also a GuC PC feature that is now enabled in Xe for
 * all platforms.
 *
 * Implementation details:
 * -----------------------
 * The implementation for GuC Power Management features is split as follows:
 *
 * xe_guc_rc: Logic for handling GuC RC
 * xe_gt_idle: Host side logic for RC6 and Coarse Power gating (CPG)
 * xe_guc_pc: Logic for all other SLPC related features
 *
 * There is some cross interaction between these where host C6 will need to be
 * enabled when we plan to skip GuC RC. Also, the GuC RC mode is currently
 * overridden through 0x3003 which is an SLPC H2G call.
 */

/* Back-resolve the xe_guc that embeds this PC instance. */
static struct xe_guc *pc_to_guc(struct xe_guc_pc *pc)
{
	return container_of(pc, struct xe_guc, pc);
}

/* CTB channel used for all SLPC H2G requests. */
static struct xe_guc_ct *pc_to_ct(struct xe_guc_pc *pc)
{
	return &pc_to_guc(pc)->ct;
}

static struct xe_gt *pc_to_gt(struct xe_guc_pc *pc)
{
	return guc_to_gt(pc_to_guc(pc));
}

static struct xe_device *pc_to_xe(struct xe_guc_pc *pc)
{
	return guc_to_xe(pc_to_guc(pc));
}

/* Mapping of the SLPC shared-data buffer object. */
static struct iosys_map *pc_to_maps(struct xe_guc_pc *pc)
{
	return &pc->bo->vmap;
}

/* Read/write one field of struct slpc_shared_data in the GuC-shared buffer */
#define slpc_shared_data_read(pc_, field_) \
	xe_map_rd_field(pc_to_xe(pc_), pc_to_maps(pc_), 0, \
			struct slpc_shared_data, field_)

#define slpc_shared_data_write(pc_, field_, val_) \
	xe_map_wr_field(pc_to_xe(pc_), pc_to_maps(pc_), 0, \
			struct slpc_shared_data, field_, val_)

/* Compose the event dword of a HOST2GUC_PC_SLPC_REQUEST message */
#define SLPC_EVENT(id, count) \
	(FIELD_PREP(HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ID, id) | \
	 FIELD_PREP(HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ARGC, count))

/*
 * Poll the SLPC global state published in shared data until it equals
 * @target_state or @timeout_ms expires. Returns 0 on success, or the
 * negative errno reported by poll_timeout_us() on timeout.
 */
static int wait_for_pc_state(struct xe_guc_pc *pc,
			     enum slpc_global_state target_state,
			     int timeout_ms)
{
	enum slpc_global_state state;

	xe_device_assert_mem_access(pc_to_xe(pc));

	return poll_timeout_us(state = slpc_shared_data_read(pc, header.global_state),
			       state == target_state,
			       20, timeout_ms * USEC_PER_MSEC, false);
}

/*
 * Wait (up to 30ms) for an in-progress L2-flush frequency cap to be
 * lifted, i.e. for pc->flush_freq_limit to read back as zero.
 */
static int wait_for_flush_complete(struct xe_guc_pc *pc)
{
	const unsigned long timeout = msecs_to_jiffies(30);

	if (!wait_var_event_timeout(&pc->flush_freq_limit,
				    !atomic_read(&pc->flush_freq_limit),
				    timeout))
		return -ETIMEDOUT;

	return 0;
}

/* Poll until the actual GT frequency drops to @max_limit or below. */
static int wait_for_act_freq_max_limit(struct xe_guc_pc *pc, u32 max_limit)
{
	u32 freq;

	return poll_timeout_us(freq = xe_guc_pc_get_act_freq(pc),
			       freq <= max_limit,
			       20, SLPC_ACT_FREQ_TIMEOUT_MS * USEC_PER_MSEC, false);
}
/* Issue the SLPC RESET event, handing the shared-data GGTT address to GuC. */
static int pc_action_reset(struct xe_guc_pc *pc)
{
	struct xe_guc_ct *ct = pc_to_ct(pc);
	u32 action[] = {
		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_RESET, 2),
		xe_bo_ggtt_addr(pc->bo),
		0,
	};
	int ret;

	ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
	/* A wedged device cancels CT sends; that is expected, not an error */
	if (ret && !(xe_device_wedged(pc_to_xe(pc)) && ret == -ECANCELED))
		xe_gt_err(pc_to_gt(pc), "GuC PC reset failed: %pe\n",
			  ERR_PTR(ret));

	return ret;
}

/*
 * Ask GuC to refresh task-state data in the shared buffer. Requires SLPC to
 * be in the RUNNING state first; returns -EAGAIN otherwise.
 */
static int pc_action_query_task_state(struct xe_guc_pc *pc)
{
	struct xe_guc_ct *ct = pc_to_ct(pc);
	u32 action[] = {
		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_QUERY_TASK_STATE, 2),
		xe_bo_ggtt_addr(pc->bo),
		0,
	};
	int ret;

	if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING,
			      SLPC_RESET_TIMEOUT_MS))
		return -EAGAIN;

	/* Blocking here to ensure the results are ready before reading them */
	ret = xe_guc_ct_send_block(ct, action, ARRAY_SIZE(action));
	if (ret && !(xe_device_wedged(pc_to_xe(pc)) && ret == -ECANCELED))
		xe_gt_err(pc_to_gt(pc), "GuC PC query task state failed: %pe\n",
			  ERR_PTR(ret));

	return ret;
}

/* Set SLPC parameter @id to @value (non-blocking H2G send). */
static int pc_action_set_param(struct xe_guc_pc *pc, u8 id, u32 value)
{
	struct xe_guc_ct *ct = pc_to_ct(pc);
	u32 action[] = {
		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_PARAMETER_SET, 2),
		id,
		value,
	};
	int ret;

	if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING,
			      SLPC_RESET_TIMEOUT_MS))
		return -EAGAIN;

	ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
	if (ret && !(xe_device_wedged(pc_to_xe(pc)) && ret == -ECANCELED))
		xe_gt_err(pc_to_gt(pc), "GuC PC set param[%u]=%u failed: %pe\n",
			  id, value, ERR_PTR(ret));

	return ret;
}

/* Revert SLPC parameter @id to its firmware default. */
static int pc_action_unset_param(struct xe_guc_pc *pc, u8 id)
{
	u32 action[] = {
		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_PARAMETER_UNSET, 1),
		id,
	};
	struct xe_guc_ct *ct = &pc_to_guc(pc)->ct;
	int ret;

	if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING,
			      SLPC_RESET_TIMEOUT_MS))
		return -EAGAIN;

	ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
	if (ret && !(xe_device_wedged(pc_to_xe(pc)) && ret == -ECANCELED))
		xe_gt_err(pc_to_gt(pc), "GuC PC unset param failed: %pe",
			  ERR_PTR(ret));

	return ret;
}
SLPC_EVENT(SLPC_EVENT_PARAMETER_UNSET, 1), 250 id, 251 }; 252 struct xe_guc_ct *ct = &pc_to_guc(pc)->ct; 253 int ret; 254 255 if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING, 256 SLPC_RESET_TIMEOUT_MS)) 257 return -EAGAIN; 258 259 ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0); 260 if (ret && !(xe_device_wedged(pc_to_xe(pc)) && ret == -ECANCELED)) 261 xe_gt_err(pc_to_gt(pc), "GuC PC unset param failed: %pe", 262 ERR_PTR(ret)); 263 264 return ret; 265 } 266 267 /** 268 * xe_guc_pc_action_set_param() - Set value of SLPC param 269 * @pc: Xe_GuC_PC instance 270 * @id: Param id 271 * @value: Value to set 272 * 273 * This function can be used to set any SLPC param. 274 * 275 * Return: 0 on Success 276 */ 277 int xe_guc_pc_action_set_param(struct xe_guc_pc *pc, u8 id, u32 value) 278 { 279 xe_device_assert_mem_access(pc_to_xe(pc)); 280 return pc_action_set_param(pc, id, value); 281 } 282 283 /** 284 * xe_guc_pc_action_unset_param() - Revert to default value 285 * @pc: Xe_GuC_PC instance 286 * @id: Param id 287 * 288 * This function can be used revert any SLPC param to its default value. 289 * 290 * Return: 0 on Success 291 */ 292 int xe_guc_pc_action_unset_param(struct xe_guc_pc *pc, u8 id) 293 { 294 xe_device_assert_mem_access(pc_to_xe(pc)); 295 return pc_action_unset_param(pc, id); 296 } 297 298 static u32 decode_freq(u32 raw) 299 { 300 return DIV_ROUND_CLOSEST(raw * GT_FREQUENCY_MULTIPLIER, 301 GT_FREQUENCY_SCALER); 302 } 303 304 static u32 encode_freq(u32 freq) 305 { 306 return DIV_ROUND_CLOSEST(freq * GT_FREQUENCY_SCALER, 307 GT_FREQUENCY_MULTIPLIER); 308 } 309 310 static u32 pc_get_min_freq(struct xe_guc_pc *pc) 311 { 312 u32 freq; 313 314 freq = FIELD_GET(SLPC_MIN_UNSLICE_FREQ_MASK, 315 slpc_shared_data_read(pc, task_state_data.freq)); 316 317 return decode_freq(freq); 318 } 319 320 static void pc_set_manual_rp_ctrl(struct xe_guc_pc *pc, bool enable) 321 { 322 struct xe_gt *gt = pc_to_gt(pc); 323 u32 state = enable ? 
/* Gate PUnit handling of software frequency requests on/off. */
static void pc_set_manual_rp_ctrl(struct xe_guc_pc *pc, bool enable)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 state = enable ? RPSWCTL_ENABLE : RPSWCTL_DISABLE;

	/* Allow/Disallow punit to process software freq requests */
	xe_mmio_write32(&gt->mmio, RP_CONTROL, state);
}

/*
 * Write a frequency request directly through RPNSWREQ, bypassing SLPC.
 * Used while GuC is not managing frequency (before load / at teardown).
 */
static void pc_set_cur_freq(struct xe_guc_pc *pc, u32 freq)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 rpnswreq;

	pc_set_manual_rp_ctrl(pc, true);

	/* Req freq is in units of 16.66 Mhz */
	rpnswreq = REG_FIELD_PREP(REQ_RATIO_MASK, encode_freq(freq));
	xe_mmio_write32(&gt->mmio, RPNSWREQ, rpnswreq);

	/* Sleep for a small time to allow pcode to respond */
	usleep_range(100, 300);

	pc_set_manual_rp_ctrl(pc, false);
}

/*
 * Push a new minimum frequency to SLPC. Callers serialize via
 * pc->freq_lock (see lockdep asserts in the callers).
 */
static int pc_set_min_freq(struct xe_guc_pc *pc, u32 freq)
{
	/*
	 * Let's only check for the rpn-rp0 range. If max < min,
	 * min becomes a fixed request.
	 */
	if (freq < pc->rpn_freq || freq > pc->rp0_freq)
		return -EINVAL;

	/*
	 * GuC policy is to elevate minimum frequency to the efficient levels
	 * Our goal is to have the admin choices respected.
	 */
	pc_action_set_param(pc, SLPC_PARAM_IGNORE_EFFICIENT_FREQUENCY,
			    freq < xe_guc_pc_get_rpe_freq(pc));

	return pc_action_set_param(pc,
				   SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
				   freq);
}

/* Maximum unslice frequency currently reported in SLPC task-state data. */
static int pc_get_max_freq(struct xe_guc_pc *pc)
{
	u32 freq;

	freq = FIELD_GET(SLPC_MAX_UNSLICE_FREQ_MASK,
			 slpc_shared_data_read(pc, task_state_data.freq));

	return decode_freq(freq);
}
/* Push a new maximum frequency to SLPC. */
static int pc_set_max_freq(struct xe_guc_pc *pc, u32 freq)
{
	/*
	 * Let's only check for the rpn-rp0 range. If max < min,
	 * min becomes a fixed request.
	 * Also, overclocking is not supported.
	 */
	if (freq < pc->rpn_freq || freq > pc->rp0_freq)
		return -EINVAL;

	return pc_action_set_param(pc,
				   SLPC_PARAM_GLOBAL_MAX_GT_UNSLICE_FREQ_MHZ,
				   freq);
}

/* RPa on MTL+: resolved value read from the per-GT-type frequency register */
static u32 mtl_get_rpa_freq(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 reg;

	if (xe_gt_is_media_type(gt))
		reg = xe_mmio_read32(&gt->mmio, MTL_MPA_FREQUENCY);
	else
		reg = xe_mmio_read32(&gt->mmio, MTL_GT_RPA_FREQUENCY);

	return decode_freq(REG_FIELD_GET(MTL_RPA_MASK, reg));
}

/* RPe on MTL+: resolved value read from the per-GT-type frequency register */
static u32 mtl_get_rpe_freq(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 reg;

	if (xe_gt_is_media_type(gt))
		reg = xe_mmio_read32(&gt->mmio, MTL_MPE_FREQUENCY);
	else
		reg = xe_mmio_read32(&gt->mmio, MTL_GT_RPE_FREQUENCY);

	return decode_freq(REG_FIELD_GET(MTL_RPE_MASK, reg));
}

static u32 pvc_get_rpa_freq(struct xe_guc_pc *pc)
{
	/*
	 * For PVC we still need to use fused RP0 as the approximation for RPa
	 * For other platforms than PVC we get the resolved RPa directly from
	 * PCODE at a different register
	 */

	struct xe_gt *gt = pc_to_gt(pc);
	u32 reg;

	reg = xe_mmio_read32(&gt->mmio, PVC_RP_STATE_CAP);
	return REG_FIELD_GET(RP0_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
}

/* RPa on pre-MTL, non-PVC platforms: resolved by PCODE in FREQ_INFO_REC */
static u32 tgl_get_rpa_freq(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 reg;

	reg = xe_mmio_read32(&gt->mmio, FREQ_INFO_REC);
	return REG_FIELD_GET(RPA_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
}

static u32 pvc_get_rpe_freq(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 reg;

	/*
	 * For PVC we still need to use fused RP1 as the approximation for RPe
	 */
	reg = xe_mmio_read32(&gt->mmio, PVC_RP_STATE_CAP);
	return REG_FIELD_GET(RP1_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
}
static u32 tgl_get_rpe_freq(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 reg;

	/*
	 * For other platforms than PVC, we get the resolved RPe directly from
	 * PCODE at a different register
	 */
	reg = xe_mmio_read32(&gt->mmio, FREQ_INFO_REC);
	return REG_FIELD_GET(RPE_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
}

/**
 * xe_guc_pc_get_act_freq - Get Actual running frequency
 * @pc: The GuC PC
 *
 * Returns: The Actual running frequency. Which might be 0 if GT is in Render-C sleep state (RC6).
 */
u32 xe_guc_pc_get_act_freq(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	struct xe_device *xe = gt_to_xe(gt);
	u32 freq;

	/* When in RC6, actual frequency reported will be 0. */
	if (GRAPHICS_VERx100(xe) >= 1270) {
		freq = xe_mmio_read32(&gt->mmio, MTL_MIRROR_TARGET_WP1);
		freq = REG_FIELD_GET(MTL_CAGF_MASK, freq);
	} else {
		freq = xe_mmio_read32(&gt->mmio, GT_PERF_STATUS);
		freq = REG_FIELD_GET(CAGF_MASK, freq);
	}

	freq = decode_freq(freq);

	return freq;
}

/* Requested (software) frequency currently latched in RPNSWREQ, in MHz. */
static u32 get_cur_freq(struct xe_gt *gt)
{
	u32 freq;

	freq = xe_mmio_read32(&gt->mmio, RPNSWREQ);
	freq = REG_FIELD_GET(REQ_RATIO_MASK, freq);
	return decode_freq(freq);
}

/**
 * xe_guc_pc_get_cur_freq_fw - With fw held, get requested frequency
 * @pc: The GuC PC
 *
 * Caller must already hold the XE_FW_GT forcewake domain (asserted here).
 *
 * Returns: the requested frequency for that GT instance
 */
u32 xe_guc_pc_get_cur_freq_fw(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);

	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);

	return get_cur_freq(gt);
}
524 */ 525 int xe_guc_pc_get_cur_freq(struct xe_guc_pc *pc, u32 *freq) 526 { 527 struct xe_gt *gt = pc_to_gt(pc); 528 529 /* 530 * GuC SLPC plays with cur freq request when GuCRC is enabled 531 * Block RC6 for a more reliable read. 532 */ 533 CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FW_GT); 534 if (!xe_force_wake_ref_has_domain(fw_ref.domains, XE_FW_GT)) 535 return -ETIMEDOUT; 536 537 *freq = get_cur_freq(gt); 538 539 return 0; 540 } 541 542 /** 543 * xe_guc_pc_get_rp0_freq - Get the RP0 freq 544 * @pc: The GuC PC 545 * 546 * Returns: RP0 freq. 547 */ 548 u32 xe_guc_pc_get_rp0_freq(struct xe_guc_pc *pc) 549 { 550 return pc->rp0_freq; 551 } 552 553 /** 554 * xe_guc_pc_get_rpa_freq - Get the RPa freq 555 * @pc: The GuC PC 556 * 557 * Returns: RPa freq. 558 */ 559 u32 xe_guc_pc_get_rpa_freq(struct xe_guc_pc *pc) 560 { 561 struct xe_gt *gt = pc_to_gt(pc); 562 struct xe_device *xe = gt_to_xe(gt); 563 564 if (GRAPHICS_VERx100(xe) == 1260) 565 return pvc_get_rpa_freq(pc); 566 else if (GRAPHICS_VERx100(xe) >= 1270) 567 return mtl_get_rpa_freq(pc); 568 else 569 return tgl_get_rpa_freq(pc); 570 } 571 572 /** 573 * xe_guc_pc_get_rpe_freq - Get the RPe freq 574 * @pc: The GuC PC 575 * 576 * Returns: RPe freq. 577 */ 578 u32 xe_guc_pc_get_rpe_freq(struct xe_guc_pc *pc) 579 { 580 struct xe_device *xe = pc_to_xe(pc); 581 u32 freq; 582 583 if (GRAPHICS_VERx100(xe) == 1260) 584 freq = pvc_get_rpe_freq(pc); 585 else if (GRAPHICS_VERx100(xe) >= 1270) 586 freq = mtl_get_rpe_freq(pc); 587 else 588 freq = tgl_get_rpe_freq(pc); 589 590 return freq; 591 } 592 593 /** 594 * xe_guc_pc_get_rpn_freq - Get the RPn freq 595 * @pc: The GuC PC 596 * 597 * Returns: RPn freq. 
/**
 * xe_guc_pc_get_rpn_freq - Get the RPn freq
 * @pc: The GuC PC
 *
 * Returns: RPn freq.
 */
u32 xe_guc_pc_get_rpn_freq(struct xe_guc_pc *pc)
{
	return pc->rpn_freq;
}

/* Query SLPC and read back the min frequency; caller holds pc->freq_lock. */
static int xe_guc_pc_get_min_freq_locked(struct xe_guc_pc *pc, u32 *freq)
{
	int ret;

	lockdep_assert_held(&pc->freq_lock);

	/* Might be in the middle of a gt reset */
	if (!pc->freq_ready)
		return -EAGAIN;

	ret = pc_action_query_task_state(pc);
	if (ret)
		return ret;

	*freq = pc_get_min_freq(pc);

	return 0;
}

/**
 * xe_guc_pc_get_min_freq - Get the min operational frequency
 * @pc: The GuC PC
 * @freq: A pointer to a u32 where the freq value will be returned
 *
 * Returns: 0 on success,
 *         -EAGAIN if GuC PC not ready (likely in middle of a reset).
 */
int xe_guc_pc_get_min_freq(struct xe_guc_pc *pc, u32 *freq)
{
	guard(mutex)(&pc->freq_lock);

	return xe_guc_pc_get_min_freq_locked(pc, freq);
}

/*
 * Apply a min frequency and remember it as the user's explicit choice;
 * caller holds pc->freq_lock.
 */
static int xe_guc_pc_set_min_freq_locked(struct xe_guc_pc *pc, u32 freq)
{
	int ret;

	lockdep_assert_held(&pc->freq_lock);

	/* Might be in the middle of a gt reset */
	if (!pc->freq_ready)
		return -EAGAIN;

	ret = pc_set_min_freq(pc, freq);
	if (ret)
		return ret;

	pc->user_requested_min = freq;

	return 0;
}
/**
 * xe_guc_pc_set_min_freq - Set the minimal operational frequency
 * @pc: The GuC PC
 * @freq: The selected minimal frequency
 *
 * Returns: 0 on success,
 *         -EAGAIN if GuC PC not ready (likely in middle of a reset),
 *         -EINVAL if value out of bounds.
 */
int xe_guc_pc_set_min_freq(struct xe_guc_pc *pc, u32 freq)
{
	guard(mutex)(&pc->freq_lock);

	return xe_guc_pc_set_min_freq_locked(pc, freq);
}

/* Query SLPC and read back the max frequency; caller holds pc->freq_lock. */
static int xe_guc_pc_get_max_freq_locked(struct xe_guc_pc *pc, u32 *freq)
{
	int ret;

	lockdep_assert_held(&pc->freq_lock);

	/* Might be in the middle of a gt reset */
	if (!pc->freq_ready)
		return -EAGAIN;

	ret = pc_action_query_task_state(pc);
	if (ret)
		return ret;

	*freq = pc_get_max_freq(pc);

	return 0;
}

/**
 * xe_guc_pc_get_max_freq - Get Maximum operational frequency
 * @pc: The GuC PC
 * @freq: A pointer to a u32 where the freq value will be returned
 *
 * Returns: 0 on success,
 *         -EAGAIN if GuC PC not ready (likely in middle of a reset).
 */
int xe_guc_pc_get_max_freq(struct xe_guc_pc *pc, u32 *freq)
{
	guard(mutex)(&pc->freq_lock);

	return xe_guc_pc_get_max_freq_locked(pc, freq);
}

/*
 * Apply a max frequency and remember it as the user's explicit choice;
 * caller holds pc->freq_lock.
 */
static int xe_guc_pc_set_max_freq_locked(struct xe_guc_pc *pc, u32 freq)
{
	int ret;

	lockdep_assert_held(&pc->freq_lock);

	/* Might be in the middle of a gt reset */
	if (!pc->freq_ready)
		return -EAGAIN;

	ret = pc_set_max_freq(pc, freq);
	if (ret)
		return ret;

	pc->user_requested_max = freq;

	return 0;
}
/**
 * xe_guc_pc_set_max_freq - Set the maximum operational frequency
 * @pc: The GuC PC
 * @freq: The selected maximum frequency value
 *
 * Returns: 0 on success,
 *         -EAGAIN if GuC PC not ready (likely in middle of a reset),
 *         -EINVAL if value out of bounds.
 */
int xe_guc_pc_set_max_freq(struct xe_guc_pc *pc, u32 freq)
{
	/* WA 22019338487: new max requests wait for the L2-flush cap to lift */
	if (XE_GT_WA(pc_to_gt(pc), 22019338487)) {
		if (wait_for_flush_complete(pc) != 0)
			return -EAGAIN;
	}

	guard(mutex)(&pc->freq_lock);

	return xe_guc_pc_set_max_freq_locked(pc, freq);
}

/**
 * xe_guc_pc_c_status - get the current GT C state
 * @pc: XE_GuC_PC instance
 */
enum xe_gt_idle_state xe_guc_pc_c_status(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 reg, gt_c_state;

	if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 1270) {
		reg = xe_mmio_read32(&gt->mmio, MTL_MIRROR_TARGET_WP1);
		gt_c_state = REG_FIELD_GET(MTL_CC_MASK, reg);

		/*
		 * There are higher level sleep states that will cause this
		 * field to read out as its reset state, and those are only
		 * possible after the GT is already in C6.
		 */
		if (gt_c_state == MTL_CRST)
			gt_c_state = GT_C6;
	} else {
		reg = xe_mmio_read32(&gt->mmio, GT_CORE_STATUS);
		gt_c_state = REG_FIELD_GET(RCN_MASK, reg);
	}

	switch (gt_c_state) {
	case GT_C6:
		return GT_IDLE_C6;
	case GT_C0:
		return GT_IDLE_C0;
	default:
		return GT_IDLE_UNKNOWN;
	}
}

/**
 * xe_guc_pc_rc6_residency - rc6 residency counter
 * @pc: Xe_GuC_PC instance
 */
u64 xe_guc_pc_rc6_residency(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 reg;

	reg = xe_mmio_read32(&gt->mmio, GT_GFX_RC6);

	return reg;
}

/**
 * xe_guc_pc_mc6_residency - mc6 residency counter
 * @pc: Xe_GuC_PC instance
 */
u64 xe_guc_pc_mc6_residency(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u64 reg;

	reg = xe_mmio_read32(&gt->mmio, MTL_MEDIA_MC6);

	return reg;
}
/* Read fused RP0/RPn caps on MTL+ from the per-GT-type STATE_CAP register. */
static void mtl_init_fused_rp_values(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 reg;

	xe_device_assert_mem_access(pc_to_xe(pc));

	if (xe_gt_is_media_type(gt))
		reg = xe_mmio_read32(&gt->mmio, MTL_MEDIAP_STATE_CAP);
	else
		reg = xe_mmio_read32(&gt->mmio, MTL_RP_STATE_CAP);

	pc->rp0_freq = decode_freq(REG_FIELD_GET(MTL_RP0_CAP_MASK, reg));

	pc->rpn_freq = decode_freq(REG_FIELD_GET(MTL_RPN_CAP_MASK, reg));
}

/* Read fused RP0/RPn caps on pre-MTL platforms (PVC has its own register). */
static void tgl_init_fused_rp_values(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	struct xe_device *xe = gt_to_xe(gt);
	u32 reg;

	xe_device_assert_mem_access(pc_to_xe(pc));

	if (xe->info.platform == XE_PVC)
		reg = xe_mmio_read32(&gt->mmio, PVC_RP_STATE_CAP);
	else
		reg = xe_mmio_read32(&gt->mmio, RP_STATE_CAP);
	pc->rp0_freq = REG_FIELD_GET(RP0_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
	pc->rpn_freq = REG_FIELD_GET(RPN_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
}

/* Populate pc->rp0_freq/rpn_freq from the platform-appropriate fuses. */
static void pc_init_fused_rp_values(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	struct xe_device *xe = gt_to_xe(gt);

	if (GRAPHICS_VERx100(xe) >= 1270)
		mtl_init_fused_rp_values(pc);
	else
		tgl_init_fused_rp_values(pc);
}

/*
 * Highest frequency we may request: RP0, or the MERT cap while
 * WA 22019338487 applies (media vs primary GT use different caps).
 */
static u32 pc_max_freq_cap(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);

	if (XE_GT_WA(gt, 22019338487)) {
		if (xe_gt_is_media_type(gt))
			return min(LNL_MERT_FREQ_CAP, pc->rp0_freq);
		else
			return min(BMG_MERT_FREQ_CAP, pc->rp0_freq);
	} else {
		return pc->rp0_freq;
	}
}

/**
 * xe_guc_pc_raise_unslice - Initialize RPx values and request a higher GT
 * frequency to allow faster GuC load times
 * @pc: Xe_GuC_PC instance
 */
void xe_guc_pc_raise_unslice(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);

	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
	pc_set_cur_freq(pc, pc_max_freq_cap(pc));
}
/**
 * xe_guc_pc_init_early - Initialize RPx values
 * @pc: Xe_GuC_PC instance
 */
void xe_guc_pc_init_early(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);

	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
	pc_init_fused_rp_values(pc);
}

/* Clamp GuC's default min/max into the fused RPn..RP0 window. */
static int pc_adjust_freq_bounds(struct xe_guc_pc *pc)
{
	struct xe_tile *tile = gt_to_tile(pc_to_gt(pc));
	int ret;

	lockdep_assert_held(&pc->freq_lock);

	ret = pc_action_query_task_state(pc);
	if (ret)
		goto out;

	/*
	 * GuC defaults to some RPmax that is not actually achievable without
	 * overclocking. Let's adjust it to the Hardware RP0, which is the
	 * regular maximum
	 */
	if (pc_get_max_freq(pc) > pc->rp0_freq) {
		ret = pc_set_max_freq(pc, pc->rp0_freq);
		if (ret)
			goto out;
	}

	/*
	 * Same thing happens for Server platforms where min is listed as
	 * RPMax
	 */
	if (pc_get_min_freq(pc) > pc->rp0_freq)
		ret = pc_set_min_freq(pc, pc->rp0_freq);

	/* Device WA 14022085890: enforce a BMG-specific floor on min freq */
	if (XE_DEVICE_WA(tile_to_xe(tile), 14022085890))
		ret = pc_set_min_freq(pc, max(BMG_MIN_FREQ, pc_get_min_freq(pc)));

out:
	return ret;
}

/* Re-apply user-chosen min/max after a reset wiped GuC state. */
static int pc_adjust_requested_freq(struct xe_guc_pc *pc)
{
	int ret = 0;

	lockdep_assert_held(&pc->freq_lock);

	if (pc->user_requested_min != 0) {
		ret = pc_set_min_freq(pc, pc->user_requested_min);
		if (ret)
			return ret;
	}

	if (pc->user_requested_max != 0) {
		ret = pc_set_max_freq(pc, pc->user_requested_max);
		if (ret)
			return ret;
	}

	return ret;
}

/* The L2-flush freq cap only matters if RP0 can actually exceed the cap. */
static bool needs_flush_freq_limit(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);

	return XE_GT_WA(gt, 22019338487) &&
		pc->rp0_freq > BMG_MERT_FLUSH_FREQ_CAP;
}
/**
 * xe_guc_pc_apply_flush_freq_limit() - Limit max GT freq during L2 flush
 * @pc: the xe_guc_pc object
 *
 * As per the WA, reduce max GT frequency during L2 cache flush
 */
void xe_guc_pc_apply_flush_freq_limit(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 max_freq;
	int ret;

	if (!needs_flush_freq_limit(pc))
		return;

	guard(mutex)(&pc->freq_lock);

	ret = xe_guc_pc_get_max_freq_locked(pc, &max_freq);
	if (!ret && max_freq > BMG_MERT_FLUSH_FREQ_CAP) {
		ret = pc_set_max_freq(pc, BMG_MERT_FLUSH_FREQ_CAP);
		if (ret) {
			xe_gt_err_once(gt, "Failed to cap max freq on flush to %u, %pe\n",
				       BMG_MERT_FLUSH_FREQ_CAP, ERR_PTR(ret));
			return;
		}

		/* Signals xe_guc_pc_set_max_freq() callers to wait on flush */
		atomic_set(&pc->flush_freq_limit, 1);

		/*
		 * If user has previously changed max freq, stash that value to
		 * restore later, otherwise use the current max. New user
		 * requests wait on flush.
		 */
		if (pc->user_requested_max != 0)
			pc->stashed_max_freq = pc->user_requested_max;
		else
			pc->stashed_max_freq = max_freq;
	}

	/*
	 * Wait for actual freq to go below the flush cap: even if the previous
	 * max was below cap, the current one might still be above it
	 */
	ret = wait_for_act_freq_max_limit(pc, BMG_MERT_FLUSH_FREQ_CAP);
	if (ret)
		xe_gt_err_once(gt, "Actual freq did not reduce to %u, %pe\n",
			       BMG_MERT_FLUSH_FREQ_CAP, ERR_PTR(ret));
}
1012 */ 1013 void xe_guc_pc_remove_flush_freq_limit(struct xe_guc_pc *pc) 1014 { 1015 struct xe_gt *gt = pc_to_gt(pc); 1016 int ret = 0; 1017 1018 if (!needs_flush_freq_limit(pc)) 1019 return; 1020 1021 if (!atomic_read(&pc->flush_freq_limit)) 1022 return; 1023 1024 mutex_lock(&pc->freq_lock); 1025 1026 ret = pc_set_max_freq(>->uc.guc.pc, pc->stashed_max_freq); 1027 if (ret) 1028 xe_gt_err_once(gt, "Failed to restore max freq %u:%d", 1029 pc->stashed_max_freq, ret); 1030 1031 atomic_set(&pc->flush_freq_limit, 0); 1032 mutex_unlock(&pc->freq_lock); 1033 wake_up_var(&pc->flush_freq_limit); 1034 } 1035 1036 static int pc_set_mert_freq_cap(struct xe_guc_pc *pc) 1037 { 1038 int ret; 1039 1040 if (!XE_GT_WA(pc_to_gt(pc), 22019338487)) 1041 return 0; 1042 1043 guard(mutex)(&pc->freq_lock); 1044 1045 /* 1046 * Get updated min/max and stash them. 1047 */ 1048 ret = xe_guc_pc_get_min_freq_locked(pc, &pc->stashed_min_freq); 1049 if (!ret) 1050 ret = xe_guc_pc_get_max_freq_locked(pc, &pc->stashed_max_freq); 1051 if (ret) 1052 return ret; 1053 1054 /* 1055 * Ensure min and max are bound by MERT_FREQ_CAP until driver loads. 
/*
 * WA 22019338487: bound min/max by the MERT cap until driver load completes,
 * stashing the current values for xe_guc_pc_restore_stashed_freq().
 */
static int pc_set_mert_freq_cap(struct xe_guc_pc *pc)
{
	int ret;

	if (!XE_GT_WA(pc_to_gt(pc), 22019338487))
		return 0;

	guard(mutex)(&pc->freq_lock);

	/*
	 * Get updated min/max and stash them.
	 */
	ret = xe_guc_pc_get_min_freq_locked(pc, &pc->stashed_min_freq);
	if (!ret)
		ret = xe_guc_pc_get_max_freq_locked(pc, &pc->stashed_max_freq);
	if (ret)
		return ret;

	/*
	 * Ensure min and max are bound by MERT_FREQ_CAP until driver loads.
	 */
	ret = pc_set_min_freq(pc, min(xe_guc_pc_get_rpe_freq(pc), pc_max_freq_cap(pc)));
	if (!ret)
		ret = pc_set_max_freq(pc, min(pc->rp0_freq, pc_max_freq_cap(pc)));

	return ret;
}

/**
 * xe_guc_pc_restore_stashed_freq - Set min/max back to stashed values
 * @pc: The GuC PC
 *
 * Returns: 0 on success,
 *          error code on failure
 */
int xe_guc_pc_restore_stashed_freq(struct xe_guc_pc *pc)
{
	int ret = 0;

	/* VFs and skip_guc_pc devices never stashed anything to restore */
	if (IS_SRIOV_VF(pc_to_xe(pc)) || pc_to_xe(pc)->info.skip_guc_pc)
		return 0;

	mutex_lock(&pc->freq_lock);
	ret = pc_set_max_freq(pc, pc->stashed_max_freq);
	if (!ret)
		ret = pc_set_min_freq(pc, pc->stashed_min_freq);
	mutex_unlock(&pc->freq_lock);

	return ret;
}

/* Hand the fused min/max (in 50 MHz units) to PCODE for its freq table. */
static void pc_init_pcode_freq(struct xe_guc_pc *pc)
{
	u32 min = DIV_ROUND_CLOSEST(pc->rpn_freq, GT_FREQUENCY_MULTIPLIER);
	u32 max = DIV_ROUND_CLOSEST(pc->rp0_freq, GT_FREQUENCY_MULTIPLIER);

	XE_WARN_ON(xe_pcode_init_min_freq_table(gt_to_tile(pc_to_gt(pc)), min, max));
}
/* Adjust bounds, re-apply user requests and mark frequencies usable. */
static int pc_init_freqs(struct xe_guc_pc *pc)
{
	int ret;

	mutex_lock(&pc->freq_lock);

	ret = pc_adjust_freq_bounds(pc);
	if (ret)
		goto out;

	ret = pc_adjust_requested_freq(pc);
	if (ret)
		goto out;

	pc_init_pcode_freq(pc);

	/*
	 * The frequencies are really ready for use only after the user
	 * requested ones got restored.
	 */
	pc->freq_ready = true;

out:
	mutex_unlock(&pc->freq_lock);
	return ret;
}

/* Program the SLPC_PARAM_STRATEGIES bitmask. */
static int pc_action_set_strategy(struct xe_guc_pc *pc, u32 val)
{
	int ret = 0;

	ret = pc_action_set_param(pc,
				  SLPC_PARAM_STRATEGIES,
				  val);

	return ret;
}

/* Name of the cached power profile, for logs and re-application. */
static const char *power_profile_to_string(struct xe_guc_pc *pc)
{
	switch (pc->power_profile) {
	case SLPC_POWER_PROFILE_BASE:
		return "base";
	case SLPC_POWER_PROFILE_POWER_SAVING:
		return "power_saving";
	default:
		return "invalid";
	}
}

/*
 * Format the profile list into @profile with the active entry bracketed.
 * NOTE(review): @profile is written with sprintf() — callers must provide a
 * buffer large enough for the longest variant; confirm against callers.
 */
void xe_guc_pc_get_power_profile(struct xe_guc_pc *pc, char *profile)
{
	switch (pc->power_profile) {
	case SLPC_POWER_PROFILE_BASE:
		sprintf(profile, "[%s] %s\n", "base", "power_saving");
		break;
	case SLPC_POWER_PROFILE_POWER_SAVING:
		sprintf(profile, "%s [%s]\n", "base", "power_saving");
		break;
	default:
		sprintf(profile, "invalid");
	}
}

/*
 * Parse @buf ("base" or "power_saving"), program SLPC_PARAM_POWER_PROFILE
 * and cache the value on success. Returns 0 or a negative errno.
 */
int xe_guc_pc_set_power_profile(struct xe_guc_pc *pc, const char *buf)
{
	int ret = 0;
	u32 val;

	if (strncmp("base", buf, strlen("base")) == 0)
		val = SLPC_POWER_PROFILE_BASE;
	else if (strncmp("power_saving", buf, strlen("power_saving")) == 0)
		val = SLPC_POWER_PROFILE_POWER_SAVING;
	else
		return -EINVAL;

	guard(mutex)(&pc->freq_lock);
	guard(xe_pm_runtime_noresume)(pc_to_xe(pc));

	ret = pc_action_set_param(pc,
				  SLPC_PARAM_POWER_PROFILE,
				  val);
	if (ret)
		xe_gt_err_once(pc_to_gt(pc), "Failed to set power profile to %d: %pe\n",
			       val, ERR_PTR(ret));
	else
		pc->power_profile = val;

	return ret;
}
/* Enable or disable Duty Cycle Control via its paired enable/disable params */
static int pc_action_set_dcc(struct xe_guc_pc *pc, bool enable)
{
	int ret;

	ret = pc_action_set_param(pc,
				  SLPC_PARAM_TASK_ENABLE_DCC,
				  enable);
	if (!ret)
		return pc_action_set_param(pc,
					   SLPC_PARAM_TASK_DISABLE_DCC,
					   !enable);
	else
		return ret;
}

/* Per-platform deviations from the SLPC firmware defaults. */
static int pc_modify_defaults(struct xe_guc_pc *pc)
{
	struct xe_device *xe = pc_to_xe(pc);
	struct xe_gt *gt = pc_to_gt(pc);
	int ret = 0;

	/* Panther Lake runs with DCC disabled */
	if (xe->info.platform == XE_PANTHERLAKE) {
		ret = pc_action_set_dcc(pc, false);
		if (unlikely(ret))
			xe_gt_err(gt, "Failed to modify DCC default: %pe\n", ERR_PTR(ret));
	}

	return ret;
}
start time: %lldms", 1261 ktime_ms_delta(ktime_get(), earlier)); 1262 } 1263 1264 ret = pc_modify_defaults(pc); 1265 if (ret) 1266 return ret; 1267 1268 ret = pc_init_freqs(pc); 1269 if (ret) 1270 return ret; 1271 1272 ret = pc_set_mert_freq_cap(pc); 1273 if (ret) 1274 return ret; 1275 1276 /* Enable SLPC Optimized Strategy for compute */ 1277 ret = pc_action_set_strategy(pc, SLPC_OPTIMIZED_STRATEGY_COMPUTE); 1278 1279 /* Set cached value of power_profile */ 1280 ret = xe_guc_pc_set_power_profile(pc, power_profile_to_string(pc)); 1281 if (unlikely(ret)) 1282 xe_gt_err(gt, "Failed to set SLPC power profile: %pe\n", ERR_PTR(ret)); 1283 1284 return ret; 1285 } 1286 1287 /** 1288 * xe_guc_pc_stop - Stop GuC's Power Conservation component 1289 * @pc: Xe_GuC_PC instance 1290 */ 1291 int xe_guc_pc_stop(struct xe_guc_pc *pc) 1292 { 1293 struct xe_device *xe = pc_to_xe(pc); 1294 1295 if (xe->info.skip_guc_pc) 1296 return 0; 1297 1298 mutex_lock(&pc->freq_lock); 1299 pc->freq_ready = false; 1300 mutex_unlock(&pc->freq_lock); 1301 1302 return 0; 1303 } 1304 1305 /** 1306 * xe_guc_pc_fini_hw - Finalize GuC's Power Conservation component 1307 * @arg: opaque pointer that should point to Xe_GuC_PC instance 1308 */ 1309 static void xe_guc_pc_fini_hw(void *arg) 1310 { 1311 struct xe_guc_pc *pc = arg; 1312 struct xe_device *xe = pc_to_xe(pc); 1313 1314 if (xe_device_wedged(xe)) 1315 return; 1316 1317 CLASS(xe_force_wake, fw_ref)(gt_to_fw(pc_to_gt(pc)), XE_FW_GT); 1318 XE_WARN_ON(xe_guc_pc_stop(pc)); 1319 1320 /* Bind requested freq to mert_freq_cap before unload */ 1321 pc_set_cur_freq(pc, min(pc_max_freq_cap(pc), xe_guc_pc_get_rpe_freq(pc))); 1322 } 1323 1324 /** 1325 * xe_guc_pc_init - Initialize GuC's Power Conservation component 1326 * @pc: Xe_GuC_PC instance 1327 */ 1328 int xe_guc_pc_init(struct xe_guc_pc *pc) 1329 { 1330 struct xe_gt *gt = pc_to_gt(pc); 1331 struct xe_tile *tile = gt_to_tile(gt); 1332 struct xe_device *xe = gt_to_xe(gt); 1333 struct xe_bo *bo; 1334 u32 size = 
PAGE_ALIGN(sizeof(struct slpc_shared_data)); 1335 int err; 1336 1337 if (xe->info.skip_guc_pc) 1338 return 0; 1339 1340 err = drmm_mutex_init(&xe->drm, &pc->freq_lock); 1341 if (err) 1342 return err; 1343 1344 bo = xe_managed_bo_create_pin_map(xe, tile, size, 1345 XE_BO_FLAG_VRAM_IF_DGFX(tile) | 1346 XE_BO_FLAG_GGTT | 1347 XE_BO_FLAG_GGTT_INVALIDATE | 1348 XE_BO_FLAG_PINNED_NORESTORE); 1349 if (IS_ERR(bo)) 1350 return PTR_ERR(bo); 1351 1352 pc->bo = bo; 1353 1354 pc->power_profile = SLPC_POWER_PROFILE_BASE; 1355 1356 return devm_add_action_or_reset(xe->drm.dev, xe_guc_pc_fini_hw, pc); 1357 } 1358 1359 static const char *pc_get_state_string(struct xe_guc_pc *pc) 1360 { 1361 switch (slpc_shared_data_read(pc, header.global_state)) { 1362 case SLPC_GLOBAL_STATE_NOT_RUNNING: 1363 return "not running"; 1364 case SLPC_GLOBAL_STATE_INITIALIZING: 1365 return "initializing"; 1366 case SLPC_GLOBAL_STATE_RESETTING: 1367 return "resetting"; 1368 case SLPC_GLOBAL_STATE_RUNNING: 1369 return "running"; 1370 case SLPC_GLOBAL_STATE_SHUTTING_DOWN: 1371 return "shutting down"; 1372 case SLPC_GLOBAL_STATE_ERROR: 1373 return "error"; 1374 default: 1375 return "unknown"; 1376 } 1377 } 1378 1379 /** 1380 * xe_guc_pc_print - Print GuC's Power Conservation information for debug 1381 * @pc: Xe_GuC_PC instance 1382 * @p: drm_printer 1383 */ 1384 void xe_guc_pc_print(struct xe_guc_pc *pc, struct drm_printer *p) 1385 { 1386 drm_printf(p, "SLPC Shared Data Header:\n"); 1387 drm_printf(p, "\tSize: %x\n", slpc_shared_data_read(pc, header.size)); 1388 drm_printf(p, "\tGlobal State: %s\n", pc_get_state_string(pc)); 1389 1390 if (pc_action_query_task_state(pc)) 1391 return; 1392 1393 drm_printf(p, "\nSLPC Tasks Status:\n"); 1394 drm_printf(p, "\tGTPERF enabled: %s\n", 1395 str_yes_no(slpc_shared_data_read(pc, task_state_data.status) & 1396 SLPC_GTPERF_TASK_ENABLED)); 1397 drm_printf(p, "\tDCC enabled: %s\n", 1398 str_yes_no(slpc_shared_data_read(pc, task_state_data.status) & 1399 
SLPC_DCC_TASK_ENABLED)); 1400 drm_printf(p, "\tDCC in use: %s\n", 1401 str_yes_no(slpc_shared_data_read(pc, task_state_data.status) & 1402 SLPC_IN_DCC)); 1403 drm_printf(p, "\tBalancer enabled: %s\n", 1404 str_yes_no(slpc_shared_data_read(pc, task_state_data.status) & 1405 SLPC_BALANCER_ENABLED)); 1406 drm_printf(p, "\tIBC enabled: %s\n", 1407 str_yes_no(slpc_shared_data_read(pc, task_state_data.status) & 1408 SLPC_IBC_TASK_ENABLED)); 1409 drm_printf(p, "\tBalancer IA LMT enabled: %s\n", 1410 str_yes_no(slpc_shared_data_read(pc, task_state_data.status) & 1411 SLPC_BALANCER_IA_LMT_ENABLED)); 1412 drm_printf(p, "\tBalancer IA LMT active: %s\n", 1413 str_yes_no(slpc_shared_data_read(pc, task_state_data.status) & 1414 SLPC_BALANCER_IA_LMT_ACTIVE)); 1415 } 1416