// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_guc_pc.h"

#include <linux/cleanup.h>
#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/ktime.h>
#include <linux/wait_bit.h>

#include <drm/drm_managed.h>
#include <drm/drm_print.h>
#include <generated/xe_wa_oob.h>

#include "abi/guc_actions_slpc_abi.h"
#include "regs/xe_gt_regs.h"
#include "regs/xe_regs.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_force_wake.h"
#include "xe_gt.h"
#include "xe_gt_idle.h"
#include "xe_gt_printk.h"
#include "xe_gt_throttle.h"
#include "xe_gt_types.h"
#include "xe_guc.h"
#include "xe_guc_ct.h"
#include "xe_map.h"
#include "xe_mmio.h"
#include "xe_pcode.h"
#include "xe_pm.h"
#include "xe_sriov.h"
#include "xe_wa.h"

#define MCHBAR_MIRROR_BASE_SNB	0x140000

#define RP_STATE_CAP		XE_REG(MCHBAR_MIRROR_BASE_SNB + 0x5998)
#define   RP0_MASK		REG_GENMASK(7, 0)
#define   RP1_MASK		REG_GENMASK(15, 8)
#define   RPN_MASK		REG_GENMASK(23, 16)

#define FREQ_INFO_REC		XE_REG(MCHBAR_MIRROR_BASE_SNB + 0x5ef0)
#define   RPE_MASK		REG_GENMASK(15, 8)
#define   RPA_MASK		REG_GENMASK(31, 16)

#define GT_PERF_STATUS		XE_REG(0x1381b4)
#define   CAGF_MASK		REG_GENMASK(19, 11)

#define GT_FREQUENCY_MULTIPLIER	50
#define GT_FREQUENCY_SCALER	3

#define LNL_MERT_FREQ_CAP	800
#define BMG_MERT_FREQ_CAP	2133
#define BMG_MIN_FREQ		1200
#define BMG_MERT_FLUSH_FREQ_CAP	2600

#define SLPC_RESET_TIMEOUT_MS 5 /* roughly 5ms, but no need for precision */
#define SLPC_RESET_EXTENDED_TIMEOUT_MS 1000 /* To be used only at pc_start */
#define SLPC_ACT_FREQ_TIMEOUT_MS 100

/**
 * DOC: GuC Power Conservation (PC)
 *
 * GuC Power Conservation (PC) supports multiple features for the most
 * efficient and performing use of the GT when GuC submission is enabled,
 * including frequency management, Render-C states management, and various
 * algorithms for power balancing.
 *
 * Single Loop Power Conservation (SLPC) is the name given to the suite of
 * connected power conservation features in the GuC firmware. The firmware
 * exposes a programming interface to the host for the control of SLPC.
 *
 * Frequency management:
 * =====================
 *
 * The Xe driver enables SLPC with all of its default features and a
 * frequency selection that varies per platform.
 *
 * Render-C States:
 * ================
 *
 * Render-C states are also a GuC PC feature, now enabled in Xe for all
 * platforms.
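 *
 * Frequencies at this interface are expressed in MHz; internally the
 * hardware tracks them as a ratio in units of 50/3 MHz (~16.67 MHz), which
 * decode_freq()/encode_freq() below convert between. A minimal,
 * illustrative use of the host-facing helpers (assuming valid ``pc`` and
 * ``xe`` pointers are in scope) could look like::
 *
 *	u32 freq;
 *
 *	if (!xe_guc_pc_get_cur_freq(pc, &freq))
 *		drm_info(&xe->drm, "requested freq: %u MHz\n", freq);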
 *
 */

static struct xe_guc *pc_to_guc(struct xe_guc_pc *pc)
{
	return container_of(pc, struct xe_guc, pc);
}

static struct xe_guc_ct *pc_to_ct(struct xe_guc_pc *pc)
{
	return &pc_to_guc(pc)->ct;
}

static struct xe_gt *pc_to_gt(struct xe_guc_pc *pc)
{
	return guc_to_gt(pc_to_guc(pc));
}

static struct xe_device *pc_to_xe(struct xe_guc_pc *pc)
{
	return guc_to_xe(pc_to_guc(pc));
}

static struct iosys_map *pc_to_maps(struct xe_guc_pc *pc)
{
	return &pc->bo->vmap;
}

#define slpc_shared_data_read(pc_, field_) \
	xe_map_rd_field(pc_to_xe(pc_), pc_to_maps(pc_), 0, \
			struct slpc_shared_data, field_)

#define slpc_shared_data_write(pc_, field_, val_) \
	xe_map_wr_field(pc_to_xe(pc_), pc_to_maps(pc_), 0, \
			struct slpc_shared_data, field_, val_)

#define SLPC_EVENT(id, count) \
	(FIELD_PREP(HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ID, id) | \
	 FIELD_PREP(HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ARGC, count))

static int wait_for_pc_state(struct xe_guc_pc *pc,
			     enum slpc_global_state state,
			     int timeout_ms)
{
	int timeout_us = 1000 * timeout_ms;
	int slept, wait = 10;

	xe_device_assert_mem_access(pc_to_xe(pc));

	for (slept = 0; slept < timeout_us;) {
		if (slpc_shared_data_read(pc, header.global_state) == state)
			return 0;

		usleep_range(wait, wait << 1);
		slept += wait;
		wait <<= 1;
		if (slept + wait > timeout_us)
			wait = timeout_us - slept;
	}

	return -ETIMEDOUT;
}

static int wait_for_flush_complete(struct xe_guc_pc *pc)
{
	const unsigned long timeout = msecs_to_jiffies(30);

	if (!wait_var_event_timeout(&pc->flush_freq_limit,
				    !atomic_read(&pc->flush_freq_limit),
				    timeout))
		return -ETIMEDOUT;

	return 0;
}

static int wait_for_act_freq_limit(struct xe_guc_pc *pc, u32 freq)
{
	int timeout_us = SLPC_ACT_FREQ_TIMEOUT_MS * USEC_PER_MSEC;
	int slept, wait = 10;

	for (slept = 0; slept < timeout_us;) {
		if (xe_guc_pc_get_act_freq(pc) <= freq)
			return 0;

		usleep_range(wait, wait << 1);
		slept += wait;
		wait <<= 1;
		if (slept + wait > timeout_us)
			wait = timeout_us - slept;
	}

	return -ETIMEDOUT;
}

static int pc_action_reset(struct xe_guc_pc *pc)
{
	struct xe_guc_ct *ct = pc_to_ct(pc);
	u32 action[] = {
		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_RESET, 2),
		xe_bo_ggtt_addr(pc->bo),
		0,
	};
	int ret;

	ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
	if (ret && !(xe_device_wedged(pc_to_xe(pc)) && ret == -ECANCELED))
		xe_gt_err(pc_to_gt(pc), "GuC PC reset failed: %pe\n",
			  ERR_PTR(ret));

	return ret;
}

static int pc_action_query_task_state(struct xe_guc_pc *pc)
{
	struct xe_guc_ct *ct = pc_to_ct(pc);
	u32 action[] = {
		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_QUERY_TASK_STATE, 2),
		xe_bo_ggtt_addr(pc->bo),
		0,
	};
	int ret;

	if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING,
			      SLPC_RESET_TIMEOUT_MS))
		return -EAGAIN;

	/* Blocking here to ensure the results are ready before reading them */
	ret = xe_guc_ct_send_block(ct, action, ARRAY_SIZE(action));
	if (ret && !(xe_device_wedged(pc_to_xe(pc)) && ret == -ECANCELED))
		xe_gt_err(pc_to_gt(pc), "GuC PC query task state failed: %pe\n",
			  ERR_PTR(ret));

	return ret;
}
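
/*
 * All SLPC requests above and below share one H2G layout: msg[0] is the
 * GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST action, msg[1] packs the event id and
 * argument count (see SLPC_EVENT() above), and the event arguments follow.
 * For the shared-data events (reset, query task state) the arguments are
 * the GGTT address of the slpc_shared_data buffer: low dword followed by a
 * zero high dword here.
 */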

static int
pc_action_set_param(struct xe_guc_pc *pc, u8 id, u32 value)
{
	struct xe_guc_ct *ct = pc_to_ct(pc);
	u32 action[] = {
		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_PARAMETER_SET, 2),
		id,
		value,
	};
	int ret;

	if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING,
			      SLPC_RESET_TIMEOUT_MS))
		return -EAGAIN;

	ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
	if (ret && !(xe_device_wedged(pc_to_xe(pc)) && ret == -ECANCELED))
		xe_gt_err(pc_to_gt(pc), "GuC PC set param[%u]=%u failed: %pe\n",
			  id, value, ERR_PTR(ret));

	return ret;
}

static int pc_action_unset_param(struct xe_guc_pc *pc, u8 id)
{
	struct xe_guc_ct *ct = pc_to_ct(pc);
	u32 action[] = {
		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_PARAMETER_UNSET, 1),
		id,
	};
	int ret;

	if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING,
			      SLPC_RESET_TIMEOUT_MS))
		return -EAGAIN;

	ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
	if (ret && !(xe_device_wedged(pc_to_xe(pc)) && ret == -ECANCELED))
		xe_gt_err(pc_to_gt(pc), "GuC PC unset param failed: %pe\n",
			  ERR_PTR(ret));

	return ret;
}

static int pc_action_setup_gucrc(struct xe_guc_pc *pc, u32 mode)
{
	struct xe_guc_ct *ct = pc_to_ct(pc);
	u32 action[] = {
		GUC_ACTION_HOST2GUC_SETUP_PC_GUCRC,
		mode,
	};
	int ret;

	ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
	if (ret && !(xe_device_wedged(pc_to_xe(pc)) && ret == -ECANCELED))
		xe_gt_err(pc_to_gt(pc), "GuC RC enable mode=%u failed: %pe\n",
			  mode, ERR_PTR(ret));
	return ret;
}

static u32 decode_freq(u32 raw)
{
	return DIV_ROUND_CLOSEST(raw * GT_FREQUENCY_MULTIPLIER,
				 GT_FREQUENCY_SCALER);
}

static u32 encode_freq(u32 freq)
{
	return DIV_ROUND_CLOSEST(freq * GT_FREQUENCY_SCALER,
				 GT_FREQUENCY_MULTIPLIER);
}

static u32 pc_get_min_freq(struct xe_guc_pc *pc)
{
	u32 freq;

	freq = FIELD_GET(SLPC_MIN_UNSLICE_FREQ_MASK,
			 slpc_shared_data_read(pc, task_state_data.freq));

	return decode_freq(freq);
}

static void pc_set_manual_rp_ctrl(struct xe_guc_pc *pc, bool enable)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 state = enable ? RPSWCTL_ENABLE : RPSWCTL_DISABLE;

	/* Allow/Disallow punit to process software freq requests */
	xe_mmio_write32(&gt->mmio, RP_CONTROL, state);
}

static void pc_set_cur_freq(struct xe_guc_pc *pc, u32 freq)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 rpnswreq;

	pc_set_manual_rp_ctrl(pc, true);

	/* Req freq is in units of 16.66 MHz */
	rpnswreq = REG_FIELD_PREP(REQ_RATIO_MASK, encode_freq(freq));
	xe_mmio_write32(&gt->mmio, RPNSWREQ, rpnswreq);

	/* Sleep for a small time to allow pcode to respond */
	usleep_range(100, 300);

	pc_set_manual_rp_ctrl(pc, false);
}
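
/*
 * Worked example of the ratio encoding above (illustrative): a request of
 * 300 MHz encodes to 300 * 3 / 50 = 18 ratio units, and a raw reading of 18
 * decodes back to 18 * 50 / 3 = 300 MHz. Values that are not a multiple of
 * 50/3 MHz round to the closest representable step via DIV_ROUND_CLOSEST().
 */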

static int pc_set_min_freq(struct xe_guc_pc *pc, u32 freq)
{
	/*
	 * Let's only check for the rpn-rp0 range. If max < min,
	 * min becomes a fixed request.
	 */
	if (freq < pc->rpn_freq || freq > pc->rp0_freq)
		return -EINVAL;

	/*
	 * GuC policy is to elevate minimum frequency to the efficient levels.
	 * Our goal is to have the admin choices respected.
	 */
	pc_action_set_param(pc, SLPC_PARAM_IGNORE_EFFICIENT_FREQUENCY,
			    freq < pc->rpe_freq);

	return pc_action_set_param(pc,
				   SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
				   freq);
}

static int pc_get_max_freq(struct xe_guc_pc *pc)
{
	u32 freq;

	freq = FIELD_GET(SLPC_MAX_UNSLICE_FREQ_MASK,
			 slpc_shared_data_read(pc, task_state_data.freq));

	return decode_freq(freq);
}

static int pc_set_max_freq(struct xe_guc_pc *pc, u32 freq)
{
	/*
	 * Let's only check for the rpn-rp0 range. If max < min,
	 * min becomes a fixed request.
	 * Also, overclocking is not supported.
	 */
	if (freq < pc->rpn_freq || freq > pc->rp0_freq)
		return -EINVAL;

	return pc_action_set_param(pc,
				   SLPC_PARAM_GLOBAL_MAX_GT_UNSLICE_FREQ_MHZ,
				   freq);
}

static void mtl_update_rpa_value(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 reg;

	if (xe_gt_is_media_type(gt))
		reg = xe_mmio_read32(&gt->mmio, MTL_MPA_FREQUENCY);
	else
		reg = xe_mmio_read32(&gt->mmio, MTL_GT_RPA_FREQUENCY);

	pc->rpa_freq = decode_freq(REG_FIELD_GET(MTL_RPA_MASK, reg));
}

static void mtl_update_rpe_value(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 reg;

	if (xe_gt_is_media_type(gt))
		reg = xe_mmio_read32(&gt->mmio, MTL_MPE_FREQUENCY);
	else
		reg = xe_mmio_read32(&gt->mmio, MTL_GT_RPE_FREQUENCY);

	pc->rpe_freq = decode_freq(REG_FIELD_GET(MTL_RPE_MASK, reg));
}

static void tgl_update_rpa_value(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	struct xe_device *xe = gt_to_xe(gt);
	u32 reg;

	/*
	 * For PVC we still need to use fused RP0 as the approximation for RPa.
	 * For platforms other than PVC we get the resolved RPa directly from
	 * PCODE at a different register.
	 */
	if (xe->info.platform == XE_PVC) {
		reg = xe_mmio_read32(&gt->mmio, PVC_RP_STATE_CAP);
		pc->rpa_freq = REG_FIELD_GET(RP0_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
	} else {
		reg = xe_mmio_read32(&gt->mmio, FREQ_INFO_REC);
		pc->rpa_freq = REG_FIELD_GET(RPA_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
	}
}

static void tgl_update_rpe_value(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	struct xe_device *xe = gt_to_xe(gt);
	u32 reg;

	/*
	 * For PVC we still need to use fused RP1 as the approximation for RPe.
	 * For platforms other than PVC we get the resolved RPe directly from
	 * PCODE at a different register.
	 */
	if (xe->info.platform == XE_PVC) {
		reg = xe_mmio_read32(&gt->mmio, PVC_RP_STATE_CAP);
		pc->rpe_freq = REG_FIELD_GET(RP1_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
	} else {
		reg = xe_mmio_read32(&gt->mmio, FREQ_INFO_REC);
		pc->rpe_freq = REG_FIELD_GET(RPE_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
	}
}
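
/*
 * RP terminology used below: RP0 is the fused maximum and RPn the fused
 * minimum frequency, while RPe (efficient) and RPa (achievable) are
 * resolved at runtime by PCODE and may therefore move; that is why their
 * getters re-read the hardware instead of returning a cached value.
 */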

static void pc_update_rp_values(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	struct xe_device *xe = gt_to_xe(gt);

	if (GRAPHICS_VERx100(xe) >= 1270) {
		mtl_update_rpa_value(pc);
		mtl_update_rpe_value(pc);
	} else {
		tgl_update_rpa_value(pc);
		tgl_update_rpe_value(pc);
	}

	/*
	 * RPe is decided at runtime by PCODE. In the rare case where that's
	 * smaller than the fused min, we will trust the PCODE and use that
	 * as our minimum one.
	 */
	pc->rpn_freq = min(pc->rpn_freq, pc->rpe_freq);
}

/**
 * xe_guc_pc_get_act_freq - Get the actual running frequency
 * @pc: The GuC PC
 *
 * Returns: The actual running frequency, which might be 0 if the GT is in a
 * Render-C sleep state (RC6).
 */
u32 xe_guc_pc_get_act_freq(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	struct xe_device *xe = gt_to_xe(gt);
	u32 freq;

	/* When in RC6, actual frequency reported will be 0. */
	if (GRAPHICS_VERx100(xe) >= 1270) {
		freq = xe_mmio_read32(&gt->mmio, MTL_MIRROR_TARGET_WP1);
		freq = REG_FIELD_GET(MTL_CAGF_MASK, freq);
	} else {
		freq = xe_mmio_read32(&gt->mmio, GT_PERF_STATUS);
		freq = REG_FIELD_GET(CAGF_MASK, freq);
	}

	freq = decode_freq(freq);

	return freq;
}

static u32 get_cur_freq(struct xe_gt *gt)
{
	u32 freq;

	freq = xe_mmio_read32(&gt->mmio, RPNSWREQ);
	freq = REG_FIELD_GET(REQ_RATIO_MASK, freq);
	return decode_freq(freq);
}

/**
 * xe_guc_pc_get_cur_freq_fw - With fw held, get requested frequency
 * @pc: The GuC PC
 *
 * Returns: the requested frequency for that GT instance
 */
u32 xe_guc_pc_get_cur_freq_fw(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);

	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);

	return get_cur_freq(gt);
}

/**
 * xe_guc_pc_get_cur_freq - Get the current requested frequency
 * @pc: The GuC PC
 * @freq: A pointer to a u32 where the freq value will be returned
 *
 * Returns: 0 on success,
 *         -ETIMEDOUT if the GT forcewake reference could not be taken.
 */
int xe_guc_pc_get_cur_freq(struct xe_guc_pc *pc, u32 *freq)
{
	struct xe_gt *gt = pc_to_gt(pc);
	unsigned int fw_ref;

	/*
	 * GuC SLPC plays with cur freq request when GuCRC is enabled.
	 * Block RC6 for a more reliable read.
	 */
	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
	if (!xe_force_wake_ref_has_domain(fw_ref, XE_FW_GT)) {
		xe_force_wake_put(gt_to_fw(gt), fw_ref);
		return -ETIMEDOUT;
	}

	*freq = get_cur_freq(gt);

	xe_force_wake_put(gt_to_fw(gt), fw_ref);
	return 0;
}

/**
 * xe_guc_pc_get_rp0_freq - Get the RP0 freq
 * @pc: The GuC PC
 *
 * Returns: RP0 freq.
 */
u32 xe_guc_pc_get_rp0_freq(struct xe_guc_pc *pc)
{
	return pc->rp0_freq;
}

/**
 * xe_guc_pc_get_rpa_freq - Get the RPa freq
 * @pc: The GuC PC
 *
 * Returns: RPa freq.
 */
u32 xe_guc_pc_get_rpa_freq(struct xe_guc_pc *pc)
{
	pc_update_rp_values(pc);

	return pc->rpa_freq;
}

/**
 * xe_guc_pc_get_rpe_freq - Get the RPe freq
 * @pc: The GuC PC
 *
 * Returns: RPe freq.
 */
u32 xe_guc_pc_get_rpe_freq(struct xe_guc_pc *pc)
{
	pc_update_rp_values(pc);

	return pc->rpe_freq;
}

/**
 * xe_guc_pc_get_rpn_freq - Get the RPn freq
 * @pc: The GuC PC
 *
 * Returns: RPn freq.
 */
u32 xe_guc_pc_get_rpn_freq(struct xe_guc_pc *pc)
{
	return pc->rpn_freq;
}

/*
 * Two flavors of the min/max frequency accessors follow: the *_locked
 * variants expect pc->freq_lock to be held already, while the public
 * wrappers take it via guard(mutex) (<linux/cleanup.h>), which releases
 * the lock automatically on every return path.
 */
static int xe_guc_pc_get_min_freq_locked(struct xe_guc_pc *pc, u32 *freq)
{
	int ret;

	lockdep_assert_held(&pc->freq_lock);

	/* Might be in the middle of a gt reset */
	if (!pc->freq_ready)
		return -EAGAIN;

	ret = pc_action_query_task_state(pc);
	if (ret)
		return ret;

	*freq = pc_get_min_freq(pc);

	return 0;
}

/**
 * xe_guc_pc_get_min_freq - Get the min operational frequency
 * @pc: The GuC PC
 * @freq: A pointer to a u32 where the freq value will be returned
 *
 * Returns: 0 on success,
 *         -EAGAIN if GuC PC not ready (likely in middle of a reset).
 */
int xe_guc_pc_get_min_freq(struct xe_guc_pc *pc, u32 *freq)
{
	guard(mutex)(&pc->freq_lock);

	return xe_guc_pc_get_min_freq_locked(pc, freq);
}

static int xe_guc_pc_set_min_freq_locked(struct xe_guc_pc *pc, u32 freq)
{
	int ret;

	lockdep_assert_held(&pc->freq_lock);

	/* Might be in the middle of a gt reset */
	if (!pc->freq_ready)
		return -EAGAIN;

	ret = pc_set_min_freq(pc, freq);
	if (ret)
		return ret;

	pc->user_requested_min = freq;

	return 0;
}

/**
 * xe_guc_pc_set_min_freq - Set the minimal operational frequency
 * @pc: The GuC PC
 * @freq: The selected minimal frequency
 *
 * Returns: 0 on success,
 *         -EAGAIN if GuC PC not ready (likely in middle of a reset),
 *         -EINVAL if value out of bounds.
 */
int xe_guc_pc_set_min_freq(struct xe_guc_pc *pc, u32 freq)
{
	guard(mutex)(&pc->freq_lock);

	return xe_guc_pc_set_min_freq_locked(pc, freq);
}

static int xe_guc_pc_get_max_freq_locked(struct xe_guc_pc *pc, u32 *freq)
{
	int ret;

	lockdep_assert_held(&pc->freq_lock);

	/* Might be in the middle of a gt reset */
	if (!pc->freq_ready)
		return -EAGAIN;

	ret = pc_action_query_task_state(pc);
	if (ret)
		return ret;

	*freq = pc_get_max_freq(pc);

	return 0;
}

/**
 * xe_guc_pc_get_max_freq - Get the maximum operational frequency
 * @pc: The GuC PC
 * @freq: A pointer to a u32 where the freq value will be returned
 *
 * Returns: 0 on success,
 *         -EAGAIN if GuC PC not ready (likely in middle of a reset).
 */
int xe_guc_pc_get_max_freq(struct xe_guc_pc *pc, u32 *freq)
{
	guard(mutex)(&pc->freq_lock);

	return xe_guc_pc_get_max_freq_locked(pc, freq);
}

static int xe_guc_pc_set_max_freq_locked(struct xe_guc_pc *pc, u32 freq)
{
	int ret;

	lockdep_assert_held(&pc->freq_lock);

	/* Might be in the middle of a gt reset */
	if (!pc->freq_ready)
		return -EAGAIN;

	ret = pc_set_max_freq(pc, freq);
	if (ret)
		return ret;

	pc->user_requested_max = freq;

	return 0;
}
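
/*
 * Note: with WA 22019338487 active, a max-freq change can race with the
 * temporary cap applied while an L2 flush is in progress (see
 * xe_guc_pc_apply_flush_freq_limit() below), so the public setter first
 * waits for any in-flight flush limit to be lifted.
 */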

/**
 * xe_guc_pc_set_max_freq - Set the maximum operational frequency
 * @pc: The GuC PC
 * @freq: The selected maximum frequency value
 *
 * Returns: 0 on success,
 *         -EAGAIN if GuC PC not ready (likely in middle of a reset),
 *         -EINVAL if value out of bounds.
 */
int xe_guc_pc_set_max_freq(struct xe_guc_pc *pc, u32 freq)
{
	if (XE_WA(pc_to_gt(pc), 22019338487)) {
		if (wait_for_flush_complete(pc) != 0)
			return -EAGAIN;
	}

	guard(mutex)(&pc->freq_lock);

	return xe_guc_pc_set_max_freq_locked(pc, freq);
}

/**
 * xe_guc_pc_c_status - get the current GT C state
 * @pc: Xe_GuC_PC instance
 *
 * Return: The current GT C state (GT_IDLE_C0, GT_IDLE_C6 or GT_IDLE_UNKNOWN).
 */
enum xe_gt_idle_state xe_guc_pc_c_status(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 reg, gt_c_state;

	if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 1270) {
		reg = xe_mmio_read32(&gt->mmio, MTL_MIRROR_TARGET_WP1);
		gt_c_state = REG_FIELD_GET(MTL_CC_MASK, reg);
	} else {
		reg = xe_mmio_read32(&gt->mmio, GT_CORE_STATUS);
		gt_c_state = REG_FIELD_GET(RCN_MASK, reg);
	}

	switch (gt_c_state) {
	case GT_C6:
		return GT_IDLE_C6;
	case GT_C0:
		return GT_IDLE_C0;
	default:
		return GT_IDLE_UNKNOWN;
	}
}

/**
 * xe_guc_pc_rc6_residency - rc6 residency counter
 * @pc: Xe_GuC_PC instance
 *
 * Return: The current value of the RC6 residency counter.
 */
u64 xe_guc_pc_rc6_residency(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 reg;

	reg = xe_mmio_read32(&gt->mmio, GT_GFX_RC6);

	return reg;
}

/**
 * xe_guc_pc_mc6_residency - mc6 residency counter
 * @pc: Xe_GuC_PC instance
 *
 * Return: The current value of the MC6 residency counter.
 */
u64 xe_guc_pc_mc6_residency(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u64 reg;

	reg = xe_mmio_read32(&gt->mmio, MTL_MEDIA_MC6);

	return reg;
}

static void mtl_init_fused_rp_values(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 reg;

	xe_device_assert_mem_access(pc_to_xe(pc));

	if (xe_gt_is_media_type(gt))
		reg = xe_mmio_read32(&gt->mmio, MTL_MEDIAP_STATE_CAP);
	else
		reg = xe_mmio_read32(&gt->mmio, MTL_RP_STATE_CAP);

	pc->rp0_freq = decode_freq(REG_FIELD_GET(MTL_RP0_CAP_MASK, reg));

	pc->rpn_freq = decode_freq(REG_FIELD_GET(MTL_RPN_CAP_MASK, reg));
}

static void tgl_init_fused_rp_values(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	struct xe_device *xe = gt_to_xe(gt);
	u32 reg;

	xe_device_assert_mem_access(pc_to_xe(pc));

	if (xe->info.platform == XE_PVC)
		reg = xe_mmio_read32(&gt->mmio, PVC_RP_STATE_CAP);
	else
		reg = xe_mmio_read32(&gt->mmio, RP_STATE_CAP);

	pc->rp0_freq = REG_FIELD_GET(RP0_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
	pc->rpn_freq = REG_FIELD_GET(RPN_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
}

static void pc_init_fused_rp_values(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	struct xe_device *xe = gt_to_xe(gt);

	if (GRAPHICS_VERx100(xe) >= 1270)
		mtl_init_fused_rp_values(pc);
	else
		tgl_init_fused_rp_values(pc);
}

static u32 pc_max_freq_cap(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);

	if (XE_WA(gt, 22019338487)) {
		if (xe_gt_is_media_type(gt))
			return min(LNL_MERT_FREQ_CAP, pc->rp0_freq);
		else
			return min(BMG_MERT_FREQ_CAP, pc->rp0_freq);
	} else {
		return pc->rp0_freq;
	}
}

/**
 * xe_guc_pc_raise_unslice - Initialize RPx values and request a higher GT
 * frequency to allow faster GuC load times
 * @pc: Xe_GuC_PC instance
 */
void xe_guc_pc_raise_unslice(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);

	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
	pc_set_cur_freq(pc, pc_max_freq_cap(pc));
}

/**
 * xe_guc_pc_init_early - Initialize RPx values
 * @pc: Xe_GuC_PC instance
 */
void xe_guc_pc_init_early(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);

	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
	pc_init_fused_rp_values(pc);
}

static int pc_adjust_freq_bounds(struct xe_guc_pc *pc)
{
	struct xe_tile *tile = gt_to_tile(pc_to_gt(pc));
	int ret;

	lockdep_assert_held(&pc->freq_lock);

	ret = pc_action_query_task_state(pc);
	if (ret)
		goto out;

	/*
	 * GuC defaults to some RPmax that is not actually achievable without
	 * overclocking. Let's adjust it to the Hardware RP0, which is the
	 * regular maximum.
	 */
	if (pc_get_max_freq(pc) > pc->rp0_freq) {
		ret = pc_set_max_freq(pc, pc->rp0_freq);
		if (ret)
			goto out;
	}

	/*
	 * The same thing happens on server platforms, where min is listed as
	 * RPMax.
	 */
	if (pc_get_min_freq(pc) > pc->rp0_freq)
		ret = pc_set_min_freq(pc, pc->rp0_freq);

	if (XE_WA(tile->primary_gt, 14022085890))
		ret = pc_set_min_freq(pc, max(BMG_MIN_FREQ, pc_get_min_freq(pc)));

out:
	return ret;
}

static int pc_adjust_requested_freq(struct xe_guc_pc *pc)
{
	int ret = 0;

	lockdep_assert_held(&pc->freq_lock);

	if (pc->user_requested_min != 0) {
		ret = pc_set_min_freq(pc, pc->user_requested_min);
		if (ret)
			return ret;
	}

	if (pc->user_requested_max != 0) {
		ret = pc_set_max_freq(pc, pc->user_requested_max);
		if (ret)
			return ret;
	}

	return ret;
}

static bool needs_flush_freq_limit(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);

	return XE_WA(gt, 22019338487) &&
	       pc->rp0_freq > BMG_MERT_FLUSH_FREQ_CAP;
}

/**
 * xe_guc_pc_apply_flush_freq_limit() - Limit max GT freq during L2 flush
 * @pc: the xe_guc_pc object
 *
 * As per the WA, reduce max GT frequency during L2 cache flush
 */
void xe_guc_pc_apply_flush_freq_limit(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	u32 max_freq;
	int ret;

	if (!needs_flush_freq_limit(pc))
		return;

	guard(mutex)(&pc->freq_lock);

	ret = xe_guc_pc_get_max_freq_locked(pc, &max_freq);
	if (!ret && max_freq > BMG_MERT_FLUSH_FREQ_CAP) {
		ret = pc_set_max_freq(pc, BMG_MERT_FLUSH_FREQ_CAP);
		if (ret) {
			xe_gt_err_once(gt, "Failed to cap max freq on flush to %u, %pe\n",
				       BMG_MERT_FLUSH_FREQ_CAP, ERR_PTR(ret));
			return;
		}

		atomic_set(&pc->flush_freq_limit, 1);

		/*
		 * If user has previously changed max freq, stash that value to
		 * restore later, otherwise use the current max. New user
		 * requests wait on flush.
		 */
		if (pc->user_requested_max != 0)
			pc->stashed_max_freq = pc->user_requested_max;
		else
			pc->stashed_max_freq = max_freq;
	}

	/*
	 * Wait for actual freq to go below the flush cap: even if the previous
	 * max was below cap, the current one might still be above it.
	 */
	ret = wait_for_act_freq_limit(pc, BMG_MERT_FLUSH_FREQ_CAP);
	if (ret)
		xe_gt_err_once(gt, "Actual freq did not reduce to %u, %pe\n",
			       BMG_MERT_FLUSH_FREQ_CAP, ERR_PTR(ret));
}
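
/*
 * Expected pairing at an L2 flush site, per the WA handling above
 * (illustrative sketch only):
 *
 *	xe_guc_pc_apply_flush_freq_limit(pc);
 *	... perform the L2 cache flush ...
 *	xe_guc_pc_remove_flush_freq_limit(pc);
 */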

/**
 * xe_guc_pc_remove_flush_freq_limit() - Remove max GT freq limit after L2 flush completes.
 * @pc: the xe_guc_pc object
 *
 * Restore the GT max frequency that was in place before the flush cap was
 * applied.
 */
void xe_guc_pc_remove_flush_freq_limit(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	int ret = 0;

	if (!needs_flush_freq_limit(pc))
		return;

	if (!atomic_read(&pc->flush_freq_limit))
		return;

	mutex_lock(&pc->freq_lock);

	ret = pc_set_max_freq(pc, pc->stashed_max_freq);
	if (ret)
		xe_gt_err_once(gt, "Failed to restore max freq %u:%d\n",
			       pc->stashed_max_freq, ret);

	atomic_set(&pc->flush_freq_limit, 0);
	mutex_unlock(&pc->freq_lock);
	wake_up_var(&pc->flush_freq_limit);
}

static int pc_set_mert_freq_cap(struct xe_guc_pc *pc)
{
	int ret;

	if (!XE_WA(pc_to_gt(pc), 22019338487))
		return 0;

	guard(mutex)(&pc->freq_lock);

	/*
	 * Get updated min/max and stash them.
	 */
	ret = xe_guc_pc_get_min_freq_locked(pc, &pc->stashed_min_freq);
	if (!ret)
		ret = xe_guc_pc_get_max_freq_locked(pc, &pc->stashed_max_freq);
	if (ret)
		return ret;

	/*
	 * Ensure min and max are bound by MERT_FREQ_CAP until driver loads.
	 */
	ret = pc_set_min_freq(pc, min(pc->rpe_freq, pc_max_freq_cap(pc)));
	if (!ret)
		ret = pc_set_max_freq(pc, min(pc->rp0_freq, pc_max_freq_cap(pc)));

	return ret;
}

/**
 * xe_guc_pc_restore_stashed_freq - Set min/max back to stashed values
 * @pc: The GuC PC
 *
 * Returns: 0 on success,
 *         error code on failure
 */
int xe_guc_pc_restore_stashed_freq(struct xe_guc_pc *pc)
{
	int ret = 0;

	if (IS_SRIOV_VF(pc_to_xe(pc)) || pc_to_xe(pc)->info.skip_guc_pc)
		return 0;

	mutex_lock(&pc->freq_lock);
	ret = pc_set_max_freq(pc, pc->stashed_max_freq);
	if (!ret)
		ret = pc_set_min_freq(pc, pc->stashed_min_freq);
	mutex_unlock(&pc->freq_lock);

	return ret;
}

/**
 * xe_guc_pc_gucrc_disable - Disable GuC RC
 * @pc: Xe_GuC_PC instance
 *
 * Disables GuC RC by taking control of RC6 back from GuC.
 *
 * Return: 0 on success, negative error code on error.
 */
int xe_guc_pc_gucrc_disable(struct xe_guc_pc *pc)
{
	struct xe_device *xe = pc_to_xe(pc);
	struct xe_gt *gt = pc_to_gt(pc);
	unsigned int fw_ref;
	int ret = 0;

	if (xe->info.skip_guc_pc)
		return 0;

	ret = pc_action_setup_gucrc(pc, GUCRC_HOST_CONTROL);
	if (ret)
		return ret;

	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) {
		xe_force_wake_put(gt_to_fw(gt), fw_ref);
		return -ETIMEDOUT;
	}

	xe_gt_idle_disable_c6(gt);

	xe_force_wake_put(gt_to_fw(gt), fw_ref);

	return 0;
}
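
/*
 * GuC RC control either stays with the firmware (GUCRC_FIRMWARE_CONTROL,
 * the default set up in xe_guc_pc_start()) or is taken back by the host
 * (GUCRC_HOST_CONTROL, as in xe_guc_pc_gucrc_disable() above). The helpers
 * below additionally allow overriding and restoring the RC power-gating
 * behaviour through the SLPC_PARAM_PWRGATE_RC_MODE parameter.
 */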

/**
 * xe_guc_pc_override_gucrc_mode - override GUCRC mode
 * @pc: Xe_GuC_PC instance
 * @mode: new value of the GUCRC mode
 *
 * Return: 0 on success, negative error code on error
 */
int xe_guc_pc_override_gucrc_mode(struct xe_guc_pc *pc, enum slpc_gucrc_mode mode)
{
	int ret;

	xe_pm_runtime_get(pc_to_xe(pc));
	ret = pc_action_set_param(pc, SLPC_PARAM_PWRGATE_RC_MODE, mode);
	xe_pm_runtime_put(pc_to_xe(pc));

	return ret;
}

/**
 * xe_guc_pc_unset_gucrc_mode - unset GUCRC mode override
 * @pc: Xe_GuC_PC instance
 *
 * Return: 0 on success, negative error code on error
 */
int xe_guc_pc_unset_gucrc_mode(struct xe_guc_pc *pc)
{
	int ret;

	xe_pm_runtime_get(pc_to_xe(pc));
	ret = pc_action_unset_param(pc, SLPC_PARAM_PWRGATE_RC_MODE);
	xe_pm_runtime_put(pc_to_xe(pc));

	return ret;
}

static void pc_init_pcode_freq(struct xe_guc_pc *pc)
{
	u32 min = DIV_ROUND_CLOSEST(pc->rpn_freq, GT_FREQUENCY_MULTIPLIER);
	u32 max = DIV_ROUND_CLOSEST(pc->rp0_freq, GT_FREQUENCY_MULTIPLIER);

	XE_WARN_ON(xe_pcode_init_min_freq_table(gt_to_tile(pc_to_gt(pc)), min, max));
}

static int pc_init_freqs(struct xe_guc_pc *pc)
{
	int ret;

	mutex_lock(&pc->freq_lock);

	ret = pc_adjust_freq_bounds(pc);
	if (ret)
		goto out;

	ret = pc_adjust_requested_freq(pc);
	if (ret)
		goto out;

	pc_update_rp_values(pc);

	pc_init_pcode_freq(pc);

	/*
	 * The frequencies are really ready for use only after the user
	 * requested ones got restored.
	 */
	pc->freq_ready = true;

out:
	mutex_unlock(&pc->freq_lock);
	return ret;
}

static int pc_action_set_strategy(struct xe_guc_pc *pc, u32 val)
{
	int ret = 0;

	ret = pc_action_set_param(pc,
				  SLPC_PARAM_STRATEGIES,
				  val);

	return ret;
}

/**
 * xe_guc_pc_start - Start GuC's Power Conservation component
 * @pc: Xe_GuC_PC instance
 */
int xe_guc_pc_start(struct xe_guc_pc *pc)
{
	struct xe_device *xe = pc_to_xe(pc);
	struct xe_gt *gt = pc_to_gt(pc);
	u32 size = PAGE_ALIGN(sizeof(struct slpc_shared_data));
	unsigned int fw_ref;
	ktime_t earlier;
	int ret;

	xe_gt_assert(gt, xe_device_uc_enabled(xe));

	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
	if (!xe_force_wake_ref_has_domain(fw_ref, XE_FW_GT)) {
		xe_force_wake_put(gt_to_fw(gt), fw_ref);
		return -ETIMEDOUT;
	}

	if (xe->info.skip_guc_pc) {
		if (xe->info.platform != XE_PVC)
			xe_gt_idle_enable_c6(gt);

		/* Request max possible since dynamic freq mgmt is not enabled */
		pc_set_cur_freq(pc, UINT_MAX);

		ret = 0;
		goto out;
	}

	xe_map_memset(xe, &pc->bo->vmap, 0, 0, size);
	slpc_shared_data_write(pc, header.size, size);

	earlier = ktime_get();
	ret = pc_action_reset(pc);
	if (ret)
		goto out;

	if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING,
			      SLPC_RESET_TIMEOUT_MS)) {
		xe_gt_warn(gt, "GuC PC start taking longer than normal [freq = %dMHz (req = %dMHz), perf_limit_reasons = 0x%08X]\n",
			   xe_guc_pc_get_act_freq(pc), get_cur_freq(gt),
			   xe_gt_throttle_get_limit_reasons(gt));

		/* Retry with an extended timeout before giving up entirely */
		if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING,
				      SLPC_RESET_EXTENDED_TIMEOUT_MS)) {
			xe_gt_err(gt, "GuC PC Start failed: Dynamic GT frequency control and GT sleep states are now disabled.\n");
			ret = -EIO;
			goto out;
		}

		xe_gt_warn(gt, "GuC PC excessive start time: %lldms",
			   ktime_ms_delta(ktime_get(), earlier));
	}

	ret = pc_init_freqs(pc);
	if (ret)
		goto out;

	ret = pc_set_mert_freq_cap(pc);
	if (ret)
		goto out;

	if (xe->info.platform == XE_PVC) {
		xe_guc_pc_gucrc_disable(pc);
		ret = 0;
		goto out;
	}

	ret = pc_action_setup_gucrc(pc, GUCRC_FIRMWARE_CONTROL);
	if (ret)
		goto out;

	/* Enable SLPC Optimized Strategy for compute */
	ret = pc_action_set_strategy(pc, SLPC_OPTIMIZED_STRATEGY_COMPUTE);

out:
	xe_force_wake_put(gt_to_fw(gt), fw_ref);
	return ret;
}

/**
 * xe_guc_pc_stop - Stop GuC's Power Conservation component
 * @pc: Xe_GuC_PC instance
 */
int xe_guc_pc_stop(struct xe_guc_pc *pc)
{
	struct xe_device *xe = pc_to_xe(pc);

	if (xe->info.skip_guc_pc) {
		xe_gt_idle_disable_c6(pc_to_gt(pc));
		return 0;
	}

	mutex_lock(&pc->freq_lock);
	pc->freq_ready = false;
	mutex_unlock(&pc->freq_lock);

	return 0;
}

/**
 * xe_guc_pc_fini_hw - Finalize GuC's Power Conservation component
 * @arg: opaque pointer that should point to Xe_GuC_PC instance
 */
static void xe_guc_pc_fini_hw(void *arg)
{
	struct xe_guc_pc *pc = arg;
	struct xe_device *xe = pc_to_xe(pc);
	unsigned int fw_ref;

	if (xe_device_wedged(xe))
		return;

	fw_ref = xe_force_wake_get(gt_to_fw(pc_to_gt(pc)), XE_FORCEWAKE_ALL);
	xe_guc_pc_gucrc_disable(pc);
	XE_WARN_ON(xe_guc_pc_stop(pc));

	/* Bind requested freq to mert_freq_cap before unload */
	pc_set_cur_freq(pc, min(pc_max_freq_cap(pc), pc->rpe_freq));

	xe_force_wake_put(gt_to_fw(pc_to_gt(pc)), fw_ref);
}

/**
 * xe_guc_pc_init - Initialize GuC's Power Conservation component
 * @pc: Xe_GuC_PC instance
 */
int xe_guc_pc_init(struct xe_guc_pc *pc)
{
	struct xe_gt *gt = pc_to_gt(pc);
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_bo *bo;
	u32 size = PAGE_ALIGN(sizeof(struct slpc_shared_data));
	int err;

	if (xe->info.skip_guc_pc)
		return 0;

	err = drmm_mutex_init(&xe->drm, &pc->freq_lock);
	if (err)
		return err;

	bo = xe_managed_bo_create_pin_map(xe, tile, size,
					  XE_BO_FLAG_VRAM_IF_DGFX(tile) |
					  XE_BO_FLAG_GGTT |
					  XE_BO_FLAG_GGTT_INVALIDATE |
					  XE_BO_FLAG_PINNED_NORESTORE);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	pc->bo = bo;

	return devm_add_action_or_reset(xe->drm.dev, xe_guc_pc_fini_hw, pc);
}

static const char *pc_get_state_string(struct xe_guc_pc *pc)
{
	switch (slpc_shared_data_read(pc, header.global_state)) {
	case SLPC_GLOBAL_STATE_NOT_RUNNING:
		return "not running";
	case SLPC_GLOBAL_STATE_INITIALIZING:
		return "initializing";
	case SLPC_GLOBAL_STATE_RESETTING:
		return "resetting";
	case SLPC_GLOBAL_STATE_RUNNING:
		return "running";
	case SLPC_GLOBAL_STATE_SHUTTING_DOWN:
		return "shutting down";
	case SLPC_GLOBAL_STATE_ERROR:
		return "error";
	default:
		return "unknown";
	}
}
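
/*
 * Illustrative only: xe_guc_pc_print() below can be driven from any debug
 * path that can provide a drm_printer, e.g.:
 *
 *	struct drm_printer p = drm_info_printer(xe->drm.dev);
 *
 *	xe_guc_pc_print(&gt->uc.guc.pc, &p);
 */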

/**
 * xe_guc_pc_print - Print GuC's Power Conservation information for debug
 * @pc: Xe_GuC_PC instance
 * @p: drm_printer
 */
void xe_guc_pc_print(struct xe_guc_pc *pc, struct drm_printer *p)
{
	drm_printf(p, "SLPC Shared Data Header:\n");
	drm_printf(p, "\tSize: %x\n", slpc_shared_data_read(pc, header.size));
	drm_printf(p, "\tGlobal State: %s\n", pc_get_state_string(pc));

	if (pc_action_query_task_state(pc))
		return;

	drm_printf(p, "\nSLPC Tasks Status:\n");
	drm_printf(p, "\tGTPERF enabled: %s\n",
		   str_yes_no(slpc_shared_data_read(pc, task_state_data.status) &
			      SLPC_GTPERF_TASK_ENABLED));
	drm_printf(p, "\tDCC enabled: %s\n",
		   str_yes_no(slpc_shared_data_read(pc, task_state_data.status) &
			      SLPC_DCC_TASK_ENABLED));
	drm_printf(p, "\tDCC in use: %s\n",
		   str_yes_no(slpc_shared_data_read(pc, task_state_data.status) &
			      SLPC_IN_DCC));
	drm_printf(p, "\tBalancer enabled: %s\n",
		   str_yes_no(slpc_shared_data_read(pc, task_state_data.status) &
			      SLPC_BALANCER_ENABLED));
	drm_printf(p, "\tIBC enabled: %s\n",
		   str_yes_no(slpc_shared_data_read(pc, task_state_data.status) &
			      SLPC_IBC_TASK_ENABLED));
	drm_printf(p, "\tBalancer IA LMT enabled: %s\n",
		   str_yes_no(slpc_shared_data_read(pc, task_state_data.status) &
			      SLPC_BALANCER_IA_LMT_ENABLED));
	drm_printf(p, "\tBalancer IA LMT active: %s\n",
		   str_yes_no(slpc_shared_data_read(pc, task_state_data.status) &
			      SLPC_BALANCER_IA_LMT_ACTIVE));
}