// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_print.h>

#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_bw.h"
#include "intel_crtc.h"
#include "intel_display_core.h"
#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_display_utils.h"
#include "intel_dram.h"
#include "intel_mchbar_regs.h"
#include "intel_pcode.h"
#include "intel_uncore.h"
#include "skl_watermark.h"

struct intel_bw_state {
	struct intel_global_state base;

	/*
	 * Contains a bit mask used to determine whether the
	 * corresponding pipe allows SAGV or not.
	 */
	u8 pipe_sagv_reject;

	/* bitmask of active pipes */
	u8 active_pipes;

	/*
	 * From MTL onwards, to lock a QGV point, punit expects the peak BW of
	 * the selected QGV point as the parameter in multiples of 100 MB/s
	 */
	u16 qgv_point_peakbw;

	/*
	 * Current QGV points mask, which restricts
	 * some particular SAGV states; not to be confused
	 * with pipe_sagv_reject.
	 */
	u16 qgv_points_mask;

	unsigned int data_rate[I915_MAX_PIPES];
	u8 num_active_planes[I915_MAX_PIPES];
};

/* Parameters for Qclk Geyserville (QGV) */
struct intel_qgv_point {
	u16 dclk, t_rp, t_rdpre, t_rc, t_ras, t_rcd;
};

#define DEPROGBWPCLIMIT		60

struct intel_psf_gv_point {
	u8 clk; /* clock in multiples of 16.6666 MHz */
};

struct intel_qgv_info {
	struct intel_qgv_point points[I915_NUM_QGV_POINTS];
	struct intel_psf_gv_point psf_points[I915_NUM_PSF_GV_POINTS];
	u8 num_points;
	u8 num_psf_points;
	u8 t_bl;
	u8 max_numchannels;
	u8 channel_width;
	u8 deinterleave;
};

static int dg1_mchbar_read_qgv_point_info(struct intel_display *display,
					  struct intel_qgv_point *sp,
					  int point)
{
	struct drm_i915_private *i915 = to_i915(display->drm);
	u32 dclk_ratio, dclk_reference;
	u32 val;

	val = intel_uncore_read(&i915->uncore, SA_PERF_STATUS_0_0_0_MCHBAR_PC);
	dclk_ratio = REG_FIELD_GET(DG1_QCLK_RATIO_MASK, val);
	if (val & DG1_QCLK_REFERENCE)
		dclk_reference = 6; /* 6 * 16.666 MHz = 100 MHz */
	else
		dclk_reference = 8; /* 8 * 16.666 MHz = 133 MHz */
	sp->dclk = DIV_ROUND_UP((16667 * dclk_ratio * dclk_reference) + 500, 1000);
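
	/*
	 * Worked example (illustrative numbers, not from Bspec): with
	 * dclk_ratio = 12 and a 100 MHz reference (dclk_reference = 6),
	 * 16667 * 12 * 6 + 500 = 1200524, so dclk = 1201 MHz; the gear 2
	 * check below would then double that to 2402 MHz.
	 */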

	val = intel_uncore_read(&i915->uncore, SKL_MC_BIOS_DATA_0_0_0_MCHBAR_PCU);
	if (val & DG1_GEAR_TYPE)
		sp->dclk *= 2;

	if (sp->dclk == 0)
		return -EINVAL;

	val = intel_uncore_read(&i915->uncore, MCHBAR_CH0_CR_TC_PRE_0_0_0_MCHBAR);
	sp->t_rp = REG_FIELD_GET(DG1_DRAM_T_RP_MASK, val);
	sp->t_rdpre = REG_FIELD_GET(DG1_DRAM_T_RDPRE_MASK, val);

	val = intel_uncore_read(&i915->uncore, MCHBAR_CH0_CR_TC_PRE_0_0_0_MCHBAR_HIGH);
	sp->t_rcd = REG_FIELD_GET(DG1_DRAM_T_RCD_MASK, val);
	sp->t_ras = REG_FIELD_GET(DG1_DRAM_T_RAS_MASK, val);

	sp->t_rc = sp->t_rp + sp->t_ras;

	return 0;
}

static int icl_pcode_read_qgv_point_info(struct intel_display *display,
					 struct intel_qgv_point *sp,
					 int point)
{
	u32 val = 0, val2 = 0;
	u16 dclk;
	int ret;

	ret = intel_pcode_read(display->drm, ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
			       ICL_PCODE_MEM_SS_READ_QGV_POINT_INFO(point),
			       &val, &val2);
	if (ret)
		return ret;

	dclk = val & 0xffff;
	sp->dclk = DIV_ROUND_UP((16667 * dclk) + (DISPLAY_VER(display) >= 12 ? 500 : 0),
				1000);
	sp->t_rp = (val & 0xff0000) >> 16;
	sp->t_rcd = (val & 0xff000000) >> 24;

	sp->t_rdpre = val2 & 0xff;
	sp->t_ras = (val2 & 0xff00) >> 8;

	sp->t_rc = sp->t_rp + sp->t_ras;

	return 0;
}

static int adls_pcode_read_psf_gv_point_info(struct intel_display *display,
					     struct intel_psf_gv_point *points)
{
	u32 val = 0;
	int ret;
	int i;

	ret = intel_pcode_read(display->drm, ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
			       ADL_PCODE_MEM_SS_READ_PSF_GV_INFO, &val, NULL);
	if (ret)
		return ret;

	for (i = 0; i < I915_NUM_PSF_GV_POINTS; i++) {
		points[i].clk = val & 0xff;
		val >>= 8;
	}

	return 0;
}

static u16 icl_qgv_points_mask(struct intel_display *display)
{
	unsigned int num_psf_gv_points = display->bw.max[0].num_psf_gv_points;
	unsigned int num_qgv_points = display->bw.max[0].num_qgv_points;
	u16 qgv_points = 0, psf_points = 0;

	/*
	 * We cannot use the whole ADLS_QGV_PT_MASK here, as PCode rejects
	 * the request if we try to mask any unadvertised points.
	 * So we must operate only on the points returned by PCode.
	 */
	if (num_qgv_points > 0)
		qgv_points = GENMASK(num_qgv_points - 1, 0);

	if (num_psf_gv_points > 0)
		psf_points = GENMASK(num_psf_gv_points - 1, 0);

	return ICL_PCODE_REQ_QGV_PT(qgv_points) | ADLS_PCODE_REQ_PSF_PT(psf_points);
}
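
/*
 * Illustrative example (point counts are hypothetical): with 8 advertised
 * QGV points and 3 PSF GV points, the mask covers QGV bits 0xff and PSF
 * bits 0x7, each placed in its request field by the REQ_* macros above.
 * is_sagv_enabled() below relies on this encoding: SAGV is considered
 * disabled only when exactly one advertised QGV point remains unmasked,
 * i.e. the unmasked set is a power of two.
 */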

static bool is_sagv_enabled(struct intel_display *display, u16 points_mask)
{
	return !is_power_of_2(~points_mask & icl_qgv_points_mask(display) &
			      ICL_PCODE_REQ_QGV_PT_MASK);
}

static int icl_pcode_restrict_qgv_points(struct intel_display *display,
					 u32 points_mask)
{
	int ret;

	if (DISPLAY_VER(display) >= 14)
		return 0;

	/* bspec says to keep retrying for at least 1 ms */
	ret = intel_pcode_request(display->drm, ICL_PCODE_SAGV_DE_MEM_SS_CONFIG,
				  points_mask,
				  ICL_PCODE_REP_QGV_MASK | ADLS_PCODE_REP_PSF_MASK,
				  ICL_PCODE_REP_QGV_SAFE | ADLS_PCODE_REP_PSF_SAFE,
				  1);

	if (ret < 0) {
		drm_err(display->drm,
			"Failed to disable qgv points (0x%x) points: 0x%x\n",
			ret, points_mask);
		return ret;
	}

	display->sagv.status = is_sagv_enabled(display, points_mask) ?
		I915_SAGV_ENABLED : I915_SAGV_DISABLED;

	return 0;
}

static int mtl_read_qgv_point_info(struct intel_display *display,
				   struct intel_qgv_point *sp, int point)
{
	struct drm_i915_private *i915 = to_i915(display->drm);
	u32 val, val2;
	u16 dclk;

	val = intel_uncore_read(&i915->uncore,
				MTL_MEM_SS_INFO_QGV_POINT_LOW(point));
	val2 = intel_uncore_read(&i915->uncore,
				 MTL_MEM_SS_INFO_QGV_POINT_HIGH(point));
	dclk = REG_FIELD_GET(MTL_DCLK_MASK, val);
	sp->dclk = DIV_ROUND_CLOSEST(16667 * dclk, 1000);
	sp->t_rp = REG_FIELD_GET(MTL_TRP_MASK, val);
	sp->t_rcd = REG_FIELD_GET(MTL_TRCD_MASK, val);

	sp->t_rdpre = REG_FIELD_GET(MTL_TRDPRE_MASK, val2);
	sp->t_ras = REG_FIELD_GET(MTL_TRAS_MASK, val2);

	sp->t_rc = sp->t_rp + sp->t_ras;

	return 0;
}
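
/*
 * The dclk field above is in units of 16.667 MHz and is converted to MHz;
 * e.g. a (hypothetical) raw value of 120 gives
 * DIV_ROUND_CLOSEST(16667 * 120, 1000) = 2000 MHz.
 */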

static int
intel_read_qgv_point_info(struct intel_display *display,
			  struct intel_qgv_point *sp,
			  int point)
{
	if (DISPLAY_VER(display) >= 14)
		return mtl_read_qgv_point_info(display, sp, point);
	else if (display->platform.dg1)
		return dg1_mchbar_read_qgv_point_info(display, sp, point);
	else
		return icl_pcode_read_qgv_point_info(display, sp, point);
}

static int icl_get_qgv_points(struct intel_display *display,
			      const struct dram_info *dram_info,
			      struct intel_qgv_info *qi,
			      bool is_y_tile)
{
	int i, ret;

	qi->num_points = dram_info->num_qgv_points;
	qi->num_psf_points = dram_info->num_psf_gv_points;

	if (DISPLAY_VER(display) >= 14) {
		switch (dram_info->type) {
		case INTEL_DRAM_DDR4:
			qi->t_bl = 4;
			qi->max_numchannels = 2;
			qi->channel_width = 64;
			qi->deinterleave = 2;
			break;
		case INTEL_DRAM_DDR5:
			qi->t_bl = 8;
			qi->max_numchannels = 4;
			qi->channel_width = 32;
			qi->deinterleave = 2;
			break;
		case INTEL_DRAM_LPDDR4:
		case INTEL_DRAM_LPDDR5:
			qi->t_bl = 16;
			qi->max_numchannels = 8;
			qi->channel_width = 16;
			qi->deinterleave = 4;
			break;
		case INTEL_DRAM_GDDR:
		case INTEL_DRAM_GDDR_ECC:
			qi->channel_width = 32;
			break;
		default:
			MISSING_CASE(dram_info->type);
			return -EINVAL;
		}
	} else if (DISPLAY_VER(display) >= 12) {
		switch (dram_info->type) {
		case INTEL_DRAM_DDR4:
			qi->t_bl = is_y_tile ? 8 : 4;
			qi->max_numchannels = 2;
			qi->channel_width = 64;
			qi->deinterleave = is_y_tile ? 1 : 2;
			break;
		case INTEL_DRAM_DDR5:
			qi->t_bl = is_y_tile ? 16 : 8;
			qi->max_numchannels = 4;
			qi->channel_width = 32;
			qi->deinterleave = is_y_tile ? 1 : 2;
			break;
		case INTEL_DRAM_LPDDR4:
			if (display->platform.rocketlake) {
				qi->t_bl = 8;
				qi->max_numchannels = 4;
				qi->channel_width = 32;
				qi->deinterleave = 2;
				break;
			}
			fallthrough;
		case INTEL_DRAM_LPDDR5:
			qi->t_bl = 16;
			qi->max_numchannels = 8;
			qi->channel_width = 16;
			qi->deinterleave = is_y_tile ? 2 : 4;
			break;
		default:
			qi->t_bl = 16;
			qi->max_numchannels = 1;
			break;
		}
	} else if (DISPLAY_VER(display) == 11) {
		qi->t_bl = dram_info->type == INTEL_DRAM_DDR4 ? 4 : 8;
		qi->max_numchannels = 1;
	}

	if (drm_WARN_ON(display->drm,
			qi->num_points > ARRAY_SIZE(qi->points)))
		qi->num_points = ARRAY_SIZE(qi->points);

	for (i = 0; i < qi->num_points; i++) {
		struct intel_qgv_point *sp = &qi->points[i];

		ret = intel_read_qgv_point_info(display, sp, i);
		if (ret) {
			drm_dbg_kms(display->drm, "Could not read QGV %d info\n", i);
			return ret;
		}

		drm_dbg_kms(display->drm,
			    "QGV %d: DCLK=%d tRP=%d tRDPRE=%d tRAS=%d tRCD=%d tRC=%d\n",
			    i, sp->dclk, sp->t_rp, sp->t_rdpre, sp->t_ras,
			    sp->t_rcd, sp->t_rc);
	}

	if (qi->num_psf_points > 0) {
		ret = adls_pcode_read_psf_gv_point_info(display, qi->psf_points);
		if (ret) {
			drm_err(display->drm, "Failed to read PSF point data; PSF points will not be considered in bandwidth calculations.\n");
			qi->num_psf_points = 0;
		}

		for (i = 0; i < qi->num_psf_points; i++)
			drm_dbg_kms(display->drm,
				    "PSF GV %d: CLK=%d\n",
				    i, qi->psf_points[i].clk);
	}

	return 0;
}

static int adl_calc_psf_bw(int clk)
{
	/*
	 * clk is in multiples of 16.666 MHz (100/6).
	 * According to BSpec, PSF GV bandwidth is
	 * calculated as BW = 64 * clk * 16.666 MHz.
	 */
	return DIV_ROUND_CLOSEST(64 * clk * 100, 6);
}
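
/*
 * E.g. a (hypothetical) PSF GV clock code of 62 corresponds to roughly
 * 62 * 16.666 ≈ 1033 MHz, giving DIV_ROUND_CLOSEST(64 * 62 * 100, 6) =
 * 66133 MB/s of PSF bandwidth.
 */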

static int icl_sagv_max_dclk(const struct intel_qgv_info *qi)
{
	u16 dclk = 0;
	int i;

	for (i = 0; i < qi->num_points; i++)
		dclk = max(dclk, qi->points[i].dclk);

	return dclk;
}

struct intel_sa_info {
	u16 displayrtids;
	u8 deburst, deprogbwlimit, derating;
};

static const struct intel_sa_info icl_sa_info = {
	.deburst = 8,
	.deprogbwlimit = 25, /* GB/s */
	.displayrtids = 128,
	.derating = 10,
};

static const struct intel_sa_info tgl_sa_info = {
	.deburst = 16,
	.deprogbwlimit = 34, /* GB/s */
	.displayrtids = 256,
	.derating = 10,
};

static const struct intel_sa_info rkl_sa_info = {
	.deburst = 8,
	.deprogbwlimit = 20, /* GB/s */
	.displayrtids = 128,
	.derating = 10,
};

static const struct intel_sa_info adls_sa_info = {
	.deburst = 16,
	.deprogbwlimit = 38, /* GB/s */
	.displayrtids = 256,
	.derating = 10,
};

static const struct intel_sa_info adlp_sa_info = {
	.deburst = 16,
	.deprogbwlimit = 38, /* GB/s */
	.displayrtids = 256,
	.derating = 20,
};

static const struct intel_sa_info mtl_sa_info = {
	.deburst = 32,
	.deprogbwlimit = 38, /* GB/s */
	.displayrtids = 256,
	.derating = 10,
};

static const struct intel_sa_info xe2_hpd_sa_info = {
	.derating = 30,
	.deprogbwlimit = 53,
	/* Other values not used by simplified algorithm */
};

static const struct intel_sa_info xe2_hpd_ecc_sa_info = {
	.derating = 45,
	.deprogbwlimit = 53,
	/* Other values not used by simplified algorithm */
};

static const struct intel_sa_info xe3lpd_sa_info = {
	.deburst = 32,
	.deprogbwlimit = 65, /* GB/s */
	.displayrtids = 256,
	.derating = 10,
};

static const struct intel_sa_info xe3lpd_3002_sa_info = {
	.deburst = 32,
	.deprogbwlimit = 22, /* GB/s */
	.displayrtids = 256,
	.derating = 10,
};

static int icl_get_bw_info(struct intel_display *display,
			   const struct dram_info *dram_info,
			   const struct intel_sa_info *sa)
{
	struct intel_qgv_info qi = {};
	bool is_y_tile = true; /* assume y tile may be used */
	int num_channels = max_t(u8, 1, dram_info->num_channels);
	int ipqdepth, ipqdepthpch = 16;
	int dclk_max;
	int maxdebw;
	int num_groups = ARRAY_SIZE(display->bw.max);
	int i, ret;

	ret = icl_get_qgv_points(display, dram_info, &qi, is_y_tile);
	if (ret) {
		drm_dbg_kms(display->drm,
			    "Failed to get memory subsystem information, ignoring bandwidth limits");
		return ret;
	}

	dclk_max = icl_sagv_max_dclk(&qi);
	maxdebw = min(sa->deprogbwlimit * 1000, dclk_max * 16 * 6 / 10);
	ipqdepth = min(ipqdepthpch, sa->displayrtids / num_channels);
	qi.deinterleave = DIV_ROUND_UP(num_channels, is_y_tile ? 4 : 2);

	for (i = 0; i < num_groups; i++) {
		struct intel_bw_info *bi = &display->bw.max[i];
		int clpchgroup;
		int j;

		clpchgroup = (sa->deburst * qi.deinterleave / num_channels) << i;
		bi->num_planes = (ipqdepth - clpchgroup) / clpchgroup + 1;

		bi->num_qgv_points = qi.num_points;
		bi->num_psf_gv_points = qi.num_psf_points;

		for (j = 0; j < qi.num_points; j++) {
			const struct intel_qgv_point *sp = &qi.points[j];
			int ct, bw;

			/*
			 * Max row cycle time
			 *
			 * FIXME what is the logic behind the
			 * assumed burst length?
			 */
			ct = max_t(int, sp->t_rc, sp->t_rp + sp->t_rcd +
				   (clpchgroup - 1) * qi.t_bl + sp->t_rdpre);
			bw = DIV_ROUND_UP(sp->dclk * clpchgroup * 32 * num_channels, ct);

			bi->deratedbw[j] = min(maxdebw,
					       bw * (100 - sa->derating) / 100);

			drm_dbg_kms(display->drm,
				    "BW%d / QGV %d: num_planes=%d deratedbw=%u\n",
				    i, j, bi->num_planes, bi->deratedbw[j]);
		}
	}
	/*
	 * If SAGV is disabled in BIOS, we always get one QGV point, and we
	 * can't send PCode commands to restrict it as they would fail and
	 * be pointless anyway.
	 */
	if (qi.num_points == 1)
		display->sagv.status = I915_SAGV_NOT_CONTROLLED;
	else
		display->sagv.status = I915_SAGV_ENABLED;

	return 0;
}
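
/*
 * Worked example of the derated bandwidth math in icl_get_bw_info(), with
 * made-up values: dclk = 1600 MHz, num_channels = 2, clpchgroup = 8,
 * t_bl = 8, t_rp = 16, t_rcd = 16, t_rdpre = 8, t_rc = 40. Then
 * ct = max(40, 16 + 16 + 7 * 8 + 8) = 96 cycles,
 * bw = DIV_ROUND_UP(1600 * 8 * 32 * 2, 96) = 8534 MB/s, and after 10%
 * derating that is 7680 MB/s, capped by maxdebw.
 */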

static int tgl_get_bw_info(struct intel_display *display,
			   const struct dram_info *dram_info,
			   const struct intel_sa_info *sa)
{
	struct intel_qgv_info qi = {};
	bool is_y_tile = true; /* assume y tile may be used */
	int num_channels = max_t(u8, 1, dram_info->num_channels);
	int ipqdepth, ipqdepthpch = 16;
	int dclk_max;
	int maxdebw, peakbw;
	int clperchgroup;
	int num_groups = ARRAY_SIZE(display->bw.max);
	int i, ret;

	ret = icl_get_qgv_points(display, dram_info, &qi, is_y_tile);
	if (ret) {
		drm_dbg_kms(display->drm,
			    "Failed to get memory subsystem information, ignoring bandwidth limits");
		return ret;
	}

	if (DISPLAY_VER(display) < 14 &&
	    (dram_info->type == INTEL_DRAM_LPDDR4 || dram_info->type == INTEL_DRAM_LPDDR5))
		num_channels *= 2;

	qi.deinterleave = qi.deinterleave ? : DIV_ROUND_UP(num_channels, is_y_tile ? 4 : 2);

	if (num_channels < qi.max_numchannels && DISPLAY_VER(display) >= 12)
		qi.deinterleave = max(DIV_ROUND_UP(qi.deinterleave, 2), 1);

	if (DISPLAY_VER(display) >= 12 && num_channels > qi.max_numchannels)
		drm_warn(display->drm, "Number of channels exceeds max number of channels.");
	if (qi.max_numchannels != 0)
		num_channels = min_t(u8, num_channels, qi.max_numchannels);

	dclk_max = icl_sagv_max_dclk(&qi);

	peakbw = num_channels * DIV_ROUND_UP(qi.channel_width, 8) * dclk_max;
	maxdebw = min(sa->deprogbwlimit * 1000, peakbw * DEPROGBWPCLIMIT / 100);

	ipqdepth = min(ipqdepthpch, sa->displayrtids / num_channels);
	/*
	 * clperchgroup = 4kpagespermempage * clperchperblock,
	 * clperchperblock = 8 / num_channels * interleave
	 */
	clperchgroup = 4 * DIV_ROUND_UP(8, num_channels) * qi.deinterleave;
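
	/*
	 * For example (hypothetical LPDDR5 setup): num_channels = 4,
	 * channel_width = 16, dclk_max = 2133 and deinterleave = 2 give
	 * peakbw = 4 * 2 * 2133 = 17064 MB/s,
	 * maxdebw = min(38000, 17064 * 60 / 100) = 10238 MB/s, and
	 * clperchgroup = 4 * DIV_ROUND_UP(8, 4) * 2 = 16.
	 */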

	for (i = 0; i < num_groups; i++) {
		struct intel_bw_info *bi = &display->bw.max[i];
		struct intel_bw_info *bi_next;
		int clpchgroup;
		int j;

		clpchgroup = (sa->deburst * qi.deinterleave / num_channels) << i;

		if (i < num_groups - 1) {
			bi_next = &display->bw.max[i + 1];

			if (clpchgroup < clperchgroup)
				bi_next->num_planes = (ipqdepth - clpchgroup) /
						      clpchgroup + 1;
			else
				bi_next->num_planes = 0;
		}

		bi->num_qgv_points = qi.num_points;
		bi->num_psf_gv_points = qi.num_psf_points;

		for (j = 0; j < qi.num_points; j++) {
			const struct intel_qgv_point *sp = &qi.points[j];
			int ct, bw;

			/*
			 * Max row cycle time
			 *
			 * FIXME what is the logic behind the
			 * assumed burst length?
			 */
			ct = max_t(int, sp->t_rc, sp->t_rp + sp->t_rcd +
				   (clpchgroup - 1) * qi.t_bl + sp->t_rdpre);
			bw = DIV_ROUND_UP(sp->dclk * clpchgroup * 32 * num_channels, ct);

			bi->deratedbw[j] = min(maxdebw,
					       bw * (100 - sa->derating) / 100);
			bi->peakbw[j] = DIV_ROUND_CLOSEST(sp->dclk *
							  num_channels *
							  qi.channel_width, 8);

			drm_dbg_kms(display->drm,
				    "BW%d / QGV %d: num_planes=%d deratedbw=%u peakbw: %u\n",
				    i, j, bi->num_planes, bi->deratedbw[j],
				    bi->peakbw[j]);
		}

		for (j = 0; j < qi.num_psf_points; j++) {
			const struct intel_psf_gv_point *sp = &qi.psf_points[j];

			bi->psf_bw[j] = adl_calc_psf_bw(sp->clk);

			drm_dbg_kms(display->drm,
				    "BW%d / PSF GV %d: num_planes=%d bw=%u\n",
				    i, j, bi->num_planes, bi->psf_bw[j]);
		}
	}

	/*
	 * If SAGV is disabled in BIOS, we always get one QGV point, and we
	 * can't send PCode commands to restrict it as they would fail and
	 * be pointless anyway.
	 */
	if (qi.num_points == 1)
		display->sagv.status = I915_SAGV_NOT_CONTROLLED;
	else
		display->sagv.status = I915_SAGV_ENABLED;

	return 0;
}

static void dg2_get_bw_info(struct intel_display *display)
{
	unsigned int deratedbw = display->platform.dg2_g11 ? 38000 : 50000;
	int num_groups = ARRAY_SIZE(display->bw.max);
	int i;

	/*
	 * DG2 doesn't have SAGV or QGV points, just a constant max bandwidth
	 * that doesn't depend on the number of planes enabled. So fill all the
	 * plane groups with constant bw information for uniformity with other
	 * platforms. DG2-G10 platforms have a constant 50 GB/s bandwidth,
	 * whereas DG2-G11 platforms have 38 GB/s.
	 */
	for (i = 0; i < num_groups; i++) {
		struct intel_bw_info *bi = &display->bw.max[i];

		bi->num_planes = 1;
		/* Need only one dummy QGV point per group */
		bi->num_qgv_points = 1;
		bi->deratedbw[0] = deratedbw;
	}

	display->sagv.status = I915_SAGV_NOT_CONTROLLED;
}

static int xe2_hpd_get_bw_info(struct intel_display *display,
			       const struct dram_info *dram_info,
			       const struct intel_sa_info *sa)
{
	struct intel_qgv_info qi = {};
	int num_channels = dram_info->num_channels;
	int peakbw, maxdebw;
	int ret, i;

	ret = icl_get_qgv_points(display, dram_info, &qi, true);
	if (ret) {
		drm_dbg_kms(display->drm,
			    "Failed to get memory subsystem information, ignoring bandwidth limits");
		return ret;
	}

	peakbw = num_channels * qi.channel_width / 8 * icl_sagv_max_dclk(&qi);
	maxdebw = min(sa->deprogbwlimit * 1000, peakbw * DEPROGBWPCLIMIT / 10);

	for (i = 0; i < qi.num_points; i++) {
		const struct intel_qgv_point *point = &qi.points[i];
		int bw = num_channels * (qi.channel_width / 8) * point->dclk;

		display->bw.max[0].deratedbw[i] =
			min(maxdebw, (100 - sa->derating) * bw / 100);
		display->bw.max[0].peakbw[i] = bw;

		drm_dbg_kms(display->drm, "QGV %d: deratedbw=%u peakbw: %u\n",
			    i, display->bw.max[0].deratedbw[i],
			    display->bw.max[0].peakbw[i]);
	}

	/* Bandwidth does not depend on # of planes; set all groups the same */
	display->bw.max[0].num_planes = 1;
	display->bw.max[0].num_qgv_points = qi.num_points;
	for (i = 1; i < ARRAY_SIZE(display->bw.max); i++)
		memcpy(&display->bw.max[i], &display->bw.max[0],
		       sizeof(display->bw.max[0]));

	/*
	 * Xe2_HPD should always have exactly two QGV points representing
	 * battery and plugged-in operation.
	 */
	drm_WARN_ON(display->drm, qi.num_points != 2);
	display->sagv.status = I915_SAGV_ENABLED;

	return 0;
}
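
/*
 * Each entry of display->bw.max[] describes one "plane group": as the
 * group index grows, clpchgroup doubles, which (roughly speaking) raises
 * the derated bandwidth while lowering the number of planes the group can
 * serve. The helpers below select the group index appropriate for a given
 * active plane count and QGV point.
 */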

static unsigned int icl_max_bw_index(struct intel_display *display,
				     int num_planes, int qgv_point)
{
	int i;

	/*
	 * Let's return max bw for 0 planes
	 */
	num_planes = max(1, num_planes);

	for (i = 0; i < ARRAY_SIZE(display->bw.max); i++) {
		const struct intel_bw_info *bi =
			&display->bw.max[i];

		/*
		 * Pcode will not expose all QGV points when
		 * SAGV is forced to off/min/med/max.
		 */
		if (qgv_point >= bi->num_qgv_points)
			return UINT_MAX;

		if (num_planes >= bi->num_planes)
			return i;
	}

	return UINT_MAX;
}

static unsigned int tgl_max_bw_index(struct intel_display *display,
				     int num_planes, int qgv_point)
{
	int i;

	/*
	 * Let's return max bw for 0 planes
	 */
	num_planes = max(1, num_planes);

	for (i = ARRAY_SIZE(display->bw.max) - 1; i >= 0; i--) {
		const struct intel_bw_info *bi =
			&display->bw.max[i];

		/*
		 * Pcode will not expose all QGV points when
		 * SAGV is forced to off/min/med/max.
		 */
		if (qgv_point >= bi->num_qgv_points)
			return UINT_MAX;

		if (num_planes <= bi->num_planes)
			return i;
	}

	return 0;
}

static unsigned int adl_psf_bw(struct intel_display *display,
			       int psf_gv_point)
{
	const struct intel_bw_info *bi =
		&display->bw.max[0];

	return bi->psf_bw[psf_gv_point];
}

static unsigned int icl_qgv_bw(struct intel_display *display,
			       int num_active_planes, int qgv_point)
{
	unsigned int idx;

	if (DISPLAY_VER(display) >= 12)
		idx = tgl_max_bw_index(display, num_active_planes, qgv_point);
	else
		idx = icl_max_bw_index(display, num_active_planes, qgv_point);

	if (idx >= ARRAY_SIZE(display->bw.max))
		return 0;

	return display->bw.max[idx].deratedbw[qgv_point];
}

void intel_bw_init_hw(struct intel_display *display)
{
	const struct dram_info *dram_info = intel_dram_info(display);

	if (!HAS_DISPLAY(display))
		return;

	/*
	 * Starting with Xe3p_LPD, the hardware tells us whether memory has ECC
	 * enabled that would impact display bandwidth. However, so far there
	 * are no instructions in Bspec on how to handle that case. Let's
	 * complain if we ever find such a scenario.
	 */
	if (DISPLAY_VER(display) >= 35)
		drm_WARN_ON(display->drm, dram_info->ecc_impacting_de_bw);

	if (DISPLAY_VER(display) >= 30) {
		if (DISPLAY_VERx100(display) == 3002)
			tgl_get_bw_info(display, dram_info, &xe3lpd_3002_sa_info);
		else
			tgl_get_bw_info(display, dram_info, &xe3lpd_sa_info);
	} else if (DISPLAY_VERx100(display) >= 1401 && display->platform.dgfx) {
		if (dram_info->type == INTEL_DRAM_GDDR_ECC)
			xe2_hpd_get_bw_info(display, dram_info, &xe2_hpd_ecc_sa_info);
		else
			xe2_hpd_get_bw_info(display, dram_info, &xe2_hpd_sa_info);
	} else if (DISPLAY_VER(display) >= 14) {
		tgl_get_bw_info(display, dram_info, &mtl_sa_info);
	} else if (display->platform.dg2) {
		dg2_get_bw_info(display);
	} else if (display->platform.alderlake_p) {
		tgl_get_bw_info(display, dram_info, &adlp_sa_info);
	} else if (display->platform.alderlake_s) {
		tgl_get_bw_info(display, dram_info, &adls_sa_info);
	} else if (display->platform.rocketlake) {
		tgl_get_bw_info(display, dram_info, &rkl_sa_info);
	} else if (DISPLAY_VER(display) == 12) {
		tgl_get_bw_info(display, dram_info, &tgl_sa_info);
	} else if (DISPLAY_VER(display) == 11) {
		icl_get_bw_info(display, dram_info, &icl_sa_info);
	}
}

static unsigned int intel_bw_num_active_planes(struct intel_display *display,
					       const struct intel_bw_state *bw_state)
{
	unsigned int num_active_planes = 0;
	enum pipe pipe;

	for_each_pipe(display, pipe)
		num_active_planes += bw_state->num_active_planes[pipe];

	return num_active_planes;
}

static unsigned int intel_bw_data_rate(struct intel_display *display,
				       const struct intel_bw_state *bw_state)
{
	unsigned int data_rate = 0;
	enum pipe pipe;

	for_each_pipe(display, pipe)
		data_rate += bw_state->data_rate[pipe];
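
	/*
	 * With VT-d active, display version 13+ adds a 5% overhead; e.g. an
	 * aggregate data rate of 10000 becomes 10500 after the adjustment
	 * below.
	 */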
	if (DISPLAY_VER(display) >= 13 && intel_display_vtd_active(display))
		data_rate = DIV_ROUND_UP(data_rate * 105, 100);

	return data_rate;
}

struct intel_bw_state *to_intel_bw_state(struct intel_global_state *obj_state)
{
	return container_of(obj_state, struct intel_bw_state, base);
}

struct intel_bw_state *
intel_atomic_get_old_bw_state(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_global_state *bw_state;

	bw_state = intel_atomic_get_old_global_obj_state(state, &display->bw.obj);

	return to_intel_bw_state(bw_state);
}

struct intel_bw_state *
intel_atomic_get_new_bw_state(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_global_state *bw_state;

	bw_state = intel_atomic_get_new_global_obj_state(state, &display->bw.obj);

	return to_intel_bw_state(bw_state);
}

struct intel_bw_state *
intel_atomic_get_bw_state(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_global_state *bw_state;

	bw_state = intel_atomic_get_global_obj_state(state, &display->bw.obj);
	if (IS_ERR(bw_state))
		return ERR_CAST(bw_state);

	return to_intel_bw_state(bw_state);
}

static unsigned int icl_max_bw_qgv_point_mask(struct intel_display *display,
					      int num_active_planes)
{
	unsigned int num_qgv_points = display->bw.max[0].num_qgv_points;
	unsigned int max_bw_point = 0;
	unsigned int max_bw = 0;
	int i;

	for (i = 0; i < num_qgv_points; i++) {
		unsigned int max_data_rate =
			icl_qgv_bw(display, num_active_planes, i);

		/*
		 * We need to know which qgv point gives us
		 * maximum bandwidth in order to disable SAGV
		 * if we find that we exceed SAGV block time
		 * with watermarks. By that moment we already
		 * have those, as they are calculated earlier in
		 * intel_atomic_check.
		 */
		if (max_data_rate > max_bw) {
			max_bw_point = BIT(i);
			max_bw = max_data_rate;
		}
	}

	return max_bw_point;
}

static u16 icl_prepare_qgv_points_mask(struct intel_display *display,
				       unsigned int qgv_points,
				       unsigned int psf_points)
{
	return ~(ICL_PCODE_REQ_QGV_PT(qgv_points) |
		 ADLS_PCODE_REQ_PSF_PT(psf_points)) & icl_qgv_points_mask(display);
}
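
/*
 * Note the helper above yields the points to *mask*, not the points to
 * keep: e.g. (hypothetically) with 8 advertised QGV points and allowed
 * points 0b00001100, the resulting mask covers the other six points,
 * 0b11110011, within the QGV request field.
 */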

static unsigned int icl_max_bw_psf_gv_point_mask(struct intel_display *display)
{
	unsigned int num_psf_gv_points = display->bw.max[0].num_psf_gv_points;
	unsigned int max_bw_point_mask = 0;
	unsigned int max_bw = 0;
	int i;

	for (i = 0; i < num_psf_gv_points; i++) {
		unsigned int max_data_rate = adl_psf_bw(display, i);

		if (max_data_rate > max_bw) {
			max_bw_point_mask = BIT(i);
			max_bw = max_data_rate;
		} else if (max_data_rate == max_bw) {
			max_bw_point_mask |= BIT(i);
		}
	}

	return max_bw_point_mask;
}

static void icl_force_disable_sagv(struct intel_display *display,
				   struct intel_bw_state *bw_state)
{
	unsigned int qgv_points = icl_max_bw_qgv_point_mask(display, 0);
	unsigned int psf_points = icl_max_bw_psf_gv_point_mask(display);

	bw_state->qgv_points_mask = icl_prepare_qgv_points_mask(display,
								qgv_points,
								psf_points);

	drm_dbg_kms(display->drm, "Forcing SAGV disable: mask 0x%x\n",
		    bw_state->qgv_points_mask);

	icl_pcode_restrict_qgv_points(display, bw_state->qgv_points_mask);
}

void icl_sagv_pre_plane_update(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);
	const struct intel_bw_state *old_bw_state =
		intel_atomic_get_old_bw_state(state);
	const struct intel_bw_state *new_bw_state =
		intel_atomic_get_new_bw_state(state);
	u16 old_mask, new_mask;

	if (!new_bw_state)
		return;

	old_mask = old_bw_state->qgv_points_mask;
	new_mask = old_bw_state->qgv_points_mask | new_bw_state->qgv_points_mask;

	if (old_mask == new_mask)
		return;

	WARN_ON(!new_bw_state->base.changed);

	drm_dbg_kms(display->drm, "Restricting QGV points: 0x%x -> 0x%x\n",
		    old_mask, new_mask);

	/*
	 * Restrict required qgv points before updating the configuration.
	 * According to BSpec we can't mask and unmask qgv points at the same
	 * time. Also masking should be done before updating the configuration
	 * and unmasking afterwards.
	 */
	icl_pcode_restrict_qgv_points(display, new_mask);
}
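
/*
 * Example of the two-step mask update (hypothetical masks): going from an
 * old mask of 0x3 to a new mask of 0xc, the pre-plane step programs the
 * union 0xf, so only points acceptable to both the old and the new
 * configuration stay unmasked during the transition; the post-plane step
 * below then relaxes the mask to the final 0xc.
 */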

void icl_sagv_post_plane_update(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);
	const struct intel_bw_state *old_bw_state =
		intel_atomic_get_old_bw_state(state);
	const struct intel_bw_state *new_bw_state =
		intel_atomic_get_new_bw_state(state);
	u16 old_mask, new_mask;

	if (!new_bw_state)
		return;

	old_mask = old_bw_state->qgv_points_mask | new_bw_state->qgv_points_mask;
	new_mask = new_bw_state->qgv_points_mask;

	if (old_mask == new_mask)
		return;

	WARN_ON(!new_bw_state->base.changed);

	drm_dbg_kms(display->drm, "Relaxing QGV points: 0x%x -> 0x%x\n",
		    old_mask, new_mask);

	/*
	 * Allow required qgv points after updating the configuration.
	 * According to BSpec we can't mask and unmask qgv points at the same
	 * time. Also masking should be done before updating the configuration
	 * and unmasking afterwards.
	 */
	icl_pcode_restrict_qgv_points(display, new_mask);
}

static int mtl_find_qgv_points(struct intel_display *display,
			       unsigned int data_rate,
			       unsigned int num_active_planes,
			       struct intel_bw_state *new_bw_state)
{
	unsigned int best_rate = UINT_MAX;
	unsigned int num_qgv_points = display->bw.max[0].num_qgv_points;
	unsigned int qgv_peak_bw = 0;
	int i;
	int ret;

	ret = intel_atomic_lock_global_state(&new_bw_state->base);
	if (ret)
		return ret;

	/*
	 * If SAGV cannot be enabled, disable the pcode SAGV by passing all 1's
	 * for qgv peak bw in the PM Demand request. So assign U16_MAX if SAGV
	 * is not enabled. The PM Demand code will clamp the value for the
	 * register.
	 */
	if (!intel_bw_can_enable_sagv(display, new_bw_state)) {
		new_bw_state->qgv_point_peakbw = U16_MAX;
		drm_dbg_kms(display->drm, "No SAGV, use U16_MAX as peak bw.");
		return 0;
	}

	/*
	 * Find the best QGV point by comparing the data_rate with the max data
	 * rate offered per plane group.
	 */
	for (i = 0; i < num_qgv_points; i++) {
		unsigned int bw_index =
			tgl_max_bw_index(display, num_active_planes, i);
		unsigned int max_data_rate;

		if (bw_index >= ARRAY_SIZE(display->bw.max))
			continue;

		max_data_rate = display->bw.max[bw_index].deratedbw[i];

		if (max_data_rate < data_rate)
			continue;

		if (max_data_rate - data_rate < best_rate) {
			best_rate = max_data_rate - data_rate;
			qgv_peak_bw = display->bw.max[bw_index].peakbw[i];
		}

		drm_dbg_kms(display->drm, "QGV point %d: max bw %d required %d qgv_peak_bw: %d\n",
			    i, max_data_rate, data_rate, qgv_peak_bw);
	}

	drm_dbg_kms(display->drm, "Matching peak QGV bw: %d for required data rate: %d\n",
		    qgv_peak_bw, data_rate);

	/*
	 * The display configuration cannot be supported if no QGV point
	 * satisfying the required data rate is found.
	 */
	if (qgv_peak_bw == 0) {
		drm_dbg_kms(display->drm, "No QGV points for bw %d for display configuration(%d active planes).\n",
			    data_rate, num_active_planes);
		return -EINVAL;
	}

	/* MTL PM DEMAND expects the QGV BW parameter in multiples of 100 MB/s */
	new_bw_state->qgv_point_peakbw = DIV_ROUND_CLOSEST(qgv_peak_bw, 100);

	return 0;
}
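
/*
 * E.g. if mtl_find_qgv_points() picked a point with a peak bandwidth of
 * 76800 MB/s (a made-up figure), qgv_point_peakbw becomes
 * DIV_ROUND_CLOSEST(76800, 100) = 768, the value later used in the
 * PM Demand request.
 */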

static int icl_find_qgv_points(struct intel_display *display,
			       unsigned int data_rate,
			       unsigned int num_active_planes,
			       const struct intel_bw_state *old_bw_state,
			       struct intel_bw_state *new_bw_state)
{
	unsigned int num_psf_gv_points = display->bw.max[0].num_psf_gv_points;
	unsigned int num_qgv_points = display->bw.max[0].num_qgv_points;
	u16 psf_points = 0;
	u16 qgv_points = 0;
	int i;
	int ret;

	ret = intel_atomic_lock_global_state(&new_bw_state->base);
	if (ret)
		return ret;

	for (i = 0; i < num_qgv_points; i++) {
		unsigned int max_data_rate = icl_qgv_bw(display,
							num_active_planes, i);
		if (max_data_rate >= data_rate)
			qgv_points |= BIT(i);

		drm_dbg_kms(display->drm, "QGV point %d: max bw %d required %d\n",
			    i, max_data_rate, data_rate);
	}

	for (i = 0; i < num_psf_gv_points; i++) {
		unsigned int max_data_rate = adl_psf_bw(display, i);

		if (max_data_rate >= data_rate)
			psf_points |= BIT(i);

		drm_dbg_kms(display->drm, "PSF GV point %d: max bw %d"
			    " required %d\n",
			    i, max_data_rate, data_rate);
	}

	/*
	 * BSpec states that we should always have at least one allowed point
	 * left, so if we don't, simply reject the configuration.
	 */
	if (qgv_points == 0) {
		drm_dbg_kms(display->drm, "No QGV points provide sufficient memory"
			    " bandwidth %d for display configuration(%d active planes).\n",
			    data_rate, num_active_planes);
		return -EINVAL;
	}

	if (num_psf_gv_points > 0 && psf_points == 0) {
		drm_dbg_kms(display->drm, "No PSF GV points provide sufficient memory"
			    " bandwidth %d for display configuration(%d active planes).\n",
			    data_rate, num_active_planes);
		return -EINVAL;
	}

	/*
	 * Leave only the single point with the highest bandwidth if we
	 * can't enable SAGV, due to the increased memory latency it may
	 * cause.
	 */
	if (!intel_bw_can_enable_sagv(display, new_bw_state)) {
		qgv_points = icl_max_bw_qgv_point_mask(display, num_active_planes);
		drm_dbg_kms(display->drm, "No SAGV, using single QGV point mask 0x%x\n",
			    qgv_points);
	}

	/*
	 * We store the points which need to be masked, as that is what PCode
	 * actually accepts as a parameter.
	 */
	new_bw_state->qgv_points_mask = icl_prepare_qgv_points_mask(display,
								    qgv_points,
								    psf_points);
	/*
	 * If the actual mask has changed, we need to make sure that
	 * the commits are serialized (in case this is a no-modeset,
	 * nonblocking commit).
	 */
	if (new_bw_state->qgv_points_mask != old_bw_state->qgv_points_mask) {
		ret = intel_atomic_serialize_global_state(&new_bw_state->base);
		if (ret)
			return ret;
	}

	return 0;
}

static int intel_bw_check_qgv_points(struct intel_display *display,
				     const struct intel_bw_state *old_bw_state,
				     struct intel_bw_state *new_bw_state)
{
	unsigned int data_rate = intel_bw_data_rate(display, new_bw_state);
	unsigned int num_active_planes =
		intel_bw_num_active_planes(display, new_bw_state);

	data_rate = DIV_ROUND_UP(data_rate, 1000);
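
	/*
	 * The division above converts the aggregate data rate to the units
	 * used by the derated bw tables (presumably kB/s to MB/s, since pixel
	 * rates are tracked in kHz); e.g. 5400000 becomes 5400.
	 */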
	if (DISPLAY_VER(display) >= 14)
		return mtl_find_qgv_points(display, data_rate, num_active_planes,
					   new_bw_state);
	else
		return icl_find_qgv_points(display, data_rate, num_active_planes,
					   old_bw_state, new_bw_state);
}

static int intel_bw_check_data_rate(struct intel_atomic_state *state, bool *changed)
{
	struct intel_display *display = to_intel_display(state);
	const struct intel_crtc_state *new_crtc_state, *old_crtc_state;
	struct intel_crtc *crtc;
	int i;

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		unsigned int old_data_rate =
			intel_crtc_bw_data_rate(old_crtc_state);
		unsigned int new_data_rate =
			intel_crtc_bw_data_rate(new_crtc_state);
		unsigned int old_active_planes =
			intel_crtc_bw_num_active_planes(old_crtc_state);
		unsigned int new_active_planes =
			intel_crtc_bw_num_active_planes(new_crtc_state);
		struct intel_bw_state *new_bw_state;

		/*
		 * Avoid locking the bw state when
		 * nothing significant has changed.
		 */
		if (old_data_rate == new_data_rate &&
		    old_active_planes == new_active_planes)
			continue;

		new_bw_state = intel_atomic_get_bw_state(state);
		if (IS_ERR(new_bw_state))
			return PTR_ERR(new_bw_state);

		new_bw_state->data_rate[crtc->pipe] = new_data_rate;
		new_bw_state->num_active_planes[crtc->pipe] = new_active_planes;

		*changed = true;

		drm_dbg_kms(display->drm,
			    "[CRTC:%d:%s] data rate %u num active planes %u\n",
			    crtc->base.base.id, crtc->base.name,
			    new_bw_state->data_rate[crtc->pipe],
			    new_bw_state->num_active_planes[crtc->pipe]);
	}

	return 0;
}

static int intel_bw_modeset_checks(struct intel_atomic_state *state)
{
	const struct intel_bw_state *old_bw_state;
	struct intel_bw_state *new_bw_state;
	int ret;

	if (!intel_any_crtc_active_changed(state))
		return 0;

	new_bw_state = intel_atomic_get_bw_state(state);
	if (IS_ERR(new_bw_state))
		return PTR_ERR(new_bw_state);

	old_bw_state = intel_atomic_get_old_bw_state(state);

	new_bw_state->active_pipes =
		intel_calc_active_pipes(state, old_bw_state->active_pipes);

	ret = intel_atomic_lock_global_state(&new_bw_state->base);
	if (ret)
		return ret;

	return 0;
}

static int intel_bw_check_sagv_mask(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);
	const struct intel_crtc_state *old_crtc_state;
	const struct intel_crtc_state *new_crtc_state;
	const struct intel_bw_state *old_bw_state = NULL;
	struct intel_bw_state *new_bw_state = NULL;
	struct intel_crtc *crtc;
	int ret, i;

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (intel_crtc_can_enable_sagv(old_crtc_state) ==
		    intel_crtc_can_enable_sagv(new_crtc_state))
			continue;

		new_bw_state = intel_atomic_get_bw_state(state);
		if (IS_ERR(new_bw_state))
			return PTR_ERR(new_bw_state);

		old_bw_state = intel_atomic_get_old_bw_state(state);

		if (intel_crtc_can_enable_sagv(new_crtc_state))
			new_bw_state->pipe_sagv_reject &= ~BIT(crtc->pipe);
		else
			new_bw_state->pipe_sagv_reject |= BIT(crtc->pipe);
	}

	if (!new_bw_state)
		return 0;
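
	/*
	 * A change in the overall SAGV verdict affects programming on all
	 * pipes, so serialize the commit in that case; if only the per-pipe
	 * reject mask changed, grabbing the bw state lock is (presumably)
	 * sufficient.
	 */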
	if (intel_bw_can_enable_sagv(display, new_bw_state) !=
	    intel_bw_can_enable_sagv(display, old_bw_state)) {
		ret = intel_atomic_serialize_global_state(&new_bw_state->base);
		if (ret)
			return ret;
	} else if (new_bw_state->pipe_sagv_reject != old_bw_state->pipe_sagv_reject) {
		ret = intel_atomic_lock_global_state(&new_bw_state->base);
		if (ret)
			return ret;
	}

	return 0;
}

int intel_bw_atomic_check(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);
	bool changed = false;
	struct intel_bw_state *new_bw_state;
	const struct intel_bw_state *old_bw_state;
	int ret;

	if (DISPLAY_VER(display) < 9)
		return 0;

	ret = intel_bw_modeset_checks(state);
	if (ret)
		return ret;

	ret = intel_bw_check_sagv_mask(state);
	if (ret)
		return ret;

	/* FIXME earlier gens need some checks too */
	if (DISPLAY_VER(display) < 11)
		return 0;

	ret = intel_bw_check_data_rate(state, &changed);
	if (ret)
		return ret;

	old_bw_state = intel_atomic_get_old_bw_state(state);
	new_bw_state = intel_atomic_get_new_bw_state(state);

	if (new_bw_state &&
	    intel_bw_can_enable_sagv(display, old_bw_state) !=
	    intel_bw_can_enable_sagv(display, new_bw_state))
		changed = true;

	/*
	 * If none of our inputs (data rates, number of active
	 * planes, SAGV yes/no) changed then nothing to do here.
	 */
	if (!changed)
		return 0;

	ret = intel_bw_check_qgv_points(display, old_bw_state, new_bw_state);
	if (ret)
		return ret;

	return 0;
}

static void intel_bw_crtc_update(struct intel_bw_state *bw_state,
				 const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	bw_state->data_rate[crtc->pipe] =
		intel_crtc_bw_data_rate(crtc_state);
	bw_state->num_active_planes[crtc->pipe] =
		intel_crtc_bw_num_active_planes(crtc_state);

	drm_dbg_kms(display->drm, "pipe %c data rate %u num active planes %u\n",
		    pipe_name(crtc->pipe),
		    bw_state->data_rate[crtc->pipe],
		    bw_state->num_active_planes[crtc->pipe]);
}

void intel_bw_update_hw_state(struct intel_display *display)
{
	struct intel_bw_state *bw_state =
		to_intel_bw_state(display->bw.obj.state);
	struct intel_crtc *crtc;

	if (DISPLAY_VER(display) < 9)
		return;

	bw_state->active_pipes = 0;
	bw_state->pipe_sagv_reject = 0;

	for_each_intel_crtc(display->drm, crtc) {
		const struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		enum pipe pipe = crtc->pipe;

		if (crtc_state->hw.active)
			bw_state->active_pipes |= BIT(pipe);

		if (DISPLAY_VER(display) >= 11)
			intel_bw_crtc_update(bw_state, crtc_state);

		/* initially SAGV has been forced off */
		bw_state->pipe_sagv_reject |= BIT(pipe);
	}
}

void intel_bw_crtc_disable_noatomic(struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(crtc);
	struct intel_bw_state *bw_state =
		to_intel_bw_state(display->bw.obj.state);
	enum pipe pipe = crtc->pipe;

	if (DISPLAY_VER(display) < 9)
		return;

	bw_state->data_rate[pipe] = 0;
	bw_state->num_active_planes[pipe] = 0;
}

static struct intel_global_state *
intel_bw_duplicate_state(struct intel_global_obj *obj)
{
	struct intel_bw_state *state;

	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	return &state->base;
}

static void intel_bw_destroy_state(struct intel_global_obj *obj,
				   struct intel_global_state *state)
{
	kfree(state);
}

static const struct intel_global_state_funcs intel_bw_funcs = {
	.atomic_duplicate_state = intel_bw_duplicate_state,
	.atomic_destroy_state = intel_bw_destroy_state,
};

int intel_bw_init(struct intel_display *display)
{
	struct intel_bw_state *state;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	intel_atomic_global_obj_init(display, &display->bw.obj,
				     &state->base, &intel_bw_funcs);

	/*
	 * Limit this only if we have SAGV. From display version 14 onwards,
	 * SAGV is handled through pmdemand requests instead.
	 */
	if (intel_has_sagv(display) && IS_DISPLAY_VER(display, 11, 13))
		icl_force_disable_sagv(display, state);

	return 0;
}

bool intel_bw_pmdemand_needs_update(struct intel_atomic_state *state)
{
	const struct intel_bw_state *new_bw_state, *old_bw_state;

	new_bw_state = intel_atomic_get_new_bw_state(state);
	old_bw_state = intel_atomic_get_old_bw_state(state);

	if (new_bw_state &&
	    new_bw_state->qgv_point_peakbw != old_bw_state->qgv_point_peakbw)
		return true;

	return false;
}

bool intel_bw_can_enable_sagv(struct intel_display *display,
			      const struct intel_bw_state *bw_state)
{
	if (DISPLAY_VER(display) < 11 &&
	    bw_state->active_pipes && !is_power_of_2(bw_state->active_pipes))
		return false;

	return bw_state->pipe_sagv_reject == 0;
}

int intel_bw_qgv_point_peakbw(const struct intel_bw_state *bw_state)
{
	return bw_state->qgv_point_peakbw;
}