1 /* 2 * Copyright 2016 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 
21 * 22 * Authors: AMD 23 * 24 */ 25 #include <linux/delay.h> 26 27 #include "dm_services.h" 28 #include "basics/dc_common.h" 29 #include "dm_helpers.h" 30 #include "core_types.h" 31 #include "resource.h" 32 #include "dcn20/dcn20_resource.h" 33 #include "dcn20_hwseq.h" 34 #include "dce/dce_hwseq.h" 35 #include "dcn20/dcn20_dsc.h" 36 #include "dcn20/dcn20_optc.h" 37 #include "abm.h" 38 #include "clk_mgr.h" 39 #include "dmcu.h" 40 #include "hubp.h" 41 #include "timing_generator.h" 42 #include "opp.h" 43 #include "ipp.h" 44 #include "mpc.h" 45 #include "mcif_wb.h" 46 #include "dchubbub.h" 47 #include "reg_helper.h" 48 #include "dcn10/dcn10_cm_common.h" 49 #include "dcn10/dcn10_hubbub.h" 50 #include "vm_helper.h" 51 #include "dccg.h" 52 #include "dc_dmub_srv.h" 53 #include "dce/dmub_hw_lock_mgr.h" 54 #include "hw_sequencer.h" 55 #include "dpcd_defs.h" 56 #include "inc/link_enc_cfg.h" 57 #include "link_hwss.h" 58 #include "link_service.h" 59 #include "dc_state_priv.h" 60 61 #define DC_LOGGER \ 62 dc_logger 63 #define DC_LOGGER_INIT(logger) \ 64 struct dal_logger *dc_logger = logger 65 66 #define CTX \ 67 hws->ctx 68 #define REG(reg)\ 69 hws->regs->reg 70 71 #undef FN 72 #define FN(reg_name, field_name) \ 73 hws->shifts->field_name, hws->masks->field_name 74 75 void dcn20_log_color_state(struct dc *dc, 76 struct dc_log_buffer_ctx *log_ctx) 77 { 78 (void)log_ctx; 79 struct dc_context *dc_ctx = dc->ctx; 80 struct resource_pool *pool = dc->res_pool; 81 bool is_gamut_remap_available = false; 82 int i; 83 84 DTN_INFO("DPP: DGAM mode SHAPER mode 3DLUT mode 3DLUT bit depth" 85 " 3DLUT size RGAM mode GAMUT adjust " 86 "C11 C12 C13 C14 " 87 "C21 C22 C23 C24 " 88 "C31 C32 C33 C34 \n"); 89 90 for (i = 0; i < pool->pipe_count; i++) { 91 struct dpp *dpp = pool->dpps[i]; 92 struct dcn_dpp_state s = {0}; 93 94 dpp->funcs->dpp_read_state(dpp, &s); 95 if (dpp->funcs->dpp_get_gamut_remap) { 96 dpp->funcs->dpp_get_gamut_remap(dpp, &s.gamut_remap); 97 is_gamut_remap_available = true; 98 } 
99 100 if (!s.is_enabled) 101 continue; 102 103 DTN_INFO("[%2d]: %8s %11s %10s %15s %10s %9s", 104 dpp->inst, 105 (s.dgam_lut_mode == 0) ? "Bypass" : 106 ((s.dgam_lut_mode == 1) ? "sRGB" : 107 ((s.dgam_lut_mode == 2) ? "Ycc" : 108 ((s.dgam_lut_mode == 3) ? "RAM" : 109 ((s.dgam_lut_mode == 4) ? "RAM" : 110 "Unknown")))), 111 (s.shaper_lut_mode == 1) ? "RAM A" : 112 ((s.shaper_lut_mode == 2) ? "RAM B" : 113 "Bypass"), 114 (s.lut3d_mode == 1) ? "RAM A" : 115 ((s.lut3d_mode == 2) ? "RAM B" : 116 "Bypass"), 117 (s.lut3d_bit_depth <= 0) ? "12-bit" : "10-bit", 118 (s.lut3d_size == 0) ? "17x17x17" : "9x9x9", 119 (s.rgam_lut_mode == 1) ? "RAM A" : 120 ((s.rgam_lut_mode == 1) ? "RAM B" : "Bypass")); 121 122 if (is_gamut_remap_available) { 123 DTN_INFO(" %12s " 124 "%010lld %010lld %010lld %010lld " 125 "%010lld %010lld %010lld %010lld " 126 "%010lld %010lld %010lld %010lld", 127 128 (s.gamut_remap.gamut_adjust_type == 0) ? "Bypass" : 129 ((s.gamut_remap.gamut_adjust_type == 1) ? "HW" : 130 "SW"), 131 s.gamut_remap.temperature_matrix[0].value, 132 s.gamut_remap.temperature_matrix[1].value, 133 s.gamut_remap.temperature_matrix[2].value, 134 s.gamut_remap.temperature_matrix[3].value, 135 s.gamut_remap.temperature_matrix[4].value, 136 s.gamut_remap.temperature_matrix[5].value, 137 s.gamut_remap.temperature_matrix[6].value, 138 s.gamut_remap.temperature_matrix[7].value, 139 s.gamut_remap.temperature_matrix[8].value, 140 s.gamut_remap.temperature_matrix[9].value, 141 s.gamut_remap.temperature_matrix[10].value, 142 s.gamut_remap.temperature_matrix[11].value); 143 } 144 145 DTN_INFO("\n"); 146 } 147 DTN_INFO("\n"); 148 DTN_INFO("DPP Color Caps: input_lut_shared:%d icsc:%d" 149 " dgam_ram:%d dgam_rom: srgb:%d,bt2020:%d,gamma2_2:%d,pq:%d,hlg:%d" 150 " post_csc:%d gamcor:%d dgam_rom_for_yuv:%d 3d_lut:%d" 151 " blnd_lut:%d oscs:%d\n\n", 152 dc->caps.color.dpp.input_lut_shared, 153 dc->caps.color.dpp.icsc, 154 dc->caps.color.dpp.dgam_ram, 155 dc->caps.color.dpp.dgam_rom_caps.srgb, 156 
dc->caps.color.dpp.dgam_rom_caps.bt2020, 157 dc->caps.color.dpp.dgam_rom_caps.gamma2_2, 158 dc->caps.color.dpp.dgam_rom_caps.pq, 159 dc->caps.color.dpp.dgam_rom_caps.hlg, 160 dc->caps.color.dpp.post_csc, 161 dc->caps.color.dpp.gamma_corr, 162 dc->caps.color.dpp.dgam_rom_for_yuv, 163 dc->caps.color.dpp.hw_3d_lut, 164 dc->caps.color.dpp.ogam_ram, 165 dc->caps.color.dpp.ocsc); 166 167 DTN_INFO("MPCC: OPP DPP MPCCBOT MODE ALPHA_MODE PREMULT OVERLAP_ONLY IDLE" 168 " OGAM mode\n"); 169 170 for (i = 0; i < pool->mpcc_count; i++) { 171 struct mpcc_state s = {0}; 172 173 pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s); 174 if (s.opp_id != 0xf) 175 DTN_INFO("[%2d]: %2xh %2xh %6xh %4d %10d %7d %12d %4d %9s\n", 176 i, s.opp_id, s.dpp_id, s.bot_mpcc_id, 177 s.mode, s.alpha_mode, s.pre_multiplied_alpha, s.overlap_only, 178 s.idle, 179 (s.rgam_mode == 1) ? "RAM A" : 180 ((s.rgam_mode == 2) ? "RAM B" : 181 "Bypass")); 182 } 183 DTN_INFO("\n"); 184 DTN_INFO("MPC Color Caps: gamut_remap:%d, 3dlut:%d, ogam_ram:%d, ocsc:%d\n\n", 185 dc->caps.color.mpc.gamut_remap, 186 dc->caps.color.mpc.num_3dluts, 187 dc->caps.color.mpc.ogam_ram, 188 dc->caps.color.mpc.ocsc); 189 } 190 191 192 static int find_free_gsl_group(const struct dc *dc) 193 { 194 if (dc->res_pool->gsl_groups.gsl_0 == 0) 195 return 1; 196 if (dc->res_pool->gsl_groups.gsl_1 == 0) 197 return 2; 198 if (dc->res_pool->gsl_groups.gsl_2 == 0) 199 return 3; 200 201 return 0; 202 } 203 204 /* NOTE: This is not a generic setup_gsl function (hence the suffix as_lock) 205 * This is only used to lock pipes in pipe splitting case with immediate flip 206 * Ordinary MPC/OTG locks suppress VUPDATE which doesn't help with immediate, 207 * so we get tearing with freesync since we cannot flip multiple pipes 208 * atomically. 
209 * We use GSL for this: 210 * - immediate flip: find first available GSL group if not already assigned 211 * program gsl with that group, set current OTG as master 212 * and always us 0x4 = AND of flip_ready from all pipes 213 * - vsync flip: disable GSL if used 214 * 215 * Groups in stream_res are stored as +1 from HW registers, i.e. 216 * gsl_0 <=> pipe_ctx->stream_res.gsl_group == 1 217 * Using a magic value like -1 would require tracking all inits/resets 218 */ 219 void dcn20_setup_gsl_group_as_lock( 220 const struct dc *dc, 221 struct pipe_ctx *pipe_ctx, 222 bool enable) 223 { 224 struct gsl_params gsl; 225 int group_idx; 226 227 memset(&gsl, 0, sizeof(struct gsl_params)); 228 229 if (enable) { 230 /* return if group already assigned since GSL was set up 231 * for vsync flip, we would unassign so it can't be "left over" 232 */ 233 if (pipe_ctx->stream_res.gsl_group > 0) 234 return; 235 236 group_idx = find_free_gsl_group(dc); 237 ASSERT(group_idx != 0); 238 pipe_ctx->stream_res.gsl_group = group_idx; 239 240 /* set gsl group reg field and mark resource used */ 241 switch (group_idx) { 242 case 1: 243 gsl.gsl0_en = 1; 244 dc->res_pool->gsl_groups.gsl_0 = 1; 245 break; 246 case 2: 247 gsl.gsl1_en = 1; 248 dc->res_pool->gsl_groups.gsl_1 = 1; 249 break; 250 case 3: 251 gsl.gsl2_en = 1; 252 dc->res_pool->gsl_groups.gsl_2 = 1; 253 break; 254 default: 255 BREAK_TO_DEBUGGER(); 256 return; // invalid case 257 } 258 gsl.gsl_master_en = 1; 259 } else { 260 group_idx = pipe_ctx->stream_res.gsl_group; 261 if (group_idx == 0) 262 return; // if not in use, just return 263 264 pipe_ctx->stream_res.gsl_group = 0; 265 266 /* unset gsl group reg field and mark resource free */ 267 switch (group_idx) { 268 case 1: 269 gsl.gsl0_en = 0; 270 dc->res_pool->gsl_groups.gsl_0 = 0; 271 break; 272 case 2: 273 gsl.gsl1_en = 0; 274 dc->res_pool->gsl_groups.gsl_1 = 0; 275 break; 276 case 3: 277 gsl.gsl2_en = 0; 278 dc->res_pool->gsl_groups.gsl_2 = 0; 279 break; 280 default: 281 
BREAK_TO_DEBUGGER(); 282 return; 283 } 284 gsl.gsl_master_en = 0; 285 } 286 287 /* at this point we want to program whether it's to enable or disable */ 288 if (pipe_ctx->stream_res.tg->funcs->set_gsl != NULL) { 289 pipe_ctx->stream_res.tg->funcs->set_gsl( 290 pipe_ctx->stream_res.tg, 291 &gsl); 292 if (pipe_ctx->stream_res.tg->funcs->set_gsl_source_select != NULL) 293 pipe_ctx->stream_res.tg->funcs->set_gsl_source_select( 294 pipe_ctx->stream_res.tg, group_idx, enable ? 4 : 0); 295 } else 296 BREAK_TO_DEBUGGER(); 297 } 298 299 void dcn20_set_flip_control_gsl( 300 struct pipe_ctx *pipe_ctx, 301 bool flip_immediate) 302 { 303 if (pipe_ctx && pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_control_surface_gsl) 304 pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_control_surface_gsl( 305 pipe_ctx->plane_res.hubp, flip_immediate); 306 307 } 308 309 void dcn20_enable_power_gating_plane( 310 struct dce_hwseq *hws, 311 bool enable) 312 { 313 bool force_on = true; /* disable power gating */ 314 uint32_t org_ip_request_cntl = 0; 315 316 if (enable) 317 force_on = false; 318 319 REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl); 320 if (org_ip_request_cntl == 0) 321 REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1); 322 323 /* DCHUBP0/1/2/3/4/5 */ 324 REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN0_POWER_FORCEON, force_on); 325 REG_UPDATE(DOMAIN2_PG_CONFIG, DOMAIN2_POWER_FORCEON, force_on); 326 REG_UPDATE(DOMAIN4_PG_CONFIG, DOMAIN4_POWER_FORCEON, force_on); 327 REG_UPDATE(DOMAIN6_PG_CONFIG, DOMAIN6_POWER_FORCEON, force_on); 328 if (REG(DOMAIN8_PG_CONFIG)) 329 REG_UPDATE(DOMAIN8_PG_CONFIG, DOMAIN8_POWER_FORCEON, force_on); 330 if (REG(DOMAIN10_PG_CONFIG)) 331 REG_UPDATE(DOMAIN10_PG_CONFIG, DOMAIN8_POWER_FORCEON, force_on); 332 333 /* DPP0/1/2/3/4/5 */ 334 REG_UPDATE(DOMAIN1_PG_CONFIG, DOMAIN1_POWER_FORCEON, force_on); 335 REG_UPDATE(DOMAIN3_PG_CONFIG, DOMAIN3_POWER_FORCEON, force_on); 336 REG_UPDATE(DOMAIN5_PG_CONFIG, DOMAIN5_POWER_FORCEON, force_on); 337 
REG_UPDATE(DOMAIN7_PG_CONFIG, DOMAIN7_POWER_FORCEON, force_on); 338 if (REG(DOMAIN9_PG_CONFIG)) 339 REG_UPDATE(DOMAIN9_PG_CONFIG, DOMAIN9_POWER_FORCEON, force_on); 340 if (REG(DOMAIN11_PG_CONFIG)) 341 REG_UPDATE(DOMAIN11_PG_CONFIG, DOMAIN9_POWER_FORCEON, force_on); 342 343 /* DCS0/1/2/3/4/5 */ 344 REG_UPDATE(DOMAIN16_PG_CONFIG, DOMAIN16_POWER_FORCEON, force_on); 345 REG_UPDATE(DOMAIN17_PG_CONFIG, DOMAIN17_POWER_FORCEON, force_on); 346 REG_UPDATE(DOMAIN18_PG_CONFIG, DOMAIN18_POWER_FORCEON, force_on); 347 if (REG(DOMAIN19_PG_CONFIG)) 348 REG_UPDATE(DOMAIN19_PG_CONFIG, DOMAIN19_POWER_FORCEON, force_on); 349 if (REG(DOMAIN20_PG_CONFIG)) 350 REG_UPDATE(DOMAIN20_PG_CONFIG, DOMAIN20_POWER_FORCEON, force_on); 351 if (REG(DOMAIN21_PG_CONFIG)) 352 REG_UPDATE(DOMAIN21_PG_CONFIG, DOMAIN21_POWER_FORCEON, force_on); 353 354 if (org_ip_request_cntl == 0) 355 REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 0); 356 357 } 358 359 void dcn20_dccg_init(struct dce_hwseq *hws) 360 { 361 struct dc *dc = hws->ctx->dc; 362 363 if (dc->res_pool->dccg && dc->res_pool->dccg->funcs && dc->res_pool->dccg->funcs->dccg_init) 364 dc->res_pool->dccg->funcs->dccg_init(dc->res_pool->dccg); 365 } 366 367 void dcn20_disable_vga( 368 struct dce_hwseq *hws) 369 { 370 REG_WRITE(D1VGA_CONTROL, 0); 371 REG_WRITE(D2VGA_CONTROL, 0); 372 REG_WRITE(D3VGA_CONTROL, 0); 373 REG_WRITE(D4VGA_CONTROL, 0); 374 REG_WRITE(D5VGA_CONTROL, 0); 375 REG_WRITE(D6VGA_CONTROL, 0); 376 } 377 378 void dcn20_program_triple_buffer( 379 const struct dc *dc, 380 struct pipe_ctx *pipe_ctx, 381 bool enable_triple_buffer) 382 { 383 (void)dc; 384 if (pipe_ctx->plane_res.hubp && pipe_ctx->plane_res.hubp->funcs) { 385 pipe_ctx->plane_res.hubp->funcs->hubp_enable_tripleBuffer( 386 pipe_ctx->plane_res.hubp, 387 enable_triple_buffer); 388 } 389 } 390 391 /* Blank pixel data during initialization */ 392 void dcn20_init_blank( 393 struct dc *dc, 394 struct timing_generator *tg) 395 { 396 struct dce_hwseq *hws = dc->hwseq; 397 enum dc_color_space 
color_space; 398 struct tg_color black_color = {0}; 399 struct output_pixel_processor *opp = NULL; 400 struct output_pixel_processor *bottom_opp = NULL; 401 uint32_t num_opps, opp_id_src0, opp_id_src1; 402 uint32_t otg_active_width = 0, otg_active_height = 0; 403 404 /* program opp dpg blank color */ 405 color_space = COLOR_SPACE_SRGB; 406 color_space_to_black_color(dc, color_space, &black_color); 407 408 /* get the OTG active size */ 409 tg->funcs->get_otg_active_size(tg, 410 &otg_active_width, 411 &otg_active_height); 412 413 /* get the OPTC source */ 414 tg->funcs->get_optc_source(tg, &num_opps, &opp_id_src0, &opp_id_src1); 415 416 if (opp_id_src0 >= dc->res_pool->res_cap->num_opp) { 417 ASSERT(false); 418 return; 419 } 420 opp = dc->res_pool->opps[opp_id_src0]; 421 422 /* don't override the blank pattern if already enabled with the correct one. */ 423 if (opp->funcs->dpg_is_blanked && opp->funcs->dpg_is_blanked(opp)) 424 return; 425 426 if (num_opps == 2) { 427 otg_active_width = otg_active_width / 2; 428 429 if (opp_id_src1 >= dc->res_pool->res_cap->num_opp) { 430 ASSERT(false); 431 return; 432 } 433 bottom_opp = dc->res_pool->opps[opp_id_src1]; 434 } 435 436 opp->funcs->opp_set_disp_pattern_generator( 437 opp, 438 CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR, 439 CONTROLLER_DP_COLOR_SPACE_UDEFINED, 440 COLOR_DEPTH_UNDEFINED, 441 &black_color, 442 otg_active_width, 443 otg_active_height, 444 0); 445 446 if (num_opps == 2) { 447 bottom_opp->funcs->opp_set_disp_pattern_generator( 448 bottom_opp, 449 CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR, 450 CONTROLLER_DP_COLOR_SPACE_UDEFINED, 451 COLOR_DEPTH_UNDEFINED, 452 &black_color, 453 otg_active_width, 454 otg_active_height, 455 0); 456 } 457 458 hws->funcs.wait_for_blank_complete(opp); 459 } 460 461 void dcn20_dsc_pg_control( 462 struct dce_hwseq *hws, 463 unsigned int dsc_inst, 464 bool power_on) 465 { 466 uint32_t power_gate = power_on ? 0 : 1; 467 uint32_t pwr_status = power_on ? 
0 : 2; 468 uint32_t org_ip_request_cntl = 0; 469 470 if (hws->ctx->dc->debug.disable_dsc_power_gate) 471 return; 472 473 if (REG(DOMAIN16_PG_CONFIG) == 0) 474 return; 475 476 REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl); 477 if (org_ip_request_cntl == 0) 478 REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1); 479 480 switch (dsc_inst) { 481 case 0: /* DSC0 */ 482 REG_UPDATE(DOMAIN16_PG_CONFIG, 483 DOMAIN16_POWER_GATE, power_gate); 484 485 REG_WAIT(DOMAIN16_PG_STATUS, 486 DOMAIN16_PGFSM_PWR_STATUS, pwr_status, 487 1, 1000); 488 break; 489 case 1: /* DSC1 */ 490 REG_UPDATE(DOMAIN17_PG_CONFIG, 491 DOMAIN17_POWER_GATE, power_gate); 492 493 REG_WAIT(DOMAIN17_PG_STATUS, 494 DOMAIN17_PGFSM_PWR_STATUS, pwr_status, 495 1, 1000); 496 break; 497 case 2: /* DSC2 */ 498 REG_UPDATE(DOMAIN18_PG_CONFIG, 499 DOMAIN18_POWER_GATE, power_gate); 500 501 REG_WAIT(DOMAIN18_PG_STATUS, 502 DOMAIN18_PGFSM_PWR_STATUS, pwr_status, 503 1, 1000); 504 break; 505 case 3: /* DSC3 */ 506 REG_UPDATE(DOMAIN19_PG_CONFIG, 507 DOMAIN19_POWER_GATE, power_gate); 508 509 REG_WAIT(DOMAIN19_PG_STATUS, 510 DOMAIN19_PGFSM_PWR_STATUS, pwr_status, 511 1, 1000); 512 break; 513 case 4: /* DSC4 */ 514 REG_UPDATE(DOMAIN20_PG_CONFIG, 515 DOMAIN20_POWER_GATE, power_gate); 516 517 REG_WAIT(DOMAIN20_PG_STATUS, 518 DOMAIN20_PGFSM_PWR_STATUS, pwr_status, 519 1, 1000); 520 break; 521 case 5: /* DSC5 */ 522 REG_UPDATE(DOMAIN21_PG_CONFIG, 523 DOMAIN21_POWER_GATE, power_gate); 524 525 REG_WAIT(DOMAIN21_PG_STATUS, 526 DOMAIN21_PGFSM_PWR_STATUS, pwr_status, 527 1, 1000); 528 break; 529 default: 530 BREAK_TO_DEBUGGER(); 531 break; 532 } 533 534 if (org_ip_request_cntl == 0) 535 REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 0); 536 } 537 538 void dcn20_dpp_pg_control( 539 struct dce_hwseq *hws, 540 unsigned int dpp_inst, 541 bool power_on) 542 { 543 uint32_t power_gate = power_on ? 0 : 1; 544 uint32_t pwr_status = power_on ? 
0 : 2; 545 546 if (hws->ctx->dc->debug.disable_dpp_power_gate) 547 return; 548 if (REG(DOMAIN1_PG_CONFIG) == 0) 549 return; 550 551 switch (dpp_inst) { 552 case 0: /* DPP0 */ 553 REG_UPDATE(DOMAIN1_PG_CONFIG, 554 DOMAIN1_POWER_GATE, power_gate); 555 556 REG_WAIT(DOMAIN1_PG_STATUS, 557 DOMAIN1_PGFSM_PWR_STATUS, pwr_status, 558 1, 1000); 559 break; 560 case 1: /* DPP1 */ 561 REG_UPDATE(DOMAIN3_PG_CONFIG, 562 DOMAIN3_POWER_GATE, power_gate); 563 564 REG_WAIT(DOMAIN3_PG_STATUS, 565 DOMAIN3_PGFSM_PWR_STATUS, pwr_status, 566 1, 1000); 567 break; 568 case 2: /* DPP2 */ 569 REG_UPDATE(DOMAIN5_PG_CONFIG, 570 DOMAIN5_POWER_GATE, power_gate); 571 572 REG_WAIT(DOMAIN5_PG_STATUS, 573 DOMAIN5_PGFSM_PWR_STATUS, pwr_status, 574 1, 1000); 575 break; 576 case 3: /* DPP3 */ 577 REG_UPDATE(DOMAIN7_PG_CONFIG, 578 DOMAIN7_POWER_GATE, power_gate); 579 580 REG_WAIT(DOMAIN7_PG_STATUS, 581 DOMAIN7_PGFSM_PWR_STATUS, pwr_status, 582 1, 1000); 583 break; 584 case 4: /* DPP4 */ 585 REG_UPDATE(DOMAIN9_PG_CONFIG, 586 DOMAIN9_POWER_GATE, power_gate); 587 588 REG_WAIT(DOMAIN9_PG_STATUS, 589 DOMAIN9_PGFSM_PWR_STATUS, pwr_status, 590 1, 1000); 591 break; 592 case 5: /* DPP5 */ 593 /* 594 * Do not power gate DPP5, should be left at HW default, power on permanently. 595 * PG on Pipe5 is De-featured, attempting to put it to PG state may result in hard 596 * reset. 
597 * REG_UPDATE(DOMAIN11_PG_CONFIG, 598 * DOMAIN11_POWER_GATE, power_gate); 599 * 600 * REG_WAIT(DOMAIN11_PG_STATUS, 601 * DOMAIN11_PGFSM_PWR_STATUS, pwr_status, 602 * 1, 1000); 603 */ 604 605 /* Force disable cursor on plane powerdown on DPP 5 using dpp_force_disable_cursor */ 606 if (!power_on) { 607 struct dpp *dpp5 = hws->ctx->dc->res_pool->dpps[dpp_inst]; 608 if (dpp5 && dpp5->funcs->dpp_force_disable_cursor) 609 dpp5->funcs->dpp_force_disable_cursor(dpp5); 610 } 611 612 break; 613 default: 614 BREAK_TO_DEBUGGER(); 615 break; 616 } 617 } 618 619 620 void dcn20_hubp_pg_control( 621 struct dce_hwseq *hws, 622 unsigned int hubp_inst, 623 bool power_on) 624 { 625 uint32_t power_gate = power_on ? 0 : 1; 626 uint32_t pwr_status = power_on ? 0 : 2; 627 628 if (hws->ctx->dc->debug.disable_hubp_power_gate) 629 return; 630 if (REG(DOMAIN0_PG_CONFIG) == 0) 631 return; 632 633 switch (hubp_inst) { 634 case 0: /* DCHUBP0 */ 635 REG_UPDATE(DOMAIN0_PG_CONFIG, 636 DOMAIN0_POWER_GATE, power_gate); 637 638 REG_WAIT(DOMAIN0_PG_STATUS, 639 DOMAIN0_PGFSM_PWR_STATUS, pwr_status, 640 1, 1000); 641 break; 642 case 1: /* DCHUBP1 */ 643 REG_UPDATE(DOMAIN2_PG_CONFIG, 644 DOMAIN2_POWER_GATE, power_gate); 645 646 REG_WAIT(DOMAIN2_PG_STATUS, 647 DOMAIN2_PGFSM_PWR_STATUS, pwr_status, 648 1, 1000); 649 break; 650 case 2: /* DCHUBP2 */ 651 REG_UPDATE(DOMAIN4_PG_CONFIG, 652 DOMAIN4_POWER_GATE, power_gate); 653 654 REG_WAIT(DOMAIN4_PG_STATUS, 655 DOMAIN4_PGFSM_PWR_STATUS, pwr_status, 656 1, 1000); 657 break; 658 case 3: /* DCHUBP3 */ 659 REG_UPDATE(DOMAIN6_PG_CONFIG, 660 DOMAIN6_POWER_GATE, power_gate); 661 662 REG_WAIT(DOMAIN6_PG_STATUS, 663 DOMAIN6_PGFSM_PWR_STATUS, pwr_status, 664 1, 1000); 665 break; 666 case 4: /* DCHUBP4 */ 667 REG_UPDATE(DOMAIN8_PG_CONFIG, 668 DOMAIN8_POWER_GATE, power_gate); 669 670 REG_WAIT(DOMAIN8_PG_STATUS, 671 DOMAIN8_PGFSM_PWR_STATUS, pwr_status, 672 1, 1000); 673 break; 674 case 5: /* DCHUBP5 */ 675 /* 676 * Do not power gate DCHUB5, should be left at HW default, 
power on permanently. 677 * PG on Pipe5 is De-featured, attempting to put it to PG state may result in hard 678 * reset. 679 * REG_UPDATE(DOMAIN10_PG_CONFIG, 680 * DOMAIN10_POWER_GATE, power_gate); 681 * 682 * REG_WAIT(DOMAIN10_PG_STATUS, 683 * DOMAIN10_PGFSM_PWR_STATUS, pwr_status, 684 * 1, 1000); 685 */ 686 break; 687 default: 688 BREAK_TO_DEBUGGER(); 689 break; 690 } 691 } 692 693 694 /* disable HW used by plane. 695 * note: cannot disable until disconnect is complete 696 */ 697 void dcn20_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx) 698 { 699 struct dce_hwseq *hws = dc->hwseq; 700 struct hubp *hubp = pipe_ctx->plane_res.hubp; 701 struct dpp *dpp = pipe_ctx->plane_res.dpp; 702 703 dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe_ctx); 704 705 /* In flip immediate with pipe splitting case GSL is used for 706 * synchronization so we must disable it when the plane is disabled. 707 */ 708 if (pipe_ctx->stream_res.gsl_group != 0) 709 dcn20_setup_gsl_group_as_lock(dc, pipe_ctx, false); 710 711 if (hubp->funcs->hubp_update_mall_sel) 712 hubp->funcs->hubp_update_mall_sel(hubp, 0, false); 713 714 dc->hwss.set_flip_control_gsl(pipe_ctx, false); 715 716 hubp->funcs->hubp_clk_cntl(hubp, false); 717 718 dpp->funcs->dpp_dppclk_control(dpp, false, false); 719 720 hubp->power_gated = true; 721 722 hws->funcs.plane_atomic_power_down(dc, 723 pipe_ctx->plane_res.dpp, 724 pipe_ctx->plane_res.hubp); 725 726 pipe_ctx->stream = NULL; 727 memset(&pipe_ctx->stream_res, 0, sizeof(pipe_ctx->stream_res)); 728 memset(&pipe_ctx->plane_res, 0, sizeof(pipe_ctx->plane_res)); 729 pipe_ctx->top_pipe = NULL; 730 pipe_ctx->bottom_pipe = NULL; 731 pipe_ctx->prev_odm_pipe = NULL; 732 pipe_ctx->next_odm_pipe = NULL; 733 pipe_ctx->plane_state = NULL; 734 } 735 736 737 void dcn20_disable_plane(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx) 738 { 739 bool is_phantom = dc_state_get_pipe_subvp_type(state, pipe_ctx) == SUBVP_PHANTOM; 740 struct timing_generator 
*tg = is_phantom ? pipe_ctx->stream_res.tg : NULL; 741 742 DC_LOGGER_INIT(dc->ctx->logger); 743 744 if (!pipe_ctx->plane_res.hubp || pipe_ctx->plane_res.hubp->power_gated) 745 return; 746 747 dcn20_plane_atomic_disable(dc, pipe_ctx); 748 749 /* Turn back off the phantom OTG after the phantom plane is fully disabled 750 */ 751 if (is_phantom) 752 if (tg && tg->funcs->disable_phantom_crtc) 753 tg->funcs->disable_phantom_crtc(tg); 754 755 DC_LOG_DC("Power down front end %d\n", 756 pipe_ctx->pipe_idx); 757 } 758 759 void dcn20_disable_pixel_data(struct dc *dc, struct pipe_ctx *pipe_ctx, bool blank) 760 { 761 dcn20_blank_pixel_data(dc, pipe_ctx, blank); 762 } 763 764 static int calc_mpc_flow_ctrl_cnt(const struct dc_stream_state *stream, 765 int opp_cnt, bool is_two_pixels_per_container) 766 { 767 bool hblank_halved = is_two_pixels_per_container; 768 int flow_ctrl_cnt; 769 770 if (opp_cnt >= 2) 771 hblank_halved = true; 772 773 flow_ctrl_cnt = stream->timing.h_total - stream->timing.h_addressable - 774 stream->timing.h_border_left - 775 stream->timing.h_border_right; 776 777 if (hblank_halved) 778 flow_ctrl_cnt /= 2; 779 780 /* ODM combine 4:1 case */ 781 if (opp_cnt == 4) 782 flow_ctrl_cnt /= 2; 783 784 return flow_ctrl_cnt; 785 } 786 787 static enum phyd32clk_clock_source get_phyd32clk_src(struct dc_link *link) 788 { 789 switch (link->link_enc->transmitter) { 790 case TRANSMITTER_UNIPHY_A: 791 return PHYD32CLKA; 792 case TRANSMITTER_UNIPHY_B: 793 return PHYD32CLKB; 794 case TRANSMITTER_UNIPHY_C: 795 return PHYD32CLKC; 796 case TRANSMITTER_UNIPHY_D: 797 return PHYD32CLKD; 798 case TRANSMITTER_UNIPHY_E: 799 return PHYD32CLKE; 800 default: 801 return PHYD32CLKA; 802 } 803 } 804 805 static int get_odm_segment_count(struct pipe_ctx *pipe_ctx) 806 { 807 struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe; 808 int count = 1; 809 810 while (odm_pipe != NULL) { 811 count++; 812 odm_pipe = odm_pipe->next_odm_pipe; 813 } 814 815 return count; 816 } 817 818 enum dc_status 
dcn20_enable_stream_timing( 819 struct pipe_ctx *pipe_ctx, 820 struct dc_state *context, 821 struct dc *dc) 822 { 823 struct dce_hwseq *hws = dc->hwseq; 824 struct dc_stream_state *stream = pipe_ctx->stream; 825 struct drr_params params = {0}; 826 unsigned int event_triggers = 0; 827 int opp_cnt = 1; 828 int opp_inst[MAX_PIPES] = {0}; 829 bool interlace = stream->timing.flags.INTERLACE; 830 int i; 831 struct mpc_dwb_flow_control flow_control; 832 struct mpc *mpc = dc->res_pool->mpc; 833 bool is_two_pixels_per_container = 834 pipe_ctx->stream_res.tg->funcs->is_two_pixels_per_container(&stream->timing); 835 bool rate_control_2x_pclk = (interlace || is_two_pixels_per_container); 836 int odm_slice_width; 837 int last_odm_slice_width; 838 struct pipe_ctx *opp_heads[MAX_PIPES]; 839 840 if (dc->res_pool->dccg->funcs->set_pixel_rate_div) 841 dc->res_pool->dccg->funcs->set_pixel_rate_div( 842 dc->res_pool->dccg, 843 pipe_ctx->stream_res.tg->inst, 844 pipe_ctx->pixel_rate_divider.div_factor1, 845 pipe_ctx->pixel_rate_divider.div_factor2); 846 847 /* by upper caller loop, pipe0 is parent pipe and be called first. 848 * back end is set up by for pipe0. Other children pipe share back end 849 * with pipe 0. No program is needed. 850 */ 851 if (pipe_ctx->top_pipe != NULL) 852 return DC_OK; 853 854 /* TODO check if timing_changed, disable stream if timing changed */ 855 856 opp_cnt = resource_get_opp_heads_for_otg_master(pipe_ctx, &context->res_ctx, opp_heads); 857 for (i = 0; i < opp_cnt; i++) 858 opp_inst[i] = opp_heads[i]->stream_res.opp->inst; 859 860 odm_slice_width = resource_get_odm_slice_dst_width(pipe_ctx, false); 861 last_odm_slice_width = resource_get_odm_slice_dst_width(pipe_ctx, true); 862 if (opp_cnt > 1) 863 pipe_ctx->stream_res.tg->funcs->set_odm_combine( 864 pipe_ctx->stream_res.tg, 865 opp_inst, opp_cnt, odm_slice_width, 866 last_odm_slice_width); 867 868 /* HW program guide assume display already disable 869 * by unplug sequence. OTG assume stop. 
870 */ 871 pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, true); 872 873 if (false == pipe_ctx->clock_source->funcs->program_pix_clk( 874 pipe_ctx->clock_source, 875 &pipe_ctx->stream_res.pix_clk_params, 876 dc->link_srv->dp_get_encoding_format(&pipe_ctx->link_config.dp_link_settings), 877 &pipe_ctx->pll_settings)) { 878 BREAK_TO_DEBUGGER(); 879 return DC_ERROR_UNEXPECTED; 880 } 881 882 if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) { 883 struct dccg *dccg = dc->res_pool->dccg; 884 struct timing_generator *tg = pipe_ctx->stream_res.tg; 885 struct dtbclk_dto_params dto_params = {0}; 886 887 if (dccg->funcs->set_dtbclk_p_src) 888 dccg->funcs->set_dtbclk_p_src(dccg, DTBCLK0, tg->inst); 889 890 dto_params.otg_inst = tg->inst; 891 dto_params.pixclk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10; 892 dto_params.num_odm_segments = get_odm_segment_count(pipe_ctx); 893 dto_params.timing = &pipe_ctx->stream->timing; 894 dto_params.ref_dtbclk_khz = dc->clk_mgr->funcs->get_dtb_ref_clk_frequency(dc->clk_mgr); 895 dccg->funcs->set_dtbclk_dto(dccg, &dto_params); 896 } 897 898 if (dc_is_tmds_signal(stream->signal)) { 899 stream->link->phy_state.symclk_ref_cnts.otg = 1; 900 if (stream->link->phy_state.symclk_state == SYMCLK_OFF_TX_OFF) 901 stream->link->phy_state.symclk_state = SYMCLK_ON_TX_OFF; 902 else 903 stream->link->phy_state.symclk_state = SYMCLK_ON_TX_ON; 904 } 905 906 if (dc->hwseq->funcs.PLAT_58856_wa && (!dc_is_dp_signal(stream->signal))) 907 dc->hwseq->funcs.PLAT_58856_wa(context, pipe_ctx); 908 909 pipe_ctx->stream_res.tg->funcs->program_timing( 910 pipe_ctx->stream_res.tg, 911 &stream->timing, 912 pipe_ctx->pipe_dlg_param.vready_offset, 913 pipe_ctx->pipe_dlg_param.vstartup_start, 914 pipe_ctx->pipe_dlg_param.vupdate_offset, 915 pipe_ctx->pipe_dlg_param.vupdate_width, 916 pipe_ctx->pipe_dlg_param.pstate_keepout, 917 pipe_ctx->stream->signal, 918 true); 919 920 rate_control_2x_pclk = rate_control_2x_pclk || opp_cnt > 1; 921 
flow_control.flow_ctrl_mode = 0; 922 flow_control.flow_ctrl_cnt0 = 0x80; 923 flow_control.flow_ctrl_cnt1 = calc_mpc_flow_ctrl_cnt(stream, opp_cnt, 924 is_two_pixels_per_container); 925 if (mpc->funcs->set_out_rate_control) { 926 for (i = 0; i < opp_cnt; ++i) { 927 mpc->funcs->set_out_rate_control( 928 mpc, opp_inst[i], 929 true, 930 rate_control_2x_pclk, 931 &flow_control); 932 } 933 } 934 935 for (i = 0; i < opp_cnt; i++) { 936 opp_heads[i]->stream_res.opp->funcs->opp_pipe_clock_control( 937 opp_heads[i]->stream_res.opp, 938 true); 939 opp_heads[i]->stream_res.opp->funcs->opp_program_left_edge_extra_pixel( 940 opp_heads[i]->stream_res.opp, 941 stream->timing.pixel_encoding, 942 resource_is_pipe_type(opp_heads[i], OTG_MASTER)); 943 } 944 945 hws->funcs.blank_pixel_data(dc, pipe_ctx, true); 946 947 /* VTG is within DCHUB command block. DCFCLK is always on */ 948 if (false == pipe_ctx->stream_res.tg->funcs->enable_crtc(pipe_ctx->stream_res.tg)) { 949 BREAK_TO_DEBUGGER(); 950 return DC_ERROR_UNEXPECTED; 951 } 952 953 udelay(stream->timing.v_total * (stream->timing.h_total * 10000u / stream->timing.pix_clk_100hz)); 954 955 params.vertical_total_min = stream->adjust.v_total_min; 956 params.vertical_total_max = stream->adjust.v_total_max; 957 params.vertical_total_mid = stream->adjust.v_total_mid; 958 params.vertical_total_mid_frame_num = stream->adjust.v_total_mid_frame_num; 959 set_drr_and_clear_adjust_pending(pipe_ctx, stream, ¶ms); 960 961 // DRR should set trigger event to monitor surface update event 962 if (stream->adjust.v_total_min != 0 && stream->adjust.v_total_max != 0) 963 event_triggers = 0x80; 964 /* Event triggers and num frames initialized for DRR, but can be 965 * later updated for PSR use. Note DRR trigger events are generated 966 * regardless of whether num frames met. 
967 */ 968 if (pipe_ctx->stream_res.tg->funcs->set_static_screen_control) 969 pipe_ctx->stream_res.tg->funcs->set_static_screen_control( 970 pipe_ctx->stream_res.tg, event_triggers, 2); 971 972 /* TODO program crtc source select for non-virtual signal*/ 973 /* TODO program FMT */ 974 /* TODO setup link_enc */ 975 /* TODO set stream attributes */ 976 /* TODO program audio */ 977 /* TODO enable stream if timing changed */ 978 /* TODO unblank stream if DP */ 979 980 if (dc_state_get_pipe_subvp_type(context, pipe_ctx) == SUBVP_PHANTOM) { 981 if (pipe_ctx->stream_res.tg->funcs->phantom_crtc_post_enable) 982 pipe_ctx->stream_res.tg->funcs->phantom_crtc_post_enable(pipe_ctx->stream_res.tg); 983 } 984 985 return DC_OK; 986 } 987 988 void dcn20_program_output_csc(struct dc *dc, 989 struct pipe_ctx *pipe_ctx, 990 enum dc_color_space colorspace, 991 uint16_t *matrix, 992 int opp_id) 993 { 994 struct mpc *mpc = dc->res_pool->mpc; 995 enum mpc_output_csc_mode ocsc_mode = MPC_OUTPUT_CSC_COEF_A; 996 int mpcc_id = pipe_ctx->plane_res.hubp->inst; 997 998 if (mpc->funcs->power_on_mpc_mem_pwr) 999 mpc->funcs->power_on_mpc_mem_pwr(mpc, mpcc_id, true); 1000 1001 if (pipe_ctx->stream->csc_color_matrix.enable_adjustment == true) { 1002 if (mpc->funcs->set_output_csc != NULL) 1003 mpc->funcs->set_output_csc(mpc, 1004 opp_id, 1005 matrix, 1006 ocsc_mode); 1007 } else { 1008 if (mpc->funcs->set_ocsc_default != NULL) 1009 mpc->funcs->set_ocsc_default(mpc, 1010 opp_id, 1011 colorspace, 1012 ocsc_mode); 1013 } 1014 } 1015 1016 bool dcn20_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx, 1017 const struct dc_stream_state *stream) 1018 { 1019 int mpcc_id = pipe_ctx->plane_res.hubp->inst; 1020 struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc; 1021 const struct pwl_params *params = NULL; 1022 /* 1023 * program OGAM only for the top pipe 1024 * if there is a pipe split then fix diagnostic is required: 1025 * how to pass OGAM parameter for stream. 
1026 * if programming for all pipes is required then remove condition 1027 * pipe_ctx->top_pipe == NULL ,but then fix the diagnostic. 1028 */ 1029 if (mpc->funcs->power_on_mpc_mem_pwr) 1030 mpc->funcs->power_on_mpc_mem_pwr(mpc, mpcc_id, true); 1031 if (pipe_ctx->top_pipe == NULL 1032 && mpc->funcs->set_output_gamma) { 1033 if (stream->out_transfer_func.type == TF_TYPE_HWPWL) 1034 params = &stream->out_transfer_func.pwl; 1035 else if (pipe_ctx->stream->out_transfer_func.type == 1036 TF_TYPE_DISTRIBUTED_POINTS && 1037 cm_helper_translate_curve_to_hw_format(dc->ctx, 1038 &stream->out_transfer_func, 1039 &mpc->blender_params, false)) 1040 params = &mpc->blender_params; 1041 /* 1042 * there is no ROM 1043 */ 1044 if (stream->out_transfer_func.type == TF_TYPE_PREDEFINED) 1045 BREAK_TO_DEBUGGER(); 1046 } 1047 /* 1048 * if above if is not executed then 'params' equal to 0 and set in bypass 1049 */ 1050 if (mpc->funcs->set_output_gamma) 1051 mpc->funcs->set_output_gamma(mpc, mpcc_id, params); 1052 1053 return true; 1054 } 1055 1056 bool dcn20_set_blend_lut( 1057 struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state) 1058 { 1059 struct dpp *dpp_base = pipe_ctx->plane_res.dpp; 1060 bool result = true; 1061 const struct pwl_params *blend_lut = NULL; 1062 1063 if (plane_state->blend_tf.type == TF_TYPE_HWPWL) 1064 blend_lut = &plane_state->blend_tf.pwl; 1065 else if (plane_state->blend_tf.type == TF_TYPE_DISTRIBUTED_POINTS) { 1066 cm_helper_translate_curve_to_hw_format(plane_state->ctx, 1067 &plane_state->blend_tf, 1068 &dpp_base->regamma_params, false); 1069 blend_lut = &dpp_base->regamma_params; 1070 } 1071 result = dpp_base->funcs->dpp_program_blnd_lut(dpp_base, blend_lut); 1072 1073 return result; 1074 } 1075 1076 bool dcn20_set_shaper_3dlut( 1077 struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state) 1078 { 1079 struct dpp *dpp_base = pipe_ctx->plane_res.dpp; 1080 bool result = true; 1081 const struct pwl_params *shaper_lut = NULL; 1082 1083 if 
 (plane_state->in_shaper_func.type == TF_TYPE_HWPWL)
		shaper_lut = &plane_state->in_shaper_func.pwl;
	else if (plane_state->in_shaper_func.type == TF_TYPE_DISTRIBUTED_POINTS) {
		cm_helper_translate_curve_to_hw_format(plane_state->ctx,
				&plane_state->in_shaper_func,
				&dpp_base->shaper_params, true);
		shaper_lut = &dpp_base->shaper_params;
	}

	result = dpp_base->funcs->dpp_program_shaper_lut(dpp_base, shaper_lut);
	if (plane_state->lut3d_func.state.bits.initialized == 1)
		result = dpp_base->funcs->dpp_program_3dlut(dpp_base,
				&plane_state->lut3d_func.lut_3d);
	else
		result = dpp_base->funcs->dpp_program_3dlut(dpp_base, NULL);

	return result;
}

/*
 * dcn20_set_input_transfer_func - program the DPP input (degamma) path.
 *
 * First programs the shaper/3D LUT and blend LUT through the hwseq function
 * table, then programs the degamma block from the plane's in_transfer_func:
 * HWPWL and distributed-points curves go through the degamma PWL RAM; the
 * predefined/bypass cases are handled further below via degamma ROM modes.
 *
 * Return: false when dpp or plane state is missing, otherwise the result of
 * the degamma programming path taken.
 */
bool dcn20_set_input_transfer_func(struct dc *dc,
				struct pipe_ctx *pipe_ctx,
				const struct dc_plane_state *plane_state)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
	const struct dc_transfer_func *tf = NULL;
	bool result = true;
	bool use_degamma_ram = false;

	if (dpp_base == NULL || plane_state == NULL)
		return false;

	hws->funcs.set_shaper_3dlut(pipe_ctx, plane_state);
	hws->funcs.set_blend_lut(pipe_ctx, plane_state);

	tf = &plane_state->in_transfer_func;

	/* PWL-based curves must go through the degamma RAM, not the ROM */
	if (tf->type == TF_TYPE_HWPWL || tf->type == TF_TYPE_DISTRIBUTED_POINTS)
		use_degamma_ram = true;

	if (use_degamma_ram == true) {
		if (tf->type == TF_TYPE_HWPWL)
			dpp_base->funcs->dpp_program_degamma_pwl(dpp_base,
					&tf->pwl);
		else if (tf->type == TF_TYPE_DISTRIBUTED_POINTS) {
			cm_helper_translate_curve_to_degamma_hw_format(tf,
					&dpp_base->degamma_params);
			dpp_base->funcs->dpp_program_degamma_pwl(dpp_base,
					&dpp_base->degamma_params);
		}
		return true;
	}
	/* handle here the optimized cases when de-gamma ROM could be used.
	 *
	 */
	if (tf->type == TF_TYPE_PREDEFINED) {
		switch (tf->tf) {
		case TRANSFER_FUNCTION_SRGB:
			dpp_base->funcs->dpp_set_degamma(dpp_base,
					IPP_DEGAMMA_MODE_HW_sRGB);
			break;
		case TRANSFER_FUNCTION_BT709:
			dpp_base->funcs->dpp_set_degamma(dpp_base,
					IPP_DEGAMMA_MODE_HW_xvYCC);
			break;
		case TRANSFER_FUNCTION_LINEAR:
			dpp_base->funcs->dpp_set_degamma(dpp_base,
					IPP_DEGAMMA_MODE_BYPASS);
			break;
		case TRANSFER_FUNCTION_PQ:
			/* PQ has no degamma ROM entry: fall back to a user PWL */
			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_USER_PWL);
			cm_helper_translate_curve_to_degamma_hw_format(tf, &dpp_base->degamma_params);
			dpp_base->funcs->dpp_program_degamma_pwl(dpp_base, &dpp_base->degamma_params);
			result = true;
			break;
		default:
			result = false;
			break;
		}
	} else if (tf->type == TF_TYPE_BYPASS)
		dpp_base->funcs->dpp_set_degamma(dpp_base,
				IPP_DEGAMMA_MODE_BYPASS);
	else {
		/*
		 * if we are here, we did not handle correctly.
		 * fix is required for this use case
		 */
		BREAK_TO_DEBUGGER();
		dpp_base->funcs->dpp_set_degamma(dpp_base,
				IPP_DEGAMMA_MODE_BYPASS);
	}

	return result;
}

/*
 * dcn20_update_odm - program ODM combine for an OTG master pipe.
 *
 * Collects the OPP instances of every pipe in the ODM chain starting at
 * @pipe_ctx; with more than one OPP it programs ODM combine with the slice
 * widths, otherwise it sets the OTG to ODM bypass for the stream timing.
 * @dc and @context are unused here (kept for the hwseq interface).
 */
void dcn20_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx)
{
	(void)context;
	(void)dc;
	struct pipe_ctx *odm_pipe;
	int opp_cnt = 1;
	int opp_inst[MAX_PIPES] = { pipe_ctx->stream_res.opp->inst };
	int odm_slice_width = resource_get_odm_slice_dst_width(pipe_ctx, false);
	int last_odm_slice_width = resource_get_odm_slice_dst_width(pipe_ctx, true);

	for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
		opp_inst[opp_cnt] = odm_pipe->stream_res.opp->inst;
		opp_cnt++;
	}

	if (opp_cnt > 1)
		pipe_ctx->stream_res.tg->funcs->set_odm_combine(
				pipe_ctx->stream_res.tg,
				opp_inst, opp_cnt,
				odm_slice_width, last_odm_slice_width);
	else
		pipe_ctx->stream_res.tg->funcs->set_odm_bypass(
				pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
}

/*
 * dcn20_blank_pixel_data - blank or unblank a stream via the DPG.
 *
 * When blanking, disables ABM immediately and programs a solid black color
 * (or color squares when visual confirm is enabled) on the display pattern
 * generator of every ODM slice; when unblanking, switches the DPG back to
 * video mode and restores the stream's ABM level.  No-op while the link has
 * a test pattern enabled.
 */
void dcn20_blank_pixel_data(
		struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		bool blank)
{
	struct tg_color black_color = {0};
	struct stream_resource *stream_res = &pipe_ctx->stream_res;
	struct dc_stream_state *stream = pipe_ctx->stream;
	enum dc_color_space color_space = stream->output_color_space;
	enum controller_dp_test_pattern test_pattern = CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR;
	enum controller_dp_color_space test_pattern_color_space = CONTROLLER_DP_COLOR_SPACE_UDEFINED;
	struct pipe_ctx *odm_pipe;
	struct rect odm_slice_src;

	if (stream->link->test_pattern_enabled)
		return;

	/* get opp dpg blank color */
	color_space_to_black_color(dc, color_space, &black_color);

	if (blank) {
		dc->hwss.set_abm_immediate_disable(pipe_ctx);

		if (dc->debug.visual_confirm !=
 VISUAL_CONFIRM_DISABLE) {
			test_pattern = CONTROLLER_DP_TEST_PATTERN_COLORSQUARES;
			test_pattern_color_space = CONTROLLER_DP_COLOR_SPACE_RGB;
		}
	} else {
		test_pattern = CONTROLLER_DP_TEST_PATTERN_VIDEOMODE;
	}

	odm_pipe = pipe_ctx;

	/* program the pattern on every ODM slice, each with its own source rect */
	while (odm_pipe->next_odm_pipe) {
		odm_slice_src = resource_get_odm_slice_src_rect(odm_pipe);
		dc->hwss.set_disp_pattern_generator(dc,
				odm_pipe,
				test_pattern,
				test_pattern_color_space,
				stream->timing.display_color_depth,
				&black_color,
				odm_slice_src.width,
				odm_slice_src.height,
				odm_slice_src.x);
		odm_pipe = odm_pipe->next_odm_pipe;
	}

	/* last (or only) ODM slice */
	odm_slice_src = resource_get_odm_slice_src_rect(odm_pipe);
	dc->hwss.set_disp_pattern_generator(dc,
			odm_pipe,
			test_pattern,
			test_pattern_color_space,
			stream->timing.display_color_depth,
			&black_color,
			odm_slice_src.width,
			odm_slice_src.height,
			odm_slice_src.x);

	if (!blank)
		if (stream_res->abm) {
			dc->hwss.set_pipe(pipe_ctx);
			stream_res->abm->funcs->set_abm_level(stream_res->abm, stream->abm_level);
		}
}


/*
 * dcn20_power_on_plane_resources - un-gate power for a pipe's DPP and HUBP.
 *
 * Enables the DPP root clock if the hwseq provides a hook, then (when the
 * DC_IP_REQUEST_CNTL register exists) temporarily asserts IP_REQUEST_EN
 * around the DPP/HUBP power-gating calls, restoring the original state
 * afterwards.
 */
static void dcn20_power_on_plane_resources(
	struct dce_hwseq *hws,
	struct pipe_ctx *pipe_ctx)
{
	uint32_t org_ip_request_cntl = 0;

	DC_LOGGER_INIT(hws->ctx->logger);

	if (hws->funcs.dpp_root_clock_control)
		hws->funcs.dpp_root_clock_control(hws, pipe_ctx->plane_res.dpp->inst, true);

	if (REG(DC_IP_REQUEST_CNTL)) {
		REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
		if (org_ip_request_cntl == 0)
			REG_SET(DC_IP_REQUEST_CNTL, 0,
					IP_REQUEST_EN, 1);

		if (hws->funcs.dpp_pg_control)
			hws->funcs.dpp_pg_control(hws, pipe_ctx->plane_res.dpp->inst, true);

		if (hws->funcs.hubp_pg_control)
			hws->funcs.hubp_pg_control(hws, pipe_ctx->plane_res.hubp->inst, true);

		/* restore the original IP_REQUEST_EN state */
		if (org_ip_request_cntl == 0)
			REG_SET(DC_IP_REQUEST_CNTL, 0,
					IP_REQUEST_EN, 0);

		DC_LOG_DEBUG(
				"Un-gated front end for pipe %d\n", pipe_ctx->plane_res.hubp->inst);
	}
}

/*
 * dcn20_enable_plane - power on and initialize a pipe's front-end (HUBP/DPP).
 *
 * Un-gates the plane resources, enables the HUBP DCFCLK, re-initializes the
 * HUBP, enables the OPP pipe clock, programs the VM system aperture when a
 * physical address config is valid, and re-arms the flip interrupt on the
 * top pipe.  @context is unused here (kept for the hwseq interface).
 */
void dcn20_enable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx,
			struct dc_state *context)
{
	(void)context;
	//if (dc->debug.sanity_checks) {
	//	dcn10_verify_allow_pstate_change_high(dc);
	//}
	dcn20_power_on_plane_resources(dc->hwseq, pipe_ctx);

	/* enable DCFCLK current DCHUB */
	pipe_ctx->plane_res.hubp->funcs->hubp_clk_cntl(pipe_ctx->plane_res.hubp, true);

	/* initialize HUBP on power up */
	pipe_ctx->plane_res.hubp->funcs->hubp_init(pipe_ctx->plane_res.hubp);

	/* make sure OPP_PIPE_CLOCK_EN = 1 */
	pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
			pipe_ctx->stream_res.opp,
			true);

	/* TODO: enable/disable in dm as per update type.
	if (plane_state) {
		DC_LOG_DC(dc->ctx->logger,
				"Pipe:%d 0x%x: addr hi:0x%x, "
				"addr low:0x%x, "
				"src: %d, %d, %d,"
				" %d; dst: %d, %d, %d, %d;\n",
				pipe_ctx->pipe_idx,
				plane_state,
				plane_state->address.grph.addr.high_part,
				plane_state->address.grph.addr.low_part,
				plane_state->src_rect.x,
				plane_state->src_rect.y,
				plane_state->src_rect.width,
				plane_state->src_rect.height,
				plane_state->dst_rect.x,
				plane_state->dst_rect.y,
				plane_state->dst_rect.width,
				plane_state->dst_rect.height);

		DC_LOG_DC(dc->ctx->logger,
				"Pipe %d: width, height, x, y format:%d\n"
				"viewport:%d, %d, %d, %d\n"
				"recout: %d, %d, %d, %d\n",
				pipe_ctx->pipe_idx,
				plane_state->format,
				pipe_ctx->plane_res.scl_data.viewport.width,
				pipe_ctx->plane_res.scl_data.viewport.height,
				pipe_ctx->plane_res.scl_data.viewport.x,
				pipe_ctx->plane_res.scl_data.viewport.y,
				pipe_ctx->plane_res.scl_data.recout.width,
				pipe_ctx->plane_res.scl_data.recout.height,
			pipe_ctx->plane_res.scl_data.recout.x,
			pipe_ctx->plane_res.scl_data.recout.y);
		print_rq_dlg_ttu(dc, pipe_ctx);
	}
	*/
	if (dc->vm_pa_config.valid) {
		struct vm_system_aperture_param apt;

		apt.sys_default.quad_part = 0;

		apt.sys_low.quad_part = dc->vm_pa_config.system_aperture.start_addr;
		apt.sys_high.quad_part = dc->vm_pa_config.system_aperture.end_addr;

		// Program system aperture settings
		pipe_ctx->plane_res.hubp->funcs->hubp_set_vm_system_aperture_settings(pipe_ctx->plane_res.hubp, &apt);
	}

	if (!pipe_ctx->top_pipe
		&& pipe_ctx->plane_state
		&& pipe_ctx->plane_state->flip_int_enabled
		&& pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int)
		pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int(pipe_ctx->plane_res.hubp);

	//	if (dc->debug.sanity_checks) {
	//		dcn10_verify_allow_pstate_change_high(dc);
	//	}
}

/*
 * dcn20_pipe_control_lock - lock/unlock pipe programming on the TG.
 *
 * Uses the TG master update lock (or the DMUB inbox1 lock manager, or the
 * triple-buffer lock) so all pipes on the same TG are locked together; only
 * the top pipe needs to take the lock.  On lock with immediate flips pending
 * it first waits for every immediate flip in the pipe tree to complete, and
 * it sets up or tears down GSL group locking when flip-immediate state and
 * pipe splitting require it.
 */
void dcn20_pipe_control_lock(
	struct dc *dc,
	struct pipe_ctx *pipe,
	bool lock)
{
	struct pipe_ctx *temp_pipe;
	bool flip_immediate = false;

	/* use TG master update lock to lock everything on the TG
	 * therefore only top pipe need to lock
	 */
	if (!pipe || pipe->top_pipe)
		return;

	if (pipe->plane_state != NULL)
		flip_immediate = pipe->plane_state->flip_immediate;

	/* within a GSL group, any bottom pipe with an immediate flip counts */
	if (pipe->stream_res.gsl_group > 0) {
		temp_pipe = pipe->bottom_pipe;
		while (!flip_immediate && temp_pipe) {
			if (temp_pipe->plane_state != NULL)
				flip_immediate = temp_pipe->plane_state->flip_immediate;
			temp_pipe = temp_pipe->bottom_pipe;
		}
	}

	if (flip_immediate && lock) {
		const int TIMEOUT_FOR_FLIP_PENDING_US = 100000;
		unsigned int polling_interval_us = 1;
		int i;

		/* drain pending immediate flips before taking the lock */
		temp_pipe = pipe;
		while (temp_pipe) {
			if (temp_pipe->plane_state && temp_pipe->plane_state->flip_immediate) {
				for (i = 0; i < TIMEOUT_FOR_FLIP_PENDING_US /
						polling_interval_us; ++i) {
					if (!temp_pipe->plane_res.hubp->funcs->hubp_is_flip_pending(temp_pipe->plane_res.hubp))
						break;
					udelay(polling_interval_us);
				}

				/* no reason it should take this long for immediate flips */
				ASSERT(i != TIMEOUT_FOR_FLIP_PENDING_US);
			}
			temp_pipe = temp_pipe->bottom_pipe;
		}
	}

	/* In flip immediate and pipe splitting case, we need to use GSL
	 * for synchronization. Only do setup on locking and on flip type change.
	 */
	if (lock && (pipe->bottom_pipe != NULL || !flip_immediate))
		if ((flip_immediate && pipe->stream_res.gsl_group == 0) ||
		    (!flip_immediate && pipe->stream_res.gsl_group > 0))
			dcn20_setup_gsl_group_as_lock(dc, pipe, flip_immediate);

	/* re-evaluate flip_immediate across the whole pipe tree for teardown */
	if (pipe->plane_state != NULL)
		flip_immediate = pipe->plane_state->flip_immediate;

	temp_pipe = pipe->bottom_pipe;
	while (flip_immediate && temp_pipe) {
		if (temp_pipe->plane_state != NULL)
			flip_immediate = temp_pipe->plane_state->flip_immediate;
		temp_pipe = temp_pipe->bottom_pipe;
	}

	if (!lock && pipe->stream_res.gsl_group > 0 && pipe->plane_state &&
	    !flip_immediate)
		dcn20_setup_gsl_group_as_lock(dc, pipe, false);

	/* prefer the DMUB lock manager when the link requires it */
	if (pipe->stream && should_use_dmub_inbox1_lock(dc, pipe->stream->link)) {
		union dmub_hw_lock_flags hw_locks = { 0 };
		struct dmub_hw_lock_inst_flags inst_flags = { 0 };

		hw_locks.bits.lock_pipe = 1;
		inst_flags.otg_inst = pipe->stream_res.tg->inst;

		if (pipe->plane_state != NULL)
			hw_locks.bits.triple_buffer_lock = pipe->plane_state->triplebuffer_flips;

		dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
					lock,
					&hw_locks,
					&inst_flags);
	} else if (pipe->plane_state != NULL && pipe->plane_state->triplebuffer_flips) {
		if (lock)
			pipe->stream_res.tg->funcs->triplebuffer_lock(pipe->stream_res.tg);
		else
			pipe->stream_res.tg->funcs->triplebuffer_unlock(pipe->stream_res.tg);
	} else {
		if (lock)
			pipe->stream_res.tg->funcs->lock(pipe->stream_res.tg);
		else {
			/* optional 3D LUT workaround takes precedence on unlock */
			if (dc->hwseq->funcs.perform_3dlut_wa_unlock)
				dc->hwseq->funcs.perform_3dlut_wa_unlock(pipe);
			else
				pipe->stream_res.tg->funcs->unlock(pipe->stream_res.tg);
		}
	}
}

/*
 * dcn20_detect_pipe_changes - compute update_flags for a pipe transition.
 *
 * Compares @old_pipe (current state) against @new_pipe (pending state) and
 * fills new_pipe->update_flags with exactly the programming steps required:
 * enable/disable, ODM, global sync, opp/tg/mpcc changes, dppclk, scaler,
 * viewport, DET size, dlg/ttu/rq updates and test-pattern changes.  SubVP
 * phantom pipes are special-cased: a non-phantom -> phantom transition is a
 * full disable, and phantom pipes are always re-enabled.
 */
void dcn20_detect_pipe_changes(struct dc_state *old_state,
	struct dc_state *new_state,
	struct pipe_ctx *old_pipe,
	struct pipe_ctx *new_pipe)
{
	bool old_is_phantom = dc_state_get_pipe_subvp_type(old_state, old_pipe) == SUBVP_PHANTOM;
	bool new_is_phantom = dc_state_get_pipe_subvp_type(new_state, new_pipe) == SUBVP_PHANTOM;

	new_pipe->update_flags.raw = 0;

	/* If non-phantom pipe is being transitioned to a phantom pipe,
	 * set disable and return immediately. This is because the pipe
	 * that was previously in use must be fully disabled before we
	 * can "enable" it as a phantom pipe (since the OTG will certainly
	 * be different). The post_unlock sequence will set the correct
	 * update flags to enable the phantom pipe.
	 */
	if (old_pipe->plane_state && !old_is_phantom &&
	    new_pipe->plane_state && new_is_phantom) {
		new_pipe->update_flags.bits.disable = 1;
		return;
	}

	if (resource_is_pipe_type(new_pipe, OTG_MASTER) &&
	    resource_is_odm_topology_changed(new_pipe, old_pipe))
		/* Detect odm changes */
		new_pipe->update_flags.bits.odm = 1;

	/* Exit on unchanged, unused pipe */
	if (!old_pipe->plane_state && !new_pipe->plane_state)
		return;
	/* Detect pipe enable/disable */
	if (!old_pipe->plane_state && new_pipe->plane_state) {
		/* newly enabled pipe: everything must be programmed */
		new_pipe->update_flags.bits.enable = 1;
		new_pipe->update_flags.bits.mpcc = 1;
		new_pipe->update_flags.bits.dppclk = 1;
		new_pipe->update_flags.bits.hubp_interdependent = 1;
		new_pipe->update_flags.bits.hubp_rq_dlg_ttu = 1;
		new_pipe->update_flags.bits.unbounded_req = 1;
		new_pipe->update_flags.bits.gamut_remap = 1;
		new_pipe->update_flags.bits.scaler = 1;
		new_pipe->update_flags.bits.viewport = 1;
		new_pipe->update_flags.bits.det_size = 1;
		if (new_pipe->stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE &&
				new_pipe->stream_res.test_pattern_params.width != 0 &&
				new_pipe->stream_res.test_pattern_params.height != 0)
			new_pipe->update_flags.bits.test_pattern_changed = 1;
		if (!new_pipe->top_pipe && !new_pipe->prev_odm_pipe) {
			new_pipe->update_flags.bits.odm = 1;
			new_pipe->update_flags.bits.global_sync = 1;
		}
		return;
	}

	/* For SubVP we need to unconditionally enable because any phantom pipes are
	 * always removed then newly added for every full updates whenever SubVP is in use.
	 * The remove-add sequence of the phantom pipe always results in the pipe
	 * being blanked in enable_stream_timing (DPG).
	 */
	if (new_pipe->stream && dc_state_get_pipe_subvp_type(new_state, new_pipe) == SUBVP_PHANTOM)
		new_pipe->update_flags.bits.enable = 1;

	/* Phantom pipes are effectively disabled, if the pipe was previously phantom
	 * we have to enable
	 */
	if (old_pipe->plane_state && old_is_phantom &&
	    new_pipe->plane_state && !new_is_phantom)
		new_pipe->update_flags.bits.enable = 1;

	if (old_pipe->plane_state && !new_pipe->plane_state) {
		new_pipe->update_flags.bits.disable = 1;
		return;
	}

	/* Detect plane change */
	if (old_pipe->plane_state != new_pipe->plane_state) {
		new_pipe->update_flags.bits.plane_changed = true;
	}

	/* Detect top pipe only changes */
	if (resource_is_pipe_type(new_pipe, OTG_MASTER)) {
		/* Detect global sync changes */
		if (old_pipe->pipe_dlg_param.vready_offset != new_pipe->pipe_dlg_param.vready_offset
				|| old_pipe->pipe_dlg_param.vstartup_start != new_pipe->pipe_dlg_param.vstartup_start
				|| old_pipe->pipe_dlg_param.vupdate_offset != new_pipe->pipe_dlg_param.vupdate_offset
				|| old_pipe->pipe_dlg_param.vupdate_width != new_pipe->pipe_dlg_param.vupdate_width)
			new_pipe->update_flags.bits.global_sync = 1;
	}

	if (old_pipe->det_buffer_size_kb != new_pipe->det_buffer_size_kb)
		new_pipe->update_flags.bits.det_size = 1;

	/*
	 * Detect opp / tg change, only set on change, not on enable
	 * Assume mpcc inst = pipe index, if not this code needs to be updated
	 * since mpcc is what is affected by these. In fact all of our sequence
	 * makes this assumption at the moment with how hubp reset is matched to
	 * same index mpcc reset.
	 */
	if (old_pipe->stream_res.opp != new_pipe->stream_res.opp)
		new_pipe->update_flags.bits.opp_changed = 1;
	if (old_pipe->stream_res.tg != new_pipe->stream_res.tg)
		new_pipe->update_flags.bits.tg_changed = 1;

	/*
	 * Detect mpcc blending changes, only dpp inst and opp matter here,
	 * mpccs getting removed/inserted update connected ones during their own
	 * programming
	 */
	if (old_pipe->plane_res.dpp != new_pipe->plane_res.dpp
			|| old_pipe->stream_res.opp != new_pipe->stream_res.opp)
		new_pipe->update_flags.bits.mpcc = 1;

	/* Detect dppclk change */
	if (old_pipe->plane_res.bw.dppclk_khz != new_pipe->plane_res.bw.dppclk_khz)
		new_pipe->update_flags.bits.dppclk = 1;

	/* Check for scl update */
	if (memcmp(&old_pipe->plane_res.scl_data, &new_pipe->plane_res.scl_data, sizeof(struct scaler_data)))
		new_pipe->update_flags.bits.scaler = 1;
	/* Check for vp update */
	if (memcmp(&old_pipe->plane_res.scl_data.viewport, &new_pipe->plane_res.scl_data.viewport, sizeof(struct rect))
			|| memcmp(&old_pipe->plane_res.scl_data.viewport_c,
				&new_pipe->plane_res.scl_data.viewport_c, sizeof(struct rect)))
		new_pipe->update_flags.bits.viewport = 1;

	/* Detect dlg/ttu/rq updates */
	{
		/* local copies of the old regs; the interdependent fields are
		 * overwritten below so the final memcmp only sees the rest */
		struct _vcs_dpi_display_dlg_regs_st old_dlg_attr = old_pipe->dlg_regs;
		struct _vcs_dpi_display_ttu_regs_st old_ttu_attr = old_pipe->ttu_regs;
		struct _vcs_dpi_display_dlg_regs_st *new_dlg_attr = &new_pipe->dlg_regs;
		struct _vcs_dpi_display_ttu_regs_st *new_ttu_attr = &new_pipe->ttu_regs;

		/* Detect pipe interdependent updates */
		if (old_dlg_attr.dst_y_prefetch != new_dlg_attr->dst_y_prefetch ||
				old_dlg_attr.vratio_prefetch != new_dlg_attr->vratio_prefetch ||
				old_dlg_attr.vratio_prefetch_c != new_dlg_attr->vratio_prefetch_c ||
				old_dlg_attr.dst_y_per_vm_vblank != new_dlg_attr->dst_y_per_vm_vblank ||
				old_dlg_attr.dst_y_per_row_vblank
 != new_dlg_attr->dst_y_per_row_vblank ||
				old_dlg_attr.dst_y_per_vm_flip != new_dlg_attr->dst_y_per_vm_flip ||
				old_dlg_attr.dst_y_per_row_flip != new_dlg_attr->dst_y_per_row_flip ||
				old_dlg_attr.refcyc_per_meta_chunk_vblank_l != new_dlg_attr->refcyc_per_meta_chunk_vblank_l ||
				old_dlg_attr.refcyc_per_meta_chunk_vblank_c != new_dlg_attr->refcyc_per_meta_chunk_vblank_c ||
				old_dlg_attr.refcyc_per_meta_chunk_flip_l != new_dlg_attr->refcyc_per_meta_chunk_flip_l ||
				old_dlg_attr.refcyc_per_line_delivery_pre_l != new_dlg_attr->refcyc_per_line_delivery_pre_l ||
				old_dlg_attr.refcyc_per_line_delivery_pre_c != new_dlg_attr->refcyc_per_line_delivery_pre_c ||
				old_ttu_attr.refcyc_per_req_delivery_pre_l != new_ttu_attr->refcyc_per_req_delivery_pre_l ||
				old_ttu_attr.refcyc_per_req_delivery_pre_c != new_ttu_attr->refcyc_per_req_delivery_pre_c ||
				old_ttu_attr.refcyc_per_req_delivery_pre_cur0 != new_ttu_attr->refcyc_per_req_delivery_pre_cur0 ||
				old_ttu_attr.refcyc_per_req_delivery_pre_cur1 != new_ttu_attr->refcyc_per_req_delivery_pre_cur1 ||
				old_ttu_attr.min_ttu_vblank != new_ttu_attr->min_ttu_vblank ||
				old_ttu_attr.qos_level_flip != new_ttu_attr->qos_level_flip) {
			/* copy the interdependent fields into the local old copy so
			 * the later memcmp does not flag them a second time */
			old_dlg_attr.dst_y_prefetch = new_dlg_attr->dst_y_prefetch;
			old_dlg_attr.vratio_prefetch = new_dlg_attr->vratio_prefetch;
			old_dlg_attr.vratio_prefetch_c = new_dlg_attr->vratio_prefetch_c;
			old_dlg_attr.dst_y_per_vm_vblank = new_dlg_attr->dst_y_per_vm_vblank;
			old_dlg_attr.dst_y_per_row_vblank = new_dlg_attr->dst_y_per_row_vblank;
			old_dlg_attr.dst_y_per_vm_flip = new_dlg_attr->dst_y_per_vm_flip;
			old_dlg_attr.dst_y_per_row_flip = new_dlg_attr->dst_y_per_row_flip;
			old_dlg_attr.refcyc_per_meta_chunk_vblank_l = new_dlg_attr->refcyc_per_meta_chunk_vblank_l;
			old_dlg_attr.refcyc_per_meta_chunk_vblank_c = new_dlg_attr->refcyc_per_meta_chunk_vblank_c;
			old_dlg_attr.refcyc_per_meta_chunk_flip_l =
 new_dlg_attr->refcyc_per_meta_chunk_flip_l;
			old_dlg_attr.refcyc_per_line_delivery_pre_l = new_dlg_attr->refcyc_per_line_delivery_pre_l;
			old_dlg_attr.refcyc_per_line_delivery_pre_c = new_dlg_attr->refcyc_per_line_delivery_pre_c;
			old_ttu_attr.refcyc_per_req_delivery_pre_l = new_ttu_attr->refcyc_per_req_delivery_pre_l;
			old_ttu_attr.refcyc_per_req_delivery_pre_c = new_ttu_attr->refcyc_per_req_delivery_pre_c;
			old_ttu_attr.refcyc_per_req_delivery_pre_cur0 = new_ttu_attr->refcyc_per_req_delivery_pre_cur0;
			old_ttu_attr.refcyc_per_req_delivery_pre_cur1 = new_ttu_attr->refcyc_per_req_delivery_pre_cur1;
			old_ttu_attr.min_ttu_vblank = new_ttu_attr->min_ttu_vblank;
			old_ttu_attr.qos_level_flip = new_ttu_attr->qos_level_flip;
			new_pipe->update_flags.bits.hubp_interdependent = 1;
		}
		/* Detect any other updates to ttu/rq/dlg */
		if (memcmp(&old_dlg_attr, &new_pipe->dlg_regs, sizeof(old_dlg_attr)) ||
				memcmp(&old_ttu_attr, &new_pipe->ttu_regs, sizeof(old_ttu_attr)) ||
				memcmp(&old_pipe->rq_regs, &new_pipe->rq_regs, sizeof(old_pipe->rq_regs)))
			new_pipe->update_flags.bits.hubp_rq_dlg_ttu = 1;
	}

	if (old_pipe->unbounded_req != new_pipe->unbounded_req)
		new_pipe->update_flags.bits.unbounded_req = 1;

	if (memcmp(&old_pipe->stream_res.test_pattern_params,
				&new_pipe->stream_res.test_pattern_params, sizeof(struct test_pattern_params))) {
		new_pipe->update_flags.bits.test_pattern_changed = 1;
	}
}

/*
 * dcn20_update_dchubp_dpp - apply the detected per-pipe updates to HUBP/DPP.
 *
 * Programs, according to pipe_ctx->update_flags and the plane/stream update
 * flags: DPP clock, DPP DTO, HUBP rq/dlg/ttu registers, unbounded requests,
 * interdependent registers, input CSC, MPCC blending, scaler, viewport,
 * mcache, cursor, gamut remap / output CSC, surface config, and finally the
 * surface address (with SubVP surface-address bookkeeping for the DMUB).
 */
void dcn20_update_dchubp_dpp(
	struct dc *dc,
	struct pipe_ctx *pipe_ctx,
	struct dc_state *context)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	struct dpp *dpp = pipe_ctx->plane_res.dpp;
	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
	struct dccg *dccg = dc->res_pool->dccg;
	bool viewport_changed = false;
	enum mall_stream_type pipe_mall_type =
 dc_state_get_pipe_subvp_type(context, pipe_ctx);

	if (pipe_ctx->update_flags.bits.dppclk)
		dpp->funcs->dpp_dppclk_control(dpp, false, true);

	if (pipe_ctx->update_flags.bits.enable)
		dccg->funcs->update_dpp_dto(dccg, dpp->inst, pipe_ctx->plane_res.bw.dppclk_khz);

	/* TODO: Need input parameter to tell current DCHUB pipe tie to which OTG
	 * VTG is within DCHUBBUB which is a common block shared by each pipe HUBP.
	 * VTG is 1:1 mapping with OTG. Each pipe HUBP will select which VTG
	 */

	if (pipe_ctx->update_flags.bits.hubp_rq_dlg_ttu) {
		hubp->funcs->hubp_vtg_sel(hubp, pipe_ctx->stream_res.tg->inst);

		/* newer HW exposes hubp_setup2; fall back to the legacy entry */
		if (hubp->funcs->hubp_setup2) {
			hubp->funcs->hubp_setup2(
					hubp,
					&pipe_ctx->hubp_regs,
					&pipe_ctx->global_sync,
					&pipe_ctx->stream->timing);
		} else {
			hubp->funcs->hubp_setup(
					hubp,
					&pipe_ctx->dlg_regs,
					&pipe_ctx->ttu_regs,
					&pipe_ctx->rq_regs,
					&pipe_ctx->pipe_dlg_param);
		}
	}

	if (pipe_ctx->update_flags.bits.unbounded_req && hubp->funcs->set_unbounded_requesting)
		hubp->funcs->set_unbounded_requesting(hubp, pipe_ctx->unbounded_req);

	if (pipe_ctx->update_flags.bits.hubp_interdependent) {
		if (hubp->funcs->hubp_setup_interdependent2) {
			hubp->funcs->hubp_setup_interdependent2(
					hubp,
					&pipe_ctx->hubp_regs);
		} else {
			hubp->funcs->hubp_setup_interdependent(
					hubp,
					&pipe_ctx->dlg_regs,
					&pipe_ctx->ttu_regs);
		}
	}

	if (pipe_ctx->update_flags.bits.enable ||
			pipe_ctx->update_flags.bits.plane_changed ||
			plane_state->update_flags.bits.bpp_change ||
			plane_state->update_flags.bits.input_csc_change ||
			plane_state->update_flags.bits.color_space_change ||
			plane_state->update_flags.bits.coeff_reduction_change) {
		struct dc_bias_and_scale bns_params = plane_state->bias_and_scale;

		// program the input csc
		dpp->funcs->dpp_setup(dpp,
				plane_state->format,
				EXPANSION_MODE_ZERO,
				plane_state->input_csc_color_matrix,
				plane_state->color_space,
				NULL);

		if (dpp->funcs->set_cursor_matrix) {
			dpp->funcs->set_cursor_matrix(dpp,
				plane_state->color_space,
				plane_state->cursor_csc_color_matrix);
		}
		if (dpp->funcs->dpp_program_bias_and_scale) {
			//TODO :for CNVC set scale and bias registers if necessary
			dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params);
		}
	}

	if (pipe_ctx->update_flags.bits.mpcc
			|| pipe_ctx->update_flags.bits.plane_changed
			|| plane_state->update_flags.bits.global_alpha_change
			|| plane_state->update_flags.bits.per_pixel_alpha_change) {
		// MPCC inst is equal to pipe index in practice
		hws->funcs.update_mpcc(dc, pipe_ctx);
	}

	if (pipe_ctx->update_flags.bits.scaler ||
			plane_state->update_flags.bits.scaling_change ||
			plane_state->update_flags.bits.position_change ||
			plane_state->update_flags.bits.per_pixel_alpha_change ||
			pipe_ctx->stream->update_flags.bits.scaling) {
		pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->plane_state->per_pixel_alpha;
		ASSERT(pipe_ctx->plane_res.scl_data.lb_params.depth == LB_PIXEL_DEPTH_36BPP);
		/* scaler configuration */
		pipe_ctx->plane_res.dpp->funcs->dpp_set_scaler(
				pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data);
	}

	/* viewport is reprogrammed on these plane/stream updates only when
	 * committing to the current state (immediate update) */
	if (pipe_ctx->update_flags.bits.viewport ||
			(context == dc->current_state && plane_state->update_flags.bits.position_change) ||
			(context == dc->current_state && plane_state->update_flags.bits.scaling_change) ||
			(context == dc->current_state && pipe_ctx->stream->update_flags.bits.scaling)) {

		hubp->funcs->mem_program_viewport(
			hubp,
			&pipe_ctx->plane_res.scl_data.viewport,
			&pipe_ctx->plane_res.scl_data.viewport_c);
		viewport_changed = true;
	}

	if (hubp->funcs->hubp_program_mcache_id_and_split_coordinate)
		hubp->funcs->hubp_program_mcache_id_and_split_coordinate(hubp, &pipe_ctx->mcache_regs);

	/* Any updates are handled in dc interface, just need to apply existing for plane enable */
	if ((pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed ||
			pipe_ctx->update_flags.bits.scaler || viewport_changed == true) &&
			pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
		if (dc->hwss.abort_cursor_offload_update)
			dc->hwss.abort_cursor_offload_update(dc, pipe_ctx);

		dc->hwss.set_cursor_attribute(pipe_ctx);
		dc->hwss.set_cursor_position(pipe_ctx);

		if (dc->hwss.set_cursor_sdr_white_level)
			dc->hwss.set_cursor_sdr_white_level(pipe_ctx);
	}

	/* Any updates are handled in dc interface, just need
	 * to apply existing for plane enable / opp change */
	if (pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed
			|| pipe_ctx->update_flags.bits.plane_changed
			|| pipe_ctx->stream->update_flags.bits.gamut_remap
			|| plane_state->update_flags.bits.gamut_remap_change
			|| pipe_ctx->stream->update_flags.bits.out_csc) {
		/* dpp/cm gamut remap*/
		dc->hwss.program_gamut_remap(pipe_ctx);

		/*call the dcn2 method which uses mpc csc*/
		dc->hwss.program_output_csc(dc,
				pipe_ctx,
				pipe_ctx->stream->output_color_space,
				pipe_ctx->stream->csc_color_matrix.matrix,
				hubp->opp_id);
	}

	if (pipe_ctx->update_flags.bits.enable ||
			pipe_ctx->update_flags.bits.plane_changed ||
			pipe_ctx->update_flags.bits.opp_changed ||
			plane_state->update_flags.bits.pixel_format_change ||
			plane_state->update_flags.bits.horizontal_mirror_change ||
			plane_state->update_flags.bits.rotation_change ||
			plane_state->update_flags.bits.swizzle_change ||
			plane_state->update_flags.bits.dcc_change ||
			plane_state->update_flags.bits.bpp_change ||
			plane_state->update_flags.bits.scaling_change ||
			plane_state->update_flags.bits.plane_size_change) {
		struct plane_size size = plane_state->plane_size;

		size.surface_size = pipe_ctx->plane_res.scl_data.viewport;
		hubp->funcs->hubp_program_surface_config(
			hubp,
			plane_state->format,
			&plane_state->tiling_info,
			&size,
			plane_state->rotation,
			&plane_state->dcc,
			plane_state->horizontal_mirror,
			0);
		hubp->power_gated = false;
	}

	if (pipe_ctx->update_flags.bits.enable ||
		pipe_ctx->update_flags.bits.plane_changed ||
		plane_state->update_flags.bits.addr_update) {
		/* SubVP main pipes save the surface address for the DMUB first */
		if (resource_is_pipe_type(pipe_ctx, OTG_MASTER) &&
				pipe_mall_type == SUBVP_MAIN) {
			union block_sequence_params params;

			params.subvp_save_surf_addr.dc_dmub_srv = dc->ctx->dmub_srv;
			params.subvp_save_surf_addr.addr = &pipe_ctx->plane_state->address;
			params.subvp_save_surf_addr.subvp_index = pipe_ctx->subvp_index;
			hwss_subvp_save_surf_addr(&params);
		}
		dc->hwss.update_plane_addr(dc, pipe_ctx);
	}

	if (pipe_ctx->update_flags.bits.enable)
		hubp->funcs->set_blank(hubp, false);
	/* If the stream paired with this plane is phantom, the plane is also phantom */
	if (pipe_mall_type == SUBVP_PHANTOM && hubp->funcs->phantom_hubp_post_enable)
		hubp->funcs->phantom_hubp_post_enable(hubp);
}

/*
 * dcn20_calculate_vready_offset_for_group - max vready_offset in a pipe group.
 *
 * Walks every pipe connected to @pipe (bottom/top MPC neighbors and
 * next/prev ODM neighbors) and returns the largest vready_offset found,
 * starting from @pipe's own value.
 */
static int dcn20_calculate_vready_offset_for_group(struct pipe_ctx *pipe)
{
	struct pipe_ctx *other_pipe;
	int vready_offset = pipe->pipe_dlg_param.vready_offset;

	/* Always use the largest vready_offset of all connected pipes */
	for (other_pipe = pipe->bottom_pipe; other_pipe != NULL; other_pipe = other_pipe->bottom_pipe) {
		if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
			vready_offset = other_pipe->pipe_dlg_param.vready_offset;
	}
	for (other_pipe = pipe->top_pipe; other_pipe != NULL; other_pipe = other_pipe->top_pipe) {
		if
 (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
			vready_offset = other_pipe->pipe_dlg_param.vready_offset;
	}
	for (other_pipe = pipe->next_odm_pipe; other_pipe != NULL; other_pipe = other_pipe->next_odm_pipe) {
		if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
			vready_offset = other_pipe->pipe_dlg_param.vready_offset;
	}
	for (other_pipe = pipe->prev_odm_pipe; other_pipe != NULL; other_pipe = other_pipe->prev_odm_pipe) {
		if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
			vready_offset = other_pipe->pipe_dlg_param.vready_offset;
	}

	return vready_offset;
}

/*
 * dcn20_program_tg - program global sync and VTG params on the pipe's TG.
 *
 * Programs vready (group-wide maximum), vstartup, vupdate and pstate keepout
 * on the timing generator, waits for VACTIVE on non-phantom pipes, sets the
 * VTG parameters from the stream timing, and arms the vupdate interrupt when
 * the hwseq provides a hook.
 */
static void dcn20_program_tg(
	struct dc *dc,
	struct pipe_ctx *pipe_ctx,
	struct dc_state *context,
	struct dce_hwseq *hws)
{
	pipe_ctx->stream_res.tg->funcs->program_global_sync(
			pipe_ctx->stream_res.tg,
			dcn20_calculate_vready_offset_for_group(pipe_ctx),
			pipe_ctx->pipe_dlg_param.vstartup_start,
			pipe_ctx->pipe_dlg_param.vupdate_offset,
			pipe_ctx->pipe_dlg_param.vupdate_width,
			pipe_ctx->pipe_dlg_param.pstate_keepout);

	/* phantom pipes are never unblanked, so do not wait on them */
	if (dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM)
		pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);

	pipe_ctx->stream_res.tg->funcs->set_vtg_params(
			pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, true);

	if (hws->funcs.setup_vupdate_interrupt)
		hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);
}

/*
 * dcn20_program_pipe - apply all pending updates for one pipe.
 *
 * Dispatches to blanking, TG programming, ODM update, plane enable, DET
 * sizing, HUBP/DPP updates and the color pipeline based on the pipe's
 * update_flags computed by dcn20_detect_pipe_changes().
 */
static void dcn20_program_pipe(
	struct dc *dc,
	struct pipe_ctx *pipe_ctx,
	struct dc_state *context)
{
	struct dce_hwseq *hws = dc->hwseq;

	/* Only need to unblank on top pipe */
	if (resource_is_pipe_type(pipe_ctx, OTG_MASTER)) {
		if (pipe_ctx->update_flags.bits.enable ||
			pipe_ctx->update_flags.bits.odm ||
			pipe_ctx->stream->update_flags.bits.abm_level)
hws->funcs.blank_pixel_data(dc, pipe_ctx, 1933 !pipe_ctx->plane_state || 1934 !pipe_ctx->plane_state->visible); 1935 } 1936 1937 /* Only update TG on top pipe */ 1938 if (pipe_ctx->update_flags.bits.global_sync && !pipe_ctx->top_pipe 1939 && !pipe_ctx->prev_odm_pipe) 1940 dcn20_program_tg(dc, pipe_ctx, context, hws); 1941 1942 if (pipe_ctx->update_flags.bits.odm) 1943 hws->funcs.update_odm(dc, context, pipe_ctx); 1944 1945 if (pipe_ctx->update_flags.bits.enable) { 1946 if (hws->funcs.enable_plane) 1947 hws->funcs.enable_plane(dc, pipe_ctx, context); 1948 else 1949 dcn20_enable_plane(dc, pipe_ctx, context); 1950 1951 if (dc->res_pool->hubbub->funcs->force_wm_propagate_to_pipes) 1952 dc->res_pool->hubbub->funcs->force_wm_propagate_to_pipes(dc->res_pool->hubbub); 1953 } 1954 1955 if (pipe_ctx->update_flags.bits.det_size) { 1956 if (dc->res_pool->hubbub->funcs->program_det_size) 1957 dc->res_pool->hubbub->funcs->program_det_size( 1958 dc->res_pool->hubbub, pipe_ctx->plane_res.hubp->inst, pipe_ctx->det_buffer_size_kb); 1959 1960 if (dc->res_pool->hubbub->funcs->program_det_segments) 1961 dc->res_pool->hubbub->funcs->program_det_segments( 1962 dc->res_pool->hubbub, pipe_ctx->plane_res.hubp->inst, pipe_ctx->hubp_regs.det_size); 1963 } 1964 1965 if (pipe_ctx->plane_state && (pipe_ctx->update_flags.raw || 1966 pipe_ctx->plane_state->update_flags.raw || 1967 pipe_ctx->stream->update_flags.raw)) 1968 dcn20_update_dchubp_dpp(dc, pipe_ctx, context); 1969 1970 if (pipe_ctx->plane_state && (pipe_ctx->update_flags.bits.enable || 1971 pipe_ctx->plane_state->update_flags.bits.hdr_mult)) 1972 hws->funcs.set_hdr_multiplier(pipe_ctx); 1973 1974 if (pipe_ctx->plane_state && 1975 (pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change || 1976 pipe_ctx->plane_state->update_flags.bits.gamma_change || 1977 pipe_ctx->plane_state->update_flags.bits.lut_3d || 1978 pipe_ctx->update_flags.bits.enable)) 1979 hws->funcs.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state); 1980 
1981 /* dcn10_translate_regamma_to_hw_format takes 750us to finish 1982 * only do gamma programming for powering on, internal memcmp to avoid 1983 * updating on slave planes 1984 */ 1985 if (pipe_ctx->update_flags.bits.enable || 1986 pipe_ctx->update_flags.bits.plane_changed || 1987 pipe_ctx->stream->update_flags.bits.out_tf) 1988 hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream); 1989 1990 /* If the pipe has been enabled or has a different opp, we 1991 * should reprogram the fmt. This deals with cases where 1992 * interation between mpc and odm combine on different streams 1993 * causes a different pipe to be chosen to odm combine with. 1994 */ 1995 if (pipe_ctx->update_flags.bits.enable 1996 || pipe_ctx->update_flags.bits.opp_changed) { 1997 1998 pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion( 1999 pipe_ctx->stream_res.opp, 2000 COLOR_SPACE_YCBCR601, 2001 pipe_ctx->stream->timing.display_color_depth, 2002 pipe_ctx->stream->signal); 2003 2004 pipe_ctx->stream_res.opp->funcs->opp_program_fmt( 2005 pipe_ctx->stream_res.opp, 2006 &pipe_ctx->stream->bit_depth_params, 2007 &pipe_ctx->stream->clamping); 2008 } 2009 2010 /* Set ABM pipe after other pipe configurations done */ 2011 if ((pipe_ctx->plane_state && pipe_ctx->plane_state->visible)) { 2012 if (pipe_ctx->stream_res.abm) { 2013 dc->hwss.set_pipe(pipe_ctx); 2014 pipe_ctx->stream_res.abm->funcs->set_abm_level(pipe_ctx->stream_res.abm, 2015 pipe_ctx->stream->abm_level); 2016 } 2017 } 2018 2019 if (pipe_ctx->update_flags.bits.test_pattern_changed) { 2020 struct output_pixel_processor *odm_opp = pipe_ctx->stream_res.opp; 2021 struct bit_depth_reduction_params params; 2022 2023 memset(¶ms, 0, sizeof(params)); 2024 odm_opp->funcs->opp_program_bit_depth_reduction(odm_opp, ¶ms); 2025 dc->hwss.set_disp_pattern_generator(dc, 2026 pipe_ctx, 2027 pipe_ctx->stream_res.test_pattern_params.test_pattern, 2028 pipe_ctx->stream_res.test_pattern_params.color_space, 2029 
pipe_ctx->stream_res.test_pattern_params.color_depth, 2030 NULL, 2031 pipe_ctx->stream_res.test_pattern_params.width, 2032 pipe_ctx->stream_res.test_pattern_params.height, 2033 pipe_ctx->stream_res.test_pattern_params.offset); 2034 } 2035 } 2036 2037 void dcn20_program_front_end_for_ctx( 2038 struct dc *dc, 2039 struct dc_state *context) 2040 { 2041 int i; 2042 unsigned int prev_hubp_count = 0; 2043 unsigned int hubp_count = 0; 2044 struct dce_hwseq *hws = dc->hwseq; 2045 struct pipe_ctx *pipe = NULL; 2046 2047 DC_LOGGER_INIT(dc->ctx->logger); 2048 2049 if (resource_is_pipe_topology_changed(dc->current_state, context)) 2050 resource_log_pipe_topology_update(dc, context); 2051 2052 if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) { 2053 for (i = 0; i < dc->res_pool->pipe_count; i++) { 2054 pipe = &context->res_ctx.pipe_ctx[i]; 2055 2056 if (pipe->plane_state) { 2057 ASSERT(!pipe->plane_state->triplebuffer_flips); 2058 /*turn off triple buffer for full update*/ 2059 dc->hwss.program_triplebuffer( 2060 dc, pipe, pipe->plane_state->triplebuffer_flips); 2061 } 2062 } 2063 } 2064 2065 for (i = 0; i < dc->res_pool->pipe_count; i++) { 2066 if (dc->current_state->res_ctx.pipe_ctx[i].plane_state) 2067 prev_hubp_count++; 2068 if (context->res_ctx.pipe_ctx[i].plane_state) 2069 hubp_count++; 2070 } 2071 2072 if (prev_hubp_count == 0 && hubp_count > 0) { 2073 if (dc->res_pool->hubbub->funcs->force_pstate_change_control) 2074 dc->res_pool->hubbub->funcs->force_pstate_change_control( 2075 dc->res_pool->hubbub, true, false); 2076 udelay(500); 2077 } 2078 2079 /* Set pipe update flags and lock pipes */ 2080 for (i = 0; i < dc->res_pool->pipe_count; i++) 2081 dcn20_detect_pipe_changes(dc->current_state, context, &dc->current_state->res_ctx.pipe_ctx[i], 2082 &context->res_ctx.pipe_ctx[i]); 2083 2084 /* When disabling phantom pipes, turn on phantom OTG first (so we can get double 2085 * buffer updates properly) 2086 */ 2087 for (i = 0; i < 
dc->res_pool->pipe_count; i++) { 2088 struct dc_stream_state *stream = dc->current_state->res_ctx.pipe_ctx[i].stream; 2089 2090 pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 2091 2092 if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable && stream && 2093 dc_state_get_pipe_subvp_type(dc->current_state, pipe) == SUBVP_PHANTOM) { 2094 struct timing_generator *tg = dc->current_state->res_ctx.pipe_ctx[i].stream_res.tg; 2095 2096 if (tg->funcs->enable_crtc) { 2097 if (dc->hwseq->funcs.blank_pixel_data) 2098 dc->hwseq->funcs.blank_pixel_data(dc, pipe, true); 2099 2100 tg->funcs->enable_crtc(tg); 2101 } 2102 } 2103 } 2104 /* OTG blank before disabling all front ends */ 2105 for (i = 0; i < dc->res_pool->pipe_count; i++) 2106 if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable 2107 && !context->res_ctx.pipe_ctx[i].top_pipe 2108 && !context->res_ctx.pipe_ctx[i].prev_odm_pipe 2109 && context->res_ctx.pipe_ctx[i].stream) 2110 hws->funcs.blank_pixel_data(dc, &context->res_ctx.pipe_ctx[i], true); 2111 2112 /* Disconnect mpcc */ 2113 for (i = 0; i < dc->res_pool->pipe_count; i++) 2114 if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable 2115 || context->res_ctx.pipe_ctx[i].update_flags.bits.opp_changed) { 2116 struct hubbub *hubbub = dc->res_pool->hubbub; 2117 2118 /* Phantom pipe DET should be 0, but if a pipe in use is being transitioned to phantom 2119 * then we want to do the programming here (effectively it's being disabled). If we do 2120 * the programming later the DET won't be updated until the OTG for the phantom pipe is 2121 * turned on (i.e. in an MCLK switch) which can come in too late and cause issues with 2122 * DET allocation. 
2123 */ 2124 if ((context->res_ctx.pipe_ctx[i].update_flags.bits.disable || 2125 (context->res_ctx.pipe_ctx[i].plane_state && 2126 dc_state_get_pipe_subvp_type(context, &context->res_ctx.pipe_ctx[i]) 2127 == SUBVP_PHANTOM))) { 2128 if (hubbub->funcs->program_det_size) 2129 hubbub->funcs->program_det_size(hubbub, 2130 dc->current_state->res_ctx.pipe_ctx[i].plane_res.hubp->inst, 0); 2131 if (dc->res_pool->hubbub->funcs->program_det_segments) 2132 dc->res_pool->hubbub->funcs->program_det_segments( 2133 hubbub, dc->current_state->res_ctx.pipe_ctx[i].plane_res.hubp->inst, 0); 2134 } 2135 hws->funcs.plane_atomic_disconnect(dc, dc->current_state, 2136 &dc->current_state->res_ctx.pipe_ctx[i]); 2137 DC_LOG_DC("Reset mpcc for pipe %d\n", dc->current_state->res_ctx.pipe_ctx[i].pipe_idx); 2138 } 2139 2140 /* update ODM for blanked OTG master pipes */ 2141 for (i = 0; i < dc->res_pool->pipe_count; i++) { 2142 pipe = &context->res_ctx.pipe_ctx[i]; 2143 if (resource_is_pipe_type(pipe, OTG_MASTER) && 2144 !resource_is_pipe_type(pipe, DPP_PIPE) && 2145 pipe->update_flags.bits.odm && 2146 hws->funcs.update_odm) 2147 hws->funcs.update_odm(dc, context, pipe); 2148 } 2149 2150 /* 2151 * Program all updated pipes, order matters for mpcc setup. Start with 2152 * top pipe and program all pipes that follow in order 2153 */ 2154 for (i = 0; i < dc->res_pool->pipe_count; i++) { 2155 pipe = &context->res_ctx.pipe_ctx[i]; 2156 2157 if (pipe->plane_state && !pipe->top_pipe) { 2158 while (pipe) { 2159 if (hws->funcs.program_pipe) 2160 hws->funcs.program_pipe(dc, pipe, context); 2161 else { 2162 /* Don't program phantom pipes in the regular front end programming sequence. 2163 * There is an MPO transition case where a pipe being used by a video plane is 2164 * transitioned directly to be a phantom pipe when closing the MPO video. 
2165 * However the phantom pipe will program a new HUBP_VTG_SEL (update takes place 2166 * right away) but the MPO still exists until the double buffered update of the 2167 * main pipe so we will get a frame of underflow if the phantom pipe is 2168 * programmed here. 2169 */ 2170 if (pipe->stream && 2171 dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_PHANTOM) 2172 dcn20_program_pipe(dc, pipe, context); 2173 } 2174 2175 pipe = pipe->bottom_pipe; 2176 } 2177 } 2178 2179 /* Program secondary blending tree and writeback pipes */ 2180 pipe = &context->res_ctx.pipe_ctx[i]; 2181 if (!pipe->top_pipe && !pipe->prev_odm_pipe 2182 && pipe->stream && pipe->stream->num_wb_info > 0 2183 && (pipe->update_flags.raw || (pipe->plane_state && pipe->plane_state->update_flags.raw) 2184 || pipe->stream->update_flags.raw) 2185 && hws->funcs.program_all_writeback_pipes_in_tree) 2186 hws->funcs.program_all_writeback_pipes_in_tree(dc, pipe->stream, context); 2187 2188 /* Avoid underflow by check of pipe line read when adding 2nd plane. */ 2189 if (hws->wa.wait_hubpret_read_start_during_mpo_transition && 2190 !pipe->top_pipe && 2191 pipe->stream && 2192 pipe->plane_res.hubp->funcs->hubp_wait_pipe_read_start && 2193 dc->current_state->stream_status[0].plane_count == 1 && 2194 context->stream_status[0].plane_count > 1) { 2195 pipe->plane_res.hubp->funcs->hubp_wait_pipe_read_start(pipe->plane_res.hubp); 2196 } 2197 } 2198 } 2199 2200 /* post_unlock_reset_opp - the function wait for corresponding double 2201 * buffered pending status clear and reset opp head pipe's none double buffered 2202 * registers to their initial state. 
2203 */ 2204 void dcn20_post_unlock_reset_opp(struct dc *dc, 2205 struct pipe_ctx *opp_head) 2206 { 2207 struct display_stream_compressor *dsc = opp_head->stream_res.dsc; 2208 struct dccg *dccg = dc->res_pool->dccg; 2209 2210 /* 2211 * wait for all DPP pipes in current mpc blending tree completes double 2212 * buffered disconnection before resetting OPP 2213 */ 2214 dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, opp_head); 2215 2216 if (dsc) { 2217 bool is_dsc_ungated = false; 2218 2219 if (dc->hwseq->funcs.dsc_pg_status) 2220 is_dsc_ungated = dc->hwseq->funcs.dsc_pg_status(dc->hwseq, dsc->inst); 2221 2222 if (is_dsc_ungated) { 2223 /* 2224 * seamless update specific where we will postpone non 2225 * double buffered DSCCLK disable logic in post unlock 2226 * sequence after DSC is disconnected from OPP but not 2227 * yet power gated. 2228 */ 2229 dsc->funcs->dsc_wait_disconnect_pending_clear(dsc); 2230 dsc->funcs->dsc_disable(dsc); 2231 if (dccg->funcs->set_ref_dscclk) 2232 dccg->funcs->set_ref_dscclk(dccg, dsc->inst); 2233 } 2234 } 2235 } 2236 2237 void dcn20_post_unlock_program_front_end( 2238 struct dc *dc, 2239 struct dc_state *context) 2240 { 2241 // Timeout for pipe enable 2242 unsigned int timeout_us = 100000; 2243 unsigned int polling_interval_us = 1; 2244 struct dce_hwseq *hwseq = dc->hwseq; 2245 int i; 2246 2247 for (i = 0; i < dc->res_pool->pipe_count; i++) 2248 if (resource_is_pipe_type(&dc->current_state->res_ctx.pipe_ctx[i], OPP_HEAD) && 2249 !resource_is_pipe_type(&context->res_ctx.pipe_ctx[i], OPP_HEAD)) 2250 dcn20_post_unlock_reset_opp(dc, 2251 &dc->current_state->res_ctx.pipe_ctx[i]); 2252 2253 for (i = 0; i < dc->res_pool->pipe_count; i++) 2254 if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable) 2255 dc->hwss.disable_plane(dc, dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i]); 2256 2257 /* 2258 * If we are enabling a pipe, we need to wait for pending clear as this is a critical 2259 * part of the enable operation otherwise, 
DM may request an immediate flip which 2260 * will cause HW to perform an "immediate enable" (as opposed to "vsync enable") which 2261 * is unsupported on DCN. 2262 */ 2263 for (i = 0; i < dc->res_pool->pipe_count; i++) { 2264 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; 2265 // Don't check flip pending on phantom pipes 2266 if (pipe->plane_state && !pipe->top_pipe && pipe->update_flags.bits.enable && 2267 dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_PHANTOM) { 2268 struct hubp *hubp = pipe->plane_res.hubp; 2269 int j = 0; 2270 2271 for (j = 0; j < timeout_us / polling_interval_us 2272 && hubp->funcs->hubp_is_flip_pending(hubp); j++) 2273 udelay(polling_interval_us); 2274 } 2275 } 2276 2277 for (i = 0; i < dc->res_pool->pipe_count; i++) { 2278 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; 2279 struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 2280 2281 /* When going from a smaller ODM slice count to larger, we must ensure double 2282 * buffer update completes before we return to ensure we don't reduce DISPCLK 2283 * before we've transitioned to 2:1 or 4:1 2284 */ 2285 if (resource_is_pipe_type(old_pipe, OTG_MASTER) && resource_is_pipe_type(pipe, OTG_MASTER) && 2286 resource_get_odm_slice_count(old_pipe) < resource_get_odm_slice_count(pipe) && 2287 dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_PHANTOM) { 2288 int j = 0; 2289 struct timing_generator *tg = pipe->stream_res.tg; 2290 2291 if (tg->funcs->get_optc_double_buffer_pending) { 2292 for (j = 0; j < timeout_us / polling_interval_us 2293 && tg->funcs->get_optc_double_buffer_pending(tg); j++) 2294 udelay(polling_interval_us); 2295 } 2296 } 2297 } 2298 2299 if (dc->res_pool->hubbub->funcs->force_pstate_change_control) 2300 dc->res_pool->hubbub->funcs->force_pstate_change_control( 2301 dc->res_pool->hubbub, false, false); 2302 2303 for (i = 0; i < dc->res_pool->pipe_count; i++) { 2304 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; 2305 2306 if 
(pipe->plane_state && !pipe->top_pipe) { 2307 /* Program phantom pipe here to prevent a frame of underflow in the MPO transition 2308 * case (if a pipe being used for a video plane transitions to a phantom pipe, it 2309 * can underflow due to HUBP_VTG_SEL programming if done in the regular front end 2310 * programming sequence). 2311 */ 2312 while (pipe) { 2313 if (pipe->stream && dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) { 2314 /* When turning on the phantom pipe we want to run through the 2315 * entire enable sequence, so apply all the "enable" flags. 2316 */ 2317 if (dc->hwss.apply_update_flags_for_phantom) 2318 dc->hwss.apply_update_flags_for_phantom(pipe); 2319 if (dc->hwss.update_phantom_vp_position) 2320 dc->hwss.update_phantom_vp_position(dc, context, pipe); 2321 dcn20_program_pipe(dc, pipe, context); 2322 } 2323 pipe = pipe->bottom_pipe; 2324 } 2325 } 2326 } 2327 2328 if (!hwseq) 2329 return; 2330 2331 /* P-State support transitions: 2332 * Natural -> FPO: P-State disabled in prepare, force disallow anytime is safe 2333 * FPO -> Natural: Unforce anytime after FW disable is safe (P-State will assert naturally) 2334 * Unsupported -> FPO: P-State enabled in optimize, force disallow anytime is safe 2335 * FPO -> Unsupported: P-State disabled in prepare, unforce disallow anytime is safe 2336 * FPO <-> SubVP: Force disallow is maintained on the FPO / SubVP pipes 2337 */ 2338 if (hwseq->funcs.update_force_pstate) 2339 dc->hwseq->funcs.update_force_pstate(dc, context); 2340 2341 /* Only program the MALL registers after all the main and phantom pipes 2342 * are done programming. 
2343 */ 2344 if (hwseq->funcs.program_mall_pipe_config) 2345 hwseq->funcs.program_mall_pipe_config(dc, context); 2346 2347 /* WA to apply WM setting*/ 2348 if (hwseq->wa.DEGVIDCN21) 2349 dc->res_pool->hubbub->funcs->apply_DEDCN21_147_wa(dc->res_pool->hubbub); 2350 2351 /* WA for stutter underflow during MPO transitions when adding 2nd plane */ 2352 if (hwseq->wa.disallow_self_refresh_during_multi_plane_transition) { 2353 2354 if (dc->current_state->stream_status[0].plane_count == 1 && 2355 context->stream_status[0].plane_count > 1) { 2356 2357 struct timing_generator *tg = dc->res_pool->timing_generators[0]; 2358 2359 dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub, false); 2360 2361 hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied = true; 2362 hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied_on_frame = tg->funcs->get_frame_count(tg); 2363 } 2364 } 2365 } 2366 2367 void dcn20_prepare_bandwidth( 2368 struct dc *dc, 2369 struct dc_state *context) 2370 { 2371 struct hubbub *hubbub = dc->res_pool->hubbub; 2372 unsigned int compbuf_size_kb = 0; 2373 unsigned int cache_wm_a = context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns; 2374 unsigned int i; 2375 2376 dc->clk_mgr->funcs->update_clocks( 2377 dc->clk_mgr, 2378 context, 2379 false); 2380 2381 for (i = 0; i < dc->res_pool->pipe_count; i++) { 2382 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; 2383 2384 // At optimize don't restore the original watermark value 2385 if (pipe->stream && dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_NONE) { 2386 context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = 4U * 1000U * 1000U * 1000U; 2387 break; 2388 } 2389 } 2390 2391 /* program dchubbub watermarks: 2392 * For assigning optimized_required, use |= operator since we don't want 2393 * to clear the value if the optimize has not happened yet 2394 */ 2395 dc->optimized_required |= 
hubbub->funcs->program_watermarks(hubbub, 2396 &context->bw_ctx.bw.dcn.watermarks, 2397 dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000, 2398 false); 2399 2400 // Restore the real watermark so we can commit the value to DMCUB 2401 // DMCUB uses the "original" watermark value in SubVP MCLK switch 2402 context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = cache_wm_a; 2403 2404 /* decrease compbuf size */ 2405 if (hubbub->funcs->program_compbuf_size) { 2406 if (context->bw_ctx.dml.ip.min_comp_buffer_size_kbytes) { 2407 compbuf_size_kb = context->bw_ctx.dml.ip.min_comp_buffer_size_kbytes; 2408 dc->optimized_required |= (compbuf_size_kb != dc->current_state->bw_ctx.dml.ip.min_comp_buffer_size_kbytes); 2409 } else { 2410 compbuf_size_kb = context->bw_ctx.bw.dcn.compbuf_size_kb; 2411 dc->optimized_required |= (compbuf_size_kb != dc->current_state->bw_ctx.bw.dcn.compbuf_size_kb); 2412 } 2413 2414 hubbub->funcs->program_compbuf_size(hubbub, compbuf_size_kb, false); 2415 } 2416 } 2417 2418 void dcn20_optimize_bandwidth( 2419 struct dc *dc, 2420 struct dc_state *context) 2421 { 2422 struct hubbub *hubbub = dc->res_pool->hubbub; 2423 int i; 2424 2425 for (i = 0; i < dc->res_pool->pipe_count; i++) { 2426 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; 2427 2428 // At optimize don't need to restore the original watermark value 2429 if (pipe->stream && dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_NONE) { 2430 context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = 4U * 1000U * 1000U * 1000U; 2431 break; 2432 } 2433 } 2434 2435 /* program dchubbub watermarks */ 2436 hubbub->funcs->program_watermarks(hubbub, 2437 &context->bw_ctx.bw.dcn.watermarks, 2438 dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000, 2439 true); 2440 2441 if (dc->clk_mgr->dc_mode_softmax_enabled) 2442 if (dc->clk_mgr->clks.dramclk_khz > dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000 && 2443 context->bw_ctx.bw.dcn.clk.dramclk_khz <= 
dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000) 2444 dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, dc->clk_mgr->bw_params->dc_mode_softmax_memclk); 2445 2446 /* increase compbuf size */ 2447 if (hubbub->funcs->program_compbuf_size) 2448 hubbub->funcs->program_compbuf_size(hubbub, context->bw_ctx.bw.dcn.compbuf_size_kb, true); 2449 2450 if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) { 2451 dc_dmub_srv_p_state_delegate(dc, 2452 true, context); 2453 context->bw_ctx.bw.dcn.clk.p_state_change_support = true; 2454 dc->clk_mgr->clks.fw_based_mclk_switching = true; 2455 } else { 2456 dc->clk_mgr->clks.fw_based_mclk_switching = false; 2457 } 2458 2459 dc->clk_mgr->funcs->update_clocks( 2460 dc->clk_mgr, 2461 context, 2462 true); 2463 if (context->bw_ctx.bw.dcn.clk.zstate_support == DCN_ZSTATE_SUPPORT_ALLOW && 2464 !dc->debug.disable_extblankadj) { 2465 for (i = 0; i < dc->res_pool->pipe_count; ++i) { 2466 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; 2467 2468 if (pipe_ctx->stream && pipe_ctx->plane_res.hubp->funcs->program_extended_blank 2469 && pipe_ctx->stream->adjust.v_total_min == pipe_ctx->stream->adjust.v_total_max 2470 && pipe_ctx->stream->adjust.v_total_max > pipe_ctx->stream->timing.v_total) 2471 pipe_ctx->plane_res.hubp->funcs->program_extended_blank(pipe_ctx->plane_res.hubp, 2472 pipe_ctx->dlg_regs.min_dst_y_next_start); 2473 } 2474 } 2475 } 2476 2477 bool dcn20_update_bandwidth( 2478 struct dc *dc, 2479 struct dc_state *context) 2480 { 2481 int i; 2482 struct dce_hwseq *hws = dc->hwseq; 2483 2484 /* recalculate DML parameters */ 2485 if (dc->res_pool->funcs->validate_bandwidth(dc, context, DC_VALIDATE_MODE_AND_PROGRAMMING) != DC_OK) 2486 return false; 2487 2488 /* apply updated bandwidth parameters */ 2489 dc->hwss.prepare_bandwidth(dc, context); 2490 2491 /* update hubp configs for all pipes */ 2492 for (i = 0; i < dc->res_pool->pipe_count; i++) { 2493 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; 2494 2495 if 
(pipe_ctx->plane_state == NULL) 2496 continue; 2497 2498 if (pipe_ctx->top_pipe == NULL) { 2499 bool blank = !is_pipe_tree_visible(pipe_ctx); 2500 2501 pipe_ctx->stream_res.tg->funcs->program_global_sync( 2502 pipe_ctx->stream_res.tg, 2503 dcn20_calculate_vready_offset_for_group(pipe_ctx), 2504 pipe_ctx->pipe_dlg_param.vstartup_start, 2505 pipe_ctx->pipe_dlg_param.vupdate_offset, 2506 pipe_ctx->pipe_dlg_param.vupdate_width, 2507 pipe_ctx->pipe_dlg_param.pstate_keepout); 2508 2509 pipe_ctx->stream_res.tg->funcs->set_vtg_params( 2510 pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, false); 2511 2512 if (pipe_ctx->prev_odm_pipe == NULL) 2513 hws->funcs.blank_pixel_data(dc, pipe_ctx, blank); 2514 2515 if (hws->funcs.setup_vupdate_interrupt) 2516 hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx); 2517 } 2518 2519 pipe_ctx->plane_res.hubp->funcs->hubp_setup( 2520 pipe_ctx->plane_res.hubp, 2521 &pipe_ctx->dlg_regs, 2522 &pipe_ctx->ttu_regs, 2523 &pipe_ctx->rq_regs, 2524 &pipe_ctx->pipe_dlg_param); 2525 } 2526 2527 return true; 2528 } 2529 2530 void dcn20_enable_writeback( 2531 struct dc *dc, 2532 struct dc_writeback_info *wb_info, 2533 struct dc_state *context) 2534 { 2535 struct dwbc *dwb; 2536 struct mcif_wb *mcif_wb; 2537 struct timing_generator *optc; 2538 2539 ASSERT(wb_info->dwb_pipe_inst < MAX_DWB_PIPES); 2540 ASSERT(wb_info->wb_enabled); 2541 dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst]; 2542 mcif_wb = dc->res_pool->mcif_wb[wb_info->dwb_pipe_inst]; 2543 2544 /* set the OPTC source mux */ 2545 optc = dc->res_pool->timing_generators[dwb->otg_inst]; 2546 optc->funcs->set_dwb_source(optc, wb_info->dwb_pipe_inst); 2547 /* set MCIF_WB buffer and arbitration configuration */ 2548 mcif_wb->funcs->config_mcif_buf(mcif_wb, &wb_info->mcif_buf_params, wb_info->dwb_params.dest_height); 2549 mcif_wb->funcs->config_mcif_arb(mcif_wb, &context->bw_ctx.bw.dcn.bw_writeback.mcif_wb_arb[wb_info->dwb_pipe_inst]); 2550 /* Enable MCIF_WB */ 2551 
mcif_wb->funcs->enable_mcif(mcif_wb); 2552 /* Enable DWB */ 2553 dwb->funcs->enable(dwb, &wb_info->dwb_params); 2554 /* TODO: add sequence to enable/disable warmup */ 2555 } 2556 2557 void dcn20_disable_writeback( 2558 struct dc *dc, 2559 unsigned int dwb_pipe_inst) 2560 { 2561 struct dwbc *dwb; 2562 struct mcif_wb *mcif_wb; 2563 2564 ASSERT(dwb_pipe_inst < MAX_DWB_PIPES); 2565 dwb = dc->res_pool->dwbc[dwb_pipe_inst]; 2566 mcif_wb = dc->res_pool->mcif_wb[dwb_pipe_inst]; 2567 2568 dwb->funcs->disable(dwb); 2569 mcif_wb->funcs->disable_mcif(mcif_wb); 2570 } 2571 2572 bool dcn20_wait_for_blank_complete( 2573 struct output_pixel_processor *opp) 2574 { 2575 int counter; 2576 2577 if (!opp) 2578 return false; 2579 2580 for (counter = 0; counter < 1000; counter++) { 2581 if (!opp->funcs->dpg_is_pending(opp)) 2582 break; 2583 2584 udelay(100); 2585 } 2586 2587 if (counter == 1000) { 2588 dm_error("DC: failed to blank crtc!\n"); 2589 return false; 2590 } 2591 2592 return opp->funcs->dpg_is_blanked(opp); 2593 } 2594 2595 bool dcn20_dmdata_status_done(struct pipe_ctx *pipe_ctx) 2596 { 2597 struct hubp *hubp = pipe_ctx->plane_res.hubp; 2598 2599 if (!hubp) 2600 return false; 2601 return hubp->funcs->dmdata_status_done(hubp); 2602 } 2603 2604 void dcn20_disable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx) 2605 { 2606 struct dce_hwseq *hws = dc->hwseq; 2607 2608 if (pipe_ctx->stream_res.dsc) { 2609 struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe; 2610 2611 hws->funcs.dsc_pg_control(hws, pipe_ctx->stream_res.dsc->inst, true); 2612 while (odm_pipe) { 2613 hws->funcs.dsc_pg_control(hws, odm_pipe->stream_res.dsc->inst, true); 2614 odm_pipe = odm_pipe->next_odm_pipe; 2615 } 2616 } 2617 } 2618 2619 void dcn20_enable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx) 2620 { 2621 struct dce_hwseq *hws = dc->hwseq; 2622 2623 if (pipe_ctx->stream_res.dsc) { 2624 struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe; 2625 2626 hws->funcs.dsc_pg_control(hws, 
pipe_ctx->stream_res.dsc->inst, false); 2627 while (odm_pipe) { 2628 hws->funcs.dsc_pg_control(hws, odm_pipe->stream_res.dsc->inst, false); 2629 odm_pipe = odm_pipe->next_odm_pipe; 2630 } 2631 } 2632 } 2633 2634 void dcn20_set_dmdata_attributes(struct pipe_ctx *pipe_ctx) 2635 { 2636 struct dc_dmdata_attributes attr = { 0 }; 2637 struct hubp *hubp = pipe_ctx->plane_res.hubp; 2638 2639 attr.dmdata_mode = DMDATA_HW_MODE; 2640 attr.dmdata_size = 2641 dc_is_hdmi_signal(pipe_ctx->stream->signal) ? 32 : 36; 2642 attr.address.quad_part = 2643 pipe_ctx->stream->dmdata_address.quad_part; 2644 attr.dmdata_dl_delta = 0; 2645 attr.dmdata_qos_mode = 0; 2646 attr.dmdata_qos_level = 0; 2647 attr.dmdata_repeat = 1; /* always repeat */ 2648 attr.dmdata_updated = 1; 2649 attr.dmdata_sw_data = NULL; 2650 2651 hubp->funcs->dmdata_set_attributes(hubp, &attr); 2652 } 2653 2654 void dcn20_init_vm_ctx( 2655 struct dce_hwseq *hws, 2656 struct dc *dc, 2657 struct dc_virtual_addr_space_config *va_config, 2658 int vmid) 2659 { 2660 (void)hws; 2661 struct dcn_hubbub_virt_addr_config config; 2662 2663 if (vmid == 0) { 2664 ASSERT(0); /* VMID cannot be 0 for vm context */ 2665 return; 2666 } 2667 2668 config.page_table_start_addr = va_config->page_table_start_addr; 2669 config.page_table_end_addr = va_config->page_table_end_addr; 2670 config.page_table_block_size = va_config->page_table_block_size_in_bytes; 2671 config.page_table_depth = va_config->page_table_depth; 2672 config.page_table_base_addr = va_config->page_table_base_addr; 2673 2674 dc->res_pool->hubbub->funcs->init_vm_ctx(dc->res_pool->hubbub, &config, vmid); 2675 } 2676 2677 int dcn20_init_sys_ctx(struct dce_hwseq *hws, struct dc *dc, struct dc_phy_addr_space_config *pa_config) 2678 { 2679 (void)hws; 2680 struct dcn_hubbub_phys_addr_config config; 2681 2682 config.system_aperture.fb_top = pa_config->system_aperture.fb_top; 2683 config.system_aperture.fb_offset = pa_config->system_aperture.fb_offset; 2684 config.system_aperture.fb_base 
= pa_config->system_aperture.fb_base; 2685 config.system_aperture.agp_top = pa_config->system_aperture.agp_top; 2686 config.system_aperture.agp_bot = pa_config->system_aperture.agp_bot; 2687 config.system_aperture.agp_base = pa_config->system_aperture.agp_base; 2688 config.gart_config.page_table_start_addr = pa_config->gart_config.page_table_start_addr; 2689 config.gart_config.page_table_end_addr = pa_config->gart_config.page_table_end_addr; 2690 config.gart_config.page_table_base_addr = pa_config->gart_config.page_table_base_addr; 2691 config.page_table_default_page_addr = pa_config->page_table_default_page_addr; 2692 2693 return dc->res_pool->hubbub->funcs->init_dchub_sys_ctx(dc->res_pool->hubbub, &config); 2694 } 2695 2696 static bool patch_address_for_sbs_tb_stereo( 2697 struct pipe_ctx *pipe_ctx, PHYSICAL_ADDRESS_LOC *addr) 2698 { 2699 struct dc_plane_state *plane_state = pipe_ctx->plane_state; 2700 bool sec_split = pipe_ctx->top_pipe && 2701 pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state; 2702 if (sec_split && plane_state->address.type == PLN_ADDR_TYPE_GRPH_STEREO && 2703 (pipe_ctx->stream->timing.timing_3d_format == 2704 TIMING_3D_FORMAT_SIDE_BY_SIDE || 2705 pipe_ctx->stream->timing.timing_3d_format == 2706 TIMING_3D_FORMAT_TOP_AND_BOTTOM)) { 2707 *addr = plane_state->address.grph_stereo.left_addr; 2708 plane_state->address.grph_stereo.left_addr = 2709 plane_state->address.grph_stereo.right_addr; 2710 return true; 2711 } 2712 2713 if (pipe_ctx->stream->view_format != VIEW_3D_FORMAT_NONE && 2714 plane_state->address.type != PLN_ADDR_TYPE_GRPH_STEREO) { 2715 plane_state->address.type = PLN_ADDR_TYPE_GRPH_STEREO; 2716 plane_state->address.grph_stereo.right_addr = 2717 plane_state->address.grph_stereo.left_addr; 2718 plane_state->address.grph_stereo.right_meta_addr = 2719 plane_state->address.grph_stereo.left_meta_addr; 2720 } 2721 return false; 2722 } 2723 2724 void dcn20_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx) 2725 { 2726 
bool addr_patched = false; 2727 PHYSICAL_ADDRESS_LOC addr; 2728 struct dc_plane_state *plane_state = pipe_ctx->plane_state; 2729 2730 if (plane_state == NULL) 2731 return; 2732 2733 addr_patched = patch_address_for_sbs_tb_stereo(pipe_ctx, &addr); 2734 2735 // Call Helper to track VMID use 2736 vm_helper_mark_vmid_used(dc->vm_helper, plane_state->address.vmid, pipe_ctx->plane_res.hubp->inst); 2737 2738 pipe_ctx->plane_res.hubp->funcs->hubp_program_surface_flip_and_addr( 2739 pipe_ctx->plane_res.hubp, 2740 &plane_state->address, 2741 plane_state->flip_immediate); 2742 2743 plane_state->status.requested_address = plane_state->address; 2744 2745 if (plane_state->flip_immediate) 2746 plane_state->status.current_address = plane_state->address; 2747 2748 if (addr_patched) 2749 pipe_ctx->plane_state->address.grph_stereo.left_addr = addr; 2750 } 2751 2752 void dcn20_unblank_stream(struct pipe_ctx *pipe_ctx, 2753 struct dc_link_settings *link_settings) 2754 { 2755 struct encoder_unblank_param params = {0}; 2756 struct dc_stream_state *stream = pipe_ctx->stream; 2757 struct dc_link *link = stream->link; 2758 struct dce_hwseq *hws = link->dc->hwseq; 2759 struct pipe_ctx *odm_pipe; 2760 bool is_two_pixels_per_container = 2761 pipe_ctx->stream_res.tg->funcs->is_two_pixels_per_container(&stream->timing); 2762 2763 params.opp_cnt = 1; 2764 2765 for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) { 2766 params.opp_cnt++; 2767 } 2768 /* only 3 items below are used by unblank */ 2769 params.timing = pipe_ctx->stream->timing; 2770 2771 params.link_settings.link_rate = link_settings->link_rate; 2772 2773 if (link->dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) { 2774 /* TODO - DP2.0 HW: Set ODM mode in dp hpo encoder here */ 2775 pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_unblank( 2776 pipe_ctx->stream_res.hpo_dp_stream_enc, 2777 pipe_ctx->stream_res.tg->inst); 2778 } else if (dc_is_dp_signal(pipe_ctx->stream->signal)) { 2779 if 
(is_two_pixels_per_container || params.opp_cnt > 1) 2780 params.timing.pix_clk_100hz /= 2; 2781 if (pipe_ctx->stream_res.stream_enc->funcs->dp_set_odm_combine) 2782 pipe_ctx->stream_res.stream_enc->funcs->dp_set_odm_combine( 2783 pipe_ctx->stream_res.stream_enc, params.opp_cnt > 1); 2784 pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(link, pipe_ctx->stream_res.stream_enc, ¶ms); 2785 } 2786 2787 if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) { 2788 hws->funcs.edp_backlight_control(link, true); 2789 } 2790 } 2791 2792 void dcn20_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx) 2793 { 2794 struct timing_generator *tg = pipe_ctx->stream_res.tg; 2795 int start_line = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx); 2796 2797 if (start_line < 0) 2798 start_line = 0; 2799 2800 if (tg->funcs->setup_vertical_interrupt2) 2801 tg->funcs->setup_vertical_interrupt2(tg, start_line); 2802 } 2803 2804 void dcn20_reset_back_end_for_pipe( 2805 struct dc *dc, 2806 struct pipe_ctx *pipe_ctx, 2807 struct dc_state *context) 2808 { 2809 (void)context; 2810 struct dc_link *link = pipe_ctx->stream->link; 2811 const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res); 2812 struct dccg *dccg = dc->res_pool->dccg; 2813 struct dtbclk_dto_params dto_params = {0}; 2814 2815 DC_LOGGER_INIT(dc->ctx->logger); 2816 if (pipe_ctx->stream_res.stream_enc == NULL) { 2817 pipe_ctx->stream = NULL; 2818 return; 2819 } 2820 2821 /* DPMS may already disable or */ 2822 /* dpms_off status is incorrect due to fastboot 2823 * feature. When system resume from S4 with second 2824 * screen only, the dpms_off would be true but 2825 * VBIOS lit up eDP, so check link status too. 
2826 */ 2827 if (!pipe_ctx->stream->dpms_off || link->link_status.link_active) 2828 dc->link_srv->set_dpms_off(pipe_ctx); 2829 else if (pipe_ctx->stream_res.audio) 2830 dc->hwss.disable_audio_stream(pipe_ctx); 2831 2832 /* free acquired resources */ 2833 if (pipe_ctx->stream_res.audio) { 2834 /*disable az_endpoint*/ 2835 pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio); 2836 2837 /*free audio*/ 2838 if (dc->caps.dynamic_audio == true) { 2839 /*we have to dynamic arbitrate the audio endpoints*/ 2840 /*we free the resource, need reset is_audio_acquired*/ 2841 update_audio_usage(&dc->current_state->res_ctx, dc->res_pool, 2842 pipe_ctx->stream_res.audio, false); 2843 pipe_ctx->stream_res.audio = NULL; 2844 } 2845 } 2846 2847 /* by upper caller loop, parent pipe: pipe0, will be reset last. 2848 * back end share by all pipes and will be disable only when disable 2849 * parent pipe. 2850 */ 2851 if (pipe_ctx->top_pipe == NULL) { 2852 2853 dc->hwss.set_abm_immediate_disable(pipe_ctx); 2854 2855 pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg); 2856 2857 pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false); 2858 if (pipe_ctx->stream_res.tg->funcs->set_odm_bypass) 2859 pipe_ctx->stream_res.tg->funcs->set_odm_bypass( 2860 pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing); 2861 2862 set_drr_and_clear_adjust_pending(pipe_ctx, pipe_ctx->stream, NULL); 2863 /* TODO - convert symclk_ref_cnts for otg to a bit map to solve 2864 * the case where the same symclk is shared across multiple otg 2865 * instances 2866 */ 2867 if (dc_is_tmds_signal(pipe_ctx->stream->signal)) 2868 link->phy_state.symclk_ref_cnts.otg = 0; 2869 if (link->phy_state.symclk_state == SYMCLK_ON_TX_OFF) { 2870 link_hwss->disable_link_output(link, 2871 &pipe_ctx->link_res, pipe_ctx->stream->signal); 2872 link->phy_state.symclk_state = SYMCLK_OFF_TX_OFF; 2873 } 2874 if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx) && dccg 2875 && 
dc->ctx->dce_version >= DCN_VERSION_3_5) { 2876 dto_params.otg_inst = pipe_ctx->stream_res.tg->inst; 2877 dto_params.timing = &pipe_ctx->stream->timing; 2878 if (dccg && dccg->funcs->set_dtbclk_dto) 2879 dccg->funcs->set_dtbclk_dto(dccg, &dto_params); 2880 } 2881 } 2882 2883 /* 2884 * In case of a dangling plane, setting this to NULL unconditionally 2885 * causes failures during reset hw ctx where, if stream is NULL, 2886 * it is expected that the pipe_ctx pointers to pipes and plane are NULL. 2887 */ 2888 pipe_ctx->stream = NULL; 2889 pipe_ctx->top_pipe = NULL; 2890 pipe_ctx->bottom_pipe = NULL; 2891 pipe_ctx->next_odm_pipe = NULL; 2892 pipe_ctx->prev_odm_pipe = NULL; 2893 DC_LOG_DEBUG("Reset back end for pipe %d, tg:%d\n", 2894 pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst); 2895 } 2896 2897 void dcn20_reset_hw_ctx_wrap( 2898 struct dc *dc, 2899 struct dc_state *context) 2900 { 2901 int i; 2902 struct dce_hwseq *hws = dc->hwseq; 2903 2904 /* Reset Back End*/ 2905 for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) { 2906 struct pipe_ctx *pipe_ctx_old = 2907 &dc->current_state->res_ctx.pipe_ctx[i]; 2908 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; 2909 2910 if (!pipe_ctx_old->stream) 2911 continue; 2912 2913 if (pipe_ctx_old->top_pipe || pipe_ctx_old->prev_odm_pipe) 2914 continue; 2915 2916 if (!pipe_ctx->stream || 2917 pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) { 2918 struct clock_source *old_clk = pipe_ctx_old->clock_source; 2919 2920 dcn20_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state); 2921 if (hws->funcs.enable_stream_gating) 2922 hws->funcs.enable_stream_gating(dc, pipe_ctx_old); 2923 if (old_clk) 2924 old_clk->funcs->cs_power_down(old_clk); 2925 } 2926 } 2927 } 2928 2929 void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) 2930 { 2931 struct hubp *hubp = pipe_ctx->plane_res.hubp; 2932 struct mpcc_blnd_cfg blnd_cfg = {0}; 2933 bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha; 2934 int mpcc_id; 2935 
struct mpcc *new_mpcc; 2936 struct mpc *mpc = dc->res_pool->mpc; 2937 struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params); 2938 2939 blnd_cfg.overlap_only = false; 2940 blnd_cfg.global_gain = 0xff; 2941 2942 if (per_pixel_alpha) { 2943 blnd_cfg.pre_multiplied_alpha = pipe_ctx->plane_state->pre_multiplied_alpha; 2944 if (pipe_ctx->plane_state->global_alpha) { 2945 blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN; 2946 blnd_cfg.global_gain = pipe_ctx->plane_state->global_alpha_value; 2947 } else { 2948 blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA; 2949 } 2950 } else { 2951 blnd_cfg.pre_multiplied_alpha = false; 2952 blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA; 2953 } 2954 2955 if (pipe_ctx->plane_state->global_alpha) 2956 blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value; 2957 else 2958 blnd_cfg.global_alpha = 0xff; 2959 2960 blnd_cfg.background_color_bpc = 4; 2961 blnd_cfg.bottom_gain_mode = 0; 2962 blnd_cfg.top_gain = 0x1f000; 2963 blnd_cfg.bottom_inside_gain = 0x1f000; 2964 blnd_cfg.bottom_outside_gain = 0x1f000; 2965 2966 if (pipe_ctx->plane_state->format 2967 == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA) 2968 blnd_cfg.pre_multiplied_alpha = false; 2969 2970 /* 2971 * TODO: remove hack 2972 * Note: currently there is a bug in init_hw such that 2973 * on resume from hibernate, BIOS sets up MPCC0, and 2974 * we do mpcc_remove but the mpcc cannot go to idle 2975 * after remove. This cause us to pick mpcc1 here, 2976 * which causes a pstate hang for yet unknown reason. 
2977 */ 2978 mpcc_id = hubp->inst; 2979 2980 /* If there is no full update, don't need to touch MPC tree*/ 2981 if (!pipe_ctx->plane_state->update_flags.bits.full_update && 2982 !pipe_ctx->update_flags.bits.mpcc) { 2983 mpc->funcs->update_blending(mpc, &blnd_cfg, mpcc_id); 2984 dc->hwss.update_visual_confirm_color(dc, pipe_ctx, mpcc_id); 2985 return; 2986 } 2987 2988 /* check if this MPCC is already being used */ 2989 new_mpcc = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, mpcc_id); 2990 /* remove MPCC if being used */ 2991 if (new_mpcc != NULL) 2992 mpc->funcs->remove_mpcc(mpc, mpc_tree_params, new_mpcc); 2993 else 2994 if (dc->debug.sanity_checks) 2995 mpc->funcs->assert_mpcc_idle_before_connect( 2996 dc->res_pool->mpc, mpcc_id); 2997 2998 /* Call MPC to insert new plane */ 2999 new_mpcc = mpc->funcs->insert_plane(dc->res_pool->mpc, 3000 mpc_tree_params, 3001 &blnd_cfg, 3002 NULL, 3003 NULL, 3004 hubp->inst, 3005 mpcc_id); 3006 dc->hwss.update_visual_confirm_color(dc, pipe_ctx, mpcc_id); 3007 3008 ASSERT(new_mpcc != NULL); 3009 hubp->opp_id = pipe_ctx->stream_res.opp->inst; 3010 hubp->mpcc_id = mpcc_id; 3011 } 3012 3013 void dcn20_enable_stream(struct pipe_ctx *pipe_ctx) 3014 { 3015 enum dc_lane_count lane_count = 3016 pipe_ctx->stream->link->cur_link_settings.lane_count; 3017 3018 struct dc_crtc_timing *timing = &pipe_ctx->stream->timing; 3019 struct dc_link *link = pipe_ctx->stream->link; 3020 3021 uint32_t active_total_with_borders; 3022 uint32_t early_control = 0; 3023 struct timing_generator *tg = pipe_ctx->stream_res.tg; 3024 const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res); 3025 struct dc *dc = pipe_ctx->stream->ctx->dc; 3026 struct dtbclk_dto_params dto_params = {0}; 3027 struct dccg *dccg = dc->res_pool->dccg; 3028 enum phyd32clk_clock_source phyd32clk; 3029 int dp_hpo_inst; 3030 3031 struct link_encoder *link_enc = pipe_ctx->link_res.dio_link_enc; 3032 struct stream_encoder *stream_enc = pipe_ctx->stream_res.stream_enc; 3033 
3034 if (!dc->config.unify_link_enc_assignment) 3035 link_enc = link_enc_cfg_get_link_enc(link); 3036 3037 if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) { 3038 dto_params.otg_inst = tg->inst; 3039 dto_params.pixclk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10; 3040 dto_params.num_odm_segments = get_odm_segment_count(pipe_ctx); 3041 dto_params.timing = &pipe_ctx->stream->timing; 3042 dto_params.ref_dtbclk_khz = dc->clk_mgr->funcs->get_dtb_ref_clk_frequency(dc->clk_mgr); 3043 dccg->funcs->set_dtbclk_dto(dccg, &dto_params); 3044 dp_hpo_inst = pipe_ctx->stream_res.hpo_dp_stream_enc->inst; 3045 dccg->funcs->set_dpstreamclk(dccg, DTBCLK0, tg->inst, dp_hpo_inst); 3046 3047 phyd32clk = get_phyd32clk_src(link); 3048 if (link->cur_link_settings.link_rate == LINK_RATE_UNKNOWN) { 3049 dccg->funcs->disable_symclk32_se(dccg, dp_hpo_inst); 3050 } else { 3051 dccg->funcs->enable_symclk32_se(dccg, dp_hpo_inst, phyd32clk); 3052 } 3053 } else { 3054 if (dccg->funcs->enable_symclk_se && link_enc) { 3055 if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA 3056 && link->cur_link_settings.link_rate == LINK_RATE_UNKNOWN 3057 && !link->link_status.link_active) { 3058 if (dccg->funcs->disable_symclk_se) 3059 dccg->funcs->disable_symclk_se(dccg, stream_enc->stream_enc_inst, 3060 link_enc->transmitter - TRANSMITTER_UNIPHY_A); 3061 } else 3062 dccg->funcs->enable_symclk_se(dccg, stream_enc->stream_enc_inst, 3063 link_enc->transmitter - TRANSMITTER_UNIPHY_A); 3064 } 3065 } 3066 3067 if (dc->res_pool->dccg->funcs->set_pixel_rate_div) 3068 dc->res_pool->dccg->funcs->set_pixel_rate_div( 3069 dc->res_pool->dccg, 3070 pipe_ctx->stream_res.tg->inst, 3071 pipe_ctx->pixel_rate_divider.div_factor1, 3072 pipe_ctx->pixel_rate_divider.div_factor2); 3073 3074 link_hwss->setup_stream_encoder(pipe_ctx); 3075 3076 if (pipe_ctx->plane_state && pipe_ctx->plane_state->flip_immediate != 1) { 3077 if (dc->hwss.program_dmdata_engine) 3078 dc->hwss.program_dmdata_engine(pipe_ctx); 3079 } 3080 3081 
dc->hwss.update_info_frame(pipe_ctx); 3082 3083 if (dc_is_dp_signal(pipe_ctx->stream->signal)) 3084 dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME); 3085 3086 /* enable early control to avoid corruption on DP monitor*/ 3087 active_total_with_borders = 3088 timing->h_addressable 3089 + timing->h_border_left 3090 + timing->h_border_right; 3091 3092 if (lane_count != 0) 3093 early_control = active_total_with_borders % lane_count; 3094 3095 if (early_control == 0) 3096 early_control = lane_count; 3097 3098 tg->funcs->set_early_control(tg, early_control); 3099 } 3100 3101 void dcn20_program_dmdata_engine(struct pipe_ctx *pipe_ctx) 3102 { 3103 struct dc_stream_state *stream = pipe_ctx->stream; 3104 struct hubp *hubp = pipe_ctx->plane_res.hubp; 3105 bool enable = false; 3106 struct stream_encoder *stream_enc = pipe_ctx->stream_res.stream_enc; 3107 enum dynamic_metadata_mode mode = dc_is_dp_signal(stream->signal) 3108 ? dmdata_dp 3109 : dmdata_hdmi; 3110 3111 /* if using dynamic meta, don't set up generic infopackets */ 3112 if (pipe_ctx->stream->dmdata_address.quad_part != 0) { 3113 pipe_ctx->stream_res.encoder_info_frame.hdrsmd.valid = false; 3114 enable = true; 3115 } 3116 3117 if (!hubp) 3118 return; 3119 3120 if (!stream_enc || !stream_enc->funcs->set_dynamic_metadata) 3121 return; 3122 3123 stream_enc->funcs->set_dynamic_metadata(stream_enc, enable, 3124 hubp->inst, mode); 3125 } 3126 3127 void dcn20_fpga_init_hw(struct dc *dc) 3128 { 3129 int i, j; 3130 struct dce_hwseq *hws = dc->hwseq; 3131 struct resource_pool *res_pool = dc->res_pool; 3132 struct dc_state *context = dc->current_state; 3133 3134 if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks) 3135 dc->clk_mgr->funcs->init_clocks(dc->clk_mgr); 3136 3137 // Initialize the dccg 3138 if (res_pool->dccg->funcs->dccg_init) 3139 res_pool->dccg->funcs->dccg_init(res_pool->dccg); 3140 3141 //Enable ability to power gate / don't force power on permanently 3142 if 
(hws->funcs.enable_power_gating_plane) 3143 hws->funcs.enable_power_gating_plane(hws, true); 3144 3145 // Specific to FPGA dccg and registers 3146 REG_WRITE(RBBMIF_TIMEOUT_DIS, 0xFFFFFFFF); 3147 REG_WRITE(RBBMIF_TIMEOUT_DIS_2, 0xFFFFFFFF); 3148 3149 dcn10_hubbub_global_timer_enable(dc->res_pool->hubbub, true, 2); 3150 3151 if (hws->funcs.dccg_init) 3152 hws->funcs.dccg_init(hws); 3153 3154 if (dc->res_pool->dccg && dc->res_pool->dccg->funcs && dc->res_pool->dccg->funcs->refclk_setup) 3155 dc->res_pool->dccg->funcs->refclk_setup(dc->res_pool->dccg); 3156 // 3157 3158 3159 /* Blank pixel data with OPP DPG */ 3160 for (i = 0; i < dc->res_pool->timing_generator_count; i++) { 3161 struct timing_generator *tg = dc->res_pool->timing_generators[i]; 3162 3163 if (tg->funcs->is_tg_enabled(tg)) 3164 dcn20_init_blank(dc, tg); 3165 } 3166 3167 for (i = 0; i < res_pool->timing_generator_count; i++) { 3168 struct timing_generator *tg = dc->res_pool->timing_generators[i]; 3169 3170 if (tg->funcs->is_tg_enabled(tg)) 3171 tg->funcs->lock(tg); 3172 } 3173 3174 for (i = 0; i < dc->res_pool->pipe_count; i++) { 3175 struct dpp *dpp = res_pool->dpps[i]; 3176 3177 dpp->funcs->dpp_reset(dpp); 3178 } 3179 3180 /* Reset all MPCC muxes */ 3181 res_pool->mpc->funcs->mpc_init(res_pool->mpc); 3182 3183 /* initialize OPP mpc_tree parameter */ 3184 for (i = 0; i < dc->res_pool->pipe_count; i++) { 3185 res_pool->opps[i]->mpc_tree_params.opp_id = res_pool->opps[i]->inst; 3186 res_pool->opps[i]->mpc_tree_params.opp_list = NULL; 3187 for (j = 0; j < MAX_PIPES; j++) 3188 res_pool->opps[i]->mpcc_disconnect_pending[j] = false; 3189 } 3190 3191 for (i = 0; i < dc->res_pool->pipe_count; i++) { 3192 struct timing_generator *tg = dc->res_pool->timing_generators[i]; 3193 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; 3194 struct hubp *hubp = dc->res_pool->hubps[i]; 3195 struct dpp *dpp = dc->res_pool->dpps[i]; 3196 3197 pipe_ctx->stream_res.tg = tg; 3198 pipe_ctx->pipe_idx = i; 3199 3200 
pipe_ctx->plane_res.hubp = hubp; 3201 pipe_ctx->plane_res.dpp = dpp; 3202 pipe_ctx->plane_res.mpcc_inst = dpp->inst; 3203 hubp->mpcc_id = dpp->inst; 3204 hubp->opp_id = OPP_ID_INVALID; 3205 hubp->power_gated = false; 3206 pipe_ctx->stream_res.opp = NULL; 3207 3208 hubp->funcs->hubp_init(hubp); 3209 3210 //dc->res_pool->opps[i]->mpc_tree_params.opp_id = dc->res_pool->opps[i]->inst; 3211 //dc->res_pool->opps[i]->mpc_tree_params.opp_list = NULL; 3212 dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true; 3213 pipe_ctx->stream_res.opp = dc->res_pool->opps[i]; 3214 /*to do*/ 3215 hws->funcs.plane_atomic_disconnect(dc, context, pipe_ctx); 3216 } 3217 3218 /* initialize DWB pointer to MCIF_WB */ 3219 for (i = 0; i < res_pool->res_cap->num_dwb; i++) 3220 res_pool->dwbc[i]->mcif = res_pool->mcif_wb[i]; 3221 3222 for (i = 0; i < dc->res_pool->timing_generator_count; i++) { 3223 struct timing_generator *tg = dc->res_pool->timing_generators[i]; 3224 3225 if (tg->funcs->is_tg_enabled(tg)) 3226 tg->funcs->unlock(tg); 3227 } 3228 3229 for (i = 0; i < dc->res_pool->pipe_count; i++) { 3230 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; 3231 3232 dc->hwss.disable_plane(dc, context, pipe_ctx); 3233 3234 pipe_ctx->stream_res.tg = NULL; 3235 pipe_ctx->plane_res.hubp = NULL; 3236 } 3237 3238 for (i = 0; i < dc->res_pool->timing_generator_count; i++) { 3239 struct timing_generator *tg = dc->res_pool->timing_generators[i]; 3240 3241 tg->funcs->tg_init(tg); 3242 } 3243 3244 if (dc->res_pool->hubbub->funcs->init_crb) 3245 dc->res_pool->hubbub->funcs->init_crb(dc->res_pool->hubbub); 3246 } 3247 3248 void dcn20_set_disp_pattern_generator(const struct dc *dc, 3249 struct pipe_ctx *pipe_ctx, 3250 enum controller_dp_test_pattern test_pattern, 3251 enum controller_dp_color_space color_space, 3252 enum dc_color_depth color_depth, 3253 const struct tg_color *solid_color, 3254 int width, int height, int offset) 3255 { 3256 (void)dc; 3257 
pipe_ctx->stream_res.opp->funcs->opp_set_disp_pattern_generator(pipe_ctx->stream_res.opp, test_pattern, 3258 color_space, color_depth, solid_color, width, height, offset); 3259 } 3260